1 //===- InstCombineAndOrXor.cpp --------------------------------------------===//
2 //
3 // The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 // This file implements the visitAnd, visitOr, and visitXor functions.
11 //
12 //===----------------------------------------------------------------------===//
13
14 #include "InstCombine.h"
15 #include "llvm/Intrinsics.h"
16 #include "llvm/Analysis/InstructionSimplify.h"
17 #include "llvm/Transforms/Utils/CmpInstAnalysis.h"
18 #include "llvm/Support/ConstantRange.h"
19 #include "llvm/Support/PatternMatch.h"
20 using namespace llvm;
21 using namespace PatternMatch;
22
23
24 /// AddOne - Add one to a ConstantInt.
25 static Constant *AddOne(Constant *C) {
26 return ConstantExpr::getAdd(C, ConstantInt::get(C->getType(), 1));
27 }
28 /// SubOne - Subtract one from a ConstantInt.
29 static Constant *SubOne(ConstantInt *C) {
30 return ConstantInt::get(C->getContext(), C->getValue()-1);
31 }
32
33 /// isFreeToInvert - Return true if the specified value is free to invert (apply
34 /// ~ to). This happens in cases where the ~ can be eliminated.
35 static inline bool isFreeToInvert(Value *V) {
36 // ~(~(X)) -> X.
37 if (BinaryOperator::isNot(V))
38 return true;
39
40 // Constants can be considered to be not'ed values.
41 if (isa<ConstantInt>(V))
42 return true;
43
44 // Compares can be inverted if they have a single use.
45 if (CmpInst *CI = dyn_cast<CmpInst>(V))
46 return CI->hasOneUse();
47
48 return false;
49 }
50
51 static inline Value *dyn_castNotVal(Value *V) {
52 // If this is not(not(x)) don't return that this is a not: we want the two
53 // not's to be folded first.
54 if (BinaryOperator::isNot(V)) {
55 Value *Operand = BinaryOperator::getNotArgument(V);
56 if (!isFreeToInvert(Operand))
57 return Operand;
58 }
59
60 // Constants can be considered to be not'ed values...
61 if (ConstantInt *C = dyn_cast<ConstantInt>(V))
62 return ConstantInt::get(C->getType(), ~C->getValue());
63 return 0;
64 }
65
66 /// getFCmpCode - Similar to getICmpCode but for FCmpInst. This encodes an fcmp
67 /// predicate into a three-bit mask. It also returns, by reference, whether the
68 /// predicate is ordered.
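/// Worked example (illustrative): FCMP_OGE encodes to 3 (binary 011, the
/// "equal" and "greater" bits) with isOrdered = true, while FCMP_ULT encodes
/// to 4 (binary 100, the "less" bit) with isOrdered = false.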
69 static unsigned getFCmpCode(FCmpInst::Predicate CC, bool &isOrdered) {
70 isOrdered = false;
71 switch (CC) {
72 case FCmpInst::FCMP_ORD: isOrdered = true; return 0; // 000
73 case FCmpInst::FCMP_UNO: return 0; // 000
74 case FCmpInst::FCMP_OGT: isOrdered = true; return 1; // 001
75 case FCmpInst::FCMP_UGT: return 1; // 001
76 case FCmpInst::FCMP_OEQ: isOrdered = true; return 2; // 010
77 case FCmpInst::FCMP_UEQ: return 2; // 010
78 case FCmpInst::FCMP_OGE: isOrdered = true; return 3; // 011
79 case FCmpInst::FCMP_UGE: return 3; // 011
80 case FCmpInst::FCMP_OLT: isOrdered = true; return 4; // 100
81 case FCmpInst::FCMP_ULT: return 4; // 100
82 case FCmpInst::FCMP_ONE: isOrdered = true; return 5; // 101
83 case FCmpInst::FCMP_UNE: return 5; // 101
84 case FCmpInst::FCMP_OLE: isOrdered = true; return 6; // 110
85 case FCmpInst::FCMP_ULE: return 6; // 110
86 // True -> 7
87 default:
88 // Not expecting FCMP_FALSE and FCMP_TRUE;
89 llvm_unreachable("Unexpected FCmp predicate!");
90 }
91 }
92
93 /// getNewICmpValue - This is the complement of getICmpCode, which turns an
94 /// opcode and two operands into either a constant true or false, or a brand
95 /// new ICmp instruction. The sign is passed in to determine which kind
96 /// of predicate to use in the new icmp instruction.
97 static Value *getNewICmpValue(bool Sign, unsigned Code, Value *LHS, Value *RHS,
98 InstCombiner::BuilderTy *Builder) {
99 ICmpInst::Predicate NewPred;
100 if (Value *NewConstant = getICmpValue(Sign, Code, LHS, RHS, NewPred))
101 return NewConstant;
102 return Builder->CreateICmp(NewPred, LHS, RHS);
103 }
104
105 /// getFCmpValue - This is the complement of getFCmpCode: it turns a predicate
106 /// code and two operands into either a constant or a new FCmp instruction.
107 /// isordered determines which kind of predicate to use in the new instruction.
108 static Value *getFCmpValue(bool isordered, unsigned code,
109 Value *LHS, Value *RHS,
110 InstCombiner::BuilderTy *Builder) {
111 CmpInst::Predicate Pred;
112 switch (code) {
113 default: llvm_unreachable("Illegal FCmp code!");
114 case 0: Pred = isordered ? FCmpInst::FCMP_ORD : FCmpInst::FCMP_UNO; break;
115 case 1: Pred = isordered ? FCmpInst::FCMP_OGT : FCmpInst::FCMP_UGT; break;
116 case 2: Pred = isordered ? FCmpInst::FCMP_OEQ : FCmpInst::FCMP_UEQ; break;
117 case 3: Pred = isordered ? FCmpInst::FCMP_OGE : FCmpInst::FCMP_UGE; break;
118 case 4: Pred = isordered ? FCmpInst::FCMP_OLT : FCmpInst::FCMP_ULT; break;
119 case 5: Pred = isordered ? FCmpInst::FCMP_ONE : FCmpInst::FCMP_UNE; break;
120 case 6: Pred = isordered ? FCmpInst::FCMP_OLE : FCmpInst::FCMP_ULE; break;
121 case 7:
122 if (!isordered) return ConstantInt::getTrue(LHS->getContext());
123 Pred = FCmpInst::FCMP_ORD; break;
124 }
125 return Builder->CreateFCmp(Pred, LHS, RHS);
126 }
127
128 // OptAndOp - This handles expressions of the form ((val OP C1) & C2), where
129 // the Op parameter is 'OP', OpRHS is 'C1', and AndRHS is 'C2'. Op is
130 // guaranteed to be a binary operator.
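// Worked example (illustrative): with OP = xor, C1 = 0xF0 and C2 = 0x0F,
// ((X ^ 0xF0) & 0x0F) becomes (X & 0x0F) ^ (0xF0 & 0x0F) = (X & 0x0F) ^ 0,
// which then simplifies to X & 0x0F.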
131 Instruction *InstCombiner::OptAndOp(Instruction *Op,
132 ConstantInt *OpRHS,
133 ConstantInt *AndRHS,
134 BinaryOperator &TheAnd) {
135 Value *X = Op->getOperand(0);
136 Constant *Together = 0;
137 if (!Op->isShift())
138 Together = ConstantExpr::getAnd(AndRHS, OpRHS);
139
140 switch (Op->getOpcode()) {
141 case Instruction::Xor:
142 if (Op->hasOneUse()) {
143 // (X ^ C1) & C2 --> (X & C2) ^ (C1&C2)
144 Value *And = Builder->CreateAnd(X, AndRHS);
145 And->takeName(Op);
146 return BinaryOperator::CreateXor(And, Together);
147 }
148 break;
149 case Instruction::Or:
150 if (Op->hasOneUse()){
151 if (Together != OpRHS) {
152 // (X | C1) & C2 --> (X | (C1&C2)) & C2
153 Value *Or = Builder->CreateOr(X, Together);
154 Or->takeName(Op);
155 return BinaryOperator::CreateAnd(Or, AndRHS);
156 }
157
158 ConstantInt *TogetherCI = dyn_cast<ConstantInt>(Together);
159 if (TogetherCI && !TogetherCI->isZero()){
160 // (X | C1) & C2 --> (X & (C2^(C1&C2))) | C1
161 // NOTE: This reduces the number of bits set in the & mask, which
162 // can expose opportunities for store narrowing.
163 Together = ConstantExpr::getXor(AndRHS, Together);
164 Value *And = Builder->CreateAnd(X, Together);
165 And->takeName(Op);
166 return BinaryOperator::CreateOr(And, OpRHS);
167 }
168 }
169
170 break;
171 case Instruction::Add:
172 if (Op->hasOneUse()) {
173 // Adding a one to a single bit bit-field should be turned into an XOR
174 // of the bit. First thing to check is to see if this AND is with a
175 // single bit constant.
176 const APInt &AndRHSV = cast<ConstantInt>(AndRHS)->getValue();
177
178 // If there is only one bit set.
179 if (AndRHSV.isPowerOf2()) {
180 // Ok, at this point, we know that we are masking the result of the
181 // ADD down to exactly one bit. If the constant we are adding has
182 // no bits set below this bit, then we can eliminate the ADD.
183 const APInt& AddRHS = cast<ConstantInt>(OpRHS)->getValue();
184
185 // Check to see if any bits below the one bit set in AndRHSV are set.
186 if ((AddRHS & (AndRHSV-1)) == 0) {
187 // If not, the only thing that can affect the output of the AND is
188 // the bit specified by AndRHSV. If that bit is set, the effect of
189 // the XOR is to toggle the bit. If it is clear, then the ADD has
190 // no effect.
191 if ((AddRHS & AndRHSV) == 0) { // Bit is not set, noop
192 TheAnd.setOperand(0, X);
193 return &TheAnd;
194 } else {
195 // Pull the XOR out of the AND.
196 Value *NewAnd = Builder->CreateAnd(X, AndRHS);
197 NewAnd->takeName(Op);
198 return BinaryOperator::CreateXor(NewAnd, AndRHS);
199 }
200 }
201 }
202 }
203 break;
204
205 case Instruction::Shl: {
206 // We know that the AND will not produce any of the bits shifted in, so if
207 // the anded constant includes them, clear them now!
208 //
209 uint32_t BitWidth = AndRHS->getType()->getBitWidth();
210 uint32_t OpRHSVal = OpRHS->getLimitedValue(BitWidth);
211 APInt ShlMask(APInt::getHighBitsSet(BitWidth, BitWidth-OpRHSVal));
212 ConstantInt *CI = ConstantInt::get(AndRHS->getContext(),
213 AndRHS->getValue() & ShlMask);
214
215 if (CI->getValue() == ShlMask)
216 // Masking out bits that the shift already masks.
217 return ReplaceInstUsesWith(TheAnd, Op); // No need for the and.
218
219 if (CI != AndRHS) { // Reducing bits set in and.
220 TheAnd.setOperand(1, CI);
221 return &TheAnd;
222 }
223 break;
224 }
225 case Instruction::LShr: {
226 // We know that the AND will not produce any of the bits shifted in, so if
227 // the anded constant includes them, clear them now! This only applies to
228 // unsigned shifts, because a signed shr may bring in set bits!
229 //
230 uint32_t BitWidth = AndRHS->getType()->getBitWidth();
231 uint32_t OpRHSVal = OpRHS->getLimitedValue(BitWidth);
232 APInt ShrMask(APInt::getLowBitsSet(BitWidth, BitWidth - OpRHSVal));
233 ConstantInt *CI = ConstantInt::get(Op->getContext(),
234 AndRHS->getValue() & ShrMask);
235
236 if (CI->getValue() == ShrMask)
237 // Masking out bits that the shift already masks.
238 return ReplaceInstUsesWith(TheAnd, Op);
239
240 if (CI != AndRHS) {
241 TheAnd.setOperand(1, CI); // Reduce bits set in and cst.
242 return &TheAnd;
243 }
244 break;
245 }
246 case Instruction::AShr:
247 // Signed shr.
248 // See if this is shifting in some sign extension, then masking it out
249 // with an and.
250 if (Op->hasOneUse()) {
251 uint32_t BitWidth = AndRHS->getType()->getBitWidth();
252 uint32_t OpRHSVal = OpRHS->getLimitedValue(BitWidth);
253 APInt ShrMask(APInt::getLowBitsSet(BitWidth, BitWidth - OpRHSVal));
254 Constant *C = ConstantInt::get(Op->getContext(),
255 AndRHS->getValue() & ShrMask);
256 if (C == AndRHS) { // Masking out bits shifted in.
257 // (Val ashr C1) & C2 -> (Val lshr C1) & C2
258 // Make the argument unsigned.
259 Value *ShVal = Op->getOperand(0);
260 ShVal = Builder->CreateLShr(ShVal, OpRHS, Op->getName());
261 return BinaryOperator::CreateAnd(ShVal, AndRHS, TheAnd.getName());
262 }
263 }
264 break;
265 }
266 return 0;
267 }
268
269
270 /// InsertRangeTest - Emit a computation of: (V >= Lo && V < Hi) if Inside is
271 /// true, otherwise (V < Lo || V >= Hi). In practice, we emit the more efficient
272 /// (V-Lo) <u Hi-Lo. This method expects that Lo <= Hi. isSigned indicates
273 /// whether to treat V, Lo, and Hi as signed or not. New instructions are
274 /// inserted via the Builder.
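/// Worked example (illustrative): the unsigned inside-range test
/// 5 <= V && V < 10 is emitted as (V - 5) <u 5, a single unsigned compare,
/// since any V below 5 wraps around to a large unsigned value.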
275 Value *InstCombiner::InsertRangeTest(Value *V, Constant *Lo, Constant *Hi,
276 bool isSigned, bool Inside) {
277 assert(cast<ConstantInt>(ConstantExpr::getICmp((isSigned ?
278 ICmpInst::ICMP_SLE:ICmpInst::ICMP_ULE), Lo, Hi))->getZExtValue() &&
279 "Lo is not <= Hi in range emission code!");
280
281 if (Inside) {
282 if (Lo == Hi) // Trivially false.
283 return ConstantInt::getFalse(V->getContext());
284
285 // V >= Min && V < Hi --> V < Hi
286 if (cast<ConstantInt>(Lo)->isMinValue(isSigned)) {
287 ICmpInst::Predicate pred = (isSigned ?
288 ICmpInst::ICMP_SLT : ICmpInst::ICMP_ULT);
289 return Builder->CreateICmp(pred, V, Hi);
290 }
291
292 // Emit V-Lo <u Hi-Lo
293 Constant *NegLo = ConstantExpr::getNeg(Lo);
294 Value *Add = Builder->CreateAdd(V, NegLo, V->getName()+".off");
295 Constant *UpperBound = ConstantExpr::getAdd(NegLo, Hi);
296 return Builder->CreateICmpULT(Add, UpperBound);
297 }
298
299 if (Lo == Hi) // Trivially true.
300 return ConstantInt::getTrue(V->getContext());
301
302 // V < Min || V >= Hi -> V > Hi-1
303 Hi = SubOne(cast<ConstantInt>(Hi));
304 if (cast<ConstantInt>(Lo)->isMinValue(isSigned)) {
305 ICmpInst::Predicate pred = (isSigned ?
306 ICmpInst::ICMP_SGT : ICmpInst::ICMP_UGT);
307 return Builder->CreateICmp(pred, V, Hi);
308 }
309
310 // Emit V-Lo >u Hi-1-Lo
311 // Note that Hi has already had one subtracted from it, above.
312 ConstantInt *NegLo = cast<ConstantInt>(ConstantExpr::getNeg(Lo));
313 Value *Add = Builder->CreateAdd(V, NegLo, V->getName()+".off");
314 Constant *LowerBound = ConstantExpr::getAdd(NegLo, Hi);
315 return Builder->CreateICmpUGT(Add, LowerBound);
316 }
317
318 // isRunOfOnes - Returns true iff Val consists of one contiguous run of 1s with
319 // any number of 0s on either side. The 1s are allowed to wrap from LSB to
320 // MSB, so 0x0000FFF0, 0x0000FFFF, and 0xFF0000FF are all runs. 0x0F0F0000 is
321 // not, since all 1s are not contiguous.
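// Worked example (illustrative): for a 32-bit Val of 0x0000FF00 the run covers
// bits 8..15, so MB is set to 9 and ME to 16 (the 1-based positions of the
// lowest and highest set bits).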
322 static bool isRunOfOnes(ConstantInt *Val, uint32_t &MB, uint32_t &ME) {
323 const APInt& V = Val->getValue();
324 uint32_t BitWidth = Val->getType()->getBitWidth();
325 if (!APIntOps::isShiftedMask(BitWidth, V)) return false;
326
327 // look for the first zero bit after the run of ones
328 MB = BitWidth - ((V - 1) ^ V).countLeadingZeros();
329 // look for the first non-zero bit
330 ME = V.getActiveBits();
331 return true;
332 }
333
334 /// FoldLogicalPlusAnd - This is part of an expression (LHS +/- RHS) & Mask,
335 /// where isSub determines whether the operator is a sub. If we can fold one of
336 /// the following xforms:
337 ///
338 /// ((A & N) +/- B) & Mask -> (A +/- B) & Mask iff N&Mask == Mask
339 /// ((A | N) +/- B) & Mask -> (A +/- B) & Mask iff N&Mask == 0
340 /// ((A ^ N) +/- B) & Mask -> (A +/- B) & Mask iff N&Mask == 0
341 ///
342 /// return (A +/- B).
343 ///
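/// Worked examples (illustrative): ((A & 0xFF) + B) & 0x0F -> (A + B) & 0x0F
/// because 0xFF & 0x0F == 0x0F, and ((A | 0xF0) + B) & 0x0F -> (A + B) & 0x0F
/// because 0xF0 & 0x0F == 0.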
344 Value *InstCombiner::FoldLogicalPlusAnd(Value *LHS, Value *RHS,
345 ConstantInt *Mask, bool isSub,
346 Instruction &I) {
347 Instruction *LHSI = dyn_cast<Instruction>(LHS);
348 if (!LHSI || LHSI->getNumOperands() != 2 ||
349 !isa<ConstantInt>(LHSI->getOperand(1))) return 0;
350
351 ConstantInt *N = cast<ConstantInt>(LHSI->getOperand(1));
352
353 switch (LHSI->getOpcode()) {
354 default: return 0;
355 case Instruction::And:
356 if (ConstantExpr::getAnd(N, Mask) == Mask) {
357 // If the AndRHS is a power of two minus one (0+1+), this is simple.
358 if ((Mask->getValue().countLeadingZeros() +
359 Mask->getValue().countPopulation()) ==
360 Mask->getValue().getBitWidth())
361 break;
362
363 // Otherwise, if Mask is 0+1+0+, and if B is known to have the low 0+
364 // part, we don't need any explicit masks to take them out of A. If that
365 // is all N is, ignore it.
366 uint32_t MB = 0, ME = 0;
367 if (isRunOfOnes(Mask, MB, ME)) { // begin/end bit of run, inclusive
368 uint32_t BitWidth = cast<IntegerType>(RHS->getType())->getBitWidth();
369 APInt Mask(APInt::getLowBitsSet(BitWidth, MB-1));
370 if (MaskedValueIsZero(RHS, Mask))
371 break;
372 }
373 }
374 return 0;
375 case Instruction::Or:
376 case Instruction::Xor:
377 // If the AndRHS is a power of two minus one (0+1+), and N&Mask == 0
378 if ((Mask->getValue().countLeadingZeros() +
379 Mask->getValue().countPopulation()) == Mask->getValue().getBitWidth()
380 && ConstantExpr::getAnd(N, Mask)->isNullValue())
381 break;
382 return 0;
383 }
384
385 if (isSub)
386 return Builder->CreateSub(LHSI->getOperand(0), RHS, "fold");
387 return Builder->CreateAdd(LHSI->getOperand(0), RHS, "fold");
388 }
389
390 /// enum for classifying (icmp eq (A & B), C) and (icmp ne (A & B), C)
391 /// One of A and B is considered the mask, the other the value. This is
392 /// described as the "AMask" or "BMask" part of the enum. If the enum
393 /// contains only "Mask", then both A and B can be considered masks.
394 /// If A is the mask, then it was proven that (A & C) == C. This
395 /// is trivial if C == A, or C == 0. If both A and C are constants, this
396 /// proof is also easy.
397 /// For the following explanations we assume that A is the mask.
398 /// The part "AllOnes" declares, that the comparison is true only
399 /// if (A & B) == A, or all bits of A are set in B.
400 /// Example: (icmp eq (A & 3), 3) -> FoldMskICmp_AMask_AllOnes
401 /// The part "AllZeroes" declares, that the comparison is true only
402 /// if (A & B) == 0, or all bits of A are cleared in B.
403 /// Example: (icmp eq (A & 3), 0) -> FoldMskICmp_Mask_AllZeroes
404 /// The part "Mixed" declares, that (A & B) == C and C might or might not
405 /// contain any number of one bits and zero bits.
406 /// Example: (icmp eq (A & 3), 1) -> FoldMskICmp_AMask_Mixed
407 /// The Part "Not" means, that in above descriptions "==" should be replaced
408 /// by "!=".
409 /// Example: (icmp ne (A & 3), 3) -> FoldMskICmp_AMask_NotAllOnes
410 /// If the mask A contains a single bit, then the following is equivalent:
411 /// (icmp eq (A & B), A) equals (icmp ne (A & B), 0)
412 /// (icmp ne (A & B), A) equals (icmp eq (A & B), 0)
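/// Worked example (illustrative), with the single-bit mask A = 4:
/// (icmp eq (X & 4), 4) is the same test as (icmp ne (X & 4), 0).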
413 enum MaskedICmpType {
414 FoldMskICmp_AMask_AllOnes = 1,
415 FoldMskICmp_AMask_NotAllOnes = 2,
416 FoldMskICmp_BMask_AllOnes = 4,
417 FoldMskICmp_BMask_NotAllOnes = 8,
418 FoldMskICmp_Mask_AllZeroes = 16,
419 FoldMskICmp_Mask_NotAllZeroes = 32,
420 FoldMskICmp_AMask_Mixed = 64,
421 FoldMskICmp_AMask_NotMixed = 128,
422 FoldMskICmp_BMask_Mixed = 256,
423 FoldMskICmp_BMask_NotMixed = 512
424 };
425
426 /// return the set of pattern classes (from MaskedICmpType)
427 /// that (icmp SCC (A & B), C) satisfies
428 static unsigned getTypeOfMaskedICmp(Value* A, Value* B, Value* C,
429 ICmpInst::Predicate SCC)
430 {
431 ConstantInt *ACst = dyn_cast<ConstantInt>(A);
432 ConstantInt *BCst = dyn_cast<ConstantInt>(B);
433 ConstantInt *CCst = dyn_cast<ConstantInt>(C);
434 bool icmp_eq = (SCC == ICmpInst::ICMP_EQ);
435 bool icmp_abit = (ACst != 0 && !ACst->isZero() &&
436 ACst->getValue().isPowerOf2());
437 bool icmp_bbit = (BCst != 0 && !BCst->isZero() &&
438 BCst->getValue().isPowerOf2());
439 unsigned result = 0;
440 if (CCst != 0 && CCst->isZero()) {
441 // if C is zero, then both A and B qualify as mask
442 result |= (icmp_eq ? (FoldMskICmp_Mask_AllZeroes |
443 FoldMskICmp_Mask_AllZeroes |
444 FoldMskICmp_AMask_Mixed |
445 FoldMskICmp_BMask_Mixed)
446 : (FoldMskICmp_Mask_NotAllZeroes |
447 FoldMskICmp_Mask_NotAllZeroes |
448 FoldMskICmp_AMask_NotMixed |
449 FoldMskICmp_BMask_NotMixed));
450 if (icmp_abit)
451 result |= (icmp_eq ? (FoldMskICmp_AMask_NotAllOnes |
452 FoldMskICmp_AMask_NotMixed)
453 : (FoldMskICmp_AMask_AllOnes |
454 FoldMskICmp_AMask_Mixed));
455 if (icmp_bbit)
456 result |= (icmp_eq ? (FoldMskICmp_BMask_NotAllOnes |
457 FoldMskICmp_BMask_NotMixed)
458 : (FoldMskICmp_BMask_AllOnes |
459 FoldMskICmp_BMask_Mixed));
460 return result;
461 }
462 if (A == C) {
463 result |= (icmp_eq ? (FoldMskICmp_AMask_AllOnes |
464 FoldMskICmp_AMask_Mixed)
465 : (FoldMskICmp_AMask_NotAllOnes |
466 FoldMskICmp_AMask_NotMixed));
467 if (icmp_abit)
468 result |= (icmp_eq ? (FoldMskICmp_Mask_NotAllZeroes |
469 FoldMskICmp_AMask_NotMixed)
470 : (FoldMskICmp_Mask_AllZeroes |
471 FoldMskICmp_AMask_Mixed));
472 }
473 else if (ACst != 0 && CCst != 0 &&
474 ConstantExpr::getAnd(ACst, CCst) == CCst) {
475 result |= (icmp_eq ? FoldMskICmp_AMask_Mixed
476 : FoldMskICmp_AMask_NotMixed);
477 }
478 if (B == C)
479 {
480 result |= (icmp_eq ? (FoldMskICmp_BMask_AllOnes |
481 FoldMskICmp_BMask_Mixed)
482 : (FoldMskICmp_BMask_NotAllOnes |
483 FoldMskICmp_BMask_NotMixed));
484 if (icmp_bbit)
485 result |= (icmp_eq ? (FoldMskICmp_Mask_NotAllZeroes |
486 FoldMskICmp_BMask_NotMixed)
487 : (FoldMskICmp_Mask_AllZeroes |
488 FoldMskICmp_BMask_Mixed));
489 }
490 else if (BCst != 0 && CCst != 0 &&
491 ConstantExpr::getAnd(BCst, CCst) == CCst) {
492 result |= (icmp_eq ? FoldMskICmp_BMask_Mixed
493 : FoldMskICmp_BMask_NotMixed);
494 }
495 return result;
496 }
497
498 /// decomposeBitTestICmp - Decompose an icmp into the form ((X & Y) pred Z)
499 /// if possible. The returned predicate is either == or !=. Returns false if
500 /// decomposition fails.
501 static bool decomposeBitTestICmp(const ICmpInst *I, ICmpInst::Predicate &Pred,
502 Value *&X, Value *&Y, Value *&Z) {
503 // X < 0 is equivalent to (X & SignBit) != 0.
504 if (I->getPredicate() == ICmpInst::ICMP_SLT)
505 if (ConstantInt *C = dyn_cast<ConstantInt>(I->getOperand(1)))
506 if (C->isZero()) {
507 X = I->getOperand(0);
508 Y = ConstantInt::get(I->getContext(),
509 APInt::getSignBit(C->getBitWidth()));
510 Pred = ICmpInst::ICMP_NE;
511 Z = C;
512 return true;
513 }
514
515 // X > -1 is equivalent to (X & SignBit) == 0.
516 if (I->getPredicate() == ICmpInst::ICMP_SGT)
517 if (ConstantInt *C = dyn_cast<ConstantInt>(I->getOperand(1)))
518 if (C->isAllOnesValue()) {
519 X = I->getOperand(0);
520 Y = ConstantInt::get(I->getContext(),
521 APInt::getSignBit(C->getBitWidth()));
522 Pred = ICmpInst::ICMP_EQ;
523 Z = ConstantInt::getNullValue(C->getType());
524 return true;
525 }
526
527 return false;
528 }
529
530 /// foldLogOpOfMaskedICmpsHelper:
531 /// handle (icmp(A & B) ==/!= C) &/| (icmp(A & D) ==/!= E)
532 /// return the set of pattern classes (from MaskedICmpType)
533 /// that both LHS and RHS satisfy
534 static unsigned foldLogOpOfMaskedICmpsHelper(Value*& A,
535 Value*& B, Value*& C,
536 Value*& D, Value*& E,
537 ICmpInst *LHS, ICmpInst *RHS,
538 ICmpInst::Predicate &LHSCC,
539 ICmpInst::Predicate &RHSCC) {
540 if (LHS->getOperand(0)->getType() != RHS->getOperand(0)->getType()) return 0;
541 // vectors are not (yet?) supported
542 if (LHS->getOperand(0)->getType()->isVectorTy()) return 0;
543
544 // Here comes the tricky part:
545 // LHS might be of the form (L11 & L12) == X, X == (L21 & L22),
546 // or (L11 & L12) == (L21 & L22). The same goes for RHS.
547 // Now we must find those components L** and R** that are equal, so
548 // that we can extract the parameters A, B, C, D, and E for the canonical
549 // form above.
550 Value *L1 = LHS->getOperand(0);
551 Value *L2 = LHS->getOperand(1);
552 Value *L11,*L12,*L21,*L22;
553 // Check whether the icmp can be decomposed into a bit test.
554 if (decomposeBitTestICmp(LHS, LHSCC, L11, L12, L2)) {
555 L21 = L22 = L1 = 0;
556 } else {
557 // Look for ANDs in the LHS icmp.
558 if (match(L1, m_And(m_Value(L11), m_Value(L12)))) {
559 if (!match(L2, m_And(m_Value(L21), m_Value(L22))))
560 L21 = L22 = 0;
561 } else {
562 if (!match(L2, m_And(m_Value(L11), m_Value(L12))))
563 return 0;
564 std::swap(L1, L2);
565 L21 = L22 = 0;
566 }
567 }
568
569 // Bail if LHS was an icmp that can't be decomposed into an equality.
570 if (!ICmpInst::isEquality(LHSCC))
571 return 0;
572
573 Value *R1 = RHS->getOperand(0);
574 Value *R2 = RHS->getOperand(1);
575 Value *R11,*R12;
576 bool ok = false;
577 if (decomposeBitTestICmp(RHS, RHSCC, R11, R12, R2)) {
578 if (R11 == L11 || R11 == L12 || R11 == L21 || R11 == L22) {
579 A = R11; D = R12;
580 } else if (R12 == L11 || R12 == L12 || R12 == L21 || R12 == L22) {
581 A = R12; D = R11;
582 } else {
583 return 0;
584 }
585 E = R2; R1 = 0; ok = true;
586 } else if (match(R1, m_And(m_Value(R11), m_Value(R12)))) {
587 if (R11 == L11 || R11 == L12 || R11 == L21 || R11 == L22) {
588 A = R11; D = R12; E = R2; ok = true;
589 } else if (R12 == L11 || R12 == L12 || R12 == L21 || R12 == L22) {
590 A = R12; D = R11; E = R2; ok = true;
591 }
592 }
593
594 // Bail if RHS was an icmp that can't be decomposed into an equality.
595 if (!ICmpInst::isEquality(RHSCC))
596 return 0;
597
598 // Look for ANDs on the right side of the RHS icmp.
599 if (!ok && match(R2, m_And(m_Value(R11), m_Value(R12)))) {
600 if (R11 == L11 || R11 == L12 || R11 == L21 || R11 == L22) {
601 A = R11; D = R12; E = R1; ok = true;
602 } else if (R12 == L11 || R12 == L12 || R12 == L21 || R12 == L22) {
603 A = R12; D = R11; E = R1; ok = true;
604 } else {
605 return 0;
606 }
607 }
608 if (!ok)
609 return 0;
610
611 if (L11 == A) {
612 B = L12; C = L2;
613 }
614 else if (L12 == A) {
615 B = L11; C = L2;
616 }
617 else if (L21 == A) {
618 B = L22; C = L1;
619 }
620 else if (L22 == A) {
621 B = L21; C = L1;
622 }
623
624 unsigned left_type = getTypeOfMaskedICmp(A, B, C, LHSCC);
625 unsigned right_type = getTypeOfMaskedICmp(A, D, E, RHSCC);
626 return left_type & right_type;
627 }
628 /// foldLogOpOfMaskedICmps:
629 /// try to fold (icmp(A & B) ==/!= C) &/| (icmp(A & D) ==/!= E)
630 /// into a single (icmp(A & X) ==/!= Y)
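/// Worked example (illustrative): (icmp eq (A & 1), 0) & (icmp eq (A & 8), 0)
/// folds to (icmp eq (A & 9), 0), testing both bits with a single compare.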
631 static Value* foldLogOpOfMaskedICmps(ICmpInst *LHS, ICmpInst *RHS,
632 ICmpInst::Predicate NEWCC,
633 llvm::InstCombiner::BuilderTy* Builder) {
634 Value *A = 0, *B = 0, *C = 0, *D = 0, *E = 0;
635 ICmpInst::Predicate LHSCC = LHS->getPredicate(), RHSCC = RHS->getPredicate();
636 unsigned mask = foldLogOpOfMaskedICmpsHelper(A, B, C, D, E, LHS, RHS,
637 LHSCC, RHSCC);
638 if (mask == 0) return 0;
639 assert(ICmpInst::isEquality(LHSCC) && ICmpInst::isEquality(RHSCC) &&
640 "foldLogOpOfMaskedICmpsHelper must return an equality predicate.");
641
642 if (NEWCC == ICmpInst::ICMP_NE)
643 mask >>= 1; // treat "Not"-states as normal states
644
645 if (mask & FoldMskICmp_Mask_AllZeroes) {
646 // (icmp eq (A & B), 0) & (icmp eq (A & D), 0)
647 // -> (icmp eq (A & (B|D)), 0)
648 Value* newOr = Builder->CreateOr(B, D);
649 Value* newAnd = Builder->CreateAnd(A, newOr);
650 // We can't use C as the zero here, because we might actually handle
651 // (icmp ne (A & B), B) & (icmp ne (A & D), D)
652 // with B and D each having a single bit set.
653 Value* zero = Constant::getNullValue(A->getType());
654 return Builder->CreateICmp(NEWCC, newAnd, zero);
655 }
656 else if (mask & FoldMskICmp_BMask_AllOnes) {
657 // (icmp eq (A & B), B) & (icmp eq (A & D), D)
658 // -> (icmp eq (A & (B|D)), (B|D))
659 Value* newOr = Builder->CreateOr(B, D);
660 Value* newAnd = Builder->CreateAnd(A, newOr);
661 return Builder->CreateICmp(NEWCC, newAnd, newOr);
662 }
663 else if (mask & FoldMskICmp_AMask_AllOnes) {
664 // (icmp eq (A & B), A) & (icmp eq (A & D), A)
665 // -> (icmp eq (A & (B&D)), A)
666 Value* newAnd1 = Builder->CreateAnd(B, D);
667 Value* newAnd = Builder->CreateAnd(A, newAnd1);
668 return Builder->CreateICmp(NEWCC, newAnd, A);
669 }
670 else if (mask & FoldMskICmp_BMask_Mixed) {
671 // (icmp eq (A & B), C) & (icmp eq (A & D), E)
672 // We already know that B & C == C && D & E == E.
673 // If we can prove that (B & D) & (C ^ E) == 0, that is, the bits of
674 // C and E, which are shared by both the mask B and the mask D, don't
675 // contradict, then we can transform to
676 // -> (icmp eq (A & (B|D)), (C|E))
677 // Currently, we only handle the case of B, C, D, and E being constant.
678 ConstantInt *BCst = dyn_cast<ConstantInt>(B);
679 if (BCst == 0) return 0;
680 ConstantInt *DCst = dyn_cast<ConstantInt>(D);
681 if (DCst == 0) return 0;
682 // We can't simply use C and E, because we might actually handle
683 // (icmp ne (A & B), B) & (icmp eq (A & D), D)
684 // with B and D each having a single bit set.
685
686 ConstantInt *CCst = dyn_cast<ConstantInt>(C);
687 if (CCst == 0) return 0;
688 if (LHSCC != NEWCC)
689 CCst = dyn_cast<ConstantInt>( ConstantExpr::getXor(BCst, CCst) );
690 ConstantInt *ECst = dyn_cast<ConstantInt>(E);
691 if (ECst == 0) return 0;
692 if (RHSCC != NEWCC)
693 ECst = dyn_cast<ConstantInt>( ConstantExpr::getXor(DCst, ECst) );
694 ConstantInt* MCst = dyn_cast<ConstantInt>(
695 ConstantExpr::getAnd(ConstantExpr::getAnd(BCst, DCst),
696 ConstantExpr::getXor(CCst, ECst)) );
697 // If there is a conflict, we should actually return false for the
698 // whole construct.
699 if (!MCst->isZero())
700 return 0;
701 Value *newOr1 = Builder->CreateOr(B, D);
702 Value *newOr2 = ConstantExpr::getOr(CCst, ECst);
703 Value *newAnd = Builder->CreateAnd(A, newOr1);
704 return Builder->CreateICmp(NEWCC, newAnd, newOr2);
705 }
706 return 0;
707 }
708
709 /// FoldAndOfICmps - Fold (icmp)&(icmp) if possible.
710 Value *InstCombiner::FoldAndOfICmps(ICmpInst *LHS, ICmpInst *RHS) {
711 ICmpInst::Predicate LHSCC = LHS->getPredicate(), RHSCC = RHS->getPredicate();
712
713 // (icmp1 A, B) & (icmp2 A, B) --> (icmp3 A, B)
714 if (PredicatesFoldable(LHSCC, RHSCC)) {
715 if (LHS->getOperand(0) == RHS->getOperand(1) &&
716 LHS->getOperand(1) == RHS->getOperand(0))
717 LHS->swapOperands();
718 if (LHS->getOperand(0) == RHS->getOperand(0) &&
719 LHS->getOperand(1) == RHS->getOperand(1)) {
720 Value *Op0 = LHS->getOperand(0), *Op1 = LHS->getOperand(1);
721 unsigned Code = getICmpCode(LHS) & getICmpCode(RHS);
722 bool isSigned = LHS->isSigned() || RHS->isSigned();
723 return getNewICmpValue(isSigned, Code, Op0, Op1, Builder);
724 }
725 }
726
727 // handle (roughly): (icmp eq (A & B), C) & (icmp eq (A & D), E)
728 if (Value *V = foldLogOpOfMaskedICmps(LHS, RHS, ICmpInst::ICMP_EQ, Builder))
729 return V;
730
731 // This only handles icmp of constants: (icmp1 A, C1) & (icmp2 B, C2).
732 Value *Val = LHS->getOperand(0), *Val2 = RHS->getOperand(0);
733 ConstantInt *LHSCst = dyn_cast<ConstantInt>(LHS->getOperand(1));
734 ConstantInt *RHSCst = dyn_cast<ConstantInt>(RHS->getOperand(1));
735 if (LHSCst == 0 || RHSCst == 0) return 0;
736
737 if (LHSCst == RHSCst && LHSCC == RHSCC) {
738 // (icmp ult A, C) & (icmp ult B, C) --> (icmp ult (A|B), C)
739 // where C is a power of 2
740 if (LHSCC == ICmpInst::ICMP_ULT &&
741 LHSCst->getValue().isPowerOf2()) {
742 Value *NewOr = Builder->CreateOr(Val, Val2);
743 return Builder->CreateICmp(LHSCC, NewOr, LHSCst);
744 }
745
746 // (icmp eq A, 0) & (icmp eq B, 0) --> (icmp eq (A|B), 0)
747 if (LHSCC == ICmpInst::ICMP_EQ && LHSCst->isZero()) {
748 Value *NewOr = Builder->CreateOr(Val, Val2);
749 return Builder->CreateICmp(LHSCC, NewOr, LHSCst);
750 }
751 }
752
753 // (trunc x) == C1 & (and x, CA) == C2 -> (and x, CA|CMAX) == C1|C2
754 // where CMAX is the all ones value for the truncated type,
755 // iff the lower bits of C2 and CA are zero.
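// Worked example (illustrative, x of type i32): (trunc x to i8) == 5 and
// (x & 0xF00) == 0x300 combine to (x & 0xFFF) == 0x305, since the low 8 bits
// of both 0xF00 and 0x300 are zero.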
756 if (LHSCC == ICmpInst::ICMP_EQ && LHSCC == RHSCC &&
757 LHS->hasOneUse() && RHS->hasOneUse()) {
758 Value *V;
759 ConstantInt *AndCst, *SmallCst = 0, *BigCst = 0;
760
761 // (trunc x) == C1 & (and x, CA) == C2
762 if (match(Val2, m_Trunc(m_Value(V))) &&
763 match(Val, m_And(m_Specific(V), m_ConstantInt(AndCst)))) {
764 SmallCst = RHSCst;
765 BigCst = LHSCst;
766 }
767 // (and x, CA) == C2 & (trunc x) == C1
768 else if (match(Val, m_Trunc(m_Value(V))) &&
769 match(Val2, m_And(m_Specific(V), m_ConstantInt(AndCst)))) {
770 SmallCst = LHSCst;
771 BigCst = RHSCst;
772 }
773
774 if (SmallCst && BigCst) {
775 unsigned BigBitSize = BigCst->getType()->getBitWidth();
776 unsigned SmallBitSize = SmallCst->getType()->getBitWidth();
777
778 // Check that the low bits are zero.
779 APInt Low = APInt::getLowBitsSet(BigBitSize, SmallBitSize);
780 if ((Low & AndCst->getValue()) == 0 && (Low & BigCst->getValue()) == 0) {
781 Value *NewAnd = Builder->CreateAnd(V, Low | AndCst->getValue());
782 APInt N = SmallCst->getValue().zext(BigBitSize) | BigCst->getValue();
783 Value *NewVal = ConstantInt::get(AndCst->getType()->getContext(), N);
784 return Builder->CreateICmp(LHSCC, NewAnd, NewVal);
785 }
786 }
787 }
788
789 // From here on, we only handle:
790 // (icmp1 A, C1) & (icmp2 A, C2) --> something simpler.
791 if (Val != Val2) return 0;
792
793 // ICMP_[US][GL]E X, CST is folded to ICMP_[US][GL]T elsewhere.
794 if (LHSCC == ICmpInst::ICMP_UGE || LHSCC == ICmpInst::ICMP_ULE ||
795 RHSCC == ICmpInst::ICMP_UGE || RHSCC == ICmpInst::ICMP_ULE ||
796 LHSCC == ICmpInst::ICMP_SGE || LHSCC == ICmpInst::ICMP_SLE ||
797 RHSCC == ICmpInst::ICMP_SGE || RHSCC == ICmpInst::ICMP_SLE)
798 return 0;
799
800 // Make a constant range that's the intersection of the two icmp ranges.
801 // If the intersection is empty, we know that the result is false.
802 ConstantRange LHSRange =
803 ConstantRange::makeICmpRegion(LHSCC, LHSCst->getValue());
804 ConstantRange RHSRange =
805 ConstantRange::makeICmpRegion(RHSCC, RHSCst->getValue());
806
807 if (LHSRange.intersectWith(RHSRange).isEmptySet())
808 return ConstantInt::get(CmpInst::makeCmpResultType(LHS->getType()), 0);
809
810 // We can't fold (ugt x, C) & (sgt x, C2).
811 if (!PredicatesFoldable(LHSCC, RHSCC))
812 return 0;
813
814 // Ensure that the larger constant is on the RHS.
815 bool ShouldSwap;
816 if (CmpInst::isSigned(LHSCC) ||
817 (ICmpInst::isEquality(LHSCC) &&
818 CmpInst::isSigned(RHSCC)))
819 ShouldSwap = LHSCst->getValue().sgt(RHSCst->getValue());
820 else
821 ShouldSwap = LHSCst->getValue().ugt(RHSCst->getValue());
822
823 if (ShouldSwap) {
824 std::swap(LHS, RHS);
825 std::swap(LHSCst, RHSCst);
826 std::swap(LHSCC, RHSCC);
827 }
828
829 // At this point, we know we have two icmp instructions
830 // comparing a value against two constants and and'ing the result
831 // together. Because of the above check, we know that we only have
832 // icmp eq, icmp ne, icmp [su]lt, and icmp [su]gt here. We also know
833 // (from the icmp folding check above) that the two constants
834 // are not equal and that the larger constant is on the RHS.
835 assert(LHSCst != RHSCst && "Compares not folded above?");
836
837 switch (LHSCC) {
838 default: llvm_unreachable("Unknown integer condition code!");
839 case ICmpInst::ICMP_EQ:
840 switch (RHSCC) {
841 default: llvm_unreachable("Unknown integer condition code!");
842 case ICmpInst::ICMP_NE: // (X == 13 & X != 15) -> X == 13
843 case ICmpInst::ICMP_ULT: // (X == 13 & X < 15) -> X == 13
844 case ICmpInst::ICMP_SLT: // (X == 13 & X < 15) -> X == 13
845 return LHS;
846 }
847 case ICmpInst::ICMP_NE:
848 switch (RHSCC) {
849 default: llvm_unreachable("Unknown integer condition code!");
850 case ICmpInst::ICMP_ULT:
851 if (LHSCst == SubOne(RHSCst)) // (X != 13 & X u< 14) -> X < 13
852 return Builder->CreateICmpULT(Val, LHSCst);
853 break; // (X != 13 & X u< 15) -> no change
854 case ICmpInst::ICMP_SLT:
855 if (LHSCst == SubOne(RHSCst)) // (X != 13 & X s< 14) -> X < 13
856 return Builder->CreateICmpSLT(Val, LHSCst);
857 break; // (X != 13 & X s< 15) -> no change
858 case ICmpInst::ICMP_EQ: // (X != 13 & X == 15) -> X == 15
859 case ICmpInst::ICMP_UGT: // (X != 13 & X u> 15) -> X u> 15
860 case ICmpInst::ICMP_SGT: // (X != 13 & X s> 15) -> X s> 15
861 return RHS;
862 case ICmpInst::ICMP_NE:
863 if (LHSCst == SubOne(RHSCst)){// (X != 13 & X != 14) -> X-13 >u 1
864 Constant *AddCST = ConstantExpr::getNeg(LHSCst);
865 Value *Add = Builder->CreateAdd(Val, AddCST, Val->getName()+".off");
866 return Builder->CreateICmpUGT(Add, ConstantInt::get(Add->getType(), 1));
867 }
868 break; // (X != 13 & X != 15) -> no change
869 }
870 break;
871 case ICmpInst::ICMP_ULT:
872 switch (RHSCC) {
873 default: llvm_unreachable("Unknown integer condition code!");
874 case ICmpInst::ICMP_EQ: // (X u< 13 & X == 15) -> false
875 case ICmpInst::ICMP_UGT: // (X u< 13 & X u> 15) -> false
876 return ConstantInt::get(CmpInst::makeCmpResultType(LHS->getType()), 0);
877 case ICmpInst::ICMP_SGT: // (X u< 13 & X s> 15) -> no change
878 break;
879 case ICmpInst::ICMP_NE: // (X u< 13 & X != 15) -> X u< 13
880 case ICmpInst::ICMP_ULT: // (X u< 13 & X u< 15) -> X u< 13
881 return LHS;
882 case ICmpInst::ICMP_SLT: // (X u< 13 & X s< 15) -> no change
883 break;
884 }
885 break;
886 case ICmpInst::ICMP_SLT:
887 switch (RHSCC) {
888 default: llvm_unreachable("Unknown integer condition code!");
889 case ICmpInst::ICMP_UGT: // (X s< 13 & X u> 15) -> no change
890 break;
891 case ICmpInst::ICMP_NE: // (X s< 13 & X != 15) -> X < 13
892 case ICmpInst::ICMP_SLT: // (X s< 13 & X s< 15) -> X < 13
893 return LHS;
894 case ICmpInst::ICMP_ULT: // (X s< 13 & X u< 15) -> no change
895 break;
896 }
897 break;
898 case ICmpInst::ICMP_UGT:
899 switch (RHSCC) {
900 default: llvm_unreachable("Unknown integer condition code!");
901 case ICmpInst::ICMP_EQ: // (X u> 13 & X == 15) -> X == 15
902 case ICmpInst::ICMP_UGT: // (X u> 13 & X u> 15) -> X u> 15
903 return RHS;
904 case ICmpInst::ICMP_SGT: // (X u> 13 & X s> 15) -> no change
905 break;
906 case ICmpInst::ICMP_NE:
907 if (RHSCst == AddOne(LHSCst)) // (X u> 13 & X != 14) -> X u> 14
908 return Builder->CreateICmp(LHSCC, Val, RHSCst);
909 break; // (X u> 13 & X != 15) -> no change
910 case ICmpInst::ICMP_ULT: // (X u> 13 & X u< 15) -> (X-14) <u 1
911 return InsertRangeTest(Val, AddOne(LHSCst), RHSCst, false, true);
912 case ICmpInst::ICMP_SLT: // (X u> 13 & X s< 15) -> no change
913 break;
914 }
915 break;
916 case ICmpInst::ICMP_SGT:
917 switch (RHSCC) {
918 default: llvm_unreachable("Unknown integer condition code!");
919 case ICmpInst::ICMP_EQ: // (X s> 13 & X == 15) -> X == 15
920 case ICmpInst::ICMP_SGT: // (X s> 13 & X s> 15) -> X s> 15
921 return RHS;
922 case ICmpInst::ICMP_UGT: // (X s> 13 & X u> 15) -> no change
923 break;
924 case ICmpInst::ICMP_NE:
925 if (RHSCst == AddOne(LHSCst)) // (X s> 13 & X != 14) -> X s> 14
926 return Builder->CreateICmp(LHSCC, Val, RHSCst);
927 break; // (X s> 13 & X != 15) -> no change
928 case ICmpInst::ICMP_SLT: // (X s> 13 & X s< 15) -> (X-14) s< 1
929 return InsertRangeTest(Val, AddOne(LHSCst), RHSCst, true, true);
930 case ICmpInst::ICMP_ULT: // (X s> 13 & X u< 15) -> no change
931 break;
932 }
933 break;
934 }
935
936 return 0;
937 }
938
939 /// FoldAndOfFCmps - Optimize (fcmp)&(fcmp). NOTE: Unlike the rest of
940 /// instcombine, this returns a Value which should already be inserted into the
941 /// function.
942 Value *InstCombiner::FoldAndOfFCmps(FCmpInst *LHS, FCmpInst *RHS) {
943 if (LHS->getPredicate() == FCmpInst::FCMP_ORD &&
944 RHS->getPredicate() == FCmpInst::FCMP_ORD) {
945 // (fcmp ord x, c) & (fcmp ord y, c) -> (fcmp ord x, y)
946 if (ConstantFP *LHSC = dyn_cast<ConstantFP>(LHS->getOperand(1)))
947 if (ConstantFP *RHSC = dyn_cast<ConstantFP>(RHS->getOperand(1))) {
948 // If either of the constants are nans, then the whole thing returns
949 // false.
950 if (LHSC->getValueAPF().isNaN() || RHSC->getValueAPF().isNaN())
951 return ConstantInt::getFalse(LHS->getContext());
952 return Builder->CreateFCmpORD(LHS->getOperand(0), RHS->getOperand(0));
953 }
954
955 // Handle vector zeros. This occurs because the canonical form of
956 // "fcmp ord x,x" is "fcmp ord x, 0".
957 if (isa<ConstantAggregateZero>(LHS->getOperand(1)) &&
958 isa<ConstantAggregateZero>(RHS->getOperand(1)))
959 return Builder->CreateFCmpORD(LHS->getOperand(0), RHS->getOperand(0));
960 return 0;
961 }
962
963 Value *Op0LHS = LHS->getOperand(0), *Op0RHS = LHS->getOperand(1);
964 Value *Op1LHS = RHS->getOperand(0), *Op1RHS = RHS->getOperand(1);
965 FCmpInst::Predicate Op0CC = LHS->getPredicate(), Op1CC = RHS->getPredicate();
966
967
968 if (Op0LHS == Op1RHS && Op0RHS == Op1LHS) {
969 // Swap RHS operands to match LHS.
970 Op1CC = FCmpInst::getSwappedPredicate(Op1CC);
971 std::swap(Op1LHS, Op1RHS);
972 }
973
974 if (Op0LHS == Op1LHS && Op0RHS == Op1RHS) {
975 // Simplify (fcmp cc0 x, y) & (fcmp cc1 x, y).
976 if (Op0CC == Op1CC)
977 return Builder->CreateFCmp((FCmpInst::Predicate)Op0CC, Op0LHS, Op0RHS);
978 if (Op0CC == FCmpInst::FCMP_FALSE || Op1CC == FCmpInst::FCMP_FALSE)
979 return ConstantInt::get(CmpInst::makeCmpResultType(LHS->getType()), 0);
980 if (Op0CC == FCmpInst::FCMP_TRUE)
981 return RHS;
982 if (Op1CC == FCmpInst::FCMP_TRUE)
983 return LHS;
984
985 bool Op0Ordered;
986 bool Op1Ordered;
987 unsigned Op0Pred = getFCmpCode(Op0CC, Op0Ordered);
988 unsigned Op1Pred = getFCmpCode(Op1CC, Op1Ordered);
989 if (Op1Pred == 0) {
990 std::swap(LHS, RHS);
991 std::swap(Op0Pred, Op1Pred);
992 std::swap(Op0Ordered, Op1Ordered);
993 }
994 if (Op0Pred == 0) {
995 // uno && ueq -> uno && (uno || eq) -> ueq
996 // ord && olt -> ord && (ord && lt) -> olt
997 if (Op0Ordered == Op1Ordered)
998 return RHS;
999
1000 // uno && oeq -> uno && (ord && eq) -> false
1001 // uno && ord -> false
1002 if (!Op0Ordered)
1003 return ConstantInt::get(CmpInst::makeCmpResultType(LHS->getType()), 0);
1004 // ord && ueq -> ord && (uno || eq) -> oeq
1005 return getFCmpValue(true, Op1Pred, Op0LHS, Op0RHS, Builder);
1006 }
1007 }
1008
1009 return 0;
1010 }
1011
1012
1013 Instruction *InstCombiner::visitAnd(BinaryOperator &I) {
1014 bool Changed = SimplifyAssociativeOrCommutative(I);
1015 Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
1016
1017 if (Value *V = SimplifyAndInst(Op0, Op1, TD))
1018 return ReplaceInstUsesWith(I, V);
1019
1020 // (A|B)&(A|C) -> A|(B&C) etc
1021 if (Value *V = SimplifyUsingDistributiveLaws(I))
1022 return ReplaceInstUsesWith(I, V);
1023
1024 // See if we can simplify any instructions used by the instruction whose sole
1025 // purpose is to compute bits we don't care about.
1026 if (SimplifyDemandedInstructionBits(I))
1027 return &I;
1028
1029 if (ConstantInt *AndRHS = dyn_cast<ConstantInt>(Op1)) {
1030 const APInt &AndRHSMask = AndRHS->getValue();
1031
1032 // Optimize a variety of ((val OP C1) & C2) combinations...
1033 if (BinaryOperator *Op0I = dyn_cast<BinaryOperator>(Op0)) {
1034 Value *Op0LHS = Op0I->getOperand(0);
1035 Value *Op0RHS = Op0I->getOperand(1);
1036 switch (Op0I->getOpcode()) {
1037 default: break;
1038 case Instruction::Xor:
1039 case Instruction::Or: {
1040 // If the mask is only needed on one incoming arm, push it up.
1041 if (!Op0I->hasOneUse()) break;
1042
1043 APInt NotAndRHS(~AndRHSMask);
1044 if (MaskedValueIsZero(Op0LHS, NotAndRHS)) {
1045 // Not masking anything out for the LHS, move to RHS.
1046 Value *NewRHS = Builder->CreateAnd(Op0RHS, AndRHS,
1047 Op0RHS->getName()+".masked");
1048 return BinaryOperator::Create(Op0I->getOpcode(), Op0LHS, NewRHS);
1049 }
1050 if (!isa<Constant>(Op0RHS) &&
1051 MaskedValueIsZero(Op0RHS, NotAndRHS)) {
1052 // Not masking anything out for the RHS, move to LHS.
1053 Value *NewLHS = Builder->CreateAnd(Op0LHS, AndRHS,
1054 Op0LHS->getName()+".masked");
1055 return BinaryOperator::Create(Op0I->getOpcode(), NewLHS, Op0RHS);
1056 }
1057
1058 break;
1059 }
1060 case Instruction::Add:
1061 // ((A & N) + B) & AndRHS -> (A + B) & AndRHS iff N&AndRHS == AndRHS.
1062 // ((A | N) + B) & AndRHS -> (A + B) & AndRHS iff N&AndRHS == 0
1063 // ((A ^ N) + B) & AndRHS -> (A + B) & AndRHS iff N&AndRHS == 0
1064 if (Value *V = FoldLogicalPlusAnd(Op0LHS, Op0RHS, AndRHS, false, I))
1065 return BinaryOperator::CreateAnd(V, AndRHS);
1066 if (Value *V = FoldLogicalPlusAnd(Op0RHS, Op0LHS, AndRHS, false, I))
1067 return BinaryOperator::CreateAnd(V, AndRHS); // Add commutes
1068 break;
1069
1070 case Instruction::Sub:
1071 // ((A & N) - B) & AndRHS -> (A - B) & AndRHS iff N&AndRHS == AndRHS.
1072 // ((A | N) - B) & AndRHS -> (A - B) & AndRHS iff N&AndRHS == 0
1073 // ((A ^ N) - B) & AndRHS -> (A - B) & AndRHS iff N&AndRHS == 0
1074 if (Value *V = FoldLogicalPlusAnd(Op0LHS, Op0RHS, AndRHS, true, I))
1075 return BinaryOperator::CreateAnd(V, AndRHS);
1076
1077 // (A - N) & AndRHS -> -N & AndRHS iff A&AndRHS==0 and AndRHS
1078 // has 1's for all bits that the subtraction with A might affect.
1079 if (Op0I->hasOneUse() && !match(Op0LHS, m_Zero())) {
1080 uint32_t BitWidth = AndRHSMask.getBitWidth();
1081 uint32_t Zeros = AndRHSMask.countLeadingZeros();
1082 APInt Mask = APInt::getLowBitsSet(BitWidth, BitWidth - Zeros);
1083
1084 if (MaskedValueIsZero(Op0LHS, Mask)) {
1085 Value *NewNeg = Builder->CreateNeg(Op0RHS);
1086 return BinaryOperator::CreateAnd(NewNeg, AndRHS);
1087 }
1088 }
1089 break;
1090
1091 case Instruction::Shl:
1092 case Instruction::LShr:
1093 // (1 << x) & 1 --> zext(x == 0)
1094 // (1 >> x) & 1 --> zext(x == 0)
1095 if (AndRHSMask == 1 && Op0LHS == AndRHS) {
1096 Value *NewICmp =
1097 Builder->CreateICmpEQ(Op0RHS, Constant::getNullValue(I.getType()));
1098 return new ZExtInst(NewICmp, I.getType());
1099 }
1100 break;
1101 }
1102
1103 if (ConstantInt *Op0CI = dyn_cast<ConstantInt>(Op0I->getOperand(1)))
1104 if (Instruction *Res = OptAndOp(Op0I, Op0CI, AndRHS, I))
1105 return Res;
1106 }
1107
1108 // If this is an integer truncation, and if the source is an 'and' with
1109 // immediate, transform it. This frequently occurs for bitfield accesses.
1110 {
1111 Value *X = 0; ConstantInt *YC = 0;
1112 if (match(Op0, m_Trunc(m_And(m_Value(X), m_ConstantInt(YC))))) {
1113 // Change: and (trunc (and X, YC) to T), C2
1114 // into : and (trunc X to T), trunc(YC) & C2
1115 // This will fold the two constants together, which may allow
1116 // other simplifications.
1117 Value *NewCast = Builder->CreateTrunc(X, I.getType(), "and.shrunk");
1118 Constant *C3 = ConstantExpr::getTrunc(YC, I.getType());
1119 C3 = ConstantExpr::getAnd(C3, AndRHS);
1120 return BinaryOperator::CreateAnd(NewCast, C3);
1121 }
1122 }
1123
1124 // Try to fold constant and into select arguments.
1125 if (SelectInst *SI = dyn_cast<SelectInst>(Op0))
1126 if (Instruction *R = FoldOpIntoSelect(I, SI))
1127 return R;
1128 if (isa<PHINode>(Op0))
1129 if (Instruction *NV = FoldOpIntoPhi(I))
1130 return NV;
1131 }
1132
1133
1134 // (~A & ~B) == (~(A | B)) - De Morgan's Law
1135 if (Value *Op0NotVal = dyn_castNotVal(Op0))
1136 if (Value *Op1NotVal = dyn_castNotVal(Op1))
1137 if (Op0->hasOneUse() && Op1->hasOneUse()) {
1138 Value *Or = Builder->CreateOr(Op0NotVal, Op1NotVal,
1139 I.getName()+".demorgan");
1140 return BinaryOperator::CreateNot(Or);
1141 }
1142
1143 {
1144 Value *A = 0, *B = 0, *C = 0, *D = 0;
1145 // (A|B) & ~(A&B) -> A^B
1146 if (match(Op0, m_Or(m_Value(A), m_Value(B))) &&
1147 match(Op1, m_Not(m_And(m_Value(C), m_Value(D)))) &&
1148 ((A == C && B == D) || (A == D && B == C)))
1149 return BinaryOperator::CreateXor(A, B);
1150
1151 // ~(A&B) & (A|B) -> A^B
1152 if (match(Op1, m_Or(m_Value(A), m_Value(B))) &&
1153 match(Op0, m_Not(m_And(m_Value(C), m_Value(D)))) &&
1154 ((A == C && B == D) || (A == D && B == C)))
1155 return BinaryOperator::CreateXor(A, B);
1156
1157 // A&(A^B) => A & ~B
1158 {
1159 Value *tmpOp0 = Op0;
1160 Value *tmpOp1 = Op1;
1161 if (Op0->hasOneUse() &&
1162 match(Op0, m_Xor(m_Value(A), m_Value(B)))) {
1163 if (A == Op1 || B == Op1 ) {
1164 tmpOp1 = Op0;
1165 tmpOp0 = Op1;
1166 // Simplify below
1167 }
1168 }
1169
1170 if (tmpOp1->hasOneUse() &&
1171 match(tmpOp1, m_Xor(m_Value(A), m_Value(B)))) {
1172 if (B == tmpOp0) {
1173 std::swap(A, B);
1174 }
1175 // Notice that the pattern (A&(~B)) is actually (A&(-1^B)), so if
1176 // A is originally -1 (or a vector of -1 and undefs), then we enter
1177 // an endless loop. By checking that A is non-constant we ensure that
1178 // we will never get to the loop.
1179 if (A == tmpOp0 && !isa<Constant>(A)) // A&(A^B) -> A & ~B
1180 return BinaryOperator::CreateAnd(A, Builder->CreateNot(B));
1181 }
1182 }
1183
1184 // (A&((~A)|B)) -> A&B
1185 if (match(Op0, m_Or(m_Not(m_Specific(Op1)), m_Value(A))) ||
1186 match(Op0, m_Or(m_Value(A), m_Not(m_Specific(Op1)))))
1187 return BinaryOperator::CreateAnd(A, Op1);
1188 if (match(Op1, m_Or(m_Not(m_Specific(Op0)), m_Value(A))) ||
1189 match(Op1, m_Or(m_Value(A), m_Not(m_Specific(Op0)))))
1190 return BinaryOperator::CreateAnd(A, Op0);
1191 }
1192
1193 if (ICmpInst *RHS = dyn_cast<ICmpInst>(Op1))
1194 if (ICmpInst *LHS = dyn_cast<ICmpInst>(Op0))
1195 if (Value *Res = FoldAndOfICmps(LHS, RHS))
1196 return ReplaceInstUsesWith(I, Res);
1197
1198 // If and'ing two fcmp, try combine them into one.
1199 if (FCmpInst *LHS = dyn_cast<FCmpInst>(I.getOperand(0)))
1200 if (FCmpInst *RHS = dyn_cast<FCmpInst>(I.getOperand(1)))
1201 if (Value *Res = FoldAndOfFCmps(LHS, RHS))
1202 return ReplaceInstUsesWith(I, Res);
1203
1204
1205 // fold (and (cast A), (cast B)) -> (cast (and A, B))
1206 if (CastInst *Op0C = dyn_cast<CastInst>(Op0))
1207 if (CastInst *Op1C = dyn_cast<CastInst>(Op1)) {
1208 Type *SrcTy = Op0C->getOperand(0)->getType();
1209 if (Op0C->getOpcode() == Op1C->getOpcode() && // same cast kind ?
1210 SrcTy == Op1C->getOperand(0)->getType() &&
1211 SrcTy->isIntOrIntVectorTy()) {
1212 Value *Op0COp = Op0C->getOperand(0), *Op1COp = Op1C->getOperand(0);
1213
1214 // Only do this if the casts both really cause code to be generated.
1215 if (ShouldOptimizeCast(Op0C->getOpcode(), Op0COp, I.getType()) &&
1216 ShouldOptimizeCast(Op1C->getOpcode(), Op1COp, I.getType())) {
1217 Value *NewOp = Builder->CreateAnd(Op0COp, Op1COp, I.getName());
1218 return CastInst::Create(Op0C->getOpcode(), NewOp, I.getType());
1219 }
1220
1221 // If this is and(cast(icmp), cast(icmp)), try to fold this even if the
1222 // cast is otherwise not optimizable. This happens for vector sexts.
1223 if (ICmpInst *RHS = dyn_cast<ICmpInst>(Op1COp))
1224 if (ICmpInst *LHS = dyn_cast<ICmpInst>(Op0COp))
1225 if (Value *Res = FoldAndOfICmps(LHS, RHS))
1226 return CastInst::Create(Op0C->getOpcode(), Res, I.getType());
1227
1228 // If this is and(cast(fcmp), cast(fcmp)), try to fold this even if the
1229 // cast is otherwise not optimizable. This happens for vector sexts.
1230 if (FCmpInst *RHS = dyn_cast<FCmpInst>(Op1COp))
1231 if (FCmpInst *LHS = dyn_cast<FCmpInst>(Op0COp))
1232 if (Value *Res = FoldAndOfFCmps(LHS, RHS))
1233 return CastInst::Create(Op0C->getOpcode(), Res, I.getType());
1234 }
1235 }
1236
1237 // (X >> Z) & (Y >> Z) -> (X&Y) >> Z for all shifts.
1238 if (BinaryOperator *SI1 = dyn_cast<BinaryOperator>(Op1)) {
1239 if (BinaryOperator *SI0 = dyn_cast<BinaryOperator>(Op0))
1240 if (SI0->isShift() && SI0->getOpcode() == SI1->getOpcode() &&
1241 SI0->getOperand(1) == SI1->getOperand(1) &&
1242 (SI0->hasOneUse() || SI1->hasOneUse())) {
1243 Value *NewOp =
1244 Builder->CreateAnd(SI0->getOperand(0), SI1->getOperand(0),
1245 SI0->getName());
1246 return BinaryOperator::Create(SI1->getOpcode(), NewOp,
1247 SI1->getOperand(1));
1248 }
1249 }
1250
1251 return Changed ? &I : 0;
1252 }
1253
1254 /// CollectBSwapParts - Analyze the specified subexpression and see if it is
1255 /// capable of providing pieces of a bswap. The subexpression provides pieces
1256 /// of a bswap if it is proven that each of the non-zero bytes in the output of
1257 /// the expression came from the corresponding "byte swapped" byte in some other
1258 /// value. For example, if the current subexpression is "(shl i32 %X, 24)" then
1259 /// we know that the expression deposits the low byte of %X into the high byte
1260 /// of the bswap result and that all other bytes are zero. If this expression
1261 /// is accepted, the high byte of ByteValues is set to X to indicate a correct
1262 /// match.
1263 ///
1264 /// This function returns true if the match was unsuccessful and false otherwise.
1265 /// On entry to the function the "OverallLeftShift" is a signed integer value
1266 /// indicating the number of bytes that the subexpression is later shifted. For
1267 /// example, if the expression is later right shifted by 16 bits, the
1268 /// OverallLeftShift value would be -2 on entry. This is used to specify which
1269 /// byte of ByteValues is actually being set.
1270 ///
1271 /// Similarly, ByteMask is a bitmask where a bit is clear if its corresponding
1272 /// byte is masked to zero by a user. For example, in (X & 255), X will be
1273 /// processed with a bytemask of 1. Because bytemask is 32-bits, this limits
1274 /// this function to working on up to 32-byte (256 bit) values. ByteMask is
1275 /// always in the local (OverallLeftShift) coordinate space.
1276 ///
1277 static bool CollectBSwapParts(Value *V, int OverallLeftShift, uint32_t ByteMask,
1278 SmallVector<Value*, 8> &ByteValues) {
1279 if (Instruction *I = dyn_cast<Instruction>(V)) {
1280 // If this is an or instruction, it may be an inner node of the bswap.
1281 if (I->getOpcode() == Instruction::Or) {
1282 return CollectBSwapParts(I->getOperand(0), OverallLeftShift, ByteMask,
1283 ByteValues) ||
1284 CollectBSwapParts(I->getOperand(1), OverallLeftShift, ByteMask,
1285 ByteValues);
1286 }
1287
1288 // If this is a logical shift by a constant multiple of 8, recurse with
1289 // OverallLeftShift and ByteMask adjusted.
1290 if (I->isLogicalShift() && isa<ConstantInt>(I->getOperand(1))) {
1291 unsigned ShAmt =
1292 cast<ConstantInt>(I->getOperand(1))->getLimitedValue(~0U);
1293 // Ensure the shift amount is a whole number of bytes and within range.
1294 if ((ShAmt & 7) || (ShAmt > 8*ByteValues.size()))
1295 return true;
1296
1297 unsigned ByteShift = ShAmt >> 3;
1298 if (I->getOpcode() == Instruction::Shl) {
1299 // X << 2 -> collect(X, +2)
1300 OverallLeftShift += ByteShift;
1301 ByteMask >>= ByteShift;
1302 } else {
1303 // X >>u 2 -> collect(X, -2)
1304 OverallLeftShift -= ByteShift;
1305 ByteMask <<= ByteShift;
1306 ByteMask &= (~0U >> (32-ByteValues.size()));
1307 }
1308
1309 if (OverallLeftShift >= (int)ByteValues.size()) return true;
1310 if (OverallLeftShift <= -(int)ByteValues.size()) return true;
1311
1312 return CollectBSwapParts(I->getOperand(0), OverallLeftShift, ByteMask,
1313 ByteValues);
1314 }
1315
1316 // If this is a logical 'and' with a mask that clears bytes, clear the
1317 // corresponding bytes in ByteMask.
1318 if (I->getOpcode() == Instruction::And &&
1319 isa<ConstantInt>(I->getOperand(1))) {
1320 // Scan every byte of the and mask, seeing if the byte is either 0 or 255.
1321 unsigned NumBytes = ByteValues.size();
1322 APInt Byte(I->getType()->getPrimitiveSizeInBits(), 255);
1323 const APInt &AndMask = cast<ConstantInt>(I->getOperand(1))->getValue();
1324
1325 for (unsigned i = 0; i != NumBytes; ++i, Byte <<= 8) {
1326 // If this byte is masked out by a later operation, we don't care what
1327 // the and mask is.
1328 if ((ByteMask & (1 << i)) == 0)
1329 continue;
1330
1331 // If the AndMask is all zeros for this byte, clear the bit.
1332 APInt MaskB = AndMask & Byte;
1333 if (MaskB == 0) {
1334 ByteMask &= ~(1U << i);
1335 continue;
1336 }
1337
1338 // If the AndMask is not all ones for this byte, it's not a bytezap.
1339 if (MaskB != Byte)
1340 return true;
1341
1342 // Otherwise, this byte is kept.
1343 }
1344
1345 return CollectBSwapParts(I->getOperand(0), OverallLeftShift, ByteMask,
1346 ByteValues);
1347 }
1348 }
1349
1350 // Okay, we got to something that isn't a shift, 'or' or 'and'. This must be
1351 // the input value to the bswap. Some observations: 1) if more than one byte
1352 // is demanded from this input, then it could not be successfully assembled
1353 // into a byteswap. At least one of the two bytes would not be aligned with
1354 // its ultimate destination.
1355 if (!isPowerOf2_32(ByteMask)) return true;
1356 unsigned InputByteNo = CountTrailingZeros_32(ByteMask);
1357
1358 // 2) The input and ultimate destinations must line up: if byte 3 of an i32
1359 // is demanded, it needs to go into byte 0 of the result. This means that the
1360 // byte needs to be shifted until it lands in the right byte bucket. The
1361 // shift amount depends on the position: if the byte is coming from the high
1362 // part of the value (e.g. byte 3) then it must be shifted right. If from the
1363 // low part, it must be shifted left.
1364 unsigned DestByteNo = InputByteNo + OverallLeftShift;
1365 if (ByteValues.size()-1-DestByteNo != InputByteNo)
1366 return true;
1367
1368 // If the destination byte value is already defined, the values are or'd
1369 // together, which isn't a bswap (unless it's an or of the same bits).
1370 if (ByteValues[DestByteNo] && ByteValues[DestByteNo] != V)
1371 return true;
1372 ByteValues[DestByteNo] = V;
1373 return false;
1374 }
1375
1376 /// MatchBSwap - Given an OR instruction, check to see if this is a bswap idiom.
1377 /// If so, insert the new bswap intrinsic and return it.
1378 Instruction *InstCombiner::MatchBSwap(BinaryOperator &I) {
1379 IntegerType *ITy = dyn_cast<IntegerType>(I.getType());
1380 if (!ITy || ITy->getBitWidth() % 16 ||
1381 // ByteMask only allows up to 32-byte values.
1382 ITy->getBitWidth() > 32*8)
1383 return 0; // Can only bswap pairs of bytes. Can't do vectors.
1384
1385 /// ByteValues - For each byte of the result, we keep track of which value
1386 /// defines each byte.
1387 SmallVector<Value*, 8> ByteValues;
1388 ByteValues.resize(ITy->getBitWidth()/8);
1389
1390 // Try to find all the pieces corresponding to the bswap.
1391 uint32_t ByteMask = ~0U >> (32-ByteValues.size());
1392 if (CollectBSwapParts(&I, 0, ByteMask, ByteValues))
1393 return 0;
1394
1395 // Check to see if all of the bytes come from the same value.
1396 Value *V = ByteValues[0];
1397 if (V == 0) return 0; // Didn't find a byte? Must be zero.
1398
1399 // Check to make sure that all of the bytes come from the same value.
1400 for (unsigned i = 1, e = ByteValues.size(); i != e; ++i)
1401 if (ByteValues[i] != V)
1402 return 0;
1403 Module *M = I.getParent()->getParent()->getParent();
1404 Function *F = Intrinsic::getDeclaration(M, Intrinsic::bswap, ITy);
1405 return CallInst::Create(F, V);
1406 }
1407
1408 /// MatchSelectFromAndOr - We have an expression of the form (A&C)|(B&D). If
1409 /// A is (cond?-1:0) and either B or D is ~(cond?-1:0) or (cond?0:-1), then we
1410 /// can simplify this expression to "cond ? C : B" or "cond ? C : D".
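/// For example, with A = sext(cond) and D = xor(A, -1), the 'or' evaluates to
/// A&C == C when cond is true and to B&D == B when cond is false, which is
/// exactly "select cond, C, B".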
1411 static Instruction *MatchSelectFromAndOr(Value *A, Value *B,
1412 Value *C, Value *D) {
1413 // If A is not a select of -1/0, this cannot match.
1414 Value *Cond = 0;
1415 if (!match(A, m_SExt(m_Value(Cond))) ||
1416 !Cond->getType()->isIntegerTy(1))
1417 return 0;
1418
1419 // ((cond?-1:0)&C) | (B&(cond?0:-1)) -> cond ? C : B.
1420 if (match(D, m_Not(m_SExt(m_Specific(Cond)))))
1421 return SelectInst::Create(Cond, C, B);
1422 if (match(D, m_SExt(m_Not(m_Specific(Cond)))))
1423 return SelectInst::Create(Cond, C, B);
1424
1425 // ((cond?-1:0)&C) | ((cond?0:-1)&D) -> cond ? C : D.
1426 if (match(B, m_Not(m_SExt(m_Specific(Cond)))))
1427 return SelectInst::Create(Cond, C, D);
1428 if (match(B, m_SExt(m_Not(m_Specific(Cond)))))
1429 return SelectInst::Create(Cond, C, D);
1430 return 0;
1431 }
1432
1433 /// FoldOrOfICmps - Fold (icmp)|(icmp) if possible.
1434 Value *InstCombiner::FoldOrOfICmps(ICmpInst *LHS, ICmpInst *RHS) {
1435 ICmpInst::Predicate LHSCC = LHS->getPredicate(), RHSCC = RHS->getPredicate();
1436
1437 // (icmp1 A, B) | (icmp2 A, B) --> (icmp3 A, B)
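 // For example, (icmp slt A, B) | (icmp eq A, B) --> icmp sle A, B.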
1438 if (PredicatesFoldable(LHSCC, RHSCC)) {
1439 if (LHS->getOperand(0) == RHS->getOperand(1) &&
1440 LHS->getOperand(1) == RHS->getOperand(0))
1441 LHS->swapOperands();
1442 if (LHS->getOperand(0) == RHS->getOperand(0) &&
1443 LHS->getOperand(1) == RHS->getOperand(1)) {
1444 Value *Op0 = LHS->getOperand(0), *Op1 = LHS->getOperand(1);
1445 unsigned Code = getICmpCode(LHS) | getICmpCode(RHS);
1446 bool isSigned = LHS->isSigned() || RHS->isSigned();
1447 return getNewICmpValue(isSigned, Code, Op0, Op1, Builder);
1448 }
1449 }
1450
1451 // handle (roughly):
1452 // (icmp ne (A & B), C) | (icmp ne (A & D), E)
1453 if (Value *V = foldLogOpOfMaskedICmps(LHS, RHS, ICmpInst::ICMP_NE, Builder))
1454 return V;
1455
1456 // This only handles icmp of constants: (icmp1 A, C1) | (icmp2 B, C2).
1457 Value *Val = LHS->getOperand(0), *Val2 = RHS->getOperand(0);
1458 ConstantInt *LHSCst = dyn_cast<ConstantInt>(LHS->getOperand(1));
1459 ConstantInt *RHSCst = dyn_cast<ConstantInt>(RHS->getOperand(1));
1460 if (LHSCst == 0 || RHSCst == 0) return 0;
1461
1462 if (LHSCst == RHSCst && LHSCC == RHSCC) {
1463 // (icmp ne A, 0) | (icmp ne B, 0) --> (icmp ne (A|B), 0)
1464 if (LHSCC == ICmpInst::ICMP_NE && LHSCst->isZero()) {
1465 Value *NewOr = Builder->CreateOr(Val, Val2);
1466 return Builder->CreateICmp(LHSCC, NewOr, LHSCst);
1467 }
1468 }
1469
1470 // (icmp ult (X + CA), C1) | (icmp eq X, C2) -> (icmp ule (X + CA), C1)
1471 // iff C2 + CA == C1.
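 // For example, (icmp ult (X + 1), 14) | (icmp eq X, 13) --> icmp ule (X + 1), 14.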
1472 if (LHSCC == ICmpInst::ICMP_ULT && RHSCC == ICmpInst::ICMP_EQ) {
1473 ConstantInt *AddCst;
1474 if (match(Val, m_Add(m_Specific(Val2), m_ConstantInt(AddCst))))
1475 if (RHSCst->getValue() + AddCst->getValue() == LHSCst->getValue())
1476 return Builder->CreateICmpULE(Val, LHSCst);
1477 }
1478
1479 // From here on, we only handle:
1480 // (icmp1 A, C1) | (icmp2 A, C2) --> something simpler.
1481 if (Val != Val2) return 0;
1482
1483 // ICMP_[US][GL]E X, CST is folded to ICMP_[US][GL]T elsewhere.
1484 if (LHSCC == ICmpInst::ICMP_UGE || LHSCC == ICmpInst::ICMP_ULE ||
1485 RHSCC == ICmpInst::ICMP_UGE || RHSCC == ICmpInst::ICMP_ULE ||
1486 LHSCC == ICmpInst::ICMP_SGE || LHSCC == ICmpInst::ICMP_SLE ||
1487 RHSCC == ICmpInst::ICMP_SGE || RHSCC == ICmpInst::ICMP_SLE)
1488 return 0;
1489
1490 // We can't fold (ugt x, C) | (sgt x, C2).
1491 if (!PredicatesFoldable(LHSCC, RHSCC))
1492 return 0;
1493
1494 // Ensure that the larger constant is on the RHS.
1495 bool ShouldSwap;
1496 if (CmpInst::isSigned(LHSCC) ||
1497 (ICmpInst::isEquality(LHSCC) &&
1498 CmpInst::isSigned(RHSCC)))
1499 ShouldSwap = LHSCst->getValue().sgt(RHSCst->getValue());
1500 else
1501 ShouldSwap = LHSCst->getValue().ugt(RHSCst->getValue());
1502
1503 if (ShouldSwap) {
1504 std::swap(LHS, RHS);
1505 std::swap(LHSCst, RHSCst);
1506 std::swap(LHSCC, RHSCC);
1507 }
1508
1509 // At this point, we know we have two icmp instructions
1510 // comparing a value against two constants and or'ing the result
1511 // together. Because of the above check, we know that we only have
1512 // ICMP_EQ, ICMP_NE, ICMP_LT, and ICMP_GT here. We also know (from the
1513 // icmp folding check above), that the two constants are not
1514 // equal.
1515 assert(LHSCst != RHSCst && "Compares not folded above?");
1516
1517 switch (LHSCC) {
1518 default: llvm_unreachable("Unknown integer condition code!");
1519 case ICmpInst::ICMP_EQ:
1520 switch (RHSCC) {
1521 default: llvm_unreachable("Unknown integer condition code!");
1522 case ICmpInst::ICMP_EQ:
1523 if (LHSCst == SubOne(RHSCst)) {
1524 // (X == 13 | X == 14) -> X-13 <u 2
1525 Constant *AddCST = ConstantExpr::getNeg(LHSCst);
1526 Value *Add = Builder->CreateAdd(Val, AddCST, Val->getName()+".off");
1527 AddCST = ConstantExpr::getSub(AddOne(RHSCst), LHSCst);
1528 return Builder->CreateICmpULT(Add, AddCST);
1529 }
1530 break; // (X == 13 | X == 15) -> no change
1531 case ICmpInst::ICMP_UGT: // (X == 13 | X u> 14) -> no change
1532 case ICmpInst::ICMP_SGT: // (X == 13 | X s> 14) -> no change
1533 break;
1534 case ICmpInst::ICMP_NE: // (X == 13 | X != 15) -> X != 15
1535 case ICmpInst::ICMP_ULT: // (X == 13 | X u< 15) -> X u< 15
1536 case ICmpInst::ICMP_SLT: // (X == 13 | X s< 15) -> X s< 15
1537 return RHS;
1538 }
1539 break;
1540 case ICmpInst::ICMP_NE:
1541 switch (RHSCC) {
1542 default: llvm_unreachable("Unknown integer condition code!");
1543 case ICmpInst::ICMP_EQ: // (X != 13 | X == 15) -> X != 13
1544 case ICmpInst::ICMP_UGT: // (X != 13 | X u> 15) -> X != 13
1545 case ICmpInst::ICMP_SGT: // (X != 13 | X s> 15) -> X != 13
1546 return LHS;
1547 case ICmpInst::ICMP_NE: // (X != 13 | X != 15) -> true
1548 case ICmpInst::ICMP_ULT: // (X != 13 | X u< 15) -> true
1549 case ICmpInst::ICMP_SLT: // (X != 13 | X s< 15) -> true
1550 return ConstantInt::getTrue(LHS->getContext());
1551 }
1552 case ICmpInst::ICMP_ULT:
1553 switch (RHSCC) {
1554 default: llvm_unreachable("Unknown integer condition code!");
1555 case ICmpInst::ICMP_EQ: // (X u< 13 | X == 14) -> no change
1556 break;
1557 case ICmpInst::ICMP_UGT: // (X u< 13 | X u> 15) -> (X-13) u> 2
1558 // If RHSCst is [us]MAXINT, it is always false. Not handling
1559 // this can cause overflow.
1560 if (RHSCst->isMaxValue(false))
1561 return LHS;
1562 return InsertRangeTest(Val, LHSCst, AddOne(RHSCst), false, false);
1563 case ICmpInst::ICMP_SGT: // (X u< 13 | X s> 15) -> no change
1564 break;
1565 case ICmpInst::ICMP_NE: // (X u< 13 | X != 15) -> X != 15
1566 case ICmpInst::ICMP_ULT: // (X u< 13 | X u< 15) -> X u< 15
1567 return RHS;
1568 case ICmpInst::ICMP_SLT: // (X u< 13 | X s< 15) -> no change
1569 break;
1570 }
1571 break;
1572 case ICmpInst::ICMP_SLT:
1573 switch (RHSCC) {
1574 default: llvm_unreachable("Unknown integer condition code!");
1575 case ICmpInst::ICMP_EQ: // (X s< 13 | X == 14) -> no change
1576 break;
1577 case ICmpInst::ICMP_SGT: // (X s< 13 | X s> 15) -> (X-13) s> 2
1578 // If RHSCst is [us]MAXINT, it is always false. Not handling
1579 // this can cause overflow.
1580 if (RHSCst->isMaxValue(true))
1581 return LHS;
1582 return InsertRangeTest(Val, LHSCst, AddOne(RHSCst), true, false);
1583 case ICmpInst::ICMP_UGT: // (X s< 13 | X u> 15) -> no change
1584 break;
1585 case ICmpInst::ICMP_NE: // (X s< 13 | X != 15) -> X != 15
1586 case ICmpInst::ICMP_SLT: // (X s< 13 | X s< 15) -> X s< 15
1587 return RHS;
1588 case ICmpInst::ICMP_ULT: // (X s< 13 | X u< 15) -> no change
1589 break;
1590 }
1591 break;
1592 case ICmpInst::ICMP_UGT:
1593 switch (RHSCC) {
1594 default: llvm_unreachable("Unknown integer condition code!");
1595 case ICmpInst::ICMP_EQ: // (X u> 13 | X == 15) -> X u> 13
1596 case ICmpInst::ICMP_UGT: // (X u> 13 | X u> 15) -> X u> 13
1597 return LHS;
1598 case ICmpInst::ICMP_SGT: // (X u> 13 | X s> 15) -> no change
1599 break;
1600 case ICmpInst::ICMP_NE: // (X u> 13 | X != 15) -> true
1601 case ICmpInst::ICMP_ULT: // (X u> 13 | X u< 15) -> true
1602 return ConstantInt::getTrue(LHS->getContext());
1603 case ICmpInst::ICMP_SLT: // (X u> 13 | X s< 15) -> no change
1604 break;
1605 }
1606 break;
1607 case ICmpInst::ICMP_SGT:
1608 switch (RHSCC) {
1609 default: llvm_unreachable("Unknown integer condition code!");
1610 case ICmpInst::ICMP_EQ: // (X s> 13 | X == 15) -> X > 13
1611 case ICmpInst::ICMP_SGT: // (X s> 13 | X s> 15) -> X > 13
1612 return LHS;
1613 case ICmpInst::ICMP_UGT: // (X s> 13 | X u> 15) -> no change
1614 break;
1615 case ICmpInst::ICMP_NE: // (X s> 13 | X != 15) -> true
1616 case ICmpInst::ICMP_SLT: // (X s> 13 | X s< 15) -> true
1617 return ConstantInt::getTrue(LHS->getContext());
1618 case ICmpInst::ICMP_ULT: // (X s> 13 | X u< 15) -> no change
1619 break;
1620 }
1621 break;
1622 }
1623 return 0;
1624 }
1625
1626 /// FoldOrOfFCmps - Optimize (fcmp)|(fcmp). NOTE: Unlike the rest of
1627 /// instcombine, this returns a Value which should already be inserted into the
1628 /// function.
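/// For example, (fcmp olt X, Y) | (fcmp ogt X, Y) becomes fcmp one X, Y, and
/// (fcmp uno X, C1) | (fcmp uno Y, C2) becomes fcmp uno X, Y when neither
/// constant is a NaN.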
1629 Value *InstCombiner::FoldOrOfFCmps(FCmpInst *LHS, FCmpInst *RHS) {
1630 if (LHS->getPredicate() == FCmpInst::FCMP_UNO &&
1631 RHS->getPredicate() == FCmpInst::FCMP_UNO &&
1632 LHS->getOperand(0)->getType() == RHS->getOperand(0)->getType()) {
1633 if (ConstantFP *LHSC = dyn_cast<ConstantFP>(LHS->getOperand(1)))
1634 if (ConstantFP *RHSC = dyn_cast<ConstantFP>(RHS->getOperand(1))) {
1635 // If either of the constants are nans, then the whole thing returns
1636 // true.
1637 if (LHSC->getValueAPF().isNaN() || RHSC->getValueAPF().isNaN())
1638 return ConstantInt::getTrue(LHS->getContext());
1639
1640 // Otherwise, no need to compare the two constants, compare the
1641 // rest.
1642 return Builder->CreateFCmpUNO(LHS->getOperand(0), RHS->getOperand(0));
1643 }
1644
1645 // Handle vector zeros. This occurs because the canonical form of
1646 // "fcmp uno x,x" is "fcmp uno x, 0".
1647 if (isa<ConstantAggregateZero>(LHS->getOperand(1)) &&
1648 isa<ConstantAggregateZero>(RHS->getOperand(1)))
1649 return Builder->CreateFCmpUNO(LHS->getOperand(0), RHS->getOperand(0));
1650
1651 return 0;
1652 }
1653
1654 Value *Op0LHS = LHS->getOperand(0), *Op0RHS = LHS->getOperand(1);
1655 Value *Op1LHS = RHS->getOperand(0), *Op1RHS = RHS->getOperand(1);
1656 FCmpInst::Predicate Op0CC = LHS->getPredicate(), Op1CC = RHS->getPredicate();
1657
1658 if (Op0LHS == Op1RHS && Op0RHS == Op1LHS) {
1659 // Swap RHS operands to match LHS.
1660 Op1CC = FCmpInst::getSwappedPredicate(Op1CC);
1661 std::swap(Op1LHS, Op1RHS);
1662 }
1663 if (Op0LHS == Op1LHS && Op0RHS == Op1RHS) {
1664 // Simplify (fcmp cc0 x, y) | (fcmp cc1 x, y).
1665 if (Op0CC == Op1CC)
1666 return Builder->CreateFCmp((FCmpInst::Predicate)Op0CC, Op0LHS, Op0RHS);
1667 if (Op0CC == FCmpInst::FCMP_TRUE || Op1CC == FCmpInst::FCMP_TRUE)
1668 return ConstantInt::get(CmpInst::makeCmpResultType(LHS->getType()), 1);
1669 if (Op0CC == FCmpInst::FCMP_FALSE)
1670 return RHS;
1671 if (Op1CC == FCmpInst::FCMP_FALSE)
1672 return LHS;
1673 bool Op0Ordered;
1674 bool Op1Ordered;
1675 unsigned Op0Pred = getFCmpCode(Op0CC, Op0Ordered);
1676 unsigned Op1Pred = getFCmpCode(Op1CC, Op1Ordered);
1677 if (Op0Ordered == Op1Ordered) {
1678 // If both are ordered or unordered, return a new fcmp with
1679 // or'ed predicates.
1680 return getFCmpValue(Op0Ordered, Op0Pred|Op1Pred, Op0LHS, Op0RHS, Builder);
1681 }
1682 }
1683 return 0;
1684 }
1685
1686 /// FoldOrWithConstants - This helper function folds:
1687 ///
1688 /// ((A | B) & C1) | (B & C2)
1689 ///
1690 /// into:
1691 ///
1692 /// (A & C1) | B
1693 ///
1694 /// when the XOR of the two constants is "all ones" (-1).
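/// For example, with C1 = 1 and C2 = -2:
///   ((A | B) & 1) | (B & -2) --> (A & 1) | B
/// because the two masks select complementary bit ranges.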
1695 Instruction *InstCombiner::FoldOrWithConstants(BinaryOperator &I, Value *Op,
1696 Value *A, Value *B, Value *C) {
1697 ConstantInt *CI1 = dyn_cast<ConstantInt>(C);
1698 if (!CI1) return 0;
1699
1700 Value *V1 = 0;
1701 ConstantInt *CI2 = 0;
1702 if (!match(Op, m_And(m_Value(V1), m_ConstantInt(CI2)))) return 0;
1703
1704 APInt Xor = CI1->getValue() ^ CI2->getValue();
1705 if (!Xor.isAllOnesValue()) return 0;
1706
1707 if (V1 == A || V1 == B) {
1708 Value *NewOp = Builder->CreateAnd((V1 == A) ? B : A, CI1);
1709 return BinaryOperator::CreateOr(NewOp, V1);
1710 }
1711
1712 return 0;
1713 }
1714
1715 Instruction *InstCombiner::visitOr(BinaryOperator &I) {
1716 bool Changed = SimplifyAssociativeOrCommutative(I);
1717 Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
1718
1719 if (Value *V = SimplifyOrInst(Op0, Op1, TD))
1720 return ReplaceInstUsesWith(I, V);
1721
1722 // (A&B)|(A&C) -> A&(B|C) etc
1723 if (Value *V = SimplifyUsingDistributiveLaws(I))
1724 return ReplaceInstUsesWith(I, V);
1725
1726 // See if we can simplify any instructions used by the instruction whose sole
1727 // purpose is to compute bits we don't care about.
1728 if (SimplifyDemandedInstructionBits(I))
1729 return &I;
1730
1731 if (ConstantInt *RHS = dyn_cast<ConstantInt>(Op1)) {
1732 ConstantInt *C1 = 0; Value *X = 0;
1733 // (X & C1) | C2 --> (X | C2) & (C1|C2)
1734 // iff (C1 & C2) == 0.
1735 if (match(Op0, m_And(m_Value(X), m_ConstantInt(C1))) &&
1736 (RHS->getValue() & C1->getValue()) != 0 &&
1737 Op0->hasOneUse()) {
1738 Value *Or = Builder->CreateOr(X, RHS);
1739 Or->takeName(Op0);
1740 return BinaryOperator::CreateAnd(Or,
1741 ConstantInt::get(I.getContext(),
1742 RHS->getValue() | C1->getValue()));
1743 }
1744
1745 // (X ^ C1) | C2 --> (X | C2) ^ (C1&~C2)
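 // For example, (X ^ 12) | 10 --> (X | 10) ^ 4.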
1746 if (match(Op0, m_Xor(m_Value(X), m_ConstantInt(C1))) &&
1747 Op0->hasOneUse()) {
1748 Value *Or = Builder->CreateOr(X, RHS);
1749 Or->takeName(Op0);
1750 return BinaryOperator::CreateXor(Or,
1751 ConstantInt::get(I.getContext(),
1752 C1->getValue() & ~RHS->getValue()));
1753 }
1754
1755 // Try to fold constant and into select arguments.
1756 if (SelectInst *SI = dyn_cast<SelectInst>(Op0))
1757 if (Instruction *R = FoldOpIntoSelect(I, SI))
1758 return R;
1759
1760 if (isa<PHINode>(Op0))
1761 if (Instruction *NV = FoldOpIntoPhi(I))
1762 return NV;
1763 }
1764
1765 Value *A = 0, *B = 0;
1766 ConstantInt *C1 = 0, *C2 = 0;
1767
1768 // (A | B) | C and A | (B | C) -> bswap if possible.
1769 // (A >> B) | (C << D) and (A << B) | (C >> D) -> bswap if possible.
1770 if (match(Op0, m_Or(m_Value(), m_Value())) ||
1771 match(Op1, m_Or(m_Value(), m_Value())) ||
1772 (match(Op0, m_LogicalShift(m_Value(), m_Value())) &&
1773 match(Op1, m_LogicalShift(m_Value(), m_Value())))) {
1774 if (Instruction *BSwap = MatchBSwap(I))
1775 return BSwap;
1776 }
1777
1778 // (X^C)|Y -> (X|Y)^C iff Y&C == 0
1779 if (Op0->hasOneUse() &&
1780 match(Op0, m_Xor(m_Value(A), m_ConstantInt(C1))) &&
1781 MaskedValueIsZero(Op1, C1->getValue())) {
1782 Value *NOr = Builder->CreateOr(A, Op1);
1783 NOr->takeName(Op0);
1784 return BinaryOperator::CreateXor(NOr, C1);
1785 }
1786
1787 // Y|(X^C) -> (X|Y)^C iff Y&C == 0
1788 if (Op1->hasOneUse() &&
1789 match(Op1, m_Xor(m_Value(A), m_ConstantInt(C1))) &&
1790 MaskedValueIsZero(Op0, C1->getValue())) {
1791 Value *NOr = Builder->CreateOr(A, Op0);
1792 NOr->takeName(Op0);
1793 return BinaryOperator::CreateXor(NOr, C1);
1794 }
1795
1796 // (A & C)|(B & D)
1797 Value *C = 0, *D = 0;
1798 if (match(Op0, m_And(m_Value(A), m_Value(C))) &&
1799 match(Op1, m_And(m_Value(B), m_Value(D)))) {
1800 Value *V1 = 0, *V2 = 0;
1801 C1 = dyn_cast<ConstantInt>(C);
1802 C2 = dyn_cast<ConstantInt>(D);
1803 if (C1 && C2) { // (A & C1)|(B & C2)
1804 // If we have: ((V + N) & C1) | (V & C2)
1805 // .. and C2 = ~C1 and C2 is 0+1+ and (N & C2) == 0
1806 // replace with V+N.
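 // For example, with C1 = 0xFFFF0000, C2 = 0x0000FFFF and N a multiple of
 // 0x10000, adding N cannot disturb the low 16 bits of V, so
 // ((V + N) & C1) | (V & C2) is simply V + N.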
1807 if (C1->getValue() == ~C2->getValue()) {
1808 if ((C2->getValue() & (C2->getValue()+1)) == 0 && // C2 == 0+1+
1809 match(A, m_Add(m_Value(V1), m_Value(V2)))) {
1810 // Add commutes, try both ways.
1811 if (V1 == B && MaskedValueIsZero(V2, C2->getValue()))
1812 return ReplaceInstUsesWith(I, A);
1813 if (V2 == B && MaskedValueIsZero(V1, C2->getValue()))
1814 return ReplaceInstUsesWith(I, A);
1815 }
1816 // Or commutes, try both ways.
1817 if ((C1->getValue() & (C1->getValue()+1)) == 0 &&
1818 match(B, m_Add(m_Value(V1), m_Value(V2)))) {
1819 // Add commutes, try both ways.
1820 if (V1 == A && MaskedValueIsZero(V2, C1->getValue()))
1821 return ReplaceInstUsesWith(I, B);
1822 if (V2 == A && MaskedValueIsZero(V1, C1->getValue()))
1823 return ReplaceInstUsesWith(I, B);
1824 }
1825 }
1826
1827 if ((C1->getValue() & C2->getValue()) == 0) {
1828 // ((V | N) & C1) | (V & C2) --> (V|N) & (C1|C2)
1829 // iff (C1&C2) == 0 and (N&~C1) == 0
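 // For example, with C1 = 0xFF00, C2 = 0x00FF and N confined to C1's bits,
 // both sides reduce to (V | N) & 0xFFFF.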
1830 if (match(A, m_Or(m_Value(V1), m_Value(V2))) &&
1831 ((V1 == B && MaskedValueIsZero(V2, ~C1->getValue())) || // (V|N)
1832 (V2 == B && MaskedValueIsZero(V1, ~C1->getValue())))) // (N|V)
1833 return BinaryOperator::CreateAnd(A,
1834 ConstantInt::get(A->getContext(),
1835 C1->getValue()|C2->getValue()));
1836 // Or commutes, try both ways.
1837 if (match(B, m_Or(m_Value(V1), m_Value(V2))) &&
1838 ((V1 == A && MaskedValueIsZero(V2, ~C2->getValue())) || // (V|N)
1839 (V2 == A && MaskedValueIsZero(V1, ~C2->getValue())))) // (N|V)
1840 return BinaryOperator::CreateAnd(B,
1841 ConstantInt::get(B->getContext(),
1842 C1->getValue()|C2->getValue()));
1843
1844 // ((V|C3)&C1) | ((V|C4)&C2) --> (V|C3|C4)&(C1|C2)
1845 // iff (C1&C2) == 0 and (C3&~C1) == 0 and (C4&~C2) == 0.
1846 ConstantInt *C3 = 0, *C4 = 0;
1847 if (match(A, m_Or(m_Value(V1), m_ConstantInt(C3))) &&
1848 (C3->getValue() & ~C1->getValue()) == 0 &&
1849 match(B, m_Or(m_Specific(V1), m_ConstantInt(C4))) &&
1850 (C4->getValue() & ~C2->getValue()) == 0) {
1851 V2 = Builder->CreateOr(V1, ConstantExpr::getOr(C3, C4), "bitfield");
1852 return BinaryOperator::CreateAnd(V2,
1853 ConstantInt::get(B->getContext(),
1854 C1->getValue()|C2->getValue()));
1855 }
1856 }
1857 }
1858
1859 // (A & (C0?-1:0)) | (B & ~(C0?-1:0)) -> C0 ? A : B, and commuted variants.
1860 // Don't do this for vector select idioms, the code generator doesn't handle
1861 // them well yet.
1862 if (!I.getType()->isVectorTy()) {
1863 if (Instruction *Match = MatchSelectFromAndOr(A, B, C, D))
1864 return Match;
1865 if (Instruction *Match = MatchSelectFromAndOr(B, A, D, C))
1866 return Match;
1867 if (Instruction *Match = MatchSelectFromAndOr(C, B, A, D))
1868 return Match;
1869 if (Instruction *Match = MatchSelectFromAndOr(D, A, B, C))
1870 return Match;
1871 }
1872
1873 // ((A&~B)|(~A&B)) -> A^B
1874 if ((match(C, m_Not(m_Specific(D))) &&
1875 match(B, m_Not(m_Specific(A)))))
1876 return BinaryOperator::CreateXor(A, D);
1877 // ((~B&A)|(~A&B)) -> A^B
1878 if ((match(A, m_Not(m_Specific(D))) &&
1879 match(B, m_Not(m_Specific(C)))))
1880 return BinaryOperator::CreateXor(C, D);
1881 // ((A&~B)|(B&~A)) -> A^B
1882 if ((match(C, m_Not(m_Specific(B))) &&
1883 match(D, m_Not(m_Specific(A)))))
1884 return BinaryOperator::CreateXor(A, B);
1885 // ((~B&A)|(B&~A)) -> A^B
1886 if ((match(A, m_Not(m_Specific(B))) &&
1887 match(D, m_Not(m_Specific(C)))))
1888 return BinaryOperator::CreateXor(C, B);
1889
1890 // ((A|B)&1)|(B&-2) -> (A&1) | B
1891 if (match(A, m_Or(m_Value(V1), m_Specific(B))) ||
1892 match(A, m_Or(m_Specific(B), m_Value(V1)))) {
1893 Instruction *Ret = FoldOrWithConstants(I, Op1, V1, B, C);
1894 if (Ret) return Ret;
1895 }
1896 // (B&-2)|((A|B)&1) -> (A&1) | B
1897 if (match(B, m_Or(m_Specific(A), m_Value(V1))) ||
1898 match(B, m_Or(m_Value(V1), m_Specific(A)))) {
1899 Instruction *Ret = FoldOrWithConstants(I, Op0, A, V1, D);
1900 if (Ret) return Ret;
1901 }
1902 }
1903
1904 // (X >> Z) | (Y >> Z) -> (X|Y) >> Z for all shifts.
1905 if (BinaryOperator *SI1 = dyn_cast<BinaryOperator>(Op1)) {
1906 if (BinaryOperator *SI0 = dyn_cast<BinaryOperator>(Op0))
1907 if (SI0->isShift() && SI0->getOpcode() == SI1->getOpcode() &&
1908 SI0->getOperand(1) == SI1->getOperand(1) &&
1909 (SI0->hasOneUse() || SI1->hasOneUse())) {
1910 Value *NewOp = Builder->CreateOr(SI0->getOperand(0), SI1->getOperand(0),
1911 SI0->getName());
1912 return BinaryOperator::Create(SI1->getOpcode(), NewOp,
1913 SI1->getOperand(1));
1914 }
1915 }
1916
1917 // (~A | ~B) == (~(A & B)) - De Morgan's Law
1918 if (Value *Op0NotVal = dyn_castNotVal(Op0))
1919 if (Value *Op1NotVal = dyn_castNotVal(Op1))
1920 if (Op0->hasOneUse() && Op1->hasOneUse()) {
1921 Value *And = Builder->CreateAnd(Op0NotVal, Op1NotVal,
1922 I.getName()+".demorgan");
1923 return BinaryOperator::CreateNot(And);
1924 }
1925
1926 // Canonicalize xor to the RHS.
1927 bool SwappedForXor = false;
1928 if (match(Op0, m_Xor(m_Value(), m_Value()))) {
1929 std::swap(Op0, Op1);
1930 SwappedForXor = true;
1931 }
1932
1933 // A | ( A ^ B) -> A | B
1934 // A | (~A ^ B) -> A | ~B
1935 if (match(Op1, m_Xor(m_Value(A), m_Value(B)))) {
1936 if (Op0 == A || Op0 == B)
1937 return BinaryOperator::CreateOr(A, B);
1938
1939 if (Op1->hasOneUse() && match(A, m_Not(m_Specific(Op0)))) {
1940 Value *Not = Builder->CreateNot(B, B->getName()+".not");
1941 return BinaryOperator::CreateOr(Not, Op0);
1942 }
1943 if (Op1->hasOneUse() && match(B, m_Not(m_Specific(Op0)))) {
1944 Value *Not = Builder->CreateNot(A, A->getName()+".not");
1945 return BinaryOperator::CreateOr(Not, Op0);
1946 }
1947 }
1948
1949 // A | ~(A | B) -> A | ~B
1950 // A | ~(A ^ B) -> A | ~B
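 // (Bitwise: wherever A is 1 the result is 1, and wherever A is 0 both A|B
 // and A^B reduce to B, so the inverted operand contributes ~B.)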
1951 if (match(Op1, m_Not(m_Value(A))))
1952 if (BinaryOperator *B = dyn_cast<BinaryOperator>(A))
1953 if ((Op0 == B->getOperand(0) || Op0 == B->getOperand(1)) &&
1954 Op1->hasOneUse() && (B->getOpcode() == Instruction::Or ||
1955 B->getOpcode() == Instruction::Xor)) {
1956 Value *NotOp = Op0 == B->getOperand(0) ? B->getOperand(1) :
1957 B->getOperand(0);
1958 Value *Not = Builder->CreateNot(NotOp, NotOp->getName()+".not");
1959 return BinaryOperator::CreateOr(Not, Op0);
1960 }
1961
1962 if (SwappedForXor)
1963 std::swap(Op0, Op1);
1964
1965 if (ICmpInst *RHS = dyn_cast<ICmpInst>(I.getOperand(1)))
1966 if (ICmpInst *LHS = dyn_cast<ICmpInst>(I.getOperand(0)))
1967 if (Value *Res = FoldOrOfICmps(LHS, RHS))
1968 return ReplaceInstUsesWith(I, Res);
1969
1970 // (fcmp uno x, c) | (fcmp uno y, c) -> (fcmp uno x, y)
1971 if (FCmpInst *LHS = dyn_cast<FCmpInst>(I.getOperand(0)))
1972 if (FCmpInst *RHS = dyn_cast<FCmpInst>(I.getOperand(1)))
1973 if (Value *Res = FoldOrOfFCmps(LHS, RHS))
1974 return ReplaceInstUsesWith(I, Res);
1975
1976 // fold (or (cast A), (cast B)) -> (cast (or A, B))
1977 if (CastInst *Op0C = dyn_cast<CastInst>(Op0)) {
1978 CastInst *Op1C = dyn_cast<CastInst>(Op1);
1979 if (Op1C && Op0C->getOpcode() == Op1C->getOpcode()) {// same cast kind ?
1980 Type *SrcTy = Op0C->getOperand(0)->getType();
1981 if (SrcTy == Op1C->getOperand(0)->getType() &&
1982 SrcTy->isIntOrIntVectorTy()) {
1983 Value *Op0COp = Op0C->getOperand(0), *Op1COp = Op1C->getOperand(0);
1984
1985 if ((!isa<ICmpInst>(Op0COp) || !isa<ICmpInst>(Op1COp)) &&
1986 // Only do this if the casts both really cause code to be
1987 // generated.
1988 ShouldOptimizeCast(Op0C->getOpcode(), Op0COp, I.getType()) &&
1989 ShouldOptimizeCast(Op1C->getOpcode(), Op1COp, I.getType())) {
1990 Value *NewOp = Builder->CreateOr(Op0COp, Op1COp, I.getName());
1991 return CastInst::Create(Op0C->getOpcode(), NewOp, I.getType());
1992 }
1993
1994 // If this is or(cast(icmp), cast(icmp)), try to fold this even if the
1995 // cast is otherwise not optimizable. This happens for vector sexts.
1996 if (ICmpInst *RHS = dyn_cast<ICmpInst>(Op1COp))
1997 if (ICmpInst *LHS = dyn_cast<ICmpInst>(Op0COp))
1998 if (Value *Res = FoldOrOfICmps(LHS, RHS))
1999 return CastInst::Create(Op0C->getOpcode(), Res, I.getType());
2000
2001 // If this is or(cast(fcmp), cast(fcmp)), try to fold this even if the
2002 // cast is otherwise not optimizable. This happens for vector sexts.
2003 if (FCmpInst *RHS = dyn_cast<FCmpInst>(Op1COp))
2004 if (FCmpInst *LHS = dyn_cast<FCmpInst>(Op0COp))
2005 if (Value *Res = FoldOrOfFCmps(LHS, RHS))
2006 return CastInst::Create(Op0C->getOpcode(), Res, I.getType());
2007 }
2008 }
2009 }
2010
2011 // or(sext(A), B) -> A ? -1 : B where A is an i1
2012 // or(A, sext(B)) -> B ? -1 : A where B is an i1
2013 if (match(Op0, m_SExt(m_Value(A))) && A->getType()->isIntegerTy(1))
2014 return SelectInst::Create(A, ConstantInt::getSigned(I.getType(), -1), Op1);
2015 if (match(Op1, m_SExt(m_Value(A))) && A->getType()->isIntegerTy(1))
2016 return SelectInst::Create(A, ConstantInt::getSigned(I.getType(), -1), Op0);
2017
2018 // Note: If we've gotten to the point of visiting the outer OR, then the
2019 // inner one couldn't be simplified. If it was a constant, then it won't
2020 // be simplified by a later pass either, so we try swapping the inner/outer
2021 // ORs in the hopes that we'll be able to simplify it this way.
2022 // (X|C) | V --> (X|V) | C
2023 if (Op0->hasOneUse() && !isa<ConstantInt>(Op1) &&
2024 match(Op0, m_Or(m_Value(A), m_ConstantInt(C1)))) {
2025 Value *Inner = Builder->CreateOr(A, Op1);
2026 Inner->takeName(Op0);
2027 return BinaryOperator::CreateOr(Inner, C1);
2028 }
2029
2030 return Changed ? &I : 0;
2031 }
2032
2033 Instruction *InstCombiner::visitXor(BinaryOperator &I) {
2034 bool Changed = SimplifyAssociativeOrCommutative(I);
2035 Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
2036
2037 if (Value *V = SimplifyXorInst(Op0, Op1, TD))
2038 return ReplaceInstUsesWith(I, V);
2039
2040 // (A&B)^(A&C) -> A&(B^C) etc
2041 if (Value *V = SimplifyUsingDistributiveLaws(I))
2042 return ReplaceInstUsesWith(I, V);
2043
2044 // See if we can simplify any instructions used by the instruction whose sole
2045 // purpose is to compute bits we don't care about.
2046 if (SimplifyDemandedInstructionBits(I))
2047 return &I;
2048
2049 // Is this a ~ operation?
2050 if (Value *NotOp = dyn_castNotVal(&I)) {
2051 if (BinaryOperator *Op0I = dyn_cast<BinaryOperator>(NotOp)) {
2052 if (Op0I->getOpcode() == Instruction::And ||
2053 Op0I->getOpcode() == Instruction::Or) {
2054 // ~(~X & Y) --> (X | ~Y) - De Morgan's Law
2055 // ~(~X | Y) --> (X & ~Y) - De Morgan's Law
2056 if (dyn_castNotVal(Op0I->getOperand(1)))
2057 Op0I->swapOperands();
2058 if (Value *Op0NotVal = dyn_castNotVal(Op0I->getOperand(0))) {
2059 Value *NotY =
2060 Builder->CreateNot(Op0I->getOperand(1),
2061 Op0I->getOperand(1)->getName()+".not");
2062 if (Op0I->getOpcode() == Instruction::And)
2063 return BinaryOperator::CreateOr(Op0NotVal, NotY);
2064 return BinaryOperator::CreateAnd(Op0NotVal, NotY);
2065 }
2066
2067 // ~(X & Y) --> (~X | ~Y) - De Morgan's Law
2068 // ~(X | Y) --> (~X & ~Y) - De Morgan's Law
2069 if (isFreeToInvert(Op0I->getOperand(0)) &&
2070 isFreeToInvert(Op0I->getOperand(1))) {
2071 Value *NotX =
2072 Builder->CreateNot(Op0I->getOperand(0), "notlhs");
2073 Value *NotY =
2074 Builder->CreateNot(Op0I->getOperand(1), "notrhs");
2075 if (Op0I->getOpcode() == Instruction::And)
2076 return BinaryOperator::CreateOr(NotX, NotY);
2077 return BinaryOperator::CreateAnd(NotX, NotY);
2078 }
2079
2080 } else if (Op0I->getOpcode() == Instruction::AShr) {
2081 // ~(~X >>s Y) --> (X >>s Y)
2082 if (Value *Op0NotVal = dyn_castNotVal(Op0I->getOperand(0)))
2083 return BinaryOperator::CreateAShr(Op0NotVal, Op0I->getOperand(1));
2084 }
2085 }
2086 }
2087
2088
2089 if (ConstantInt *RHS = dyn_cast<ConstantInt>(Op1)) {
2090 if (RHS->isOne() && Op0->hasOneUse())
2091 // xor (cmp A, B), true = not (cmp A, B) = !cmp A, B
2092 if (CmpInst *CI = dyn_cast<CmpInst>(Op0))
2093 return CmpInst::Create(CI->getOpcode(),
2094 CI->getInversePredicate(),
2095 CI->getOperand(0), CI->getOperand(1));
2096
2097 // fold (xor(zext(cmp)), 1) and (xor(sext(cmp)), -1) to ext(!cmp).
2098 if (CastInst *Op0C = dyn_cast<CastInst>(Op0)) {
2099 if (CmpInst *CI = dyn_cast<CmpInst>(Op0C->getOperand(0))) {
2100 if (CI->hasOneUse() && Op0C->hasOneUse()) {
2101 Instruction::CastOps Opcode = Op0C->getOpcode();
2102 if ((Opcode == Instruction::ZExt || Opcode == Instruction::SExt) &&
2103 (RHS == ConstantExpr::getCast(Opcode,
2104 ConstantInt::getTrue(I.getContext()),
2105 Op0C->getDestTy()))) {
2106 CI->setPredicate(CI->getInversePredicate());
2107 return CastInst::Create(Opcode, CI, Op0C->getType());
2108 }
2109 }
2110 }
2111 }
2112
2113 if (BinaryOperator *Op0I = dyn_cast<BinaryOperator>(Op0)) {
2114 // ~(c-X) == X-c-1 == X+(-c-1)
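 // For example, ~(5 - X) --> X + (-6).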
2115 if (Op0I->getOpcode() == Instruction::Sub && RHS->isAllOnesValue())
2116 if (Constant *Op0I0C = dyn_cast<Constant>(Op0I->getOperand(0))) {
2117 Constant *NegOp0I0C = ConstantExpr::getNeg(Op0I0C);
2118 Constant *ConstantRHS = ConstantExpr::getSub(NegOp0I0C,
2119 ConstantInt::get(I.getType(), 1));
2120 return BinaryOperator::CreateAdd(Op0I->getOperand(1), ConstantRHS);
2121 }
2122
2123 if (ConstantInt *Op0CI = dyn_cast<ConstantInt>(Op0I->getOperand(1))) {
2124 if (Op0I->getOpcode() == Instruction::Add) {
2125 // ~(X+c) --> (-c-1)-X
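 // For example, ~(X + 5) --> -6 - X.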
2126 if (RHS->isAllOnesValue()) {
2127 Constant *NegOp0CI = ConstantExpr::getNeg(Op0CI);
2128 return BinaryOperator::CreateSub(
2129 ConstantExpr::getSub(NegOp0CI,
2130 ConstantInt::get(I.getType(), 1)),
2131 Op0I->getOperand(0));
2132 } else if (RHS->getValue().isSignBit()) {
2133 // (X + C) ^ signbit -> (X + C + signbit)
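 // Flipping the sign bit is the same as adding it: the low bits are
 // unaffected and the carry out of the top bit is discarded.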
2134 Constant *C = ConstantInt::get(I.getContext(),
2135 RHS->getValue() + Op0CI->getValue());
2136 return BinaryOperator::CreateAdd(Op0I->getOperand(0), C);
2137
2138 }
2139 } else if (Op0I->getOpcode() == Instruction::Or) {
2140 // (X|C1)^C2 -> X^(C1|C2) iff (X & C1) == 0
2141 if (MaskedValueIsZero(Op0I->getOperand(0), Op0CI->getValue())) {
2142 Constant *NewRHS = ConstantExpr::getOr(Op0CI, RHS);
2143 // Anything in both C1 and C2 is known to be zero, remove it from
2144 // NewRHS.
2145 Constant *CommonBits = ConstantExpr::getAnd(Op0CI, RHS);
2146 NewRHS = ConstantExpr::getAnd(NewRHS,
2147 ConstantExpr::getNot(CommonBits));
2148 Worklist.Add(Op0I);
2149 I.setOperand(0, Op0I->getOperand(0));
2150 I.setOperand(1, NewRHS);
2151 return &I;
2152 }
2153 }
2154 }
2155 }
2156
2157 // Try to fold constant and into select arguments.
2158 if (SelectInst *SI = dyn_cast<SelectInst>(Op0))
2159 if (Instruction *R = FoldOpIntoSelect(I, SI))
2160 return R;
2161 if (isa<PHINode>(Op0))
2162 if (Instruction *NV = FoldOpIntoPhi(I))
2163 return NV;
2164 }
2165
2166 BinaryOperator *Op1I = dyn_cast<BinaryOperator>(Op1);
2167 if (Op1I) {
2168 Value *A, *B;
2169 if (match(Op1I, m_Or(m_Value(A), m_Value(B)))) {
2170 if (A == Op0) { // B^(B|A) == (A|B)^B
2171 Op1I->swapOperands();
2172 I.swapOperands();
2173 std::swap(Op0, Op1);
2174 } else if (B == Op0) { // B^(A|B) == (A|B)^B
2175 I.swapOperands(); // Simplified below.
2176 std::swap(Op0, Op1);
2177 }
2178 } else if (match(Op1I, m_And(m_Value(A), m_Value(B))) &&
2179 Op1I->hasOneUse()){
2180 if (A == Op0) { // A^(A&B) -> A^(B&A)
2181 Op1I->swapOperands();
2182 std::swap(A, B);
2183 }
2184 if (B == Op0) { // A^(B&A) -> (B&A)^A
2185 I.swapOperands(); // Simplified below.
2186 std::swap(Op0, Op1);
2187 }
2188 }
2189 }
2190
2191 BinaryOperator *Op0I = dyn_cast<BinaryOperator>(Op0);
2192 if (Op0I) {
2193 Value *A, *B;
2194 if (match(Op0I, m_Or(m_Value(A), m_Value(B))) &&
2195 Op0I->hasOneUse()) {
2196 if (A == Op1) // (B|A)^B == (A|B)^B
2197 std::swap(A, B);
2198 if (B == Op1) // (A|B)^B == A & ~B
2199 return BinaryOperator::CreateAnd(A, Builder->CreateNot(Op1));
2200 } else if (match(Op0I, m_And(m_Value(A), m_Value(B))) &&
2201 Op0I->hasOneUse()){
2202 if (A == Op1) // (A&B)^A -> (B&A)^A
2203 std::swap(A, B);
2204 if (B == Op1 && // (B&A)^A == ~B & A
2205 !isa<ConstantInt>(Op1)) { // Canonical form is (B&C)^C
2206 return BinaryOperator::CreateAnd(Builder->CreateNot(A), Op1);
2207 }
2208 }
2209 }
2210
2211 // (X >> Z) ^ (Y >> Z) -> (X^Y) >> Z for all shifts.
2212 if (Op0I && Op1I && Op0I->isShift() &&
2213 Op0I->getOpcode() == Op1I->getOpcode() &&
2214 Op0I->getOperand(1) == Op1I->getOperand(1) &&
2215 (Op0I->hasOneUse() || Op1I->hasOneUse())) {
2216 Value *NewOp =
2217 Builder->CreateXor(Op0I->getOperand(0), Op1I->getOperand(0),
2218 Op0I->getName());
2219 return BinaryOperator::Create(Op1I->getOpcode(), NewOp,
2220 Op1I->getOperand(1));
2221 }
2222
2223 if (Op0I && Op1I) {
2224 Value *A, *B, *C, *D;
2225 // (A & B)^(A | B) -> A ^ B
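 // (Per bit, both sides are 1 exactly when A and B differ.)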
2226 if (match(Op0I, m_And(m_Value(A), m_Value(B))) &&
2227 match(Op1I, m_Or(m_Value(C), m_Value(D)))) {
2228 if ((A == C && B == D) || (A == D && B == C))
2229 return BinaryOperator::CreateXor(A, B);
2230 }
2231 // (A | B)^(A & B) -> A ^ B
2232 if (match(Op0I, m_Or(m_Value(A), m_Value(B))) &&
2233 match(Op1I, m_And(m_Value(C), m_Value(D)))) {
2234 if ((A == C && B == D) || (A == D && B == C))
2235 return BinaryOperator::CreateXor(A, B);
2236 }
2237 }
2238
2239 // (icmp1 A, B) ^ (icmp2 A, B) --> (icmp3 A, B)
2240 if (ICmpInst *RHS = dyn_cast<ICmpInst>(I.getOperand(1)))
2241 if (ICmpInst *LHS = dyn_cast<ICmpInst>(I.getOperand(0)))
2242 if (PredicatesFoldable(LHS->getPredicate(), RHS->getPredicate())) {
2243 if (LHS->getOperand(0) == RHS->getOperand(1) &&
2244 LHS->getOperand(1) == RHS->getOperand(0))
2245 LHS->swapOperands();
2246 if (LHS->getOperand(0) == RHS->getOperand(0) &&
2247 LHS->getOperand(1) == RHS->getOperand(1)) {
2248 Value *Op0 = LHS->getOperand(0), *Op1 = LHS->getOperand(1);
2249 unsigned Code = getICmpCode(LHS) ^ getICmpCode(RHS);
2250 bool isSigned = LHS->isSigned() || RHS->isSigned();
2251 return ReplaceInstUsesWith(I,
2252 getNewICmpValue(isSigned, Code, Op0, Op1,
2253 Builder));
2254 }
2255 }
2256
2257 // fold (xor (cast A), (cast B)) -> (cast (xor A, B))
2258 if (CastInst *Op0C = dyn_cast<CastInst>(Op0)) {
2259 if (CastInst *Op1C = dyn_cast<CastInst>(Op1))
2260 if (Op0C->getOpcode() == Op1C->getOpcode()) { // same cast kind?
2261 Type *SrcTy = Op0C->getOperand(0)->getType();
2262 if (SrcTy == Op1C->getOperand(0)->getType() && SrcTy->isIntegerTy() &&
2263 // Only do this if the casts both really cause code to be generated.
2264 ShouldOptimizeCast(Op0C->getOpcode(), Op0C->getOperand(0),
2265 I.getType()) &&
2266 ShouldOptimizeCast(Op1C->getOpcode(), Op1C->getOperand(0),
2267 I.getType())) {
2268 Value *NewOp = Builder->CreateXor(Op0C->getOperand(0),
2269 Op1C->getOperand(0), I.getName());
2270 return CastInst::Create(Op0C->getOpcode(), NewOp, I.getType());
2271 }
2272 }
2273 }
2274
2275 return Changed ? &I : 0;
2276 }
2277