//===- InstructionCombining.cpp - Combine multiple instructions ----------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// InstructionCombining - Combine instructions to form fewer, simple
// instructions.  This pass does not modify the CFG.  This pass is where
// algebraic simplification happens.
//
// This pass combines things like:
//    %Y = add i32 %X, 1
//    %Z = add i32 %Y, 1
// into:
//    %Z = add i32 %X, 2
//
// This is a simple worklist driven algorithm.
//
// This pass guarantees that the following canonicalizations are performed on
// the program:
//    1. If a binary operator has a constant operand, it is moved to the RHS
//    2. Bitwise operators with constant operands are always grouped so that
//       shifts are performed first, then or's, then and's, then xor's.
//    3. Compare instructions are converted from <,>,<=,>= to ==,!= if possible
//    4. All cmp instructions on boolean values are replaced with logical ops
//    5. add X, X is represented as (X*2) => (X << 1)
//    6. Multiplies with a power-of-two constant argument are transformed into
//       shifts.
//   ... etc.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "instcombine"
#include "llvm/Transforms/Scalar.h"
#include "InstCombine.h"
#include "llvm/IntrinsicInst.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Target/TargetLibraryInfo.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Support/CFG.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/GetElementPtrTypeIterator.h"
#include "llvm/Support/PatternMatch.h"
#include "llvm/Support/ValueHandle.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm-c/Initialization.h"
#include <algorithm>
#include <climits>
using namespace llvm;
using namespace llvm::PatternMatch;

STATISTIC(NumCombined , "Number of insts combined");
STATISTIC(NumConstProp, "Number of constant folds");
STATISTIC(NumDeadInst , "Number of dead inst eliminated");
STATISTIC(NumSunkInst , "Number of instructions sunk");
STATISTIC(NumExpand,    "Number of expansions");
STATISTIC(NumFactor ,   "Number of factorizations");
STATISTIC(NumReassoc ,  "Number of reassociations");

// Initialization Routines
void llvm::initializeInstCombine(PassRegistry &Registry) {
  initializeInstCombinerPass(Registry);
}

void LLVMInitializeInstCombine(LLVMPassRegistryRef R) {
  initializeInstCombine(*unwrap(R));
}

char InstCombiner::ID = 0;
INITIALIZE_PASS_BEGIN(InstCombiner, "instcombine",
                "Combine redundant instructions", false, false)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfo)
INITIALIZE_PASS_END(InstCombiner, "instcombine",
                "Combine redundant instructions", false, false)

void InstCombiner::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.setPreservesCFG();
  AU.addRequired<TargetLibraryInfo>();
}


/// ShouldChangeType - Return true if it is desirable to convert a computation
/// from 'From' to 'To'.  We don't want to convert from a legal to an illegal
/// type for example, or from a smaller to a larger illegal type.
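/// For example (illustrative, assuming a target whose only legal integer type
/// is i32): i16 -> i32 is desirable, i32 -> i64 is not (legal to illegal),
/// and i160 -> i64 shrinks an illegal type while i64 -> i160 would grow one.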
bool InstCombiner::ShouldChangeType(Type *From, Type *To) const {
  assert(From->isIntegerTy() && To->isIntegerTy());

  // If we don't have TD, we don't know if the source/dest are legal.
  if (!TD) return false;

  unsigned FromWidth = From->getPrimitiveSizeInBits();
  unsigned ToWidth = To->getPrimitiveSizeInBits();
  bool FromLegal = TD->isLegalInteger(FromWidth);
  bool ToLegal = TD->isLegalInteger(ToWidth);

  // If this is a legal integer from type, and the result would be an illegal
  // type, don't do the transformation.
  if (FromLegal && !ToLegal)
    return false;

  // Otherwise, if both are illegal, do not increase the size of the result. We
  // do allow things like i160 -> i64, but not i64 -> i160.
  if (!FromLegal && !ToLegal && ToWidth > FromWidth)
    return false;

  return true;
}

// Return true if No Signed Wrap should be maintained for I.
// The No Signed Wrap flag can be kept if the operation "B (I.getOpcode) C",
// where both B and C must be ConstantInts, results in a constant that does
// not overflow.  This function only handles the Add and Sub opcodes.  For
// all other opcodes, it conservatively returns false.
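// For example (illustrative): when reassociating "(X +nsw 1) +nsw 2", the
// nsw flag can be kept because 1 + 2 does not overflow, whereas for
// "(X +nsw 1) +nsw INT_MAX" it cannot, since 1 + INT_MAX wraps.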
static bool MaintainNoSignedWrap(BinaryOperator &I, Value *B, Value *C) {
  OverflowingBinaryOperator *OBO = dyn_cast<OverflowingBinaryOperator>(&I);
  if (!OBO || !OBO->hasNoSignedWrap()) {
    return false;
  }

  // We reason about Add and Sub only.
  Instruction::BinaryOps Opcode = I.getOpcode();
  if (Opcode != Instruction::Add &&
      Opcode != Instruction::Sub) {
    return false;
  }

  ConstantInt *CB = dyn_cast<ConstantInt>(B);
  ConstantInt *CC = dyn_cast<ConstantInt>(C);

  if (!CB || !CC) {
    return false;
  }

  const APInt &BVal = CB->getValue();
  const APInt &CVal = CC->getValue();
  bool Overflow = false;

  if (Opcode == Instruction::Add) {
    BVal.sadd_ov(CVal, Overflow);
  } else {
    BVal.ssub_ov(CVal, Overflow);
  }

  return !Overflow;
}

/// SimplifyAssociativeOrCommutative - This performs a few simplifications for
/// operators which are associative or commutative:
//
//  Commutative operators:
//
//  1. Order operands such that they are listed from right (least complex) to
//     left (most complex).  This puts constants before unary operators before
//     binary operators.
//
//  Associative operators:
//
//  2. Transform: "(A op B) op C" ==> "A op (B op C)" if "B op C" simplifies.
//  3. Transform: "A op (B op C)" ==> "(A op B) op C" if "A op B" simplifies.
//
//  Associative and commutative operators:
//
//  4. Transform: "(A op B) op C" ==> "(C op A) op B" if "C op A" simplifies.
//  5. Transform: "A op (B op C)" ==> "B op (C op A)" if "C op A" simplifies.
//  6. Transform: "(A op C1) op (B op C2)" ==> "(A op B) op (C1 op C2)"
//     if C1 and C2 are constants.
//
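//  For example (illustrative IR), transform 2 rewrites
//    %t = or i32 %a, %b
//    %r = or i32 %t, %b
//  into "%r = or i32 %a, %b", because "B op C" (%b | %b) simplifies to %b.
//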
bool InstCombiner::SimplifyAssociativeOrCommutative(BinaryOperator &I) {
  Instruction::BinaryOps Opcode = I.getOpcode();
  bool Changed = false;

  do {
    // Order operands such that they are listed from right (least complex) to
    // left (most complex).  This puts constants before unary operators before
    // binary operators.
    if (I.isCommutative() && getComplexity(I.getOperand(0)) <
        getComplexity(I.getOperand(1)))
      Changed = !I.swapOperands();

    BinaryOperator *Op0 = dyn_cast<BinaryOperator>(I.getOperand(0));
    BinaryOperator *Op1 = dyn_cast<BinaryOperator>(I.getOperand(1));

    if (I.isAssociative()) {
      // Transform: "(A op B) op C" ==> "A op (B op C)" if "B op C" simplifies.
      if (Op0 && Op0->getOpcode() == Opcode) {
        Value *A = Op0->getOperand(0);
        Value *B = Op0->getOperand(1);
        Value *C = I.getOperand(1);

        // Does "B op C" simplify?
        if (Value *V = SimplifyBinOp(Opcode, B, C, TD)) {
          // It simplifies to V.  Form "A op V".
          I.setOperand(0, A);
          I.setOperand(1, V);
          // Conservatively clear the optional flags, since they may not be
          // preserved by the reassociation.
          if (MaintainNoSignedWrap(I, B, C) &&
              (!Op0 || (isa<BinaryOperator>(Op0) && Op0->hasNoSignedWrap()))) {
            // Note: this is only valid because SimplifyBinOp doesn't look at
            // the operands to Op0.
            I.clearSubclassOptionalData();
            I.setHasNoSignedWrap(true);
          } else {
            I.clearSubclassOptionalData();
          }

          Changed = true;
          ++NumReassoc;
          continue;
        }
      }

      // Transform: "A op (B op C)" ==> "(A op B) op C" if "A op B" simplifies.
      if (Op1 && Op1->getOpcode() == Opcode) {
        Value *A = I.getOperand(0);
        Value *B = Op1->getOperand(0);
        Value *C = Op1->getOperand(1);

        // Does "A op B" simplify?
        if (Value *V = SimplifyBinOp(Opcode, A, B, TD)) {
          // It simplifies to V.  Form "V op C".
          I.setOperand(0, V);
          I.setOperand(1, C);
          // Conservatively clear the optional flags, since they may not be
          // preserved by the reassociation.
          I.clearSubclassOptionalData();
          Changed = true;
          ++NumReassoc;
          continue;
        }
      }
    }

    if (I.isAssociative() && I.isCommutative()) {
      // Transform: "(A op B) op C" ==> "(C op A) op B" if "C op A" simplifies.
      if (Op0 && Op0->getOpcode() == Opcode) {
        Value *A = Op0->getOperand(0);
        Value *B = Op0->getOperand(1);
        Value *C = I.getOperand(1);

        // Does "C op A" simplify?
        if (Value *V = SimplifyBinOp(Opcode, C, A, TD)) {
          // It simplifies to V.  Form "V op B".
          I.setOperand(0, V);
          I.setOperand(1, B);
          // Conservatively clear the optional flags, since they may not be
          // preserved by the reassociation.
          I.clearSubclassOptionalData();
          Changed = true;
          ++NumReassoc;
          continue;
        }
      }

      // Transform: "A op (B op C)" ==> "B op (C op A)" if "C op A" simplifies.
      if (Op1 && Op1->getOpcode() == Opcode) {
        Value *A = I.getOperand(0);
        Value *B = Op1->getOperand(0);
        Value *C = Op1->getOperand(1);

        // Does "C op A" simplify?
        if (Value *V = SimplifyBinOp(Opcode, C, A, TD)) {
          // It simplifies to V.  Form "B op V".
          I.setOperand(0, B);
          I.setOperand(1, V);
          // Conservatively clear the optional flags, since they may not be
          // preserved by the reassociation.
          I.clearSubclassOptionalData();
          Changed = true;
          ++NumReassoc;
          continue;
        }
      }

      // Transform: "(A op C1) op (B op C2)" ==> "(A op B) op (C1 op C2)"
      // if C1 and C2 are constants.
      if (Op0 && Op1 &&
          Op0->getOpcode() == Opcode && Op1->getOpcode() == Opcode &&
          isa<Constant>(Op0->getOperand(1)) &&
          isa<Constant>(Op1->getOperand(1)) &&
          Op0->hasOneUse() && Op1->hasOneUse()) {
        Value *A = Op0->getOperand(0);
        Constant *C1 = cast<Constant>(Op0->getOperand(1));
        Value *B = Op1->getOperand(0);
        Constant *C2 = cast<Constant>(Op1->getOperand(1));

        Constant *Folded = ConstantExpr::get(Opcode, C1, C2);
        BinaryOperator *New = BinaryOperator::Create(Opcode, A, B);
        InsertNewInstWith(New, I);
        New->takeName(Op1);
        I.setOperand(0, New);
        I.setOperand(1, Folded);
        // Conservatively clear the optional flags, since they may not be
        // preserved by the reassociation.
        I.clearSubclassOptionalData();

        Changed = true;
        continue;
      }
    }

    // No further simplifications.
    return Changed;
  } while (1);
}

/// LeftDistributesOverRight - Whether "X LOp (Y ROp Z)" is always equal to
/// "(X LOp Y) ROp (X LOp Z)".
static bool LeftDistributesOverRight(Instruction::BinaryOps LOp,
                                     Instruction::BinaryOps ROp) {
  switch (LOp) {
  default:
    return false;

  case Instruction::And:
    // And distributes over Or and Xor.
    switch (ROp) {
    default:
      return false;
    case Instruction::Or:
    case Instruction::Xor:
      return true;
    }

  case Instruction::Mul:
    // Multiplication distributes over addition and subtraction.
    switch (ROp) {
    default:
      return false;
    case Instruction::Add:
    case Instruction::Sub:
      return true;
    }

  case Instruction::Or:
    // Or distributes over And.
    switch (ROp) {
    default:
      return false;
    case Instruction::And:
      return true;
    }
  }
}

/// RightDistributesOverLeft - Whether "(X LOp Y) ROp Z" is always equal to
/// "(X ROp Z) LOp (Y ROp Z)".
static bool RightDistributesOverLeft(Instruction::BinaryOps LOp,
                                     Instruction::BinaryOps ROp) {
  if (Instruction::isCommutative(ROp))
    return LeftDistributesOverRight(ROp, LOp);
  // TODO: It would be nice to handle division, aka "(X + Y)/Z = X/Z + Y/Z",
  // but this requires knowing that the addition does not overflow and other
  // such subtleties.
  return false;
}

/// SimplifyUsingDistributiveLaws - This tries to simplify binary operations
/// which some other binary operation distributes over either by factorizing
/// out common terms (eg "(A*B)+(A*C)" -> "A*(B+C)") or expanding out if this
/// results in simplifications (eg: "A & (B | C) -> (A&B) | (A&C)" if this is
/// a win).  Returns the simplified value, or null if it didn't simplify.
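/// For example (illustrative IR), "(%x * 4) + (%x * 8)" factorizes to
/// "%x * 12": the common term %x is pulled out and "4 + 8" constant-folds.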
Value *InstCombiner::SimplifyUsingDistributiveLaws(BinaryOperator &I) {
  Value *LHS = I.getOperand(0), *RHS = I.getOperand(1);
  BinaryOperator *Op0 = dyn_cast<BinaryOperator>(LHS);
  BinaryOperator *Op1 = dyn_cast<BinaryOperator>(RHS);
  Instruction::BinaryOps TopLevelOpcode = I.getOpcode(); // op

  // Factorization.
  if (Op0 && Op1 && Op0->getOpcode() == Op1->getOpcode()) {
    // The instruction has the form "(A op' B) op (C op' D)".  Try to factorize
    // a common term.
    Value *A = Op0->getOperand(0), *B = Op0->getOperand(1);
    Value *C = Op1->getOperand(0), *D = Op1->getOperand(1);
    Instruction::BinaryOps InnerOpcode = Op0->getOpcode(); // op'

    // Does "X op' Y" always equal "Y op' X"?
    bool InnerCommutative = Instruction::isCommutative(InnerOpcode);

    // Does "X op' (Y op Z)" always equal "(X op' Y) op (X op' Z)"?
    if (LeftDistributesOverRight(InnerOpcode, TopLevelOpcode))
      // Does the instruction have the form "(A op' B) op (A op' D)" or, in the
      // commutative case, "(A op' B) op (C op' A)"?
      if (A == C || (InnerCommutative && A == D)) {
        if (A != C)
          std::swap(C, D);
        // Consider forming "A op' (B op D)".
        // If "B op D" simplifies then it can be formed with no cost.
        Value *V = SimplifyBinOp(TopLevelOpcode, B, D, TD);
        // If "B op D" doesn't simplify then only go on if both of the existing
        // operations "A op' B" and "C op' D" will be zapped as no longer used.
        if (!V && Op0->hasOneUse() && Op1->hasOneUse())
          V = Builder->CreateBinOp(TopLevelOpcode, B, D, Op1->getName());
        if (V) {
          ++NumFactor;
          V = Builder->CreateBinOp(InnerOpcode, A, V);
          V->takeName(&I);
          return V;
        }
      }

    // Does "(X op Y) op' Z" always equal "(X op' Z) op (Y op' Z)"?
    if (RightDistributesOverLeft(TopLevelOpcode, InnerOpcode))
      // Does the instruction have the form "(A op' B) op (C op' B)" or, in the
      // commutative case, "(A op' B) op (B op' D)"?
      if (B == D || (InnerCommutative && B == C)) {
        if (B != D)
          std::swap(C, D);
        // Consider forming "(A op C) op' B".
        // If "A op C" simplifies then it can be formed with no cost.
        Value *V = SimplifyBinOp(TopLevelOpcode, A, C, TD);
        // If "A op C" doesn't simplify then only go on if both of the existing
        // operations "A op' B" and "C op' D" will be zapped as no longer used.
        if (!V && Op0->hasOneUse() && Op1->hasOneUse())
          V = Builder->CreateBinOp(TopLevelOpcode, A, C, Op0->getName());
        if (V) {
          ++NumFactor;
          V = Builder->CreateBinOp(InnerOpcode, V, B);
          V->takeName(&I);
          return V;
        }
      }
  }

  // Expansion.
  if (Op0 && RightDistributesOverLeft(Op0->getOpcode(), TopLevelOpcode)) {
    // The instruction has the form "(A op' B) op C".  See if expanding it out
    // to "(A op C) op' (B op C)" results in simplifications.
    Value *A = Op0->getOperand(0), *B = Op0->getOperand(1), *C = RHS;
    Instruction::BinaryOps InnerOpcode = Op0->getOpcode(); // op'

    // Do "A op C" and "B op C" both simplify?
    if (Value *L = SimplifyBinOp(TopLevelOpcode, A, C, TD))
      if (Value *R = SimplifyBinOp(TopLevelOpcode, B, C, TD)) {
        // They do!  Return "L op' R".
        ++NumExpand;
        // If "L op' R" equals "A op' B" then "L op' R" is just the LHS.
        if ((L == A && R == B) ||
            (Instruction::isCommutative(InnerOpcode) && L == B && R == A))
          return Op0;
        // Otherwise return "L op' R" if it simplifies.
        if (Value *V = SimplifyBinOp(InnerOpcode, L, R, TD))
          return V;
        // Otherwise, create a new instruction.
        C = Builder->CreateBinOp(InnerOpcode, L, R);
        C->takeName(&I);
        return C;
      }
  }

  if (Op1 && LeftDistributesOverRight(TopLevelOpcode, Op1->getOpcode())) {
    // The instruction has the form "A op (B op' C)".  See if expanding it out
    // to "(A op B) op' (A op C)" results in simplifications.
    Value *A = LHS, *B = Op1->getOperand(0), *C = Op1->getOperand(1);
    Instruction::BinaryOps InnerOpcode = Op1->getOpcode(); // op'

    // Do "A op B" and "A op C" both simplify?
    if (Value *L = SimplifyBinOp(TopLevelOpcode, A, B, TD))
      if (Value *R = SimplifyBinOp(TopLevelOpcode, A, C, TD)) {
        // They do!  Return "L op' R".
        ++NumExpand;
        // If "L op' R" equals "B op' C" then "L op' R" is just the RHS.
        if ((L == B && R == C) ||
            (Instruction::isCommutative(InnerOpcode) && L == C && R == B))
          return Op1;
        // Otherwise return "L op' R" if it simplifies.
        if (Value *V = SimplifyBinOp(InnerOpcode, L, R, TD))
          return V;
        // Otherwise, create a new instruction.
        A = Builder->CreateBinOp(InnerOpcode, L, R);
        A->takeName(&I);
        return A;
      }
  }

  return 0;
}

// dyn_castNegVal - Given a 'sub' instruction, return the RHS of the
// instruction if the LHS is a constant zero (which is the 'negate' form).
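// For example, given "%n = sub i32 0, %x" this returns %x, and given the
// constant "i32 5" it returns "i32 -5".
//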
Value *InstCombiner::dyn_castNegVal(Value *V) const {
  if (BinaryOperator::isNeg(V))
    return BinaryOperator::getNegArgument(V);

  // Constants can be considered to be negated values if they can be folded.
  if (ConstantInt *C = dyn_cast<ConstantInt>(V))
    return ConstantExpr::getNeg(C);

  if (ConstantDataVector *C = dyn_cast<ConstantDataVector>(V))
    if (C->getType()->getElementType()->isIntegerTy())
      return ConstantExpr::getNeg(C);

  return 0;
}

// dyn_castFNegVal - Given an 'fsub' instruction, return the RHS of the
// instruction if the LHS is a constant negative zero (which is the 'negate'
// form).
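// For example, given "%n = fsub float -0.0, %x" this returns %x.
//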
Value *InstCombiner::dyn_castFNegVal(Value *V) const {
  if (BinaryOperator::isFNeg(V))
    return BinaryOperator::getFNegArgument(V);

  // Constants can be considered to be negated values if they can be folded.
  if (ConstantFP *C = dyn_cast<ConstantFP>(V))
    return ConstantExpr::getFNeg(C);

  if (ConstantDataVector *C = dyn_cast<ConstantDataVector>(V))
    if (C->getType()->getElementType()->isFloatingPointTy())
      return ConstantExpr::getFNeg(C);

  return 0;
}

static Value *FoldOperationIntoSelectOperand(Instruction &I, Value *SO,
                                             InstCombiner *IC) {
  if (CastInst *CI = dyn_cast<CastInst>(&I)) {
    return IC->Builder->CreateCast(CI->getOpcode(), SO, I.getType());
  }

  // Figure out if the constant is the left or the right argument.
  bool ConstIsRHS = isa<Constant>(I.getOperand(1));
  Constant *ConstOperand = cast<Constant>(I.getOperand(ConstIsRHS));

  if (Constant *SOC = dyn_cast<Constant>(SO)) {
    if (ConstIsRHS)
      return ConstantExpr::get(I.getOpcode(), SOC, ConstOperand);
    return ConstantExpr::get(I.getOpcode(), ConstOperand, SOC);
  }

  Value *Op0 = SO, *Op1 = ConstOperand;
  if (!ConstIsRHS)
    std::swap(Op0, Op1);

  if (BinaryOperator *BO = dyn_cast<BinaryOperator>(&I))
    return IC->Builder->CreateBinOp(BO->getOpcode(), Op0, Op1,
                                    SO->getName()+".op");
  if (ICmpInst *CI = dyn_cast<ICmpInst>(&I))
    return IC->Builder->CreateICmp(CI->getPredicate(), Op0, Op1,
                                   SO->getName()+".cmp");
  if (FCmpInst *CI = dyn_cast<FCmpInst>(&I))
    return IC->Builder->CreateFCmp(CI->getPredicate(), Op0, Op1,
                                   SO->getName()+".cmp");
  llvm_unreachable("Unknown binary instruction type!");
}

// FoldOpIntoSelect - Given an instruction with a select as one operand and a
// constant as the other operand, try to fold the binary operator into the
// select arguments.  This also works for Cast instructions, which obviously do
// not have a second operand.
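// For example (illustrative): "add (select %c, i32 4, i32 8), 2" folds to
// "select %c, i32 6, i32 10", applying the add to each select arm.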
Instruction *InstCombiner::FoldOpIntoSelect(Instruction &Op, SelectInst *SI) {
  // Don't modify shared select instructions.
  if (!SI->hasOneUse()) return 0;
  Value *TV = SI->getOperand(1);
  Value *FV = SI->getOperand(2);

  if (isa<Constant>(TV) || isa<Constant>(FV)) {
    // Bool selects with constant operands can be folded to logical ops.
    if (SI->getType()->isIntegerTy(1)) return 0;

    // If it's a bitcast involving vectors, make sure it has the same number of
    // elements on both sides.
    if (BitCastInst *BC = dyn_cast<BitCastInst>(&Op)) {
      VectorType *DestTy = dyn_cast<VectorType>(BC->getDestTy());
      VectorType *SrcTy = dyn_cast<VectorType>(BC->getSrcTy());

      // Verify that either both or neither are vectors.
      if ((SrcTy == NULL) != (DestTy == NULL)) return 0;
      // If vectors, verify that they have the same number of elements.
      if (SrcTy && SrcTy->getNumElements() != DestTy->getNumElements())
        return 0;
    }

    Value *SelectTrueVal = FoldOperationIntoSelectOperand(Op, TV, this);
    Value *SelectFalseVal = FoldOperationIntoSelectOperand(Op, FV, this);

    return SelectInst::Create(SI->getCondition(),
                              SelectTrueVal, SelectFalseVal);
  }
  return 0;
}


/// FoldOpIntoPhi - Given a binary operator, cast instruction, or select which
/// has a PHI node as operand #0, see if we can fold the instruction into the
/// PHI (which is only possible if all operands to the PHI are constants).
///
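/// For example (illustrative), given "%p = phi i32 [ 1, %bb0 ], [ 2, %bb1 ]"
/// and "%r = add i32 %p, 10", the add folds to
/// "%r = phi i32 [ 11, %bb0 ], [ 12, %bb1 ]".
///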
Instruction *InstCombiner::FoldOpIntoPhi(Instruction &I) {
  PHINode *PN = cast<PHINode>(I.getOperand(0));
  unsigned NumPHIValues = PN->getNumIncomingValues();
  if (NumPHIValues == 0)
    return 0;

  // We normally only transform phis with a single use.  However, if a PHI has
  // multiple uses and they are all the same operation, we can fold *all* of
  // the uses into the PHI.
  if (!PN->hasOneUse()) {
    // Walk the use list for the instruction, comparing them to I.
    for (Value::use_iterator UI = PN->use_begin(), E = PN->use_end();
         UI != E; ++UI) {
      Instruction *User = cast<Instruction>(*UI);
      if (User != &I && !I.isIdenticalTo(User))
        return 0;
    }
    // Otherwise, we can replace *all* users with the new PHI we form.
  }

  // Check to see if all of the operands of the PHI are simple constants
  // (constantint/constantfp/undef).  If there is one non-constant value,
  // remember the BB it is in.  If there is more than one or if *it* is a PHI,
  // bail out.  We don't do arbitrary constant expressions here because moving
  // their computation can be expensive without a cost model.
  BasicBlock *NonConstBB = 0;
  for (unsigned i = 0; i != NumPHIValues; ++i) {
    Value *InVal = PN->getIncomingValue(i);
    if (isa<Constant>(InVal) && !isa<ConstantExpr>(InVal))
      continue;

    if (isa<PHINode>(InVal)) return 0;  // Itself a phi.
    if (NonConstBB) return 0;  // More than one non-const value.

    NonConstBB = PN->getIncomingBlock(i);

    // If the InVal is an invoke at the end of the pred block, then we can't
    // insert a computation after it without breaking the edge.
    if (InvokeInst *II = dyn_cast<InvokeInst>(InVal))
      if (II->getParent() == NonConstBB)
        return 0;

    // If the incoming non-constant value is in I's block, we will remove one
    // instruction, but insert another equivalent one, leading to infinite
    // instcombine.
    if (NonConstBB == I.getParent())
      return 0;
  }

  // If there is exactly one non-constant value, we can insert a copy of the
  // operation in that block.  However, if this is a critical edge, we would be
  // inserting the computation on some other paths (e.g. inside a loop).  Only
  // do this if the pred block is unconditionally branching into the phi block.
  if (NonConstBB != 0) {
    BranchInst *BI = dyn_cast<BranchInst>(NonConstBB->getTerminator());
    if (!BI || !BI->isUnconditional()) return 0;
  }

  // Okay, we can do the transformation: create the new PHI node.
  PHINode *NewPN = PHINode::Create(I.getType(), PN->getNumIncomingValues());
  InsertNewInstBefore(NewPN, *PN);
  NewPN->takeName(PN);

  // If we are going to have to insert a new computation, do so right before
  // the predecessor's terminator.
  if (NonConstBB)
    Builder->SetInsertPoint(NonConstBB->getTerminator());

  // Next, add all of the operands to the PHI.
  if (SelectInst *SI = dyn_cast<SelectInst>(&I)) {
    // We only currently try to fold the condition of a select when it is a
    // phi, not the true/false values.
    Value *TrueV = SI->getTrueValue();
    Value *FalseV = SI->getFalseValue();
    BasicBlock *PhiTransBB = PN->getParent();
    for (unsigned i = 0; i != NumPHIValues; ++i) {
      BasicBlock *ThisBB = PN->getIncomingBlock(i);
      Value *TrueVInPred = TrueV->DoPHITranslation(PhiTransBB, ThisBB);
      Value *FalseVInPred = FalseV->DoPHITranslation(PhiTransBB, ThisBB);
      Value *InV = 0;
      if (Constant *InC = dyn_cast<Constant>(PN->getIncomingValue(i)))
        InV = InC->isNullValue() ? FalseVInPred : TrueVInPred;
      else
        InV = Builder->CreateSelect(PN->getIncomingValue(i),
                                    TrueVInPred, FalseVInPred, "phitmp");
      NewPN->addIncoming(InV, ThisBB);
    }
  } else if (CmpInst *CI = dyn_cast<CmpInst>(&I)) {
    Constant *C = cast<Constant>(I.getOperand(1));
    for (unsigned i = 0; i != NumPHIValues; ++i) {
      Value *InV = 0;
      if (Constant *InC = dyn_cast<Constant>(PN->getIncomingValue(i)))
        InV = ConstantExpr::getCompare(CI->getPredicate(), InC, C);
      else if (isa<ICmpInst>(CI))
        InV = Builder->CreateICmp(CI->getPredicate(), PN->getIncomingValue(i),
                                  C, "phitmp");
      else
        InV = Builder->CreateFCmp(CI->getPredicate(), PN->getIncomingValue(i),
                                  C, "phitmp");
      NewPN->addIncoming(InV, PN->getIncomingBlock(i));
    }
  } else if (I.getNumOperands() == 2) {
    Constant *C = cast<Constant>(I.getOperand(1));
    for (unsigned i = 0; i != NumPHIValues; ++i) {
      Value *InV = 0;
      if (Constant *InC = dyn_cast<Constant>(PN->getIncomingValue(i)))
        InV = ConstantExpr::get(I.getOpcode(), InC, C);
      else
        InV = Builder->CreateBinOp(cast<BinaryOperator>(I).getOpcode(),
                                   PN->getIncomingValue(i), C, "phitmp");
      NewPN->addIncoming(InV, PN->getIncomingBlock(i));
    }
  } else {
    CastInst *CI = cast<CastInst>(&I);
    Type *RetTy = CI->getType();
    for (unsigned i = 0; i != NumPHIValues; ++i) {
      Value *InV;
      if (Constant *InC = dyn_cast<Constant>(PN->getIncomingValue(i)))
        InV = ConstantExpr::getCast(CI->getOpcode(), InC, RetTy);
      else
        InV = Builder->CreateCast(CI->getOpcode(),
                                  PN->getIncomingValue(i), I.getType(),
                                  "phitmp");
      NewPN->addIncoming(InV, PN->getIncomingBlock(i));
    }
  }

  for (Value::use_iterator UI = PN->use_begin(), E = PN->use_end();
       UI != E; ) {
    Instruction *User = cast<Instruction>(*UI++);
    if (User == &I) continue;
    ReplaceInstUsesWith(*User, NewPN);
    EraseInstFromFunction(*User);
  }
  return ReplaceInstUsesWith(I, NewPN);
}

/// FindElementAtOffset - Given a type and a constant offset, determine whether
/// or not there is a sequence of GEP indices into the type that will land us
/// at the specified offset.  If so, fill them into NewIndices and return the
/// resultant element type, otherwise return null.
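/// For example (illustrative, assuming a typical TargetData layout), for the
/// type {i32, [4 x i16]} and Offset 6 this fills NewIndices with [0, 1, 1]
/// and returns i16: byte 6 falls in element 1 of the i16 array at byte 4.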
Type *InstCombiner::FindElementAtOffset(Type *Ty, int64_t Offset,
                                        SmallVectorImpl<Value*> &NewIndices) {
  if (!TD) return 0;
  if (!Ty->isSized()) return 0;

  // Start with the index over the outer type.  Note that the type size
  // might be zero (even if the offset isn't zero) if the indexed type
  // is something like [0 x {int, int}].
  Type *IntPtrTy = TD->getIntPtrType(Ty->getContext());
  int64_t FirstIdx = 0;
  if (int64_t TySize = TD->getTypeAllocSize(Ty)) {
    FirstIdx = Offset/TySize;
    Offset -= FirstIdx*TySize;

    // Handle hosts where % returns negative instead of values [0..TySize).
    if (Offset < 0) {
      --FirstIdx;
      Offset += TySize;
      assert(Offset >= 0);
    }
    assert((uint64_t)Offset < (uint64_t)TySize && "Out of range offset");
  }

  NewIndices.push_back(ConstantInt::get(IntPtrTy, FirstIdx));

  // Index into the types.  If we fail, return null.
  while (Offset) {
    // Indexing into tail padding between struct/array elements.
    if (uint64_t(Offset*8) >= TD->getTypeSizeInBits(Ty))
      return 0;

    if (StructType *STy = dyn_cast<StructType>(Ty)) {
      const StructLayout *SL = TD->getStructLayout(STy);
      assert(Offset < (int64_t)SL->getSizeInBytes() &&
             "Offset must stay within the indexed type");

      unsigned Elt = SL->getElementContainingOffset(Offset);
      NewIndices.push_back(ConstantInt::get(Type::getInt32Ty(Ty->getContext()),
                                            Elt));

      Offset -= SL->getElementOffset(Elt);
      Ty = STy->getElementType(Elt);
    } else if (ArrayType *AT = dyn_cast<ArrayType>(Ty)) {
      uint64_t EltSize = TD->getTypeAllocSize(AT->getElementType());
      assert(EltSize && "Cannot index into a zero-sized array");
      NewIndices.push_back(ConstantInt::get(IntPtrTy, Offset/EltSize));
      Offset %= EltSize;
      Ty = AT->getElementType();
    } else {
      // Otherwise, we can't index into the middle of this atomic type, bail.
      return 0;
    }
  }

  return Ty;
}

static bool shouldMergeGEPs(GEPOperator &GEP, GEPOperator &Src) {
  // If this GEP has only 0 indices, it is the same pointer as
  // Src. If Src is not a trivial GEP too, don't combine
  // the indices.
  if (GEP.hasAllZeroIndices() && !Src.hasAllZeroIndices() &&
      !Src.hasOneUse())
    return false;
  return true;
}

Instruction *InstCombiner::visitGetElementPtrInst(GetElementPtrInst &GEP) {
  SmallVector<Value*, 8> Ops(GEP.op_begin(), GEP.op_end());

  if (Value *V = SimplifyGEPInst(Ops, TD))
    return ReplaceInstUsesWith(GEP, V);

  Value *PtrOp = GEP.getOperand(0);

  // Eliminate unneeded casts for indices, and replace indices which displace
  // by multiples of a zero size type with zero.
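  // For example (illustrative), on a 32-bit target an i64 index is truncated
  // to the i32 pointer-sized type, and any index over a zero-sized element
  // type is replaced with 0 since it cannot change the address.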
  if (TD) {
    bool MadeChange = false;
    Type *IntPtrTy = TD->getIntPtrType(GEP.getContext());

    gep_type_iterator GTI = gep_type_begin(GEP);
    for (User::op_iterator I = GEP.op_begin() + 1, E = GEP.op_end();
         I != E; ++I, ++GTI) {
      // Skip indices into struct types.
      SequentialType *SeqTy = dyn_cast<SequentialType>(*GTI);
      if (!SeqTy) continue;

      // If the element type has zero size then any index over it is equivalent
      // to an index of zero, so replace it with zero if it is not zero
      // already.
      if (SeqTy->getElementType()->isSized() &&
          TD->getTypeAllocSize(SeqTy->getElementType()) == 0)
        if (!isa<Constant>(*I) || !cast<Constant>(*I)->isNullValue()) {
          *I = Constant::getNullValue(IntPtrTy);
          MadeChange = true;
        }

      Type *IndexTy = (*I)->getType();
      if (IndexTy != IntPtrTy && !IndexTy->isVectorTy()) {
        // If we are using a wider index than needed for this platform, shrink
        // it to what we need.  If narrower, sign-extend it to what we need.
        // This explicit cast can make subsequent optimizations more obvious.
        *I = Builder->CreateIntCast(*I, IntPtrTy, true);
        MadeChange = true;
      }
    }
    if (MadeChange) return &GEP;
  }

  // Combine Indices - If the source pointer to this getelementptr instruction
  // is a getelementptr instruction, combine the indices of the two
  // getelementptr instructions into a single instruction.
  //
  if (GEPOperator *Src = dyn_cast<GEPOperator>(PtrOp)) {
    if (!shouldMergeGEPs(*cast<GEPOperator>(&GEP), *Src))
      return 0;

    // Note that if our source is a gep chain itself then we wait for that
    // chain to be resolved before we perform this transformation.  This
    // avoids us creating a TON of code in some cases.
    if (GEPOperator *SrcGEP =
          dyn_cast<GEPOperator>(Src->getOperand(0)))
      if (SrcGEP->getNumOperands() == 2 && shouldMergeGEPs(*Src, *SrcGEP))
        return 0;   // Wait until our source is folded to completion.

    SmallVector<Value*, 8> Indices;

    // Find out whether the last index in the source GEP is a sequential idx.
    bool EndsWithSequential = false;
    for (gep_type_iterator I = gep_type_begin(*Src), E = gep_type_end(*Src);
         I != E; ++I)
      EndsWithSequential = !(*I)->isStructTy();

    // Can we combine the two pointer arithmetics offsets?
    if (EndsWithSequential) {
      // Replace: gep (gep %P, long B), long A, ...
      // With:    T = long A+B; gep %P, T, ...
      //
      Value *Sum;
      Value *SO1 = Src->getOperand(Src->getNumOperands()-1);
      Value *GO1 = GEP.getOperand(1);
      if (SO1 == Constant::getNullValue(SO1->getType())) {
        Sum = GO1;
      } else if (GO1 == Constant::getNullValue(GO1->getType())) {
        Sum = SO1;
      } else {
        // If they aren't the same type, then the input hasn't been processed
        // by the loop above yet (which canonicalizes sequential index types to
        // intptr_t).  Just avoid transforming this until the input has been
        // normalized.
        if (SO1->getType() != GO1->getType())
          return 0;
        Sum = Builder->CreateAdd(SO1, GO1, PtrOp->getName()+".sum");
      }

      // Update the GEP in place if possible.
      if (Src->getNumOperands() == 2) {
        GEP.setOperand(0, Src->getOperand(0));
        GEP.setOperand(1, Sum);
        return &GEP;
      }
      Indices.append(Src->op_begin()+1, Src->op_end()-1);
      Indices.push_back(Sum);
      Indices.append(GEP.op_begin()+2, GEP.op_end());
    } else if (isa<Constant>(*GEP.idx_begin()) &&
               cast<Constant>(*GEP.idx_begin())->isNullValue() &&
               Src->getNumOperands() != 1) {
      // Otherwise we can do the fold if the first index of the GEP is a zero.
      Indices.append(Src->op_begin()+1, Src->op_end());
      Indices.append(GEP.idx_begin()+1, GEP.idx_end());
    }

    if (!Indices.empty())
      return (GEP.isInBounds() && Src->isInBounds()) ?
        GetElementPtrInst::CreateInBounds(Src->getOperand(0), Indices,
                                          GEP.getName()) :
        GetElementPtrInst::Create(Src->getOperand(0), Indices, GEP.getName());
  }

  // Handle gep(bitcast x) and gep(gep x, 0, 0, 0).
  Value *StrippedPtr = PtrOp->stripPointerCasts();
  PointerType *StrippedPtrTy = dyn_cast<PointerType>(StrippedPtr->getType());

  // We do not handle pointer-vector geps here.
  if (!StrippedPtrTy)
    return 0;

  if (StrippedPtr != PtrOp &&
      StrippedPtrTy->getAddressSpace() == GEP.getPointerAddressSpace()) {

    bool HasZeroPointerIndex = false;
    if (ConstantInt *C = dyn_cast<ConstantInt>(GEP.getOperand(1)))
      HasZeroPointerIndex = C->isZero();

    // Transform: GEP (bitcast [10 x i8]* X to [0 x i8]*), i32 0, ...
    // into     : GEP [10 x i8]* X, i32 0, ...
    //
    // Likewise, transform: GEP (bitcast i8* X to [0 x i8]*), i32 0, ...
    //           into     : GEP i8* X, ...
    //
    // This occurs when the program declares an array extern like "int X[];"
    if (HasZeroPointerIndex) {
      PointerType *CPTy = cast<PointerType>(PtrOp->getType());
      if (ArrayType *CATy =
          dyn_cast<ArrayType>(CPTy->getElementType())) {
        // GEP (bitcast i8* X to [0 x i8]*), i32 0, ... ?
        if (CATy->getElementType() == StrippedPtrTy->getElementType()) {
          // -> GEP i8* X, ...
          SmallVector<Value*, 8> Idx(GEP.idx_begin()+1, GEP.idx_end());
          GetElementPtrInst *Res =
            GetElementPtrInst::Create(StrippedPtr, Idx, GEP.getName());
          Res->setIsInBounds(GEP.isInBounds());
          return Res;
        }

        if (ArrayType *XATy =
            dyn_cast<ArrayType>(StrippedPtrTy->getElementType())) {
          // GEP (bitcast [10 x i8]* X to [0 x i8]*), i32 0, ... ?
          if (CATy->getElementType() == XATy->getElementType()) {
            // -> GEP [10 x i8]* X, i32 0, ...
            // At this point, we know that the cast source type is a pointer
            // to an array of the same type as the destination pointer
            // array.  Because the array type is never stepped over (there
            // is a leading zero) we can fold the cast into this GEP.
            GEP.setOperand(0, StrippedPtr);
            return &GEP;
          }
        }
      }
    } else if (GEP.getNumOperands() == 2) {
      // Transform things like:
      // %t = getelementptr i32* bitcast ([2 x i32]* %str to i32*), i32 %V
      // into:  %t1 = getelementptr [2 x i32]* %str, i32 0, i32 %V; bitcast
      Type *SrcElTy = StrippedPtrTy->getElementType();
      Type *ResElTy = cast<PointerType>(PtrOp->getType())->getElementType();
      if (TD && SrcElTy->isArrayTy() &&
          TD->getTypeAllocSize(cast<ArrayType>(SrcElTy)->getElementType()) ==
          TD->getTypeAllocSize(ResElTy)) {
        Value *Idx[2];
        Idx[0] = Constant::getNullValue(Type::getInt32Ty(GEP.getContext()));
        Idx[1] = GEP.getOperand(1);
        Value *NewGEP = GEP.isInBounds() ?
          Builder->CreateInBoundsGEP(StrippedPtr, Idx, GEP.getName()) :
          Builder->CreateGEP(StrippedPtr, Idx, GEP.getName());
        // V and GEP are both pointer types --> BitCast
        return new BitCastInst(NewGEP, GEP.getType());
      }

      // Transform things like:
      // getelementptr i8* bitcast ([100 x double]* X to i8*), i32 %tmp
      //   (where tmp = 8*tmp2) into:
      // getelementptr [100 x double]* %arr, i32 0, i32 %tmp2; bitcast

      if (TD && SrcElTy->isArrayTy() && ResElTy->isIntegerTy(8)) {
        uint64_t ArrayEltSize =
          TD->getTypeAllocSize(cast<ArrayType>(SrcElTy)->getElementType());

        // Check to see if "tmp" is a scale by a multiple of ArrayEltSize.  We
        // allow either a mul, shift, or constant here.
        Value *NewIdx = 0;
        ConstantInt *Scale = 0;
        if (ArrayEltSize == 1) {
          NewIdx = GEP.getOperand(1);
          Scale = ConstantInt::get(cast<IntegerType>(NewIdx->getType()), 1);
        } else if (ConstantInt *CI = dyn_cast<ConstantInt>(GEP.getOperand(1))) {
          NewIdx = ConstantInt::get(CI->getType(), 1);
          Scale = CI;
        } else if (Instruction *Inst = dyn_cast<Instruction>(GEP.getOperand(1))) {
          if (Inst->getOpcode() == Instruction::Shl &&
              isa<ConstantInt>(Inst->getOperand(1))) {
            ConstantInt *ShAmt = cast<ConstantInt>(Inst->getOperand(1));
            uint32_t ShAmtVal = ShAmt->getLimitedValue(64);
            Scale = ConstantInt::get(cast<IntegerType>(Inst->getType()),
                                     1ULL << ShAmtVal);
            NewIdx = Inst->getOperand(0);
          } else if (Inst->getOpcode() == Instruction::Mul &&
                     isa<ConstantInt>(Inst->getOperand(1))) {
            Scale = cast<ConstantInt>(Inst->getOperand(1));
            NewIdx = Inst->getOperand(0);
          }
        }

        // If the index will be to exactly the right offset with the scale
        // taken out, perform the transformation.  Note that we don't know
        // whether Scale is signed or not, so we use the unsigned versions of
        // the division/modulo operations after making sure Scale doesn't
        // have the sign bit set.
        if (ArrayEltSize && Scale && Scale->getSExtValue() >= 0LL &&
            Scale->getZExtValue() % ArrayEltSize == 0) {
          Scale = ConstantInt::get(Scale->getType(),
                                   Scale->getZExtValue() / ArrayEltSize);
          if (Scale->getZExtValue() != 1) {
            Constant *C = ConstantExpr::getIntegerCast(Scale, NewIdx->getType(),
                                                       false /*ZExt*/);
            NewIdx = Builder->CreateMul(NewIdx, C, "idxscale");
          }

          // Insert the new GEP instruction.
          Value *Idx[2];
          Idx[0] = Constant::getNullValue(Type::getInt32Ty(GEP.getContext()));
          Idx[1] = NewIdx;
          Value *NewGEP = GEP.isInBounds() ?
            Builder->CreateInBoundsGEP(StrippedPtr, Idx, GEP.getName()) :
            Builder->CreateGEP(StrippedPtr, Idx, GEP.getName());
          // The NewGEP must be pointer typed, so must the old one -> BitCast
          return new BitCastInst(NewGEP, GEP.getType());
        }
      }
    }
  }

  /// See if we can simplify:
  ///   X = bitcast A* to B*
  ///   Y = gep X, <...constant indices...>
  /// into a gep of the original struct.  This is important for SROA and alias
  /// analysis of unions.  If "A" is also a bitcast, wait for A/X to be merged.
  if (BitCastInst *BCI = dyn_cast<BitCastInst>(PtrOp)) {
    if (TD &&
        !isa<BitCastInst>(BCI->getOperand(0)) && GEP.hasAllConstantIndices() &&
        StrippedPtrTy->getAddressSpace() == GEP.getPointerAddressSpace()) {

      // Determine how much the GEP moves the pointer.  We are guaranteed to
      // get a constant back from EmitGEPOffset.
      ConstantInt *OffsetV = cast<ConstantInt>(EmitGEPOffset(&GEP));
      int64_t Offset = OffsetV->getSExtValue();

      // If this GEP instruction doesn't move the pointer, just replace the GEP
      // with a bitcast of the real input to the dest type.
      if (Offset == 0) {
        // If the bitcast is of an allocation, and the allocation will be
        // converted to match the type of the cast, don't touch this.
        if (isa<AllocaInst>(BCI->getOperand(0)) ||
            isMalloc(BCI->getOperand(0))) {
          // See if the bitcast simplifies, if so, don't nuke this GEP yet.
          if (Instruction *I = visitBitCast(*BCI)) {
            if (I != BCI) {
              I->takeName(BCI);
              BCI->getParent()->getInstList().insert(BCI, I);
              ReplaceInstUsesWith(*BCI, I);
            }
            return &GEP;
          }
        }
        return new BitCastInst(BCI->getOperand(0), GEP.getType());
      }

      // Otherwise, if the offset is non-zero, we need to find out if there is
      // a field at Offset in 'A's type.  If so, we can pull the cast through
      // the GEP.
      SmallVector<Value*, 8> NewIndices;
      Type *InTy =
        cast<PointerType>(BCI->getOperand(0)->getType())->getElementType();
      if (FindElementAtOffset(InTy, Offset, NewIndices)) {
        Value *NGEP = GEP.isInBounds() ?
          Builder->CreateInBoundsGEP(BCI->getOperand(0), NewIndices) :
          Builder->CreateGEP(BCI->getOperand(0), NewIndices);

        if (NGEP->getType() == GEP.getType())
          return ReplaceInstUsesWith(GEP, NGEP);
        NGEP->takeName(&GEP);
        return new BitCastInst(NGEP, GEP.getType());
      }
    }
  }

  return 0;
}



static bool IsOnlyNullComparedAndFreed(Value *V, SmallVectorImpl<WeakVH> &Users,
                                       int Depth = 0) {
  if (Depth == 8)
    return false;

  for (Value::use_iterator UI = V->use_begin(), UE = V->use_end();
       UI != UE; ++UI) {
    User *U = *UI;
    if (isFreeCall(U)) {
      Users.push_back(U);
      continue;
    }
    if (ICmpInst *ICI = dyn_cast<ICmpInst>(U)) {
      if (ICI->isEquality() && isa<ConstantPointerNull>(ICI->getOperand(1))) {
        Users.push_back(ICI);
        continue;
      }
    }
    if (BitCastInst *BCI = dyn_cast<BitCastInst>(U)) {
      if (IsOnlyNullComparedAndFreed(BCI, Users, Depth+1)) {
        Users.push_back(BCI);
        continue;
      }
    }
    if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(U)) {
      if (IsOnlyNullComparedAndFreed(GEPI, Users, Depth+1)) {
        Users.push_back(GEPI);
        continue;
      }
    }
    if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(U)) {
      if (II->getIntrinsicID() == Intrinsic::lifetime_start ||
          II->getIntrinsicID() == Intrinsic::lifetime_end) {
        Users.push_back(II);
        continue;
      }
    }
    return false;
  }
  return true;
}

Instruction *InstCombiner::visitMalloc(Instruction &MI) {
  // If we have a malloc call which is only used in any amount of comparisons
  // to null and free calls, delete the calls and replace the comparisons with
  // true or false as appropriate.
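  // For example (illustrative IR):
  //   %m = call i8* @malloc(i64 4)
  //   %c = icmp eq i8* %m, null
  //   call void @free(i8* %m)
  // Here %c folds to false and both calls are deleted.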
  SmallVector<WeakVH, 64> Users;
  if (IsOnlyNullComparedAndFreed(&MI, Users)) {
    for (unsigned i = 0, e = Users.size(); i != e; ++i) {
      Instruction *I = cast_or_null<Instruction>(&*Users[i]);
      if (!I) continue;

      if (ICmpInst *C = dyn_cast<ICmpInst>(I)) {
        ReplaceInstUsesWith(*C,
                            ConstantInt::get(Type::getInt1Ty(C->getContext()),
                                             C->isFalseWhenEqual()));
      } else if (isa<BitCastInst>(I) || isa<GetElementPtrInst>(I)) {
        ReplaceInstUsesWith(*I, UndefValue::get(I->getType()));
      }
      EraseInstFromFunction(*I);
    }
    return EraseInstFromFunction(MI);
  }
  return 0;
}



Instruction *InstCombiner::visitFree(CallInst &FI) {
  Value *Op = FI.getArgOperand(0);

  // free undef -> unreachable.
  if (isa<UndefValue>(Op)) {
    // Insert a new store to null because we cannot modify the CFG here.
    Builder->CreateStore(ConstantInt::getTrue(FI.getContext()),
                         UndefValue::get(Type::getInt1PtrTy(FI.getContext())));
    return EraseInstFromFunction(FI);
  }

  // If we have 'free null' delete the instruction.  This can happen in stl
  // code when lots of inlining happens.
  if (isa<ConstantPointerNull>(Op))
    return EraseInstFromFunction(FI);

  return 0;
}



Instruction *InstCombiner::visitBranchInst(BranchInst &BI) {
  // Change br (not X), label True, label False to: br X, label False, True
  Value *X = 0;
  BasicBlock *TrueDest;
  BasicBlock *FalseDest;
  if (match(&BI, m_Br(m_Not(m_Value(X)), TrueDest, FalseDest)) &&
      !isa<Constant>(X)) {
    // Swap Destinations and condition...
    BI.setCondition(X);
    BI.swapSuccessors();
    return &BI;
  }

  // Canonicalize fcmp_one -> fcmp_oeq
  FCmpInst::Predicate FPred; Value *Y;
  if (match(&BI, m_Br(m_FCmp(FPred, m_Value(X), m_Value(Y)),
                      TrueDest, FalseDest)) &&
      BI.getCondition()->hasOneUse())
    if (FPred == FCmpInst::FCMP_ONE || FPred == FCmpInst::FCMP_OLE ||
        FPred == FCmpInst::FCMP_OGE) {
      FCmpInst *Cond = cast<FCmpInst>(BI.getCondition());
      Cond->setPredicate(FCmpInst::getInversePredicate(FPred));

      // Swap Destinations and condition.
      BI.swapSuccessors();
      Worklist.Add(Cond);
      return &BI;
    }

  // Canonicalize icmp_ne -> icmp_eq
1225 ICmpInst::Predicate IPred;
1226 if (match(&BI, m_Br(m_ICmp(IPred, m_Value(X), m_Value(Y)),
1227 TrueDest, FalseDest)) &&
1228 BI.getCondition()->hasOneUse())
1229 if (IPred == ICmpInst::ICMP_NE || IPred == ICmpInst::ICMP_ULE ||
1230 IPred == ICmpInst::ICMP_SLE || IPred == ICmpInst::ICMP_UGE ||
1231 IPred == ICmpInst::ICMP_SGE) {
1232 ICmpInst *Cond = cast<ICmpInst>(BI.getCondition());
1233 Cond->setPredicate(ICmpInst::getInversePredicate(IPred));
1234 // Swap Destinations and condition.
1235 BI.swapSuccessors();
1236 Worklist.Add(Cond);
1237 return &BI;
1238 }
1239
1240 return 0;
1241 }
1242
visitSwitchInst(SwitchInst & SI)1243 Instruction *InstCombiner::visitSwitchInst(SwitchInst &SI) {
1244 Value *Cond = SI.getCondition();
1245 if (Instruction *I = dyn_cast<Instruction>(Cond)) {
1246 if (I->getOpcode() == Instruction::Add)
1247 if (ConstantInt *AddRHS = dyn_cast<ConstantInt>(I->getOperand(1))) {
1248 // change 'switch (X+4) case 1:' into 'switch (X) case -3'
1249 // Skip the first item since that's the default case.
1250 for (SwitchInst::CaseIt i = SI.case_begin(), e = SI.case_end();
1251 i != e; ++i) {
1252 ConstantInt* CaseVal = i.getCaseValue();
1253 Constant* NewCaseVal = ConstantExpr::getSub(cast<Constant>(CaseVal),
1254 AddRHS);
1255 assert(isa<ConstantInt>(NewCaseVal) &&
1256 "Result of expression should be constant");
1257 i.setValue(cast<ConstantInt>(NewCaseVal));
1258 }
1259 SI.setCondition(I->getOperand(0));
1260 Worklist.Add(I);
1261 return &SI;
1262 }
1263 }
1264 return 0;
1265 }
1266
visitExtractValueInst(ExtractValueInst & EV)1267 Instruction *InstCombiner::visitExtractValueInst(ExtractValueInst &EV) {
1268 Value *Agg = EV.getAggregateOperand();
1269
1270 if (!EV.hasIndices())
1271 return ReplaceInstUsesWith(EV, Agg);
1272
1273 if (Constant *C = dyn_cast<Constant>(Agg)) {
1274 if (Constant *C2 = C->getAggregateElement(*EV.idx_begin())) {
1275 if (EV.getNumIndices() == 0)
1276 return ReplaceInstUsesWith(EV, C2);
1277 // Extract the remaining indices out of the constant indexed by the
1278 // first index
1279 return ExtractValueInst::Create(C2, EV.getIndices().slice(1));
1280 }
1281 return 0; // Can't handle other constants
1282 }
1283
1284 if (InsertValueInst *IV = dyn_cast<InsertValueInst>(Agg)) {
1285 // We're extracting from an insertvalue instruction, compare the indices
1286 const unsigned *exti, *exte, *insi, *inse;
1287 for (exti = EV.idx_begin(), insi = IV->idx_begin(),
1288 exte = EV.idx_end(), inse = IV->idx_end();
1289 exti != exte && insi != inse;
1290 ++exti, ++insi) {
1291 if (*insi != *exti)
1292 // The insert and extract both reference distinctly different elements.
1293 // This means the extract is not influenced by the insert, and we can
1294 // replace the aggregate operand of the extract with the aggregate
1295 // operand of the insert. i.e., replace
1296 // %I = insertvalue { i32, { i32 } } %A, { i32 } { i32 42 }, 1
1297 // %E = extractvalue { i32, { i32 } } %I, 0
1298 // with
1299 // %E = extractvalue { i32, { i32 } } %A, 0
1300 return ExtractValueInst::Create(IV->getAggregateOperand(),
1301 EV.getIndices());
1302 }
1303 if (exti == exte && insi == inse)
1304 // Both iterators are at the end: Index lists are identical. Replace
1305 // %B = insertvalue { i32, { i32 } } %A, i32 42, 1, 0
1306 // %C = extractvalue { i32, { i32 } } %B, 1, 0
1307 // with "i32 42"
1308 return ReplaceInstUsesWith(EV, IV->getInsertedValueOperand());
1309 if (exti == exte) {
1310 // The extract list is a prefix of the insert list. i.e. replace
1311 // %I = insertvalue { i32, { i32 } } %A, i32 42, 1, 0
1312 // %E = extractvalue { i32, { i32 } } %I, 1
1313 // with
1314 // %X = extractvalue { i32, { i32 } } %A, 1
1315 // %E = insertvalue { i32 } %X, i32 42, 0
1316 // by switching the order of the insert and extract (though the
1317 // insertvalue should be left in, since it may have other uses).
1318 Value *NewEV = Builder->CreateExtractValue(IV->getAggregateOperand(),
1319 EV.getIndices());
1320 return InsertValueInst::Create(NewEV, IV->getInsertedValueOperand(),
1321 makeArrayRef(insi, inse));
1322 }
1323 if (insi == inse)
1324 // The insert list is a prefix of the extract list
1325 // We can simply remove the common indices from the extract and make it
1326 // operate on the inserted value instead of the insertvalue result.
1327 // i.e., replace
1328 // %I = insertvalue { i32, { i32 } } %A, { i32 } { i32 42 }, 1
1329 // %E = extractvalue { i32, { i32 } } %I, 1, 0
1330 // with
1331 // %E extractvalue { i32 } { i32 42 }, 0
1332 return ExtractValueInst::Create(IV->getInsertedValueOperand(),
1333 makeArrayRef(exti, exte));
1334 }
1335 if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(Agg)) {
1336 // We're extracting from an intrinsic, see if we're the only user, which
1337 // allows us to simplify multiple result intrinsics to simpler things that
1338 // just get one value.
1339 if (II->hasOneUse()) {
1340 // Check if we're grabbing the overflow bit or the result of a 'with
1341 // overflow' intrinsic. If it's the latter we can remove the intrinsic
1342 // and replace it with a traditional binary instruction.
1343 switch (II->getIntrinsicID()) {
1344 case Intrinsic::uadd_with_overflow:
1345 case Intrinsic::sadd_with_overflow:
1346 if (*EV.idx_begin() == 0) { // Normal result.
1347 Value *LHS = II->getArgOperand(0), *RHS = II->getArgOperand(1);
1348 ReplaceInstUsesWith(*II, UndefValue::get(II->getType()));
1349 EraseInstFromFunction(*II);
1350 return BinaryOperator::CreateAdd(LHS, RHS);
1351 }
1352
1353 // If the normal result of the add is dead, and the RHS is a constant,
1354 // we can transform this into a range comparison.
1355 // overflow = uadd a, -4 --> overflow = icmp ugt a, 3
1356 if (II->getIntrinsicID() == Intrinsic::uadd_with_overflow)
1357 if (ConstantInt *CI = dyn_cast<ConstantInt>(II->getArgOperand(1)))
1358 return new ICmpInst(ICmpInst::ICMP_UGT, II->getArgOperand(0),
1359 ConstantExpr::getNot(CI));
1360 break;
1361 case Intrinsic::usub_with_overflow:
1362 case Intrinsic::ssub_with_overflow:
1363 if (*EV.idx_begin() == 0) { // Normal result.
1364 Value *LHS = II->getArgOperand(0), *RHS = II->getArgOperand(1);
1365 ReplaceInstUsesWith(*II, UndefValue::get(II->getType()));
1366 EraseInstFromFunction(*II);
1367 return BinaryOperator::CreateSub(LHS, RHS);
1368 }
1369 break;
1370 case Intrinsic::umul_with_overflow:
1371 case Intrinsic::smul_with_overflow:
1372 if (*EV.idx_begin() == 0) { // Normal result.
1373 Value *LHS = II->getArgOperand(0), *RHS = II->getArgOperand(1);
1374 ReplaceInstUsesWith(*II, UndefValue::get(II->getType()));
1375 EraseInstFromFunction(*II);
1376 return BinaryOperator::CreateMul(LHS, RHS);
1377 }
1378 break;
1379 default:
1380 break;
1381 }
1382 }
1383 }
1384 if (LoadInst *L = dyn_cast<LoadInst>(Agg))
1385 // If the (non-volatile) load only has one use, we can rewrite this to a
1386 // load from a GEP. This reduces the size of the load.
1387 // FIXME: If a load is used only by extractvalue instructions then this
1388 // could be done regardless of having multiple uses.
1389 if (L->isSimple() && L->hasOneUse()) {
1390 // extractvalue has integer indices, getelementptr has Value*s. Convert.
1391 SmallVector<Value*, 4> Indices;
1392 // Prefix an i32 0 since we need the first element.
1393 Indices.push_back(Builder->getInt32(0));
1394 for (ExtractValueInst::idx_iterator I = EV.idx_begin(), E = EV.idx_end();
1395 I != E; ++I)
1396 Indices.push_back(Builder->getInt32(*I));
1397
1398 // We need to insert these at the location of the old load, not at that of
1399 // the extractvalue.
1400 Builder->SetInsertPoint(L->getParent(), L);
1401 Value *GEP = Builder->CreateInBoundsGEP(L->getPointerOperand(), Indices);
1402 // Returning the load directly will cause the main loop to insert it in
1403 // the wrong spot, so use ReplaceInstUsesWith().
1404 return ReplaceInstUsesWith(EV, Builder->CreateLoad(GEP));
1405 }
1406 // We could simplify extracts from other values. Note that nested extracts may
1407 // already be simplified implicitly by the above: extract (extract (insert) )
1408 // will be translated into extract ( insert ( extract ) ) first and then just
1409 // the value inserted, if appropriate. Similarly for extracts from single-use
1410 // loads: extract (extract (load)) will be translated to extract (load (gep))
1411 // and if again single-use then via load (gep (gep)) to load (gep).
1412 // However, double extracts from e.g. function arguments or return values
1413 // aren't handled yet.
1414 return 0;
1415 }
1416
enum Personality_Type {
  Unknown_Personality,
  GNU_Ada_Personality,
  GNU_CXX_Personality,
  GNU_ObjC_Personality
};

/// RecognizePersonality - See if the given exception handling personality
/// function is one that we understand. If so, return a description of it;
/// otherwise return Unknown_Personality.
static Personality_Type RecognizePersonality(Value *Pers) {
  Function *F = dyn_cast<Function>(Pers->stripPointerCasts());
  if (!F)
    return Unknown_Personality;
  return StringSwitch<Personality_Type>(F->getName())
    .Case("__gnat_eh_personality", GNU_Ada_Personality)
    .Case("__gxx_personality_v0", GNU_CXX_Personality)
    .Case("__objc_personality_v0", GNU_ObjC_Personality)
    .Default(Unknown_Personality);
}

/// isCatchAll - Return 'true' if the given typeinfo will match anything.
static bool isCatchAll(Personality_Type Personality, Constant *TypeInfo) {
  switch (Personality) {
  case Unknown_Personality:
    return false;
  case GNU_Ada_Personality:
    // While __gnat_all_others_value will match any Ada exception, it doesn't
    // match foreign exceptions (or didn't, before gcc-4.7).
    return false;
  case GNU_CXX_Personality:
  case GNU_ObjC_Personality:
    return TypeInfo->isNullValue();
  }
  llvm_unreachable("Unknown personality!");
}

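/// shorter_filter - Comparison predicate, suitable for std::stable_sort, that
/// orders landingpad filter clauses by increasing number of elements.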
static bool shorter_filter(const Value *LHS, const Value *RHS) {
  return cast<ArrayType>(LHS->getType())->getNumElements() <
         cast<ArrayType>(RHS->getType())->getNumElements();
}

Instruction *InstCombiner::visitLandingPadInst(LandingPadInst &LI) {
  // The logic here should be correct for any real-world personality function.
  // However if that turns out not to be true, the offending logic can always
  // be conditioned on the personality function, like the catch-all logic is.
  Personality_Type Personality = RecognizePersonality(LI.getPersonalityFn());

  // Simplify the list of clauses, e.g. by removing repeated catch clauses
  // (these are often created by inlining).
  bool MakeNewInstruction = false; // If true, recreate using the following:
  SmallVector<Value *, 16> NewClauses; // - Clauses for the new instruction;
  bool CleanupFlag = LI.isCleanup();   // - The new instruction is a cleanup.

  SmallPtrSet<Value *, 16> AlreadyCaught; // Typeinfos known caught already.
  for (unsigned i = 0, e = LI.getNumClauses(); i != e; ++i) {
    bool isLastClause = i + 1 == e;
    if (LI.isCatch(i)) {
      // A catch clause.
      Value *CatchClause = LI.getClause(i);
      Constant *TypeInfo = cast<Constant>(CatchClause->stripPointerCasts());

      // If we already saw this clause, there is no point in having a second
      // copy of it.
      if (AlreadyCaught.insert(TypeInfo)) {
        // This catch clause was not already seen.
        NewClauses.push_back(CatchClause);
      } else {
        // Repeated catch clause - drop the redundant copy.
        MakeNewInstruction = true;
      }

      // If this is a catch-all then there is no point in keeping any following
      // clauses or marking the landingpad as having a cleanup.
      if (isCatchAll(Personality, TypeInfo)) {
        if (!isLastClause)
          MakeNewInstruction = true;
        CleanupFlag = false;
        break;
      }
    } else {
      // A filter clause. If any of the filter elements were already caught
      // then they can be dropped from the filter. It is tempting to try to
      // exploit the filter further by saying that any typeinfo that does not
      // occur in the filter can't be caught later (and thus can be dropped).
      // However this would be wrong, since typeinfos can match without being
      // equal (for example if one represents a C++ class, and the other some
      // class derived from it).
      assert(LI.isFilter(i) && "Unsupported landingpad clause!");
      Value *FilterClause = LI.getClause(i);
      ArrayType *FilterType = cast<ArrayType>(FilterClause->getType());
      unsigned NumTypeInfos = FilterType->getNumElements();

      // An empty filter catches everything, so there is no point in keeping
      // any following clauses or marking the landingpad as having a cleanup.
      // By dealing with this case here the following code is made a bit
      // simpler.
      if (!NumTypeInfos) {
        NewClauses.push_back(FilterClause);
        if (!isLastClause)
          MakeNewInstruction = true;
        CleanupFlag = false;
        break;
      }

      bool MakeNewFilter = false; // If true, make a new filter.
      SmallVector<Constant *, 16> NewFilterElts; // New elements.
      if (isa<ConstantAggregateZero>(FilterClause)) {
        // Not an empty filter - it contains at least one null typeinfo.
        assert(NumTypeInfos > 0 && "Should have handled empty filter already!");
        Constant *TypeInfo =
          Constant::getNullValue(FilterType->getElementType());
        // If this typeinfo is a catch-all then the filter can never match.
        if (isCatchAll(Personality, TypeInfo)) {
          // Throw the filter away.
          MakeNewInstruction = true;
          continue;
        }

        // There is no point in having multiple copies of this typeinfo, so
        // discard all but the first copy if there is more than one.
        NewFilterElts.push_back(TypeInfo);
        if (NumTypeInfos > 1)
          MakeNewFilter = true;
      } else {
        ConstantArray *Filter = cast<ConstantArray>(FilterClause);
        SmallPtrSet<Value *, 16> SeenInFilter; // For uniquing the elements.
        NewFilterElts.reserve(NumTypeInfos);

        // Remove any filter elements that were already caught or that already
        // occurred in the filter. While there, see if any of the elements are
        // catch-alls. If so, the filter can be discarded.
        bool SawCatchAll = false;
        for (unsigned j = 0; j != NumTypeInfos; ++j) {
          Value *Elt = Filter->getOperand(j);
          Constant *TypeInfo = cast<Constant>(Elt->stripPointerCasts());
          if (isCatchAll(Personality, TypeInfo)) {
            // This element is a catch-all. Bail out, noting this fact.
            SawCatchAll = true;
            break;
          }
          if (AlreadyCaught.count(TypeInfo))
            // Already caught by an earlier clause, so having it in the filter
            // is pointless.
            continue;
          // There is no point in having multiple copies of the same typeinfo
          // in a filter, so only add it if we didn't already.
          if (SeenInFilter.insert(TypeInfo))
            NewFilterElts.push_back(cast<Constant>(Elt));
        }
        // A filter containing a catch-all cannot match anything by definition.
        if (SawCatchAll) {
          // Throw the filter away.
          MakeNewInstruction = true;
          continue;
        }

        // If we dropped something from the filter, make a new one.
        if (NewFilterElts.size() < NumTypeInfos)
          MakeNewFilter = true;
      }
      if (MakeNewFilter) {
        FilterType = ArrayType::get(FilterType->getElementType(),
                                    NewFilterElts.size());
        FilterClause = ConstantArray::get(FilterType, NewFilterElts);
        MakeNewInstruction = true;
      }

      NewClauses.push_back(FilterClause);

      // If the new filter is empty then it will catch everything so there is
      // no point in keeping any following clauses or marking the landingpad
      // as having a cleanup. The case of the original filter being empty was
      // already handled above.
      if (MakeNewFilter && !NewFilterElts.size()) {
        assert(MakeNewInstruction && "New filter but not a new instruction!");
        CleanupFlag = false;
        break;
      }
    }
  }

  // If several filters occur in a row then reorder them so that the shortest
  // filters come first (those with the smallest number of elements). This is
  // advantageous because shorter filters are more likely to match, speeding up
  // unwinding, but mostly because it increases the effectiveness of the other
  // filter optimizations below.
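  // For example (illustrative): the clause list
  //   filter [2 x i8*] [@A, @B], filter [1 x i8*] [@C]
  // is reordered so that the one-element filter [@C] comes first.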
  for (unsigned i = 0, e = NewClauses.size(); i + 1 < e; ) {
    unsigned j;
    // Find the maximal 'j' s.t. the range [i, j) consists entirely of filters.
    for (j = i; j != e; ++j)
      if (!isa<ArrayType>(NewClauses[j]->getType()))
        break;

    // Check whether the filters are already sorted by length. We need to know
    // if sorting them is actually going to do anything so that we only make a
    // new landingpad instruction if it does.
    for (unsigned k = i; k + 1 < j; ++k)
      if (shorter_filter(NewClauses[k+1], NewClauses[k])) {
        // Not sorted, so sort the filters now. Doing an unstable sort would be
        // correct too but reordering filters pointlessly might confuse users.
        std::stable_sort(NewClauses.begin() + i, NewClauses.begin() + j,
                         shorter_filter);
        MakeNewInstruction = true;
        break;
      }

    // Look for the next batch of filters.
    i = j + 1;
  }

  // If typeinfos matched if and only if they were equal, then the elements of
  // a filter L that occurs later than a filter F could be replaced by the
  // intersection of the elements of F and L. In reality two typeinfos can
  // match without being equal (for example if one represents a C++ class, and
  // the other some class derived from it), so it would be wrong to perform
  // this transform in general. However the transform is correct and useful if
  // F is a subset of L. In that case L can be replaced by F, and thus removed
  // altogether since repeating a filter is pointless. So here we look at all
  // pairs of filters F and L where L follows F in the list of clauses, and
  // remove L if every element of F is an element of L. This can occur when
  // inlining C++ functions with exception specifications.
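  // For example (illustrative): given
  //   filter [1 x i8*] [@A], filter [2 x i8*] [@A, @B]
  // every element of the first filter is also an element of the second, so by
  // the subset rule the second filter is redundant and is removed.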
  for (unsigned i = 0; i + 1 < NewClauses.size(); ++i) {
    // Examine each filter in turn.
    Value *Filter = NewClauses[i];
    ArrayType *FTy = dyn_cast<ArrayType>(Filter->getType());
    if (!FTy)
      // Not a filter - skip it.
      continue;
    unsigned FElts = FTy->getNumElements();
    // Examine each filter following this one. Doing this backwards means that
    // we don't have to worry about filters disappearing under us when removed.
    for (unsigned j = NewClauses.size() - 1; j != i; --j) {
      Value *LFilter = NewClauses[j];
      ArrayType *LTy = dyn_cast<ArrayType>(LFilter->getType());
      if (!LTy)
        // Not a filter - skip it.
        continue;
      // If Filter is a subset of LFilter, i.e. every element of Filter is also
      // an element of LFilter, then discard LFilter.
      SmallVector<Value *, 16>::iterator J = NewClauses.begin() + j;
      // If Filter is empty then it is a subset of LFilter.
      if (!FElts) {
        // Discard LFilter.
        NewClauses.erase(J);
        MakeNewInstruction = true;
        // Move on to the next filter.
        continue;
      }
      unsigned LElts = LTy->getNumElements();
      // If Filter is longer than LFilter then it cannot be a subset of it.
      if (FElts > LElts)
        // Move on to the next filter.
        continue;
      // At this point we know that LFilter has at least one element.
      if (isa<ConstantAggregateZero>(LFilter)) { // LFilter only contains zeros.
        // Filter is a subset of LFilter iff Filter contains only zeros (as we
        // already know that Filter is not longer than LFilter).
        if (isa<ConstantAggregateZero>(Filter)) {
          assert(FElts <= LElts && "Should have handled this case earlier!");
          // Discard LFilter.
          NewClauses.erase(J);
          MakeNewInstruction = true;
        }
        // Move on to the next filter.
        continue;
      }
      ConstantArray *LArray = cast<ConstantArray>(LFilter);
      if (isa<ConstantAggregateZero>(Filter)) { // Filter only contains zeros.
        // Since Filter is non-empty and contains only zeros, it is a subset of
        // LFilter iff LFilter contains a zero.
        assert(FElts > 0 && "Should have eliminated the empty filter earlier!");
        for (unsigned l = 0; l != LElts; ++l)
          if (LArray->getOperand(l)->isNullValue()) {
            // LFilter contains a zero - discard it.
            NewClauses.erase(J);
            MakeNewInstruction = true;
            break;
          }
        // Move on to the next filter.
        continue;
      }
      // At this point we know that both filters are ConstantArrays. Loop over
      // operands to see whether every element of Filter is also an element of
      // LFilter. Since filters tend to be short this is probably faster than
      // using a method that scales nicely.
      ConstantArray *FArray = cast<ConstantArray>(Filter);
      bool AllFound = true;
      for (unsigned f = 0; f != FElts; ++f) {
        Value *FTypeInfo = FArray->getOperand(f)->stripPointerCasts();
        AllFound = false;
        for (unsigned l = 0; l != LElts; ++l) {
          Value *LTypeInfo = LArray->getOperand(l)->stripPointerCasts();
          if (LTypeInfo == FTypeInfo) {
            AllFound = true;
            break;
          }
        }
        if (!AllFound)
          break;
      }
      if (AllFound) {
        // Discard LFilter.
        NewClauses.erase(J);
        MakeNewInstruction = true;
      }
      // Move on to the next filter.
    }
  }

  // If we changed any of the clauses, replace the old landingpad instruction
  // with a new one.
  if (MakeNewInstruction) {
    LandingPadInst *NLI = LandingPadInst::Create(LI.getType(),
                                                 LI.getPersonalityFn(),
                                                 NewClauses.size());
    for (unsigned i = 0, e = NewClauses.size(); i != e; ++i)
      NLI->addClause(NewClauses[i]);
    // A landing pad with no clauses must have the cleanup flag set. It is
    // theoretically possible, though highly unlikely, that we eliminated all
    // clauses. If so, force the cleanup flag to true.
    if (NewClauses.empty())
      CleanupFlag = true;
    NLI->setCleanup(CleanupFlag);
    return NLI;
  }

  // Even if none of the clauses changed, we may nonetheless have understood
  // that the cleanup flag is pointless. Clear it if so.
  if (LI.isCleanup() != CleanupFlag) {
    assert(!CleanupFlag && "Adding a cleanup, not removing one?!");
    LI.setCleanup(CleanupFlag);
    return &LI;
  }

  return 0;
}
/// TryToSinkInstruction - Try to move the specified instruction from its
/// current block into the beginning of DestBlock, which can only happen if it's
/// safe to move the instruction past all of the instructions between it and the
/// end of its block.
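///
/// For example (illustrative): a simple load whose single use is in a
/// successor block can be sunk to that block, provided nothing between the
/// load and the end of its current block may write to memory.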
static bool TryToSinkInstruction(Instruction *I, BasicBlock *DestBlock) {
  assert(I->hasOneUse() && "Invariants didn't hold!");

  // Cannot move control-flow-involving instructions (phis, landingpads,
  // terminators), instructions with side effects, volatile loads, vaarg, etc.
  if (isa<PHINode>(I) || isa<LandingPadInst>(I) || I->mayHaveSideEffects() ||
      isa<TerminatorInst>(I))
    return false;

  // Do not sink alloca instructions out of the entry block.
  if (isa<AllocaInst>(I) && I->getParent() ==
        &DestBlock->getParent()->getEntryBlock())
    return false;

  // We can only sink load instructions if there is nothing between the load
  // and the end of its block that could change the value.
  if (I->mayReadFromMemory()) {
    for (BasicBlock::iterator Scan = I, E = I->getParent()->end();
         Scan != E; ++Scan)
      if (Scan->mayWriteToMemory())
        return false;
  }

  BasicBlock::iterator InsertPos = DestBlock->getFirstInsertionPt();
  I->moveBefore(InsertPos);
  ++NumSunkInst;
  return true;
}


/// AddReachableCodeToWorklist - Walk the function in depth-first order, adding
/// all reachable code to the worklist.
///
/// This has a couple of tricks to make the code faster and more powerful. In
/// particular, we constant fold and DCE instructions as we go, to avoid adding
/// them to the worklist (this significantly speeds up instcombine on code where
/// many instructions are dead or constant). Additionally, if we find a branch
/// whose condition is a known constant, we only visit the reachable successors.
///
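/// For example (illustrative): when the walk reaches a block ending in
/// 'br i1 true, label %T, label %F', only %T is pushed onto the block
/// worklist, so blocks reachable only through %F are never visited at all.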
static bool AddReachableCodeToWorklist(BasicBlock *BB,
                                       SmallPtrSet<BasicBlock*, 64> &Visited,
                                       InstCombiner &IC,
                                       const TargetData *TD,
                                       const TargetLibraryInfo *TLI) {
  bool MadeIRChange = false;
  SmallVector<BasicBlock*, 256> Worklist;
  Worklist.push_back(BB);

  SmallVector<Instruction*, 128> InstrsForInstCombineWorklist;
  DenseMap<ConstantExpr*, Constant*> FoldedConstants;

  do {
    BB = Worklist.pop_back_val();

    // We have now visited this block! If we've already been here, ignore it.
    if (!Visited.insert(BB)) continue;

    for (BasicBlock::iterator BBI = BB->begin(), E = BB->end(); BBI != E; ) {
      Instruction *Inst = BBI++;

      // DCE instruction if trivially dead.
      if (isInstructionTriviallyDead(Inst)) {
        ++NumDeadInst;
        DEBUG(errs() << "IC: DCE: " << *Inst << '\n');
        Inst->eraseFromParent();
        continue;
      }

      // ConstantProp instruction if trivially constant.
      if (!Inst->use_empty() && isa<Constant>(Inst->getOperand(0)))
        if (Constant *C = ConstantFoldInstruction(Inst, TD, TLI)) {
          DEBUG(errs() << "IC: ConstFold to: " << *C << " from: "
                       << *Inst << '\n');
          Inst->replaceAllUsesWith(C);
          ++NumConstProp;
          Inst->eraseFromParent();
          continue;
        }

      if (TD) {
        // See if we can constant fold its operands.
        for (User::op_iterator i = Inst->op_begin(), e = Inst->op_end();
             i != e; ++i) {
          ConstantExpr *CE = dyn_cast<ConstantExpr>(i);
          if (CE == 0) continue;

          Constant*& FoldRes = FoldedConstants[CE];
          if (!FoldRes)
            FoldRes = ConstantFoldConstantExpression(CE, TD, TLI);
          if (!FoldRes)
            FoldRes = CE;

          if (FoldRes != CE) {
            *i = FoldRes;
            MadeIRChange = true;
          }
        }
      }

      InstrsForInstCombineWorklist.push_back(Inst);
    }

    // Recursively visit successors. If this is a branch or switch on a
    // constant, only visit the reachable successor.
    TerminatorInst *TI = BB->getTerminator();
    if (BranchInst *BI = dyn_cast<BranchInst>(TI)) {
      if (BI->isConditional() && isa<ConstantInt>(BI->getCondition())) {
        bool CondVal = cast<ConstantInt>(BI->getCondition())->getZExtValue();
        BasicBlock *ReachableBB = BI->getSuccessor(!CondVal);
        Worklist.push_back(ReachableBB);
        continue;
      }
    } else if (SwitchInst *SI = dyn_cast<SwitchInst>(TI)) {
      if (ConstantInt *Cond = dyn_cast<ConstantInt>(SI->getCondition())) {
        // See if this is an explicit destination; if so, only that successor
        // is reachable.
        bool FoundCase = false;
        for (SwitchInst::CaseIt i = SI->case_begin(), e = SI->case_end();
             i != e; ++i)
          if (i.getCaseValue() == Cond) {
            Worklist.push_back(i.getCaseSuccessor());
            FoundCase = true;
            break;
          }

        // Otherwise it is the default destination.
        if (!FoundCase)
          Worklist.push_back(SI->getDefaultDest());
        continue;
      }
    }

    for (unsigned i = 0, e = TI->getNumSuccessors(); i != e; ++i)
      Worklist.push_back(TI->getSuccessor(i));
  } while (!Worklist.empty());

  // Once we've found all of the instructions to add to instcombine's worklist,
  // add them in reverse order. This way instcombine will visit from the top
  // of the function down. This jibes well with the way that it adds all uses
  // of instructions to the worklist after doing a transformation, thus avoiding
  // some N^2 behavior in pathological cases.
  IC.Worklist.AddInitialGroup(&InstrsForInstCombineWorklist[0],
                              InstrsForInstCombineWorklist.size());

  return MadeIRChange;
}

bool InstCombiner::DoOneIteration(Function &F, unsigned Iteration) {
  MadeIRChange = false;

  DEBUG(errs() << "\n\nINSTCOMBINE ITERATION #" << Iteration << " on "
               << F.getName() << "\n");

  {
    // Do a depth-first traversal of the function, populate the worklist with
    // the reachable instructions. Ignore blocks that are not reachable. Keep
    // track of which blocks we visit.
    SmallPtrSet<BasicBlock*, 64> Visited;
    MadeIRChange |= AddReachableCodeToWorklist(F.begin(), Visited, *this, TD,
                                               TLI);

    // Do a quick scan over the function. If we find any blocks that are
    // unreachable, remove any instructions inside of them. This prevents
    // the instcombine code from having to deal with some bad special cases.
    for (Function::iterator BB = F.begin(), E = F.end(); BB != E; ++BB) {
      if (Visited.count(BB)) continue;

      // Delete the instructions backwards, so that fewer def-use and use-def
      // chains need updating as each instruction is removed.
      Instruction *EndInst = BB->getTerminator(); // Last not to be deleted.
      while (EndInst != BB->begin()) {
        // Delete the instruction just before EndInst.
        BasicBlock::iterator I = EndInst;
        Instruction *Inst = --I;
        if (!Inst->use_empty())
          Inst->replaceAllUsesWith(UndefValue::get(Inst->getType()));
        if (isa<LandingPadInst>(Inst)) {
          EndInst = Inst;
          continue;
        }
        if (!isa<DbgInfoIntrinsic>(Inst)) {
          ++NumDeadInst;
          MadeIRChange = true;
        }
        Inst->eraseFromParent();
      }
    }
  }

  while (!Worklist.isEmpty()) {
    Instruction *I = Worklist.RemoveOne();
    if (I == 0) continue;  // skip null values.

    // Check to see if we can DCE the instruction.
    if (isInstructionTriviallyDead(I)) {
      DEBUG(errs() << "IC: DCE: " << *I << '\n');
      EraseInstFromFunction(*I);
      ++NumDeadInst;
      MadeIRChange = true;
      continue;
    }

    // Instruction isn't dead, see if we can constant propagate it.
    if (!I->use_empty() && isa<Constant>(I->getOperand(0)))
      if (Constant *C = ConstantFoldInstruction(I, TD, TLI)) {
        DEBUG(errs() << "IC: ConstFold to: " << *C << " from: " << *I << '\n');

        // Add operands to the worklist.
        ReplaceInstUsesWith(*I, C);
        ++NumConstProp;
        EraseInstFromFunction(*I);
        MadeIRChange = true;
        continue;
      }

    // See if we can trivially sink this instruction to a successor basic block.
    if (I->hasOneUse()) {
      BasicBlock *BB = I->getParent();
      Instruction *UserInst = cast<Instruction>(I->use_back());
      BasicBlock *UserParent;

      // Get the block the use occurs in.
      if (PHINode *PN = dyn_cast<PHINode>(UserInst))
        UserParent = PN->getIncomingBlock(I->use_begin().getUse());
      else
        UserParent = UserInst->getParent();

      if (UserParent != BB) {
        bool UserIsSuccessor = false;
        // See if the user is one of our successors.
        for (succ_iterator SI = succ_begin(BB), E = succ_end(BB); SI != E; ++SI)
          if (*SI == UserParent) {
            UserIsSuccessor = true;
            break;
          }

        // If the user is one of our immediate successors, and if that
        // successor only has us as a predecessor (we'd have to split the
        // critical edge otherwise), we can keep going.
        if (UserIsSuccessor && UserParent->getSinglePredecessor())
          // Okay, the CFG is simple enough, try to sink this instruction.
          MadeIRChange |= TryToSinkInstruction(I, UserParent);
      }
    }

    // Now that we have an instruction, try combining it to simplify it.
    Builder->SetInsertPoint(I->getParent(), I);
    Builder->SetCurrentDebugLocation(I->getDebugLoc());

#ifndef NDEBUG
    std::string OrigI;
#endif
    DEBUG(raw_string_ostream SS(OrigI); I->print(SS); OrigI = SS.str(););
    DEBUG(errs() << "IC: Visiting: " << OrigI << '\n');

    if (Instruction *Result = visit(*I)) {
      ++NumCombined;
      // Should we replace the old instruction with a new one?
      if (Result != I) {
        DEBUG(errs() << "IC: Old = " << *I << '\n'
                     << "    New = " << *Result << '\n');

        if (!I->getDebugLoc().isUnknown())
          Result->setDebugLoc(I->getDebugLoc());
        // Everything uses the new instruction now.
        I->replaceAllUsesWith(Result);

        // Move the name to the new instruction first.
        Result->takeName(I);

        // Push the new instruction and any users onto the worklist.
        Worklist.Add(Result);
        Worklist.AddUsersToWorkList(*Result);

        // Insert the new instruction into the basic block...
        BasicBlock *InstParent = I->getParent();
        BasicBlock::iterator InsertPos = I;

        // If we replace a PHI with something that isn't a PHI, fix up the
        // insertion point.
        if (!isa<PHINode>(Result) && isa<PHINode>(InsertPos))
          InsertPos = InstParent->getFirstInsertionPt();

        InstParent->getInstList().insert(InsertPos, Result);

        EraseInstFromFunction(*I);
      } else {
#ifndef NDEBUG
        DEBUG(errs() << "IC: Mod = " << OrigI << '\n'
                     << "    New = " << *I << '\n');
#endif

        // If the instruction was modified, it's possible that it is now dead.
        // If so, remove it.
        if (isInstructionTriviallyDead(I)) {
          EraseInstFromFunction(*I);
        } else {
          Worklist.Add(I);
          Worklist.AddUsersToWorkList(*I);
        }
      }
      MadeIRChange = true;
    }
  }

  Worklist.Zap();
  return MadeIRChange;
}


bool InstCombiner::runOnFunction(Function &F) {
  TD = getAnalysisIfAvailable<TargetData>();
  TLI = &getAnalysis<TargetLibraryInfo>();

  /// Builder - This is an IRBuilder that automatically inserts new
  /// instructions into the worklist when they are created.
  IRBuilder<true, TargetFolder, InstCombineIRInserter>
    TheBuilder(F.getContext(), TargetFolder(TD),
               InstCombineIRInserter(Worklist));
  Builder = &TheBuilder;

  bool EverMadeChange = false;

  // Lower dbg.declare intrinsics, otherwise their value may be clobbered
  // by instcombine.
  EverMadeChange = LowerDbgDeclare(F);

  // Iterate while there is work to do.
  unsigned Iteration = 0;
  while (DoOneIteration(F, Iteration++))
    EverMadeChange = true;

  Builder = 0;
  return EverMadeChange;
}

FunctionPass *llvm::createInstructionCombiningPass() {
  return new InstCombiner();
}