1 //===- InstCombineCalls.cpp -----------------------------------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file implements the visitCall, visitInvoke, and visitCallBr functions.
10 //
11 //===----------------------------------------------------------------------===//
12 
13 #include "InstCombineInternal.h"
14 #include "llvm/ADT/APFloat.h"
15 #include "llvm/ADT/APInt.h"
16 #include "llvm/ADT/APSInt.h"
17 #include "llvm/ADT/ArrayRef.h"
18 #include "llvm/ADT/FloatingPointMode.h"
19 #include "llvm/ADT/None.h"
20 #include "llvm/ADT/Optional.h"
21 #include "llvm/ADT/STLExtras.h"
22 #include "llvm/ADT/SmallVector.h"
23 #include "llvm/ADT/Statistic.h"
24 #include "llvm/ADT/Twine.h"
25 #include "llvm/Analysis/AliasAnalysis.h"
26 #include "llvm/Analysis/AssumeBundleQueries.h"
27 #include "llvm/Analysis/AssumptionCache.h"
28 #include "llvm/Analysis/InstructionSimplify.h"
29 #include "llvm/Analysis/Loads.h"
30 #include "llvm/Analysis/MemoryBuiltins.h"
31 #include "llvm/Analysis/TargetTransformInfo.h"
32 #include "llvm/Analysis/ValueTracking.h"
33 #include "llvm/Analysis/VectorUtils.h"
34 #include "llvm/IR/Attributes.h"
35 #include "llvm/IR/BasicBlock.h"
36 #include "llvm/IR/Constant.h"
37 #include "llvm/IR/Constants.h"
38 #include "llvm/IR/DataLayout.h"
39 #include "llvm/IR/DerivedTypes.h"
40 #include "llvm/IR/Function.h"
41 #include "llvm/IR/GlobalVariable.h"
42 #include "llvm/IR/InstrTypes.h"
43 #include "llvm/IR/Instruction.h"
44 #include "llvm/IR/Instructions.h"
45 #include "llvm/IR/IntrinsicInst.h"
46 #include "llvm/IR/Intrinsics.h"
47 #include "llvm/IR/IntrinsicsAArch64.h"
48 #include "llvm/IR/IntrinsicsAMDGPU.h"
49 #include "llvm/IR/IntrinsicsARM.h"
50 #include "llvm/IR/IntrinsicsHexagon.h"
51 #include "llvm/IR/LLVMContext.h"
52 #include "llvm/IR/Metadata.h"
53 #include "llvm/IR/PatternMatch.h"
54 #include "llvm/IR/Statepoint.h"
55 #include "llvm/IR/Type.h"
56 #include "llvm/IR/User.h"
57 #include "llvm/IR/Value.h"
58 #include "llvm/IR/ValueHandle.h"
59 #include "llvm/Support/AtomicOrdering.h"
60 #include "llvm/Support/Casting.h"
61 #include "llvm/Support/CommandLine.h"
62 #include "llvm/Support/Compiler.h"
63 #include "llvm/Support/Debug.h"
64 #include "llvm/Support/ErrorHandling.h"
65 #include "llvm/Support/KnownBits.h"
66 #include "llvm/Support/MathExtras.h"
67 #include "llvm/Support/raw_ostream.h"
68 #include "llvm/Transforms/InstCombine/InstCombineWorklist.h"
69 #include "llvm/Transforms/InstCombine/InstCombiner.h"
70 #include "llvm/Transforms/Utils/Local.h"
71 #include "llvm/Transforms/Utils/SimplifyLibCalls.h"
72 #include <algorithm>
73 #include <cassert>
74 #include <cstdint>
75 #include <cstring>
76 #include <utility>
77 #include <vector>
78 
79 using namespace llvm;
80 using namespace PatternMatch;
81 
82 #define DEBUG_TYPE "instcombine"
83 
84 STATISTIC(NumSimplified, "Number of library calls simplified");
85 
86 static cl::opt<unsigned> GuardWideningWindow(
87     "instcombine-guard-widening-window",
88     cl::init(3),
89     cl::desc("How wide an instruction window to bypass looking for "
90              "another guard"));
91 
92 /// Return the specified type promoted as it would be to pass through a va_arg
93 /// area.
94 static Type *getPromotedType(Type *Ty) {
95   if (IntegerType* ITy = dyn_cast<IntegerType>(Ty)) {
96     if (ITy->getBitWidth() < 32)
97       return Type::getInt32Ty(Ty->getContext());
98   }
99   return Ty;
100 }
101 
102 Instruction *InstCombinerImpl::SimplifyAnyMemTransfer(AnyMemTransferInst *MI) {
103   Align DstAlign = getKnownAlignment(MI->getRawDest(), DL, MI, &AC, &DT);
104   MaybeAlign CopyDstAlign = MI->getDestAlign();
105   if (!CopyDstAlign || *CopyDstAlign < DstAlign) {
106     MI->setDestAlignment(DstAlign);
107     return MI;
108   }
109 
110   Align SrcAlign = getKnownAlignment(MI->getRawSource(), DL, MI, &AC, &DT);
111   MaybeAlign CopySrcAlign = MI->getSourceAlign();
112   if (!CopySrcAlign || *CopySrcAlign < SrcAlign) {
113     MI->setSourceAlignment(SrcAlign);
114     return MI;
115   }
116 
117   // If we have a store to a location which is known constant, we can conclude
118   // that the store must be storing the constant value (else the memory
119   // wouldn't be constant), and this must be a noop.
120   if (AA->pointsToConstantMemory(MI->getDest())) {
121     // Set the size of the copy to 0, it will be deleted on the next iteration.
122     MI->setLength(Constant::getNullValue(MI->getLength()->getType()));
123     return MI;
124   }
125 
126   // If MemCpyInst length is 1/2/4/8 bytes then replace memcpy with
127   // load/store.
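  // For example, a copy with a constant length of 4 becomes roughly
  // (modulo address spaces, alignment, and metadata):
  //   %v = load i32, i32* %src
  //   store i32 %v, i32* %dst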
128   ConstantInt *MemOpLength = dyn_cast<ConstantInt>(MI->getLength());
129   if (!MemOpLength) return nullptr;
130 
131   // Source and destination pointer types are always "i8*" for intrinsic.  See
132   // if the size is something we can handle with a single primitive load/store.
133   // A single load+store correctly handles overlapping memory in the memmove
134   // case.
135   uint64_t Size = MemOpLength->getLimitedValue();
136   assert(Size && "0-sized memory transferring should be removed already.");
137 
138   if (Size > 8 || (Size&(Size-1)))
139     return nullptr;  // If not 1/2/4/8 bytes, exit.
140 
141   // If this is an atomic transfer and the alignment is less than the size,
142   // we would introduce an unaligned memory access, which CodeGen later
143   // turns into a library call. There is no clear performance gain, so
144   // disable the transform for now.
145   if (isa<AtomicMemTransferInst>(MI))
146     if (*CopyDstAlign < Size || *CopySrcAlign < Size)
147       return nullptr;
148 
149   // Use an integer load+store unless we can find something better.
150   unsigned SrcAddrSp =
151     cast<PointerType>(MI->getArgOperand(1)->getType())->getAddressSpace();
152   unsigned DstAddrSp =
153     cast<PointerType>(MI->getArgOperand(0)->getType())->getAddressSpace();
154 
155   IntegerType* IntType = IntegerType::get(MI->getContext(), Size<<3);
156   Type *NewSrcPtrTy = PointerType::get(IntType, SrcAddrSp);
157   Type *NewDstPtrTy = PointerType::get(IntType, DstAddrSp);
158 
159   // If the memcpy has metadata describing the members, see if we can get the
160   // TBAA tag describing our copy.
161   MDNode *CopyMD = nullptr;
162   if (MDNode *M = MI->getMetadata(LLVMContext::MD_tbaa)) {
163     CopyMD = M;
164   } else if (MDNode *M = MI->getMetadata(LLVMContext::MD_tbaa_struct)) {
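    // A !tbaa.struct tag with a single field that starts at offset 0 and
    // spans the whole copy describes the entire transfer, so its access tag
    // (operand 2) can be used as the TBAA tag for the new load/store.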
165     if (M->getNumOperands() == 3 && M->getOperand(0) &&
166         mdconst::hasa<ConstantInt>(M->getOperand(0)) &&
167         mdconst::extract<ConstantInt>(M->getOperand(0))->isZero() &&
168         M->getOperand(1) &&
169         mdconst::hasa<ConstantInt>(M->getOperand(1)) &&
170         mdconst::extract<ConstantInt>(M->getOperand(1))->getValue() ==
171         Size &&
172         M->getOperand(2) && isa<MDNode>(M->getOperand(2)))
173       CopyMD = cast<MDNode>(M->getOperand(2));
174   }
175 
176   Value *Src = Builder.CreateBitCast(MI->getArgOperand(1), NewSrcPtrTy);
177   Value *Dest = Builder.CreateBitCast(MI->getArgOperand(0), NewDstPtrTy);
178   LoadInst *L = Builder.CreateLoad(IntType, Src);
179   // Alignment from the mem intrinsic will be better, so use it.
180   L->setAlignment(*CopySrcAlign);
181   if (CopyMD)
182     L->setMetadata(LLVMContext::MD_tbaa, CopyMD);
183   MDNode *LoopMemParallelMD =
184     MI->getMetadata(LLVMContext::MD_mem_parallel_loop_access);
185   if (LoopMemParallelMD)
186     L->setMetadata(LLVMContext::MD_mem_parallel_loop_access, LoopMemParallelMD);
187   MDNode *AccessGroupMD = MI->getMetadata(LLVMContext::MD_access_group);
188   if (AccessGroupMD)
189     L->setMetadata(LLVMContext::MD_access_group, AccessGroupMD);
190 
191   StoreInst *S = Builder.CreateStore(L, Dest);
192   // Alignment from the mem intrinsic will be better, so use it.
193   S->setAlignment(*CopyDstAlign);
194   if (CopyMD)
195     S->setMetadata(LLVMContext::MD_tbaa, CopyMD);
196   if (LoopMemParallelMD)
197     S->setMetadata(LLVMContext::MD_mem_parallel_loop_access, LoopMemParallelMD);
198   if (AccessGroupMD)
199     S->setMetadata(LLVMContext::MD_access_group, AccessGroupMD);
200 
201   if (auto *MT = dyn_cast<MemTransferInst>(MI)) {
202     // non-atomics can be volatile
203     L->setVolatile(MT->isVolatile());
204     S->setVolatile(MT->isVolatile());
205   }
206   if (isa<AtomicMemTransferInst>(MI)) {
207     // atomics have to be unordered
208     L->setOrdering(AtomicOrdering::Unordered);
209     S->setOrdering(AtomicOrdering::Unordered);
210   }
211 
212   // Set the size of the copy to 0, it will be deleted on the next iteration.
213   MI->setLength(Constant::getNullValue(MemOpLength->getType()));
214   return MI;
215 }
216 
217 Instruction *InstCombinerImpl::SimplifyAnyMemSet(AnyMemSetInst *MI) {
218   const Align KnownAlignment =
219       getKnownAlignment(MI->getDest(), DL, MI, &AC, &DT);
220   MaybeAlign MemSetAlign = MI->getDestAlign();
221   if (!MemSetAlign || *MemSetAlign < KnownAlignment) {
222     MI->setDestAlignment(KnownAlignment);
223     return MI;
224   }
225 
226   // If we have a store to a location which is known constant, we can conclude
227   // that the store must be storing the constant value (else the memory
228   // wouldn't be constant), and this must be a noop.
229   if (AA->pointsToConstantMemory(MI->getDest())) {
230     // Set the size of the copy to 0, it will be deleted on the next iteration.
231     MI->setLength(Constant::getNullValue(MI->getLength()->getType()));
232     return MI;
233   }
234 
235   // Extract the length and alignment and fill if they are constant.
236   ConstantInt *LenC = dyn_cast<ConstantInt>(MI->getLength());
237   ConstantInt *FillC = dyn_cast<ConstantInt>(MI->getValue());
238   if (!LenC || !FillC || !FillC->getType()->isIntegerTy(8))
239     return nullptr;
240   const uint64_t Len = LenC->getLimitedValue();
241   assert(Len && "0-sized memory setting should be removed already.");
242   const Align Alignment = assumeAligned(MI->getDestAlignment());
243 
244   // If this is an atomic memset and the alignment is less than the size,
245   // we would introduce an unaligned memory access, which CodeGen later
246   // turns into a library call. There is no clear performance gain, so
247   // disable the transform for now.
248   if (isa<AtomicMemSetInst>(MI))
249     if (Alignment < Len)
250       return nullptr;
251 
252   // memset(s,c,n) -> store s, c (for n=1,2,4,8)
253   if (Len <= 8 && isPowerOf2_32((uint32_t)Len)) {
254     Type *ITy = IntegerType::get(MI->getContext(), Len*8);  // n=1 -> i8.
255 
256     Value *Dest = MI->getDest();
257     unsigned DstAddrSp = cast<PointerType>(Dest->getType())->getAddressSpace();
258     Type *NewDstPtrTy = PointerType::get(ITy, DstAddrSp);
259     Dest = Builder.CreateBitCast(Dest, NewDstPtrTy);
260 
261     // Extract the fill value and store.
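    // Multiplying the i8 fill value by 0x0101010101010101 splats it into
    // every byte of a 64-bit value; ConstantInt::get then truncates that
    // to the width of ITy.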
262     uint64_t Fill = FillC->getZExtValue()*0x0101010101010101ULL;
263     StoreInst *S = Builder.CreateStore(ConstantInt::get(ITy, Fill), Dest,
264                                        MI->isVolatile());
265     S->setAlignment(Alignment);
266     if (isa<AtomicMemSetInst>(MI))
267       S->setOrdering(AtomicOrdering::Unordered);
268 
269     // Set the size of the copy to 0, it will be deleted on the next iteration.
270     MI->setLength(Constant::getNullValue(LenC->getType()));
271     return MI;
272   }
273 
274   return nullptr;
275 }
276 
277 // TODO, Obvious Missing Transforms:
278 // * Narrow width by halves excluding zero/undef lanes
279 Value *InstCombinerImpl::simplifyMaskedLoad(IntrinsicInst &II) {
280   Value *LoadPtr = II.getArgOperand(0);
281   const Align Alignment =
282       cast<ConstantInt>(II.getArgOperand(1))->getAlignValue();
283 
284   // If the mask is all ones or undefs, this is a plain vector load of the 1st
285   // argument.
286   if (maskIsAllOneOrUndef(II.getArgOperand(2)))
287     return Builder.CreateAlignedLoad(II.getType(), LoadPtr, Alignment,
288                                      "unmaskedload");
289 
290   // If we can unconditionally load from this address, replace with a
291   // load/select idiom. TODO: use DT for context sensitive query
292   if (isDereferenceablePointer(LoadPtr, II.getType(),
293                                II.getModule()->getDataLayout(), &II, nullptr)) {
294     Value *LI = Builder.CreateAlignedLoad(II.getType(), LoadPtr, Alignment,
295                                          "unmaskedload");
296     return Builder.CreateSelect(II.getArgOperand(2), LI, II.getArgOperand(3));
297   }
298 
299   return nullptr;
300 }
301 
302 // TODO, Obvious Missing Transforms:
303 // * Single constant active lane -> store
304 // * Narrow width by halves excluding zero/undef lanes
305 Instruction *InstCombinerImpl::simplifyMaskedStore(IntrinsicInst &II) {
306   auto *ConstMask = dyn_cast<Constant>(II.getArgOperand(3));
307   if (!ConstMask)
308     return nullptr;
309 
310   // If the mask is all zeros, this instruction does nothing.
311   if (ConstMask->isNullValue())
312     return eraseInstFromFunction(II);
313 
314   // If the mask is all ones, this is a plain vector store of the 1st argument.
315   if (ConstMask->isAllOnesValue()) {
316     Value *StorePtr = II.getArgOperand(1);
317     Align Alignment = cast<ConstantInt>(II.getArgOperand(2))->getAlignValue();
318     return new StoreInst(II.getArgOperand(0), StorePtr, false, Alignment);
319   }
320 
321   if (isa<ScalableVectorType>(ConstMask->getType()))
322     return nullptr;
323 
324   // Use masked off lanes to simplify operands via SimplifyDemandedVectorElts
325   APInt DemandedElts = possiblyDemandedEltsInMask(ConstMask);
326   APInt UndefElts(DemandedElts.getBitWidth(), 0);
327   if (Value *V =
328           SimplifyDemandedVectorElts(II.getOperand(0), DemandedElts, UndefElts))
329     return replaceOperand(II, 0, V);
330 
331   return nullptr;
332 }
333 
334 // TODO, Obvious Missing Transforms:
335 // * Single constant active lane load -> load
336 // * Dereferenceable address & few lanes -> scalarize speculative load/selects
337 // * Adjacent vector addresses -> masked.load
338 // * Narrow width by halves excluding zero/undef lanes
339 // * Vector splat address w/known mask -> scalar load
340 // * Vector incrementing address -> vector masked load
341 Instruction *InstCombinerImpl::simplifyMaskedGather(IntrinsicInst &II) {
342   return nullptr;
343 }
344 
345 // TODO, Obvious Missing Transforms:
346 // * Single constant active lane -> store
347 // * Adjacent vector addresses -> masked.store
348 // * Narrow store width by halves excluding zero/undef lanes
349 // * Vector splat address w/known mask -> scalar store
350 // * Vector incrementing address -> vector masked store
351 Instruction *InstCombinerImpl::simplifyMaskedScatter(IntrinsicInst &II) {
352   auto *ConstMask = dyn_cast<Constant>(II.getArgOperand(3));
353   if (!ConstMask)
354     return nullptr;
355 
356   // If the mask is all zeros, a scatter does nothing.
357   if (ConstMask->isNullValue())
358     return eraseInstFromFunction(II);
359 
360   if (isa<ScalableVectorType>(ConstMask->getType()))
361     return nullptr;
362 
363   // Use masked off lanes to simplify operands via SimplifyDemandedVectorElts
364   APInt DemandedElts = possiblyDemandedEltsInMask(ConstMask);
365   APInt UndefElts(DemandedElts.getBitWidth(), 0);
366   if (Value *V =
367           SimplifyDemandedVectorElts(II.getOperand(0), DemandedElts, UndefElts))
368     return replaceOperand(II, 0, V);
369   if (Value *V =
370           SimplifyDemandedVectorElts(II.getOperand(1), DemandedElts, UndefElts))
371     return replaceOperand(II, 1, V);
372 
373   return nullptr;
374 }
375 
376 /// This function transforms launder.invariant.group and strip.invariant.group
377 /// like:
378 /// launder(launder(%x)) -> launder(%x)       (the result is not the argument)
379 /// launder(strip(%x)) -> launder(%x)
380 /// strip(strip(%x)) -> strip(%x)             (the result is not the argument)
381 /// strip(launder(%x)) -> strip(%x)
382 /// This is legal because it preserves the most recent information about
383 /// the presence or absence of invariant.group.
384 static Instruction *simplifyInvariantGroupIntrinsic(IntrinsicInst &II,
385                                                     InstCombinerImpl &IC) {
386   auto *Arg = II.getArgOperand(0);
387   auto *StrippedArg = Arg->stripPointerCasts();
388   auto *StrippedInvariantGroupsArg = Arg->stripPointerCastsAndInvariantGroups();
389   if (StrippedArg == StrippedInvariantGroupsArg)
390     return nullptr; // No launders/strips to remove.
391 
392   Value *Result = nullptr;
393 
394   if (II.getIntrinsicID() == Intrinsic::launder_invariant_group)
395     Result = IC.Builder.CreateLaunderInvariantGroup(StrippedInvariantGroupsArg);
396   else if (II.getIntrinsicID() == Intrinsic::strip_invariant_group)
397     Result = IC.Builder.CreateStripInvariantGroup(StrippedInvariantGroupsArg);
398   else
399     llvm_unreachable(
400         "simplifyInvariantGroupIntrinsic only handles launder and strip");
401   if (Result->getType()->getPointerAddressSpace() !=
402       II.getType()->getPointerAddressSpace())
403     Result = IC.Builder.CreateAddrSpaceCast(Result, II.getType());
404   if (Result->getType() != II.getType())
405     Result = IC.Builder.CreateBitCast(Result, II.getType());
406 
407   return cast<Instruction>(Result);
408 }
409 
410 static Instruction *foldCttzCtlz(IntrinsicInst &II, InstCombinerImpl &IC) {
411   assert((II.getIntrinsicID() == Intrinsic::cttz ||
412           II.getIntrinsicID() == Intrinsic::ctlz) &&
413          "Expected cttz or ctlz intrinsic");
414   bool IsTZ = II.getIntrinsicID() == Intrinsic::cttz;
415   Value *Op0 = II.getArgOperand(0);
416   Value *X;
417   // ctlz(bitreverse(x)) -> cttz(x)
418   // cttz(bitreverse(x)) -> ctlz(x)
419   if (match(Op0, m_BitReverse(m_Value(X)))) {
420     Intrinsic::ID ID = IsTZ ? Intrinsic::ctlz : Intrinsic::cttz;
421     Function *F = Intrinsic::getDeclaration(II.getModule(), ID, II.getType());
422     return CallInst::Create(F, {X, II.getArgOperand(1)});
423   }
424 
425   if (IsTZ) {
426     // cttz(-x) -> cttz(x)
427     if (match(Op0, m_Neg(m_Value(X))))
428       return IC.replaceOperand(II, 0, X);
429 
430     // cttz(abs(x)) -> cttz(x)
431     // cttz(nabs(x)) -> cttz(x)
432     Value *Y;
433     SelectPatternFlavor SPF = matchSelectPattern(Op0, X, Y).Flavor;
434     if (SPF == SPF_ABS || SPF == SPF_NABS)
435       return IC.replaceOperand(II, 0, X);
436 
437     if (match(Op0, m_Intrinsic<Intrinsic::abs>(m_Value(X))))
438       return IC.replaceOperand(II, 0, X);
439   }
440 
441   KnownBits Known = IC.computeKnownBits(Op0, 0, &II);
442 
443   // Create a mask for bits above (ctlz) or below (cttz) the first known one.
444   unsigned PossibleZeros = IsTZ ? Known.countMaxTrailingZeros()
445                                 : Known.countMaxLeadingZeros();
446   unsigned DefiniteZeros = IsTZ ? Known.countMinTrailingZeros()
447                                 : Known.countMinLeadingZeros();
448 
449   // If all bits above (ctlz) or below (cttz) the first known one are known
450   // zero, this value is constant.
451   // FIXME: This should be in InstSimplify because we're replacing an
452   // instruction with a constant.
453   if (PossibleZeros == DefiniteZeros) {
454     auto *C = ConstantInt::get(Op0->getType(), DefiniteZeros);
455     return IC.replaceInstUsesWith(II, C);
456   }
457 
458   // If the input to cttz/ctlz is known to be non-zero,
459   // then change the 'ZeroIsUndef' parameter to 'true'
460   // because we know the zero behavior can't affect the result.
461   if (!Known.One.isNullValue() ||
462       isKnownNonZero(Op0, IC.getDataLayout(), 0, &IC.getAssumptionCache(), &II,
463                      &IC.getDominatorTree())) {
464     if (!match(II.getArgOperand(1), m_One()))
465       return IC.replaceOperand(II, 1, IC.Builder.getTrue());
466   }
467 
468   // Add range metadata since known bits can't completely reflect what we know.
469   // TODO: Handle splat vectors.
470   auto *IT = dyn_cast<IntegerType>(Op0->getType());
471   if (IT && IT->getBitWidth() != 1 && !II.getMetadata(LLVMContext::MD_range)) {
472     Metadata *LowAndHigh[] = {
473         ConstantAsMetadata::get(ConstantInt::get(IT, DefiniteZeros)),
474         ConstantAsMetadata::get(ConstantInt::get(IT, PossibleZeros + 1))};
475     II.setMetadata(LLVMContext::MD_range,
476                    MDNode::get(II.getContext(), LowAndHigh));
477     return &II;
478   }
479 
480   return nullptr;
481 }
482 
483 static Instruction *foldCtpop(IntrinsicInst &II, InstCombinerImpl &IC) {
484   assert(II.getIntrinsicID() == Intrinsic::ctpop &&
485          "Expected ctpop intrinsic");
486   Type *Ty = II.getType();
487   unsigned BitWidth = Ty->getScalarSizeInBits();
488   Value *Op0 = II.getArgOperand(0);
489   Value *X;
490 
491   // ctpop(bitreverse(x)) -> ctpop(x)
492   // ctpop(bswap(x)) -> ctpop(x)
493   if (match(Op0, m_BitReverse(m_Value(X))) || match(Op0, m_BSwap(m_Value(X))))
494     return IC.replaceOperand(II, 0, X);
495 
496   // ctpop(x | -x) -> bitwidth - cttz(x, false)
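  // (x | -x) sets every bit from the lowest set bit of x upward (and is 0
  // when x is 0), so its population count is BitWidth - cttz(x, false).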
497   if (Op0->hasOneUse() &&
498       match(Op0, m_c_Or(m_Value(X), m_Neg(m_Deferred(X))))) {
499     Function *F =
500         Intrinsic::getDeclaration(II.getModule(), Intrinsic::cttz, Ty);
501     auto *Cttz = IC.Builder.CreateCall(F, {X, IC.Builder.getFalse()});
502     auto *Bw = ConstantInt::get(Ty, APInt(BitWidth, BitWidth));
503     return IC.replaceInstUsesWith(II, IC.Builder.CreateSub(Bw, Cttz));
504   }
505 
506   // ctpop(~x & (x - 1)) -> cttz(x, false)
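  // (~x & (x - 1)) has exactly the trailing-zero bits of x set (all bits
  // when x is 0), so its population count equals cttz(x, false).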
507   if (match(Op0,
508             m_c_And(m_Not(m_Value(X)), m_Add(m_Deferred(X), m_AllOnes())))) {
509     Function *F =
510         Intrinsic::getDeclaration(II.getModule(), Intrinsic::cttz, Ty);
511     return CallInst::Create(F, {X, IC.Builder.getFalse()});
512   }
513 
514   // FIXME: Try to simplify vectors of integers.
515   auto *IT = dyn_cast<IntegerType>(Ty);
516   if (!IT)
517     return nullptr;
518 
519   KnownBits Known(BitWidth);
520   IC.computeKnownBits(Op0, Known, 0, &II);
521 
522   unsigned MinCount = Known.countMinPopulation();
523   unsigned MaxCount = Known.countMaxPopulation();
524 
525   // Add range metadata since known bits can't completely reflect what we know.
526   if (IT->getBitWidth() != 1 && !II.getMetadata(LLVMContext::MD_range)) {
527     Metadata *LowAndHigh[] = {
528         ConstantAsMetadata::get(ConstantInt::get(IT, MinCount)),
529         ConstantAsMetadata::get(ConstantInt::get(IT, MaxCount + 1))};
530     II.setMetadata(LLVMContext::MD_range,
531                    MDNode::get(II.getContext(), LowAndHigh));
532     return &II;
533   }
534 
535   return nullptr;
536 }
537 
538 /// Convert a table lookup to shufflevector if the mask is constant.
539 /// This could benefit tbl1 if the mask is { 7,6,5,4,3,2,1,0 }, in
540 /// which case we could lower the shufflevector with rev64 instructions
541 /// as it's actually a byte reverse.
542 static Value *simplifyNeonTbl1(const IntrinsicInst &II,
543                                InstCombiner::BuilderTy &Builder) {
544   // Bail out if the mask is not a constant.
545   auto *C = dyn_cast<Constant>(II.getArgOperand(1));
546   if (!C)
547     return nullptr;
548 
549   auto *VecTy = cast<FixedVectorType>(II.getType());
550   unsigned NumElts = VecTy->getNumElements();
551 
552   // Only perform this transformation for <8 x i8> vector types.
553   if (!VecTy->getElementType()->isIntegerTy(8) || NumElts != 8)
554     return nullptr;
555 
556   int Indexes[8];
557 
558   for (unsigned I = 0; I < NumElts; ++I) {
559     Constant *COp = C->getAggregateElement(I);
560 
561     if (!COp || !isa<ConstantInt>(COp))
562       return nullptr;
563 
564     Indexes[I] = cast<ConstantInt>(COp)->getLimitedValue();
565 
566     // Make sure the mask indices are in range.
567     if ((unsigned)Indexes[I] >= NumElts)
568       return nullptr;
569   }
570 
571   auto *V1 = II.getArgOperand(0);
572   auto *V2 = Constant::getNullValue(V1->getType());
573   return Builder.CreateShuffleVector(V1, V2, makeArrayRef(Indexes));
574 }
575 
576 // Returns true iff the 2 intrinsics have the same operands, limiting the
577 // comparison to the first NumOperands.
578 static bool haveSameOperands(const IntrinsicInst &I, const IntrinsicInst &E,
579                              unsigned NumOperands) {
580   assert(I.getNumArgOperands() >= NumOperands && "Not enough operands");
581   assert(E.getNumArgOperands() >= NumOperands && "Not enough operands");
582   for (unsigned i = 0; i < NumOperands; i++)
583     if (I.getArgOperand(i) != E.getArgOperand(i))
584       return false;
585   return true;
586 }
587 
588 // Remove trivially empty start/end intrinsic ranges, i.e. a start
589 // immediately followed by an end (ignoring debuginfo or other
590 // start/end intrinsics in between). As this handles only the most trivial
591 // cases, tracking the nesting level is not needed:
592 //
593 //   call @llvm.foo.start(i1 0)
594 //   call @llvm.foo.start(i1 0) ; This one won't be skipped: it will be removed
595 //   call @llvm.foo.end(i1 0)
596 //   call @llvm.foo.end(i1 0) ; &I
597 static bool
598 removeTriviallyEmptyRange(IntrinsicInst &EndI, InstCombinerImpl &IC,
599                           std::function<bool(const IntrinsicInst &)> IsStart) {
600   // We start from the end intrinsic and scan backwards, so that InstCombine
601   // has already processed (and potentially removed) all the instructions
602   // before the end intrinsic.
603   BasicBlock::reverse_iterator BI(EndI), BE(EndI.getParent()->rend());
604   for (; BI != BE; ++BI) {
605     if (auto *I = dyn_cast<IntrinsicInst>(&*BI)) {
606       if (isa<DbgInfoIntrinsic>(I) ||
607           I->getIntrinsicID() == EndI.getIntrinsicID())
608         continue;
609       if (IsStart(*I)) {
610         if (haveSameOperands(EndI, *I, EndI.getNumArgOperands())) {
611           IC.eraseInstFromFunction(*I);
612           IC.eraseInstFromFunction(EndI);
613           return true;
614         }
615         // Skip start intrinsics that don't pair with this end intrinsic.
616         continue;
617       }
618     }
619     break;
620   }
621 
622   return false;
623 }
624 
625 Instruction *InstCombinerImpl::visitVAEndInst(VAEndInst &I) {
626   removeTriviallyEmptyRange(I, *this, [](const IntrinsicInst &I) {
627     return I.getIntrinsicID() == Intrinsic::vastart ||
628            I.getIntrinsicID() == Intrinsic::vacopy;
629   });
630   return nullptr;
631 }
632 
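/// Canonicalize a call by swapping its first two arguments when the first is
/// a constant and the second is not. Returns the call if it was changed,
/// otherwise null. Used for commutative intrinsics.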
633 static CallInst *canonicalizeConstantArg0ToArg1(CallInst &Call) {
634   assert(Call.getNumArgOperands() > 1 && "Need at least 2 args to swap");
635   Value *Arg0 = Call.getArgOperand(0), *Arg1 = Call.getArgOperand(1);
636   if (isa<Constant>(Arg0) && !isa<Constant>(Arg1)) {
637     Call.setArgOperand(0, Arg1);
638     Call.setArgOperand(1, Arg0);
639     return &Call;
640   }
641   return nullptr;
642 }
643 
644 /// Creates a result tuple for an overflow intrinsic \p II with a given
645 /// \p Result and a constant \p Overflow value.
646 static Instruction *createOverflowTuple(IntrinsicInst *II, Value *Result,
647                                         Constant *Overflow) {
648   Constant *V[] = {UndefValue::get(Result->getType()), Overflow};
649   StructType *ST = cast<StructType>(II->getType());
650   Constant *Struct = ConstantStruct::get(ST, V);
651   return InsertValueInst::Create(Struct, Result, 0);
652 }
653 
654 Instruction *
655 InstCombinerImpl::foldIntrinsicWithOverflowCommon(IntrinsicInst *II) {
656   WithOverflowInst *WO = cast<WithOverflowInst>(II);
657   Value *OperationResult = nullptr;
658   Constant *OverflowResult = nullptr;
659   if (OptimizeOverflowCheck(WO->getBinaryOp(), WO->isSigned(), WO->getLHS(),
660                             WO->getRHS(), *WO, OperationResult, OverflowResult))
661     return createOverflowTuple(WO, OperationResult, OverflowResult);
662   return nullptr;
663 }
664 
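/// Return true if \p Op is known to be negative, false if known to be
/// non-negative, and None if the sign cannot be determined from known bits
/// or a dominating condition.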
665 static Optional<bool> getKnownSign(Value *Op, Instruction *CxtI,
666                                    const DataLayout &DL, AssumptionCache *AC,
667                                    DominatorTree *DT) {
668   KnownBits Known = computeKnownBits(Op, DL, 0, AC, CxtI, DT);
669   if (Known.isNonNegative())
670     return false;
671   if (Known.isNegative())
672     return true;
673 
674   return isImpliedByDomCondition(
675       ICmpInst::ICMP_SLT, Op, Constant::getNullValue(Op->getType()), CxtI, DL);
676 }
677 
678 /// CallInst simplification. This mostly only handles folding of intrinsic
679 /// instructions. For normal calls, it allows visitCallBase to do the heavy
680 /// lifting.
681 Instruction *InstCombinerImpl::visitCallInst(CallInst &CI) {
682   // Don't try to simplify calls without uses. It will not do anything useful,
683   // but will result in the following folds being skipped.
684   if (!CI.use_empty())
685     if (Value *V = SimplifyCall(&CI, SQ.getWithInstruction(&CI)))
686       return replaceInstUsesWith(CI, V);
687 
688   if (isFreeCall(&CI, &TLI))
689     return visitFree(CI);
690 
691   // If the caller function is nounwind, mark the call as nounwind, even if the
692   // callee isn't.
693   if (CI.getFunction()->doesNotThrow() && !CI.doesNotThrow()) {
694     CI.setDoesNotThrow();
695     return &CI;
696   }
697 
698   IntrinsicInst *II = dyn_cast<IntrinsicInst>(&CI);
699   if (!II) return visitCallBase(CI);
700 
701   // For atomic unordered mem intrinsics, if the length is not positive or
702   // not a multiple of the element size, the behavior is undefined.
703   if (auto *AMI = dyn_cast<AtomicMemIntrinsic>(II))
704     if (ConstantInt *NumBytes = dyn_cast<ConstantInt>(AMI->getLength()))
705       if (NumBytes->getSExtValue() < 0 ||
706           (NumBytes->getZExtValue() % AMI->getElementSizeInBytes() != 0)) {
707         CreateNonTerminatorUnreachable(AMI);
708         assert(AMI->getType()->isVoidTy() &&
709                "non void atomic unordered mem intrinsic");
710         return eraseInstFromFunction(*AMI);
711       }
712 
713   // Intrinsics cannot occur in an invoke or a callbr, so handle them here
714   // instead of in visitCallBase.
715   if (auto *MI = dyn_cast<AnyMemIntrinsic>(II)) {
716     bool Changed = false;
717 
718     // memmove/cpy/set of zero bytes is a noop.
719     if (Constant *NumBytes = dyn_cast<Constant>(MI->getLength())) {
720       if (NumBytes->isNullValue())
721         return eraseInstFromFunction(CI);
722 
723       if (ConstantInt *CI = dyn_cast<ConstantInt>(NumBytes))
724         if (CI->getZExtValue() == 1) {
725           // Replace the instruction with just byte operations.  We would
726           // transform other cases to loads/stores, but we don't know if
727           // alignment is sufficient.
728         }
729     }
730 
731     // No other transformations apply to volatile transfers.
732     if (auto *M = dyn_cast<MemIntrinsic>(MI))
733       if (M->isVolatile())
734         return nullptr;
735 
736     // If we have a memmove and the source operation is a constant global,
737     // then the source and dest pointers can't alias, so we can change this
738     // into a call to memcpy.
739     if (auto *MMI = dyn_cast<AnyMemMoveInst>(MI)) {
740       if (GlobalVariable *GVSrc = dyn_cast<GlobalVariable>(MMI->getSource()))
741         if (GVSrc->isConstant()) {
742           Module *M = CI.getModule();
743           Intrinsic::ID MemCpyID =
744               isa<AtomicMemMoveInst>(MMI)
745                   ? Intrinsic::memcpy_element_unordered_atomic
746                   : Intrinsic::memcpy;
747           Type *Tys[3] = { CI.getArgOperand(0)->getType(),
748                            CI.getArgOperand(1)->getType(),
749                            CI.getArgOperand(2)->getType() };
750           CI.setCalledFunction(Intrinsic::getDeclaration(M, MemCpyID, Tys));
751           Changed = true;
752         }
753     }
754 
755     if (AnyMemTransferInst *MTI = dyn_cast<AnyMemTransferInst>(MI)) {
756       // memmove(x,x,size) -> noop.
757       if (MTI->getSource() == MTI->getDest())
758         return eraseInstFromFunction(CI);
759     }
760 
761     // If we can determine a pointer alignment that is bigger than currently
762     // set, update the alignment.
763     if (auto *MTI = dyn_cast<AnyMemTransferInst>(MI)) {
764       if (Instruction *I = SimplifyAnyMemTransfer(MTI))
765         return I;
766     } else if (auto *MSI = dyn_cast<AnyMemSetInst>(MI)) {
767       if (Instruction *I = SimplifyAnyMemSet(MSI))
768         return I;
769     }
770 
771     if (Changed) return II;
772   }
773 
774   // For fixed width vector result intrinsics, use the generic demanded vector
775   // support.
776   if (auto *IIFVTy = dyn_cast<FixedVectorType>(II->getType())) {
777     auto VWidth = IIFVTy->getNumElements();
778     APInt UndefElts(VWidth, 0);
779     APInt AllOnesEltMask(APInt::getAllOnesValue(VWidth));
780     if (Value *V = SimplifyDemandedVectorElts(II, AllOnesEltMask, UndefElts)) {
781       if (V != II)
782         return replaceInstUsesWith(*II, V);
783       return II;
784     }
785   }
786 
787   if (II->isCommutative()) {
788     if (CallInst *NewCall = canonicalizeConstantArg0ToArg1(CI))
789       return NewCall;
790   }
791 
792   Intrinsic::ID IID = II->getIntrinsicID();
793   switch (IID) {
794   case Intrinsic::objectsize:
795     if (Value *V = lowerObjectSizeCall(II, DL, &TLI, /*MustSucceed=*/false))
796       return replaceInstUsesWith(CI, V);
797     return nullptr;
798   case Intrinsic::abs: {
799     Value *IIOperand = II->getArgOperand(0);
800     bool IntMinIsPoison = cast<Constant>(II->getArgOperand(1))->isOneValue();
801 
802     // abs(-x) -> abs(x)
803     // TODO: Copy nsw if it was present on the neg?
804     Value *X;
805     if (match(IIOperand, m_Neg(m_Value(X))))
806       return replaceOperand(*II, 0, X);
807     if (match(IIOperand, m_Select(m_Value(), m_Value(X), m_Neg(m_Deferred(X)))))
808       return replaceOperand(*II, 0, X);
809     if (match(IIOperand, m_Select(m_Value(), m_Neg(m_Value(X)), m_Deferred(X))))
810       return replaceOperand(*II, 0, X);
811 
812     if (Optional<bool> Sign = getKnownSign(IIOperand, II, DL, &AC, &DT)) {
813       // abs(x) -> x if x >= 0
814       if (!*Sign)
815         return replaceInstUsesWith(*II, IIOperand);
816 
817       // abs(x) -> -x if x < 0
818       if (IntMinIsPoison)
819         return BinaryOperator::CreateNSWNeg(IIOperand);
820       return BinaryOperator::CreateNeg(IIOperand);
821     }
822 
823     break;
824   }
825   case Intrinsic::bswap: {
826     Value *IIOperand = II->getArgOperand(0);
827     Value *X = nullptr;
828 
829     // bswap(trunc(bswap(x))) -> trunc(lshr(x, c))
830     if (match(IIOperand, m_Trunc(m_BSwap(m_Value(X))))) {
831       unsigned C = X->getType()->getScalarSizeInBits() -
832                    IIOperand->getType()->getScalarSizeInBits();
833       Value *CV = ConstantInt::get(X->getType(), C);
834       Value *V = Builder.CreateLShr(X, CV);
835       return new TruncInst(V, IIOperand->getType());
836     }
837     break;
838   }
839   case Intrinsic::masked_load:
840     if (Value *SimplifiedMaskedOp = simplifyMaskedLoad(*II))
841       return replaceInstUsesWith(CI, SimplifiedMaskedOp);
842     break;
843   case Intrinsic::masked_store:
844     return simplifyMaskedStore(*II);
845   case Intrinsic::masked_gather:
846     return simplifyMaskedGather(*II);
847   case Intrinsic::masked_scatter:
848     return simplifyMaskedScatter(*II);
849   case Intrinsic::launder_invariant_group:
850   case Intrinsic::strip_invariant_group:
851     if (auto *SkippedBarrier = simplifyInvariantGroupIntrinsic(*II, *this))
852       return replaceInstUsesWith(*II, SkippedBarrier);
853     break;
854   case Intrinsic::powi:
855     if (ConstantInt *Power = dyn_cast<ConstantInt>(II->getArgOperand(1))) {
856       // 0 and 1 are handled in instsimplify
857 
858       // powi(x, -1) -> 1/x
859       if (Power->isMinusOne())
860         return BinaryOperator::CreateFDiv(ConstantFP::get(CI.getType(), 1.0),
861                                           II->getArgOperand(0));
862       // powi(x, 2) -> x*x
863       if (Power->equalsInt(2))
864         return BinaryOperator::CreateFMul(II->getArgOperand(0),
865                                           II->getArgOperand(0));
866     }
867     break;
868 
869   case Intrinsic::cttz:
870   case Intrinsic::ctlz:
871     if (auto *I = foldCttzCtlz(*II, *this))
872       return I;
873     break;
874 
875   case Intrinsic::ctpop:
876     if (auto *I = foldCtpop(*II, *this))
877       return I;
878     break;
879 
880   case Intrinsic::fshl:
881   case Intrinsic::fshr: {
882     Value *Op0 = II->getArgOperand(0), *Op1 = II->getArgOperand(1);
883     Type *Ty = II->getType();
884     unsigned BitWidth = Ty->getScalarSizeInBits();
885     Constant *ShAmtC;
886     if (match(II->getArgOperand(2), m_Constant(ShAmtC)) &&
887         !isa<ConstantExpr>(ShAmtC) && !ShAmtC->containsConstantExpression()) {
888       // Canonicalize a shift amount constant operand to modulo the bit-width.
889       Constant *WidthC = ConstantInt::get(Ty, BitWidth);
890       Constant *ModuloC = ConstantExpr::getURem(ShAmtC, WidthC);
891       if (ModuloC != ShAmtC)
892         return replaceOperand(*II, 2, ModuloC);
893 
894       assert(ConstantExpr::getICmp(ICmpInst::ICMP_UGT, WidthC, ShAmtC) ==
895                  ConstantInt::getTrue(CmpInst::makeCmpResultType(Ty)) &&
896              "Shift amount expected to be modulo bitwidth");
897 
898       // Canonicalize funnel shift right by constant to funnel shift left. This
899       // is not entirely arbitrary. For historical reasons, the backend may
900       // recognize rotate left patterns but miss rotate right patterns.
901       if (IID == Intrinsic::fshr) {
902         // fshr X, Y, C --> fshl X, Y, (BitWidth - C)
903         Constant *LeftShiftC = ConstantExpr::getSub(WidthC, ShAmtC);
904         Module *Mod = II->getModule();
905         Function *Fshl = Intrinsic::getDeclaration(Mod, Intrinsic::fshl, Ty);
906         return CallInst::Create(Fshl, { Op0, Op1, LeftShiftC });
907       }
908       assert(IID == Intrinsic::fshl &&
909              "All funnel shifts by simple constants should go left");
910 
911       // fshl(X, 0, C) --> shl X, C
912       // fshl(X, undef, C) --> shl X, C
913       if (match(Op1, m_ZeroInt()) || match(Op1, m_Undef()))
914         return BinaryOperator::CreateShl(Op0, ShAmtC);
915 
916       // fshl(0, X, C) --> lshr X, (BW-C)
917       // fshl(undef, X, C) --> lshr X, (BW-C)
918       if (match(Op0, m_ZeroInt()) || match(Op0, m_Undef()))
919         return BinaryOperator::CreateLShr(Op1,
920                                           ConstantExpr::getSub(WidthC, ShAmtC));
921 
922       // fshl i16 X, X, 8 --> bswap i16 X (reduce to more-specific form)
923       if (Op0 == Op1 && BitWidth == 16 && match(ShAmtC, m_SpecificInt(8))) {
924         Module *Mod = II->getModule();
925         Function *Bswap = Intrinsic::getDeclaration(Mod, Intrinsic::bswap, Ty);
926         return CallInst::Create(Bswap, { Op0 });
927       }
928     }
929 
930     // Left or right might be masked.
931     if (SimplifyDemandedInstructionBits(*II))
932       return &CI;
933 
934     // The shift amount (operand 2) of a funnel shift is modulo the bitwidth,
935     // so only the low bits of the shift amount are demanded if the bitwidth is
936     // a power-of-2.
937     if (!isPowerOf2_32(BitWidth))
938       break;
939     APInt Op2Demanded = APInt::getLowBitsSet(BitWidth, Log2_32_Ceil(BitWidth));
940     KnownBits Op2Known(BitWidth);
941     if (SimplifyDemandedBits(II, 2, Op2Demanded, Op2Known))
942       return &CI;
943     break;
944   }
945   case Intrinsic::uadd_with_overflow:
946   case Intrinsic::sadd_with_overflow: {
947     if (Instruction *I = foldIntrinsicWithOverflowCommon(II))
948       return I;
949 
950     // Given 2 constant operands whose sum does not overflow:
951     // uaddo (X +nuw C0), C1 -> uaddo X, C0 + C1
952     // saddo (X +nsw C0), C1 -> saddo X, C0 + C1
953     Value *X;
954     const APInt *C0, *C1;
955     Value *Arg0 = II->getArgOperand(0);
956     Value *Arg1 = II->getArgOperand(1);
957     bool IsSigned = IID == Intrinsic::sadd_with_overflow;
958     bool HasNWAdd = IsSigned ? match(Arg0, m_NSWAdd(m_Value(X), m_APInt(C0)))
959                              : match(Arg0, m_NUWAdd(m_Value(X), m_APInt(C0)));
960     if (HasNWAdd && match(Arg1, m_APInt(C1))) {
961       bool Overflow;
962       APInt NewC =
963           IsSigned ? C1->sadd_ov(*C0, Overflow) : C1->uadd_ov(*C0, Overflow);
964       if (!Overflow)
965         return replaceInstUsesWith(
966             *II, Builder.CreateBinaryIntrinsic(
967                      IID, X, ConstantInt::get(Arg1->getType(), NewC)));
968     }
969     break;
970   }
971 
972   case Intrinsic::umul_with_overflow:
973   case Intrinsic::smul_with_overflow:
974   case Intrinsic::usub_with_overflow:
975     if (Instruction *I = foldIntrinsicWithOverflowCommon(II))
976       return I;
977     break;
978 
979   case Intrinsic::ssub_with_overflow: {
980     if (Instruction *I = foldIntrinsicWithOverflowCommon(II))
981       return I;
982 
983     Constant *C;
984     Value *Arg0 = II->getArgOperand(0);
985     Value *Arg1 = II->getArgOperand(1);
986     // Given a constant C that is not the minimum signed value
987     // for an integer of a given bit width:
988     //
989     // ssubo X, C -> saddo X, -C
990     if (match(Arg1, m_Constant(C)) && C->isNotMinSignedValue()) {
991       Value *NegVal = ConstantExpr::getNeg(C);
992       // Build a saddo call that is equivalent to the discovered
993       // ssubo call.
994       return replaceInstUsesWith(
995           *II, Builder.CreateBinaryIntrinsic(Intrinsic::sadd_with_overflow,
996                                              Arg0, NegVal));
997     }
998 
999     break;
1000   }
1001 
1002   case Intrinsic::uadd_sat:
1003   case Intrinsic::sadd_sat:
1004   case Intrinsic::usub_sat:
1005   case Intrinsic::ssub_sat: {
1006     SaturatingInst *SI = cast<SaturatingInst>(II);
1007     Type *Ty = SI->getType();
1008     Value *Arg0 = SI->getLHS();
1009     Value *Arg1 = SI->getRHS();
1010 
1011     // Make use of known overflow information.
1012     OverflowResult OR = computeOverflow(SI->getBinaryOp(), SI->isSigned(),
1013                                         Arg0, Arg1, SI);
1014     switch (OR) {
1015       case OverflowResult::MayOverflow:
1016         break;
1017       case OverflowResult::NeverOverflows:
1018         if (SI->isSigned())
1019           return BinaryOperator::CreateNSW(SI->getBinaryOp(), Arg0, Arg1);
1020         else
1021           return BinaryOperator::CreateNUW(SI->getBinaryOp(), Arg0, Arg1);
1022       case OverflowResult::AlwaysOverflowsLow: {
1023         unsigned BitWidth = Ty->getScalarSizeInBits();
1024         APInt Min = APSInt::getMinValue(BitWidth, !SI->isSigned());
1025         return replaceInstUsesWith(*SI, ConstantInt::get(Ty, Min));
1026       }
1027       case OverflowResult::AlwaysOverflowsHigh: {
1028         unsigned BitWidth = Ty->getScalarSizeInBits();
1029         APInt Max = APSInt::getMaxValue(BitWidth, !SI->isSigned());
1030         return replaceInstUsesWith(*SI, ConstantInt::get(Ty, Max));
1031       }
1032     }
1033 
1034     // ssub.sat(X, C) -> sadd.sat(X, -C) if C != MIN
1035     Constant *C;
1036     if (IID == Intrinsic::ssub_sat && match(Arg1, m_Constant(C)) &&
1037         C->isNotMinSignedValue()) {
1038       Value *NegVal = ConstantExpr::getNeg(C);
1039       return replaceInstUsesWith(
1040           *II, Builder.CreateBinaryIntrinsic(
1041               Intrinsic::sadd_sat, Arg0, NegVal));
1042     }
1043 
1044     // sat(sat(X + Val2) + Val) -> sat(X + (Val+Val2))
1045     // sat(sat(X - Val2) - Val) -> sat(X - (Val+Val2))
1046     // if Val and Val2 have the same sign
1047     if (auto *Other = dyn_cast<IntrinsicInst>(Arg0)) {
1048       Value *X;
1049       const APInt *Val, *Val2;
1050       APInt NewVal;
1051       bool IsUnsigned =
1052           IID == Intrinsic::uadd_sat || IID == Intrinsic::usub_sat;
1053       if (Other->getIntrinsicID() == IID &&
1054           match(Arg1, m_APInt(Val)) &&
1055           match(Other->getArgOperand(0), m_Value(X)) &&
1056           match(Other->getArgOperand(1), m_APInt(Val2))) {
1057         if (IsUnsigned)
1058           NewVal = Val->uadd_sat(*Val2);
1059         else if (Val->isNonNegative() == Val2->isNonNegative()) {
1060           bool Overflow;
1061           NewVal = Val->sadd_ov(*Val2, Overflow);
1062           if (Overflow) {
1063             // Both adds together may add more than SignedMaxValue
1064             // without saturating the final result.
1065             break;
1066           }
1067         } else {
1068           // Cannot fold saturated addition with different signs.
1069           break;
1070         }
1071 
1072         return replaceInstUsesWith(
1073             *II, Builder.CreateBinaryIntrinsic(
1074                      IID, X, ConstantInt::get(II->getType(), NewVal)));
1075       }
1076     }
1077     break;
1078   }
1079 
1080   case Intrinsic::minnum:
1081   case Intrinsic::maxnum:
1082   case Intrinsic::minimum:
1083   case Intrinsic::maximum: {
1084     Value *Arg0 = II->getArgOperand(0);
1085     Value *Arg1 = II->getArgOperand(1);
1086     Value *X, *Y;
1087     if (match(Arg0, m_FNeg(m_Value(X))) && match(Arg1, m_FNeg(m_Value(Y))) &&
1088         (Arg0->hasOneUse() || Arg1->hasOneUse())) {
1089       // If both operands are negated, invert the call and negate the result:
1090       // min(-X, -Y) --> -(max(X, Y))
1091       // max(-X, -Y) --> -(min(X, Y))
1092       Intrinsic::ID NewIID;
1093       switch (IID) {
1094       case Intrinsic::maxnum:
1095         NewIID = Intrinsic::minnum;
1096         break;
1097       case Intrinsic::minnum:
1098         NewIID = Intrinsic::maxnum;
1099         break;
1100       case Intrinsic::maximum:
1101         NewIID = Intrinsic::minimum;
1102         break;
1103       case Intrinsic::minimum:
1104         NewIID = Intrinsic::maximum;
1105         break;
1106       default:
1107         llvm_unreachable("unexpected intrinsic ID");
1108       }
1109       Value *NewCall = Builder.CreateBinaryIntrinsic(NewIID, X, Y, II);
1110       Instruction *FNeg = UnaryOperator::CreateFNeg(NewCall);
1111       FNeg->copyIRFlags(II);
1112       return FNeg;
1113     }
1114 
1115     // m(m(X, C2), C1) -> m(X, C)
1116     const APFloat *C1, *C2;
1117     if (auto *M = dyn_cast<IntrinsicInst>(Arg0)) {
1118       if (M->getIntrinsicID() == IID && match(Arg1, m_APFloat(C1)) &&
1119           ((match(M->getArgOperand(0), m_Value(X)) &&
1120             match(M->getArgOperand(1), m_APFloat(C2))) ||
1121            (match(M->getArgOperand(1), m_Value(X)) &&
1122             match(M->getArgOperand(0), m_APFloat(C2))))) {
1123         APFloat Res(0.0);
1124         switch (IID) {
1125         case Intrinsic::maxnum:
1126           Res = maxnum(*C1, *C2);
1127           break;
1128         case Intrinsic::minnum:
1129           Res = minnum(*C1, *C2);
1130           break;
1131         case Intrinsic::maximum:
1132           Res = maximum(*C1, *C2);
1133           break;
1134         case Intrinsic::minimum:
1135           Res = minimum(*C1, *C2);
1136           break;
1137         default:
1138           llvm_unreachable("unexpected intrinsic ID");
1139         }
1140         Instruction *NewCall = Builder.CreateBinaryIntrinsic(
1141             IID, X, ConstantFP::get(Arg0->getType(), Res), II);
1142         // TODO: Conservatively intersecting FMF. If Res == C2, the transform
1143         //       was a simplification (so Arg0 and its original flags could
1144         //       propagate?)
1145         NewCall->andIRFlags(M);
1146         return replaceInstUsesWith(*II, NewCall);
1147       }
1148     }
1149 
1150     Value *ExtSrc0;
1151     Value *ExtSrc1;
1152 
1153     // minnum (fpext x), (fpext y) -> minnum x, y
1154     // maxnum (fpext x), (fpext y) -> maxnum x, y
1155     if (match(II->getArgOperand(0), m_OneUse(m_FPExt(m_Value(ExtSrc0)))) &&
1156         match(II->getArgOperand(1), m_OneUse(m_FPExt(m_Value(ExtSrc1)))) &&
1157         ExtSrc0->getType() == ExtSrc1->getType()) {
1158       Function *F = Intrinsic::getDeclaration(
1159           II->getModule(), II->getIntrinsicID(), {ExtSrc0->getType()});
1160       CallInst *NewCall = Builder.CreateCall(F, { ExtSrc0, ExtSrc1 });
1161       NewCall->copyFastMathFlags(II);
1162       NewCall->takeName(II);
1163       return new FPExtInst(NewCall, II->getType());
1164     }
1165 
1166     break;
1167   }
1168   case Intrinsic::fmuladd: {
1169     // Canonicalize fast fmuladd to the separate fmul + fadd.
1170     if (II->isFast()) {
1171       BuilderTy::FastMathFlagGuard Guard(Builder);
1172       Builder.setFastMathFlags(II->getFastMathFlags());
1173       Value *Mul = Builder.CreateFMul(II->getArgOperand(0),
1174                                       II->getArgOperand(1));
1175       Value *Add = Builder.CreateFAdd(Mul, II->getArgOperand(2));
1176       Add->takeName(II);
1177       return replaceInstUsesWith(*II, Add);
1178     }
1179 
1180     // Try to simplify the underlying FMul.
1181     if (Value *V = SimplifyFMulInst(II->getArgOperand(0), II->getArgOperand(1),
1182                                     II->getFastMathFlags(),
1183                                     SQ.getWithInstruction(II))) {
1184       auto *FAdd = BinaryOperator::CreateFAdd(V, II->getArgOperand(2));
1185       FAdd->copyFastMathFlags(II);
1186       return FAdd;
1187     }
1188 
1189     LLVM_FALLTHROUGH;
1190   }
1191   case Intrinsic::fma: {
1192     // fma fneg(x), fneg(y), z -> fma x, y, z
1193     Value *Src0 = II->getArgOperand(0);
1194     Value *Src1 = II->getArgOperand(1);
1195     Value *X, *Y;
1196     if (match(Src0, m_FNeg(m_Value(X))) && match(Src1, m_FNeg(m_Value(Y)))) {
1197       replaceOperand(*II, 0, X);
1198       replaceOperand(*II, 1, Y);
1199       return II;
1200     }
1201 
1202     // fma fabs(x), fabs(x), z -> fma x, x, z
1203     if (match(Src0, m_FAbs(m_Value(X))) &&
1204         match(Src1, m_FAbs(m_Specific(X)))) {
1205       replaceOperand(*II, 0, X);
1206       replaceOperand(*II, 1, X);
1207       return II;
1208     }
1209 
1210     // Try to simplify the underlying FMul. We can only apply simplifications
1211     // that do not require rounding.
1212     if (Value *V = SimplifyFMAFMul(II->getArgOperand(0), II->getArgOperand(1),
1213                                    II->getFastMathFlags(),
1214                                    SQ.getWithInstruction(II))) {
1215       auto *FAdd = BinaryOperator::CreateFAdd(V, II->getArgOperand(2));
1216       FAdd->copyFastMathFlags(II);
1217       return FAdd;
1218     }
1219 
1220     // fma x, y, 0 -> fmul x, y
1221     // This is always valid for -0.0, but requires nsz for +0.0 as
1222     // -0.0 + 0.0 = 0.0, which would not be the same as the fmul on its own.
1223     if (match(II->getArgOperand(2), m_NegZeroFP()) ||
1224         (match(II->getArgOperand(2), m_PosZeroFP()) &&
1225          II->getFastMathFlags().noSignedZeros()))
1226       return BinaryOperator::CreateFMulFMF(Src0, Src1, II);
1227 
1228     break;
1229   }
1230   case Intrinsic::copysign: {
1231     Value *Mag = II->getArgOperand(0), *Sign = II->getArgOperand(1);
1232     if (SignBitMustBeZero(Sign, &TLI)) {
1233       // If we know that the sign argument is positive, reduce to FABS:
1234       // copysign Mag, +Sign --> fabs Mag
1235       Value *Fabs = Builder.CreateUnaryIntrinsic(Intrinsic::fabs, Mag, II);
1236       return replaceInstUsesWith(*II, Fabs);
1237     }
1238     // TODO: There should be a ValueTracking sibling like SignBitMustBeOne.
1239     const APFloat *C;
1240     if (match(Sign, m_APFloat(C)) && C->isNegative()) {
1241       // If we know that the sign argument is negative, reduce to FNABS:
1242       // copysign Mag, -Sign --> fneg (fabs Mag)
1243       Value *Fabs = Builder.CreateUnaryIntrinsic(Intrinsic::fabs, Mag, II);
1244       return replaceInstUsesWith(*II, Builder.CreateFNegFMF(Fabs, II));
1245     }
1246 
1247     // Propagate sign argument through nested calls:
1248     // copysign Mag, (copysign ?, X) --> copysign Mag, X
1249     Value *X;
1250     if (match(Sign, m_Intrinsic<Intrinsic::copysign>(m_Value(), m_Value(X))))
1251       return replaceOperand(*II, 1, X);
1252 
1253     // Peek through changes of magnitude's sign-bit. This call rewrites those:
1254     // copysign (fabs X), Sign --> copysign X, Sign
1255     // copysign (fneg X), Sign --> copysign X, Sign
1256     if (match(Mag, m_FAbs(m_Value(X))) || match(Mag, m_FNeg(m_Value(X))))
1257       return replaceOperand(*II, 0, X);
1258 
1259     break;
1260   }
1261   case Intrinsic::fabs: {
1262     Value *Cond, *TVal, *FVal;
1263     if (match(II->getArgOperand(0),
1264               m_Select(m_Value(Cond), m_Value(TVal), m_Value(FVal)))) {
1265       // fabs (select Cond, TrueC, FalseC) --> select Cond, AbsT, AbsF
1266       if (isa<Constant>(TVal) && isa<Constant>(FVal)) {
1267         CallInst *AbsT = Builder.CreateCall(II->getCalledFunction(), {TVal});
1268         CallInst *AbsF = Builder.CreateCall(II->getCalledFunction(), {FVal});
1269         return SelectInst::Create(Cond, AbsT, AbsF);
1270       }
1271       // fabs (select Cond, -FVal, FVal) --> fabs FVal
1272       if (match(TVal, m_FNeg(m_Specific(FVal))))
1273         return replaceOperand(*II, 0, FVal);
1274       // fabs (select Cond, TVal, -TVal) --> fabs TVal
1275       if (match(FVal, m_FNeg(m_Specific(TVal))))
1276         return replaceOperand(*II, 0, TVal);
1277     }
1278 
1279     LLVM_FALLTHROUGH;
1280   }
1281   case Intrinsic::ceil:
1282   case Intrinsic::floor:
1283   case Intrinsic::round:
1284   case Intrinsic::roundeven:
1285   case Intrinsic::nearbyint:
1286   case Intrinsic::rint:
1287   case Intrinsic::trunc: {
1288     Value *ExtSrc;
1289     if (match(II->getArgOperand(0), m_OneUse(m_FPExt(m_Value(ExtSrc))))) {
1290       // Narrow the call: intrinsic (fpext x) -> fpext (intrinsic x)
1291       Value *NarrowII = Builder.CreateUnaryIntrinsic(IID, ExtSrc, II);
1292       return new FPExtInst(NarrowII, II->getType());
1293     }
1294     break;
1295   }
1296   case Intrinsic::cos:
1297   case Intrinsic::amdgcn_cos: {
1298     Value *X;
1299     Value *Src = II->getArgOperand(0);
1300     if (match(Src, m_FNeg(m_Value(X))) || match(Src, m_FAbs(m_Value(X)))) {
1301       // cos(-x) -> cos(x)
1302       // cos(fabs(x)) -> cos(x)
1303       return replaceOperand(*II, 0, X);
1304     }
1305     break;
1306   }
1307   case Intrinsic::sin: {
1308     Value *X;
1309     if (match(II->getArgOperand(0), m_OneUse(m_FNeg(m_Value(X))))) {
1310       // sin(-x) --> -sin(x)
1311       Value *NewSin = Builder.CreateUnaryIntrinsic(Intrinsic::sin, X, II);
1312       Instruction *FNeg = UnaryOperator::CreateFNeg(NewSin);
1313       FNeg->copyFastMathFlags(II);
1314       return FNeg;
1315     }
1316     break;
1317   }
1318 
1319   case Intrinsic::arm_neon_vtbl1:
1320   case Intrinsic::aarch64_neon_tbl1:
1321     if (Value *V = simplifyNeonTbl1(*II, Builder))
1322       return replaceInstUsesWith(*II, V);
1323     break;
1324 
1325   case Intrinsic::arm_neon_vmulls:
1326   case Intrinsic::arm_neon_vmullu:
1327   case Intrinsic::aarch64_neon_smull:
1328   case Intrinsic::aarch64_neon_umull: {
1329     Value *Arg0 = II->getArgOperand(0);
1330     Value *Arg1 = II->getArgOperand(1);
1331 
1332     // Handle mul by zero first:
1333     if (isa<ConstantAggregateZero>(Arg0) || isa<ConstantAggregateZero>(Arg1)) {
1334       return replaceInstUsesWith(CI, ConstantAggregateZero::get(II->getType()));
1335     }
1336 
1337     // Check for constant LHS & RHS - in this case we just simplify.
1338     bool Zext = (IID == Intrinsic::arm_neon_vmullu ||
1339                  IID == Intrinsic::aarch64_neon_umull);
1340     VectorType *NewVT = cast<VectorType>(II->getType());
1341     if (Constant *CV0 = dyn_cast<Constant>(Arg0)) {
1342       if (Constant *CV1 = dyn_cast<Constant>(Arg1)) {
1343         CV0 = ConstantExpr::getIntegerCast(CV0, NewVT, /*isSigned=*/!Zext);
1344         CV1 = ConstantExpr::getIntegerCast(CV1, NewVT, /*isSigned=*/!Zext);
1345 
1346         return replaceInstUsesWith(CI, ConstantExpr::getMul(CV0, CV1));
1347       }
1348 
1349       // Couldn't simplify - canonicalize constant to the RHS.
1350       std::swap(Arg0, Arg1);
1351     }
1352 
1353     // Handle mul by one:
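         // Illustrative example (not from the original source):
         //   aarch64.neon.smull(<4 x i16> %x, <4 x i16> splat of 1)
         //     --> sext %x to <4 x i32>   (umull would use zext instead)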
1354     if (Constant *CV1 = dyn_cast<Constant>(Arg1))
1355       if (ConstantInt *Splat =
1356               dyn_cast_or_null<ConstantInt>(CV1->getSplatValue()))
1357         if (Splat->isOne())
1358           return CastInst::CreateIntegerCast(Arg0, II->getType(),
1359                                              /*isSigned=*/!Zext);
1360 
1361     break;
1362   }
1363   case Intrinsic::arm_neon_aesd:
1364   case Intrinsic::arm_neon_aese:
1365   case Intrinsic::aarch64_crypto_aesd:
1366   case Intrinsic::aarch64_crypto_aese: {
1367     Value *DataArg = II->getArgOperand(0);
1368     Value *KeyArg  = II->getArgOperand(1);
1369 
1370     // Try to use the builtin XOR in AESE and AESD to eliminate a prior XOR
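         // Illustrative example (not from the original source): AESE/AESD
         // begin by XOR'ing the data with the key, so
         //   aese(xor(%data, %key), zeroinitializer) --> aese(%data, %key)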
1371     Value *Data, *Key;
1372     if (match(KeyArg, m_ZeroInt()) &&
1373         match(DataArg, m_Xor(m_Value(Data), m_Value(Key)))) {
1374       replaceOperand(*II, 0, Data);
1375       replaceOperand(*II, 1, Key);
1376       return II;
1377     }
1378     break;
1379   }
1380   case Intrinsic::hexagon_V6_vandvrt:
1381   case Intrinsic::hexagon_V6_vandvrt_128B: {
1382     // Simplify Q -> V -> Q conversion.
1383     if (auto Op0 = dyn_cast<IntrinsicInst>(II->getArgOperand(0))) {
1384       Intrinsic::ID ID0 = Op0->getIntrinsicID();
1385       if (ID0 != Intrinsic::hexagon_V6_vandqrt &&
1386           ID0 != Intrinsic::hexagon_V6_vandqrt_128B)
1387         break;
1388       Value *Bytes = Op0->getArgOperand(1), *Mask = II->getArgOperand(1);
1389       uint64_t Bytes1 = computeKnownBits(Bytes, 0, Op0).One.getZExtValue();
1390       uint64_t Mask1 = computeKnownBits(Mask, 0, II).One.getZExtValue();
1391       // Check if every byte has common bits in Bytes and Mask.
1392       uint64_t C = Bytes1 & Mask1;
1393       if ((C & 0xFF) && (C & 0xFF00) && (C & 0xFF0000) && (C & 0xFF000000))
1394         return replaceInstUsesWith(*II, Op0->getArgOperand(0));
1395     }
1396     break;
1397   }
1398   case Intrinsic::stackrestore: {
1399     // If the save is right next to the restore, remove the restore.  This can
1400     // happen when variable allocas are DCE'd.
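         // Illustrative example (not from the original source):
         //   %sp = call i8* @llvm.stacksave()
         //   call void @llvm.stackrestore(i8* %sp)  ; nothing in between -> dead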
1401     if (IntrinsicInst *SS = dyn_cast<IntrinsicInst>(II->getArgOperand(0))) {
1402       if (SS->getIntrinsicID() == Intrinsic::stacksave) {
1403         // Skip over debug info.
1404         if (SS->getNextNonDebugInstruction() == II) {
1405           return eraseInstFromFunction(CI);
1406         }
1407       }
1408     }
1409 
1410     // Scan down this block to see if there is another stack restore in the
1411     // same block without an intervening call/alloca.
1412     BasicBlock::iterator BI(II);
1413     Instruction *TI = II->getParent()->getTerminator();
1414     bool CannotRemove = false;
1415     for (++BI; &*BI != TI; ++BI) {
1416       if (isa<AllocaInst>(BI)) {
1417         CannotRemove = true;
1418         break;
1419       }
1420       if (CallInst *BCI = dyn_cast<CallInst>(BI)) {
1421         if (auto *II2 = dyn_cast<IntrinsicInst>(BCI)) {
1422           // If there is a stackrestore below this one, remove this one.
1423           if (II2->getIntrinsicID() == Intrinsic::stackrestore)
1424             return eraseInstFromFunction(CI);
1425 
1426           // Bail if we cross over an intrinsic with side effects, such as
1427           // llvm.stacksave, or llvm.read_register.
1428           if (II2->mayHaveSideEffects()) {
1429             CannotRemove = true;
1430             break;
1431           }
1432         } else {
1433           // If we found a non-intrinsic call, we can't remove the stack
1434           // restore.
1435           CannotRemove = true;
1436           break;
1437         }
1438       }
1439     }
1440 
1441     // If the stack restore is in a return, resume, or unwind block and if there
1442     // are no allocas or calls between the restore and the return, nuke the
1443     // restore.
1444     if (!CannotRemove && (isa<ReturnInst>(TI) || isa<ResumeInst>(TI)))
1445       return eraseInstFromFunction(CI);
1446     break;
1447   }
1448   case Intrinsic::lifetime_end:
1449     // ASan needs to poison memory to detect invalid accesses, which is
1450     // possible even for an empty lifetime range.
1451     if (II->getFunction()->hasFnAttribute(Attribute::SanitizeAddress) ||
1452         II->getFunction()->hasFnAttribute(Attribute::SanitizeMemory) ||
1453         II->getFunction()->hasFnAttribute(Attribute::SanitizeHWAddress))
1454       break;
1455 
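         // Illustrative example (not from the original source): a start/end
         // pair with nothing meaningful in between is a trivially empty range,
         // and both markers can be removed:
         //   call void @llvm.lifetime.start.p0i8(i64 8, i8* %p)
         //   call void @llvm.lifetime.end.p0i8(i64 8, i8* %p)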
1456     if (removeTriviallyEmptyRange(*II, *this, [](const IntrinsicInst &I) {
1457           return I.getIntrinsicID() == Intrinsic::lifetime_start;
1458         }))
1459       return nullptr;
1460     break;
1461   case Intrinsic::assume: {
1462     Value *IIOperand = II->getArgOperand(0);
1463     SmallVector<OperandBundleDef, 4> OpBundles;
1464     II->getOperandBundlesAsDefs(OpBundles);
1465     bool HasOpBundles = !OpBundles.empty();
1466     // Remove an assume if it is followed by an identical assume.
1467     // TODO: Do we need this? Unless there are conflicting assumptions, the
1468     // computeKnownBits(IIOperand) call below eliminates redundant assumes.
1469     Instruction *Next = II->getNextNonDebugInstruction();
1470     if (HasOpBundles &&
1471         match(Next, m_Intrinsic<Intrinsic::assume>(m_Specific(IIOperand))) &&
1472         !cast<IntrinsicInst>(Next)->hasOperandBundles())
1473       return eraseInstFromFunction(CI);
1474 
1475     // Canonicalize assume(a && b) -> assume(a); assume(b);
1476     // Note: New assumption intrinsics created here are registered by
1477     // the InstCombineIRInserter object.
1478     FunctionType *AssumeIntrinsicTy = II->getFunctionType();
1479     Value *AssumeIntrinsic = II->getCalledOperand();
1480     Value *A, *B;
1481     if (match(IIOperand, m_And(m_Value(A), m_Value(B)))) {
1482       Builder.CreateCall(AssumeIntrinsicTy, AssumeIntrinsic, A, OpBundles,
1483                          II->getName());
1484       Builder.CreateCall(AssumeIntrinsicTy, AssumeIntrinsic, B, II->getName());
1485       return eraseInstFromFunction(*II);
1486     }
1487     // assume(!(a || b)) -> assume(!a); assume(!b);
1488     if (match(IIOperand, m_Not(m_Or(m_Value(A), m_Value(B))))) {
1489       Builder.CreateCall(AssumeIntrinsicTy, AssumeIntrinsic,
1490                          Builder.CreateNot(A), OpBundles, II->getName());
1491       Builder.CreateCall(AssumeIntrinsicTy, AssumeIntrinsic,
1492                          Builder.CreateNot(B), II->getName());
1493       return eraseInstFromFunction(*II);
1494     }
1495 
1496     // assume( (load addr) != null ) -> add 'nonnull' metadata to load
1497     // (if assume is valid at the load)
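         // Illustrative example (not from the original source):
         //   %p = load i32*, i32** %pp
         //   %c = icmp ne i32* %p, null
         //   call void @llvm.assume(i1 %c)
         // --> attach !nonnull metadata to the load (and drop the assume if it
         //     carries no operand bundles).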
1498     CmpInst::Predicate Pred;
1499     Instruction *LHS;
1500     if (match(IIOperand, m_ICmp(Pred, m_Instruction(LHS), m_Zero())) &&
1501         Pred == ICmpInst::ICMP_NE && LHS->getOpcode() == Instruction::Load &&
1502         LHS->getType()->isPointerTy() &&
1503         isValidAssumeForContext(II, LHS, &DT)) {
1504       MDNode *MD = MDNode::get(II->getContext(), None);
1505       LHS->setMetadata(LLVMContext::MD_nonnull, MD);
1506       if (!HasOpBundles)
1507         return eraseInstFromFunction(*II);
1508 
1509       // TODO: apply nonnull return attributes to calls and invokes
1510       // TODO: apply range metadata for range check patterns?
1511     }
1512 
1513     // If there is a dominating assume with the same condition as this one,
1514     // then this one is redundant, and should be removed.
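         // computeKnownBits on the i1 condition consults dominating assumptions
         // via the assumption cache, so if the single bit is already known to
         // be 1, this assume adds no new information.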
1515     KnownBits Known(1);
1516     computeKnownBits(IIOperand, Known, 0, II);
1517     if (Known.isAllOnes() && isAssumeWithEmptyBundle(*II))
1518       return eraseInstFromFunction(*II);
1519 
1520     // Update the cache of affected values for this assumption (we might be
1521     // here because we just simplified the condition).
1522     AC.updateAffectedValues(II);
1523     break;
1524   }
1525   case Intrinsic::experimental_gc_statepoint: {
1526     GCStatepointInst &GCSP = *cast<GCStatepointInst>(II);
1527     SmallPtrSet<Value *, 32> LiveGcValues;
1528     for (const GCRelocateInst *Reloc : GCSP.getGCRelocates()) {
1529       GCRelocateInst &GCR = *const_cast<GCRelocateInst *>(Reloc);
1530 
1531       // Remove the relocation if unused.
1532       if (GCR.use_empty()) {
1533         eraseInstFromFunction(GCR);
1534         continue;
1535       }
1536 
1537       Value *DerivedPtr = GCR.getDerivedPtr();
1538       Value *BasePtr = GCR.getBasePtr();
1539 
1540       // Undef is undef, even after relocation.
1541       if (isa<UndefValue>(DerivedPtr) || isa<UndefValue>(BasePtr)) {
1542         replaceInstUsesWith(GCR, UndefValue::get(GCR.getType()));
1543         eraseInstFromFunction(GCR);
1544         continue;
1545       }
1546 
1547       if (auto *PT = dyn_cast<PointerType>(GCR.getType())) {
1548         // The relocation of null will be null for almost any collector.
1549         // TODO: provide a hook for this in GCStrategy.  There might be some
1550         // weird collector for which this property does not hold.
1551         if (isa<ConstantPointerNull>(DerivedPtr)) {
1552           // Use null-pointer of gc_relocate's type to replace it.
1553           replaceInstUsesWith(GCR, ConstantPointerNull::get(PT));
1554           eraseInstFromFunction(GCR);
1555           continue;
1556         }
1557 
1558         // isKnownNonNull -> nonnull attribute
1559         if (!GCR.hasRetAttr(Attribute::NonNull) &&
1560             isKnownNonZero(DerivedPtr, DL, 0, &AC, II, &DT)) {
1561           GCR.addAttribute(AttributeList::ReturnIndex, Attribute::NonNull);
1562           // We discovered a new fact, so re-check the users.
1563           Worklist.pushUsersToWorkList(GCR);
1564         }
1565       }
1566 
1567       // If we have two copies of the same pointer in the statepoint argument
1568       // list, canonicalize to one.  This may let us common gc.relocates.
1569       if (GCR.getBasePtr() == GCR.getDerivedPtr() &&
1570           GCR.getBasePtrIndex() != GCR.getDerivedPtrIndex()) {
1571         auto *OpIntTy = GCR.getOperand(2)->getType();
1572         GCR.setOperand(2, ConstantInt::get(OpIntTy, GCR.getBasePtrIndex()));
1573       }
1574 
1575       // TODO: bitcast(relocate(p)) -> relocate(bitcast(p))
1576       // Canonicalize on the type from the uses to the defs
1577 
1578       // TODO: relocate((gep p, C, C2, ...)) -> gep(relocate(p), C, C2, ...)
1579       LiveGcValues.insert(BasePtr);
1580       LiveGcValues.insert(DerivedPtr);
1581     }
1582     Optional<OperandBundleUse> Bundle =
1583         GCSP.getOperandBundle(LLVMContext::OB_gc_live);
1584     unsigned NumOfGCLives = LiveGcValues.size();
1585     if (!Bundle.hasValue() || NumOfGCLives == Bundle->Inputs.size())
1586       break;
1587     // We can reduce the size of the gc-live bundle.
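         // Map every value of the old gc-live bundle to its index in the
         // shrunken bundle; values that are no longer relocated map to
         // NumOfGCLives, which acts as an "absent" sentinel.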
1588     DenseMap<Value *, unsigned> Val2Idx;
1589     std::vector<Value *> NewLiveGc;
1590     for (unsigned I = 0, E = Bundle->Inputs.size(); I < E; ++I) {
1591       Value *V = Bundle->Inputs[I];
1592       if (Val2Idx.count(V))
1593         continue;
1594       if (LiveGcValues.count(V)) {
1595         Val2Idx[V] = NewLiveGc.size();
1596         NewLiveGc.push_back(V);
1597       } else
1598         Val2Idx[V] = NumOfGCLives;
1599     }
1600     // Update all gc.relocates
1601     for (const GCRelocateInst *Reloc : GCSP.getGCRelocates()) {
1602       GCRelocateInst &GCR = *const_cast<GCRelocateInst *>(Reloc);
1603       Value *BasePtr = GCR.getBasePtr();
1604       assert(Val2Idx.count(BasePtr) && Val2Idx[BasePtr] != NumOfGCLives &&
1605              "Missed live gc for base pointer");
1606       auto *OpIntTy1 = GCR.getOperand(1)->getType();
1607       GCR.setOperand(1, ConstantInt::get(OpIntTy1, Val2Idx[BasePtr]));
1608       Value *DerivedPtr = GCR.getDerivedPtr();
1609       assert(Val2Idx.count(DerivedPtr) && Val2Idx[DerivedPtr] != NumOfGCLives &&
1610              "Missed live gc for derived pointer");
1611       auto *OpIntTy2 = GCR.getOperand(2)->getType();
1612       GCR.setOperand(2, ConstantInt::get(OpIntTy2, Val2Idx[DerivedPtr]));
1613     }
1614     // Create new statepoint instruction.
1615     OperandBundleDef NewBundle("gc-live", NewLiveGc);
1616     if (isa<CallInst>(II))
1617       return CallInst::CreateWithReplacedBundle(cast<CallInst>(II), NewBundle);
1618     else
1619       return InvokeInst::CreateWithReplacedBundle(cast<InvokeInst>(II),
1620                                                   NewBundle);
1621     break;
1622   }
1623   case Intrinsic::experimental_guard: {
1624     // Is this guard followed by another guard?  We scan forward over a small
1625     // fixed window of instructions to handle common cases with conditions
1626     // computed between guards.
1627     Instruction *NextInst = II->getNextNonDebugInstruction();
1628     for (unsigned i = 0; i < GuardWideningWindow; i++) {
1629       // Note: Using context-free form to avoid compile time blow up
1630       if (!isSafeToSpeculativelyExecute(NextInst))
1631         break;
1632       NextInst = NextInst->getNextNonDebugInstruction();
1633     }
1634     Value *NextCond = nullptr;
1635     if (match(NextInst,
1636               m_Intrinsic<Intrinsic::experimental_guard>(m_Value(NextCond)))) {
1637       Value *CurrCond = II->getArgOperand(0);
1638 
1639       // Remove a guard that is immediately preceded by an identical guard.
1640       // Otherwise canonicalize guard(a); guard(b) -> guard(a & b).
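           // Illustrative example (not from the original source):
           //   call void (i1, ...) @llvm.experimental.guard(i1 %a) [ "deopt"() ]
           //   %b = icmp ult i32 %x, 10
           //   call void (i1, ...) @llvm.experimental.guard(i1 %b) [ "deopt"() ]
           // The icmp is moved above the first guard, the two conditions are
           // and'ed, and the second guard is erased.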
1641       if (CurrCond != NextCond) {
1642         Instruction *MoveI = II->getNextNonDebugInstruction();
1643         while (MoveI != NextInst) {
1644           auto *Temp = MoveI;
1645           MoveI = MoveI->getNextNonDebugInstruction();
1646           Temp->moveBefore(II);
1647         }
1648         replaceOperand(*II, 0, Builder.CreateAnd(CurrCond, NextCond));
1649       }
1650       eraseInstFromFunction(*NextInst);
1651       return II;
1652     }
1653     break;
1654   }
1655   case Intrinsic::experimental_vector_insert: {
1656     Value *Vec = II->getArgOperand(0);
1657     Value *SubVec = II->getArgOperand(1);
1658     Value *Idx = II->getArgOperand(2);
1659     auto *DstTy = dyn_cast<FixedVectorType>(II->getType());
1660     auto *VecTy = dyn_cast<FixedVectorType>(Vec->getType());
1661     auto *SubVecTy = dyn_cast<FixedVectorType>(SubVec->getType());
1662 
1663     // Only canonicalize if the destination vector, Vec, and SubVec are all
1664     // fixed vectors.
1665     if (DstTy && VecTy && SubVecTy) {
1666       unsigned DstNumElts = DstTy->getNumElements();
1667       unsigned VecNumElts = VecTy->getNumElements();
1668       unsigned SubVecNumElts = SubVecTy->getNumElements();
1669       unsigned IdxN = cast<ConstantInt>(Idx)->getZExtValue();
1670 
1671       // The result of this call is undefined if IdxN is not a constant multiple
1672       // of the SubVec's minimum vector length OR the insertion overruns Vec.
1673       if (IdxN % SubVecNumElts != 0 || IdxN + SubVecNumElts > VecNumElts) {
1674         replaceInstUsesWith(CI, UndefValue::get(CI.getType()));
1675         return eraseInstFromFunction(CI);
1676       }
1677 
1678       // An insert that entirely overwrites Vec with SubVec is a nop.
1679       if (VecNumElts == SubVecNumElts) {
1680         replaceInstUsesWith(CI, SubVec);
1681         return eraseInstFromFunction(CI);
1682       }
1683 
1684       // Widen SubVec into a vector of the same width as Vec, since
1685       // shufflevector requires the two input vectors to be the same width.
1686       // Elements beyond the bounds of SubVec within the widened vector are
1687       // undefined.
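           // Illustrative example (not from the original source): inserting
           // <2 x i32> %sub into <4 x i32> %vec at index 2:
           //   %widen = shufflevector <2 x i32> %sub, <2 x i32> undef,
           //                          <4 x i32> <i32 0, i32 1, i32 undef, i32 undef>
           //   %res   = shufflevector <4 x i32> %vec, <4 x i32> %widen,
           //                          <4 x i32> <i32 0, i32 1, i32 4, i32 5>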
1688       SmallVector<int, 8> WidenMask;
1689       unsigned i;
1690       for (i = 0; i != SubVecNumElts; ++i)
1691         WidenMask.push_back(i);
1692       for (; i != VecNumElts; ++i)
1693         WidenMask.push_back(UndefMaskElem);
1694 
1695       Value *WidenShuffle = Builder.CreateShuffleVector(
1696           SubVec, llvm::UndefValue::get(SubVecTy), WidenMask);
1697 
1698       SmallVector<int, 8> Mask;
1699       for (unsigned i = 0; i != IdxN; ++i)
1700         Mask.push_back(i);
1701       for (unsigned i = DstNumElts; i != DstNumElts + SubVecNumElts; ++i)
1702         Mask.push_back(i);
1703       for (unsigned i = IdxN + SubVecNumElts; i != DstNumElts; ++i)
1704         Mask.push_back(i);
1705 
1706       Value *Shuffle = Builder.CreateShuffleVector(Vec, WidenShuffle, Mask);
1707       replaceInstUsesWith(CI, Shuffle);
1708       return eraseInstFromFunction(CI);
1709     }
1710     break;
1711   }
1712   case Intrinsic::experimental_vector_extract: {
1713     Value *Vec = II->getArgOperand(0);
1714     Value *Idx = II->getArgOperand(1);
1715 
1716     auto *DstTy = dyn_cast<FixedVectorType>(II->getType());
1717     auto *VecTy = dyn_cast<FixedVectorType>(Vec->getType());
1718 
1719     // Only canonicalize if the destination vector and Vec are fixed
1720     // vectors.
1721     if (DstTy && VecTy) {
1722       unsigned DstNumElts = DstTy->getNumElements();
1723       unsigned VecNumElts = VecTy->getNumElements();
1724       unsigned IdxN = cast<ConstantInt>(Idx)->getZExtValue();
1725 
1726       // The result of this call is undefined if IdxN is not a constant multiple
1727       // of the result type's minimum vector length OR the extraction overruns
1728       // Vec.
1729       if (IdxN % DstNumElts != 0 || IdxN + DstNumElts > VecNumElts) {
1730         replaceInstUsesWith(CI, UndefValue::get(CI.getType()));
1731         return eraseInstFromFunction(CI);
1732       }
1733 
1734       // Extracting the entirety of Vec is a nop.
1735       if (VecNumElts == DstNumElts) {
1736         replaceInstUsesWith(CI, Vec);
1737         return eraseInstFromFunction(CI);
1738       }
1739 
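           // Illustrative example (not from the original source): extracting
           // <2 x i32> at index 2 from <4 x i32> %vec:
           //   %res = shufflevector <4 x i32> %vec, <4 x i32> undef,
           //                        <2 x i32> <i32 2, i32 3>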
1740       SmallVector<int, 8> Mask;
1741       for (unsigned i = 0; i != DstNumElts; ++i)
1742         Mask.push_back(IdxN + i);
1743 
1744       Value *Shuffle =
1745           Builder.CreateShuffleVector(Vec, UndefValue::get(VecTy), Mask);
1746       replaceInstUsesWith(CI, Shuffle);
1747       return eraseInstFromFunction(CI);
1748     }
1749     break;
1750   }
1751   default: {
1752     // Handle target specific intrinsics
1753     Optional<Instruction *> V = targetInstCombineIntrinsic(*II);
1754     if (V.hasValue())
1755       return V.getValue();
1756     break;
1757   }
1758   }
1759   return visitCallBase(*II);
1760 }
1761 
1762 // Fence instruction simplification
1763 Instruction *InstCombinerImpl::visitFenceInst(FenceInst &FI) {
1764   // Remove identical consecutive fences.
1765   Instruction *Next = FI.getNextNonDebugInstruction();
1766   if (auto *NFI = dyn_cast<FenceInst>(Next))
1767     if (FI.isIdenticalTo(NFI))
1768       return eraseInstFromFunction(FI);
1769   return nullptr;
1770 }
1771 
1772 // InvokeInst simplification
1773 Instruction *InstCombinerImpl::visitInvokeInst(InvokeInst &II) {
1774   return visitCallBase(II);
1775 }
1776 
1777 // CallBrInst simplification
1778 Instruction *InstCombinerImpl::visitCallBrInst(CallBrInst &CBI) {
1779   return visitCallBase(CBI);
1780 }
1781 
1782 /// If this cast does not affect the value passed through the varargs area, we
1783 /// can eliminate the use of the cast.
1784 static bool isSafeToEliminateVarargsCast(const CallBase &Call,
1785                                          const DataLayout &DL,
1786                                          const CastInst *const CI,
1787                                          const int ix) {
1788   if (!CI->isLosslessCast())
1789     return false;
1790 
1791   // If this is a GC intrinsic, avoid munging types.  We need types for
1792   // statepoint reconstruction in SelectionDAG.
1793   // TODO: This is probably something which should be expanded to all
1794   // intrinsics since the entire point of intrinsics is that
1795   // they are understandable by the optimizer.
1796   if (isa<GCStatepointInst>(Call) || isa<GCRelocateInst>(Call) ||
1797       isa<GCResultInst>(Call))
1798     return false;
1799 
1800   // The size of ByVal or InAlloca arguments is derived from the type, so we
1801   // can't change to a type with a different size.  If the size were
1802   // passed explicitly we could avoid this check.
1803   if (!Call.isPassPointeeByValueArgument(ix))
1804     return true;
1805 
1806   Type* SrcTy =
1807             cast<PointerType>(CI->getOperand(0)->getType())->getElementType();
1808   Type *DstTy = Call.isByValArgument(ix)
1809                     ? Call.getParamByValType(ix)
1810                     : cast<PointerType>(CI->getType())->getElementType();
1811   if (!SrcTy->isSized() || !DstTy->isSized())
1812     return false;
1813   if (DL.getTypeAllocSize(SrcTy) != DL.getTypeAllocSize(DstTy))
1814     return false;
1815   return true;
1816 }
1817 
1818 Instruction *InstCombinerImpl::tryOptimizeCall(CallInst *CI) {
1819   if (!CI->getCalledFunction()) return nullptr;
1820 
1821   auto InstCombineRAUW = [this](Instruction *From, Value *With) {
1822     replaceInstUsesWith(*From, With);
1823   };
1824   auto InstCombineErase = [this](Instruction *I) {
1825     eraseInstFromFunction(*I);
1826   };
1827   LibCallSimplifier Simplifier(DL, &TLI, ORE, BFI, PSI, InstCombineRAUW,
1828                                InstCombineErase);
1829   if (Value *With = Simplifier.optimizeCall(CI, Builder)) {
1830     ++NumSimplified;
1831     return CI->use_empty() ? CI : replaceInstUsesWith(*CI, With);
1832   }
1833 
1834   return nullptr;
1835 }
1836 
1837 static IntrinsicInst *findInitTrampolineFromAlloca(Value *TrampMem) {
1838   // Strip off at most one level of pointer casts, looking for an alloca.  This
1839   // is good enough in practice and simpler than handling any number of casts.
1840   Value *Underlying = TrampMem->stripPointerCasts();
1841   if (Underlying != TrampMem &&
1842       (!Underlying->hasOneUse() || Underlying->user_back() != TrampMem))
1843     return nullptr;
1844   if (!isa<AllocaInst>(Underlying))
1845     return nullptr;
1846 
1847   IntrinsicInst *InitTrampoline = nullptr;
1848   for (User *U : TrampMem->users()) {
1849     IntrinsicInst *II = dyn_cast<IntrinsicInst>(U);
1850     if (!II)
1851       return nullptr;
1852     if (II->getIntrinsicID() == Intrinsic::init_trampoline) {
1853       if (InitTrampoline)
1854         // More than one init_trampoline writes to this value.  Give up.
1855         return nullptr;
1856       InitTrampoline = II;
1857       continue;
1858     }
1859     if (II->getIntrinsicID() == Intrinsic::adjust_trampoline)
1860       // Allow any number of calls to adjust.trampoline.
1861       continue;
1862     return nullptr;
1863   }
1864 
1865   // No call to init.trampoline found.
1866   if (!InitTrampoline)
1867     return nullptr;
1868 
1869   // Check that the alloca is being used in the expected way.
1870   if (InitTrampoline->getOperand(0) != TrampMem)
1871     return nullptr;
1872 
1873   return InitTrampoline;
1874 }
1875 
1876 static IntrinsicInst *findInitTrampolineFromBB(IntrinsicInst *AdjustTramp,
1877                                                Value *TrampMem) {
1878   // Visit all the previous instructions in the basic block, and try to find
1879   // an init.trampoline which has a direct path to the adjust.trampoline.
1880   for (BasicBlock::iterator I = AdjustTramp->getIterator(),
1881                             E = AdjustTramp->getParent()->begin();
1882        I != E;) {
1883     Instruction *Inst = &*--I;
1884     if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst))
1885       if (II->getIntrinsicID() == Intrinsic::init_trampoline &&
1886           II->getOperand(0) == TrampMem)
1887         return II;
1888     if (Inst->mayWriteToMemory())
1889       return nullptr;
1890   }
1891   return nullptr;
1892 }
1893 
1894 // Given a call to llvm.adjust.trampoline, find and return the corresponding
1895 // call to llvm.init.trampoline if the call to the trampoline can be optimized
1896 // to a direct call to a function.  Otherwise return NULL.
1897 static IntrinsicInst *findInitTrampoline(Value *Callee) {
1898   Callee = Callee->stripPointerCasts();
1899   IntrinsicInst *AdjustTramp = dyn_cast<IntrinsicInst>(Callee);
1900   if (!AdjustTramp ||
1901       AdjustTramp->getIntrinsicID() != Intrinsic::adjust_trampoline)
1902     return nullptr;
1903 
1904   Value *TrampMem = AdjustTramp->getOperand(0);
1905 
1906   if (IntrinsicInst *IT = findInitTrampolineFromAlloca(TrampMem))
1907     return IT;
1908   if (IntrinsicInst *IT = findInitTrampolineFromBB(AdjustTramp, TrampMem))
1909     return IT;
1910   return nullptr;
1911 }
1912 
1913 static void annotateAnyAllocSite(CallBase &Call, const TargetLibraryInfo *TLI) {
1914   unsigned NumArgs = Call.getNumArgOperands();
1915   ConstantInt *Op0C = dyn_cast<ConstantInt>(Call.getOperand(0));
1916   ConstantInt *Op1C =
1917       (NumArgs == 1) ? nullptr : dyn_cast<ConstantInt>(Call.getOperand(1));
1918   // Bail out if the allocation size is zero (or an invalid alignment of zero
1919   // with aligned_alloc).
1920   if ((Op0C && Op0C->isNullValue()) || (Op1C && Op1C->isNullValue()))
1921     return;
1922 
1923   if (isMallocLikeFn(&Call, TLI) && Op0C) {
1924     if (isOpNewLikeFn(&Call, TLI))
1925       Call.addAttribute(AttributeList::ReturnIndex,
1926                         Attribute::getWithDereferenceableBytes(
1927                             Call.getContext(), Op0C->getZExtValue()));
1928     else
1929       Call.addAttribute(AttributeList::ReturnIndex,
1930                         Attribute::getWithDereferenceableOrNullBytes(
1931                             Call.getContext(), Op0C->getZExtValue()));
1932   } else if (isAlignedAllocLikeFn(&Call, TLI) && Op1C) {
1933     Call.addAttribute(AttributeList::ReturnIndex,
1934                       Attribute::getWithDereferenceableOrNullBytes(
1935                           Call.getContext(), Op1C->getZExtValue()));
1936     // Add alignment attribute if alignment is a power of two constant.
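         // Illustrative example (not from the original source): for
         // aligned_alloc(32, 400) the return value gets
         // dereferenceable_or_null(400) and align 32.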
1937     if (Op0C && Op0C->getValue().ult(llvm::Value::MaximumAlignment)) {
1938       uint64_t AlignmentVal = Op0C->getZExtValue();
1939       if (llvm::isPowerOf2_64(AlignmentVal))
1940         Call.addAttribute(AttributeList::ReturnIndex,
1941                           Attribute::getWithAlignment(Call.getContext(),
1942                                                       Align(AlignmentVal)));
1943     }
1944   } else if (isReallocLikeFn(&Call, TLI) && Op1C) {
1945     Call.addAttribute(AttributeList::ReturnIndex,
1946                       Attribute::getWithDereferenceableOrNullBytes(
1947                           Call.getContext(), Op1C->getZExtValue()));
1948   } else if (isCallocLikeFn(&Call, TLI) && Op0C && Op1C) {
1949     bool Overflow;
1950     const APInt &N = Op0C->getValue();
1951     APInt Size = N.umul_ov(Op1C->getValue(), Overflow);
1952     if (!Overflow)
1953       Call.addAttribute(AttributeList::ReturnIndex,
1954                         Attribute::getWithDereferenceableOrNullBytes(
1955                             Call.getContext(), Size.getZExtValue()));
1956   } else if (isStrdupLikeFn(&Call, TLI)) {
1957     uint64_t Len = GetStringLength(Call.getOperand(0));
1958     if (Len) {
1959       // strdup
1960       if (NumArgs == 1)
1961         Call.addAttribute(AttributeList::ReturnIndex,
1962                           Attribute::getWithDereferenceableOrNullBytes(
1963                               Call.getContext(), Len));
1964       // strndup
1965       else if (NumArgs == 2 && Op1C)
1966         Call.addAttribute(
1967             AttributeList::ReturnIndex,
1968             Attribute::getWithDereferenceableOrNullBytes(
1969                 Call.getContext(), std::min(Len, Op1C->getZExtValue() + 1)));
1970     }
1971   }
1972 }
1973 
1974 /// Improvements for call, callbr and invoke instructions.
1975 Instruction *InstCombinerImpl::visitCallBase(CallBase &Call) {
1976   if (isAllocationFn(&Call, &TLI))
1977     annotateAnyAllocSite(Call, &TLI);
1978 
1979   bool Changed = false;
1980 
1981   // Mark any parameters that are known to be non-null with the nonnull
1982   // attribute.  This is helpful for inlining calls to functions with null
1983   // checks on their arguments.
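       // Illustrative example (not from the original source): passing the
       // address of an alloca (known non-null) lets the call site become
       //   call void @f(i32* nonnull %local)
       // where @f and %local are placeholder names.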
1984   SmallVector<unsigned, 4> ArgNos;
1985   unsigned ArgNo = 0;
1986 
1987   for (Value *V : Call.args()) {
1988     if (V->getType()->isPointerTy() &&
1989         !Call.paramHasAttr(ArgNo, Attribute::NonNull) &&
1990         isKnownNonZero(V, DL, 0, &AC, &Call, &DT))
1991       ArgNos.push_back(ArgNo);
1992     ArgNo++;
1993   }
1994 
1995   assert(ArgNo == Call.arg_size() && "sanity check");
1996 
1997   if (!ArgNos.empty()) {
1998     AttributeList AS = Call.getAttributes();
1999     LLVMContext &Ctx = Call.getContext();
2000     AS = AS.addParamAttribute(Ctx, ArgNos,
2001                               Attribute::get(Ctx, Attribute::NonNull));
2002     Call.setAttributes(AS);
2003     Changed = true;
2004   }
2005 
2006   // If the callee is a pointer to a function, attempt to move any casts to the
2007   // arguments of the call/callbr/invoke.
2008   Value *Callee = Call.getCalledOperand();
2009   if (!isa<Function>(Callee) && transformConstExprCastCall(Call))
2010     return nullptr;
2011 
2012   if (Function *CalleeF = dyn_cast<Function>(Callee)) {
2013     // Remove the convergent attr on calls when the callee is not convergent.
2014     if (Call.isConvergent() && !CalleeF->isConvergent() &&
2015         !CalleeF->isIntrinsic()) {
2016       LLVM_DEBUG(dbgs() << "Removing convergent attr from instr " << Call
2017                         << "\n");
2018       Call.setNotConvergent();
2019       return &Call;
2020     }
2021 
2022     // If the call and callee calling conventions don't match, this call must
2023     // be unreachable, as the call is undefined.
2024     if (CalleeF->getCallingConv() != Call.getCallingConv() &&
2025         // Only do this for calls to a function with a body.  A prototype may
2026         // not actually end up matching the implementation's calling conv for a
2027         // variety of reasons (e.g. it may be written in assembly).
2028         !CalleeF->isDeclaration()) {
2029       Instruction *OldCall = &Call;
2030       CreateNonTerminatorUnreachable(OldCall);
2031       // If OldCall does not return void then replaceInstUsesWith undef.
2032       // This allows ValueHandlers and custom metadata to adjust themselves.
2033       if (!OldCall->getType()->isVoidTy())
2034         replaceInstUsesWith(*OldCall, UndefValue::get(OldCall->getType()));
2035       if (isa<CallInst>(OldCall))
2036         return eraseInstFromFunction(*OldCall);
2037 
2038       // We cannot remove an invoke or a callbr because it would change the
2039       // CFG; just change the callee to a null pointer.
2040       cast<CallBase>(OldCall)->setCalledFunction(
2041           CalleeF->getFunctionType(),
2042           Constant::getNullValue(CalleeF->getType()));
2043       return nullptr;
2044     }
2045   }
2046 
2047   if ((isa<ConstantPointerNull>(Callee) &&
2048        !NullPointerIsDefined(Call.getFunction())) ||
2049       isa<UndefValue>(Callee)) {
2050     // If Call does not return void then replaceInstUsesWith undef.
2051     // This allows ValueHandlers and custom metadata to adjust themselves.
2052     if (!Call.getType()->isVoidTy())
2053       replaceInstUsesWith(Call, UndefValue::get(Call.getType()));
2054 
2055     if (Call.isTerminator()) {
2056       // Can't remove an invoke or callbr because we cannot change the CFG.
2057       return nullptr;
2058     }
2059 
2060     // This instruction is not reachable, just remove it.
2061     CreateNonTerminatorUnreachable(&Call);
2062     return eraseInstFromFunction(Call);
2063   }
2064 
2065   if (IntrinsicInst *II = findInitTrampoline(Callee))
2066     return transformCallThroughTrampoline(Call, *II);
2067 
2068   PointerType *PTy = cast<PointerType>(Callee->getType());
2069   FunctionType *FTy = cast<FunctionType>(PTy->getElementType());
2070   if (FTy->isVarArg()) {
2071     int ix = FTy->getNumParams();
2072     // See if we can optimize any arguments passed through the varargs area of
2073     // the call.
2074     for (auto I = Call.arg_begin() + FTy->getNumParams(), E = Call.arg_end();
2075          I != E; ++I, ++ix) {
2076       CastInst *CI = dyn_cast<CastInst>(*I);
2077       if (CI && isSafeToEliminateVarargsCast(Call, DL, CI, ix)) {
2078         replaceUse(*I, CI->getOperand(0));
2079 
2080         // Update the byval type to match the argument type.
2081         if (Call.isByValArgument(ix)) {
2082           Call.removeParamAttr(ix, Attribute::ByVal);
2083           Call.addParamAttr(
2084               ix, Attribute::getWithByValType(
2085                       Call.getContext(),
2086                       CI->getOperand(0)->getType()->getPointerElementType()));
2087         }
2088         Changed = true;
2089       }
2090     }
2091   }
2092 
2093   if (isa<InlineAsm>(Callee) && !Call.doesNotThrow()) {
2094     // Inline asm calls cannot throw - mark them 'nounwind'.
2095     Call.setDoesNotThrow();
2096     Changed = true;
2097   }
2098 
2099   // Try to optimize the call if possible; we require DataLayout for most of
2100   // this.  None of these calls are seen as possibly dead, so go ahead and
2101   // delete the instruction now.
2102   if (CallInst *CI = dyn_cast<CallInst>(&Call)) {
2103     Instruction *I = tryOptimizeCall(CI);
2104     // If we changed something, return the result. Otherwise let the
2105     // fallthrough checks below run.
2106     if (I) return eraseInstFromFunction(*I);
2107   }
2108 
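       // If one of the arguments carries the 'returned' attribute, the call
       // returns that argument, so uses of the call can forward to the argument
       // directly.  Illustrative example (not from the original source):
       //   declare i8* @f(i8* returned %p)
       //   %r = call i8* @f(i8* %q)   ; uses of %r can be rewritten to use %q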
2109   if (!Call.use_empty() && !Call.isMustTailCall())
2110     if (Value *ReturnedArg = Call.getReturnedArgOperand()) {
2111       Type *CallTy = Call.getType();
2112       Type *RetArgTy = ReturnedArg->getType();
2113       if (RetArgTy->canLosslesslyBitCastTo(CallTy))
2114         return replaceInstUsesWith(
2115             Call, Builder.CreateBitOrPointerCast(ReturnedArg, CallTy));
2116     }
2117 
2118   if (isAllocLikeFn(&Call, &TLI))
2119     return visitAllocSite(Call);
2120 
2121   return Changed ? &Call : nullptr;
2122 }
2123 
2124 /// If the callee is a constexpr cast of a function, attempt to move the cast to
2125 /// the arguments of the call/callbr/invoke.
2126 bool InstCombinerImpl::transformConstExprCastCall(CallBase &Call) {
2127   auto *Callee =
2128       dyn_cast<Function>(Call.getCalledOperand()->stripPointerCasts());
2129   if (!Callee)
2130     return false;
2131 
2132   // If this is a call to a thunk function, don't remove the cast. Thunks are
2133   // used to transparently forward all incoming parameters and outgoing return
2134   // values, so it's important to leave the cast in place.
2135   if (Callee->hasFnAttribute("thunk"))
2136     return false;
2137 
2138   // If this is a musttail call, the callee's prototype must match the caller's
2139   // prototype with the exception of pointee types. The code below doesn't
2140   // implement that, so we can't do this transform.
2141   // TODO: Do the transform if it only requires adding pointer casts.
2142   if (Call.isMustTailCall())
2143     return false;
2144 
2145   Instruction *Caller = &Call;
2146   const AttributeList &CallerPAL = Call.getAttributes();
2147 
2148   // Okay, this is a cast from a function to a different type.  Unless doing so
2149   // would cause a type conversion of one of our arguments, change this call to
2150   // be a direct call with arguments cast to the appropriate types.
2151   FunctionType *FT = Callee->getFunctionType();
2152   Type *OldRetTy = Caller->getType();
2153   Type *NewRetTy = FT->getReturnType();
2154 
2155   // Check to see if we are changing the return type...
2156   if (OldRetTy != NewRetTy) {
2157 
2158     if (NewRetTy->isStructTy())
2159       return false; // TODO: Handle multiple return values.
2160 
2161     if (!CastInst::isBitOrNoopPointerCastable(NewRetTy, OldRetTy, DL)) {
2162       if (Callee->isDeclaration())
2163         return false;   // Cannot transform this return value.
2164 
2165       if (!Caller->use_empty() &&
2166           // void -> non-void is handled specially
2167           !NewRetTy->isVoidTy())
2168         return false;   // Cannot transform this return value.
2169     }
2170 
2171     if (!CallerPAL.isEmpty() && !Caller->use_empty()) {
2172       AttrBuilder RAttrs(CallerPAL, AttributeList::ReturnIndex);
2173       if (RAttrs.overlaps(AttributeFuncs::typeIncompatible(NewRetTy)))
2174         return false;   // Attribute not compatible with transformed value.
2175     }
2176 
2177     // If the callbase is an invoke/callbr instruction, and the return value is
2178     // used by a PHI node in a successor, we cannot change the return type of
2179     // the call because there is no place to put the cast instruction (without
2180     // breaking the critical edge).  Bail out in this case.
2181     if (!Caller->use_empty()) {
2182       if (InvokeInst *II = dyn_cast<InvokeInst>(Caller))
2183         for (User *U : II->users())
2184           if (PHINode *PN = dyn_cast<PHINode>(U))
2185             if (PN->getParent() == II->getNormalDest() ||
2186                 PN->getParent() == II->getUnwindDest())
2187               return false;
2188       // FIXME: Be conservative for callbr to avoid a quadratic search.
2189       if (isa<CallBrInst>(Caller))
2190         return false;
2191     }
2192   }
2193 
2194   unsigned NumActualArgs = Call.arg_size();
2195   unsigned NumCommonArgs = std::min(FT->getNumParams(), NumActualArgs);
2196 
2197   // Prevent us turning:
2198   // declare void @takes_i32_inalloca(i32* inalloca)
2199   //  call void bitcast (void (i32*)* @takes_i32_inalloca to void (i32)*)(i32 0)
2200   //
2201   // into:
2202   //  call void @takes_i32_inalloca(i32* null)
2203   //
2204   //  Similarly, avoid folding away bitcasts of byval calls.
2205   if (Callee->getAttributes().hasAttrSomewhere(Attribute::InAlloca) ||
2206       Callee->getAttributes().hasAttrSomewhere(Attribute::Preallocated) ||
2207       Callee->getAttributes().hasAttrSomewhere(Attribute::ByVal))
2208     return false;
2209 
2210   auto AI = Call.arg_begin();
2211   for (unsigned i = 0, e = NumCommonArgs; i != e; ++i, ++AI) {
2212     Type *ParamTy = FT->getParamType(i);
2213     Type *ActTy = (*AI)->getType();
2214 
2215     if (!CastInst::isBitOrNoopPointerCastable(ActTy, ParamTy, DL))
2216       return false;   // Cannot transform this parameter value.
2217 
2218     if (AttrBuilder(CallerPAL.getParamAttributes(i))
2219             .overlaps(AttributeFuncs::typeIncompatible(ParamTy)))
2220       return false;   // Attribute not compatible with transformed value.
2221 
2222     if (Call.isInAllocaArgument(i))
2223       return false;   // Cannot transform to and from inalloca.
2224 
2225     if (CallerPAL.hasParamAttribute(i, Attribute::SwiftError))
2226       return false;
2227 
2228     // If the parameter is passed as a byval argument, then we have to have a
2229     // sized type and the sized type has to have the same size as the old type.
2230     if (ParamTy != ActTy && CallerPAL.hasParamAttribute(i, Attribute::ByVal)) {
2231       PointerType *ParamPTy = dyn_cast<PointerType>(ParamTy);
2232       if (!ParamPTy || !ParamPTy->getElementType()->isSized())
2233         return false;
2234 
2235       Type *CurElTy = Call.getParamByValType(i);
2236       if (DL.getTypeAllocSize(CurElTy) !=
2237           DL.getTypeAllocSize(ParamPTy->getElementType()))
2238         return false;
2239     }
2240   }
2241 
2242   if (Callee->isDeclaration()) {
2243     // Do not delete arguments unless we have a function body.
2244     if (FT->getNumParams() < NumActualArgs && !FT->isVarArg())
2245       return false;
2246 
2247     // If the callee is just a declaration, don't change the varargsness of the
2248     // call.  We don't want to introduce a varargs call where one doesn't
2249     // already exist.
2250     PointerType *APTy = cast<PointerType>(Call.getCalledOperand()->getType());
2251     if (FT->isVarArg()!=cast<FunctionType>(APTy->getElementType())->isVarArg())
2252       return false;
2253 
2254     // If both the callee and the cast type are varargs, we still have to make
2255     // sure the number of fixed parameters are the same or we have the same
2256     // ABI issues as if we introduce a varargs call.
2257     if (FT->isVarArg() &&
2258         cast<FunctionType>(APTy->getElementType())->isVarArg() &&
2259         FT->getNumParams() !=
2260         cast<FunctionType>(APTy->getElementType())->getNumParams())
2261       return false;
2262   }
2263 
2264   if (FT->getNumParams() < NumActualArgs && FT->isVarArg() &&
2265       !CallerPAL.isEmpty()) {
2266     // In this case we have more arguments than the new function type, but we
2267     // won't be dropping them.  Check that these extra arguments have attributes
2268     // that are compatible with being a vararg call argument.
2269     unsigned SRetIdx;
2270     if (CallerPAL.hasAttrSomewhere(Attribute::StructRet, &SRetIdx) &&
2271         SRetIdx > FT->getNumParams())
2272       return false;
2273   }
2274 
2275   // Okay, we decided that this is a safe thing to do: go ahead and start
2276   // inserting cast instructions as necessary.
2277   SmallVector<Value *, 8> Args;
2278   SmallVector<AttributeSet, 8> ArgAttrs;
2279   Args.reserve(NumActualArgs);
2280   ArgAttrs.reserve(NumActualArgs);
2281 
2282   // Get any return attributes.
2283   AttrBuilder RAttrs(CallerPAL, AttributeList::ReturnIndex);
2284 
2285   // If the return value is not being used, the type may not be compatible
2286   // with the existing attributes.  Wipe out any problematic attributes.
2287   RAttrs.remove(AttributeFuncs::typeIncompatible(NewRetTy));
2288 
2289   LLVMContext &Ctx = Call.getContext();
2290   AI = Call.arg_begin();
2291   for (unsigned i = 0; i != NumCommonArgs; ++i, ++AI) {
2292     Type *ParamTy = FT->getParamType(i);
2293 
2294     Value *NewArg = *AI;
2295     if ((*AI)->getType() != ParamTy)
2296       NewArg = Builder.CreateBitOrPointerCast(*AI, ParamTy);
2297     Args.push_back(NewArg);
2298 
2299     // Add any parameter attributes.
2300     if (CallerPAL.hasParamAttribute(i, Attribute::ByVal)) {
2301       AttrBuilder AB(CallerPAL.getParamAttributes(i));
2302       AB.addByValAttr(NewArg->getType()->getPointerElementType());
2303       ArgAttrs.push_back(AttributeSet::get(Ctx, AB));
2304     } else
2305       ArgAttrs.push_back(CallerPAL.getParamAttributes(i));
2306   }
2307 
2308   // If the function takes more arguments than the call was taking, add them
2309   // now.
2310   for (unsigned i = NumCommonArgs; i != FT->getNumParams(); ++i) {
2311     Args.push_back(Constant::getNullValue(FT->getParamType(i)));
2312     ArgAttrs.push_back(AttributeSet());
2313   }
2314 
2315   // If we are removing arguments to the function, emit an obnoxious warning.
2316   if (FT->getNumParams() < NumActualArgs) {
2317     // TODO: if (!FT->isVarArg()) this call may be unreachable. PR14722
2318     if (FT->isVarArg()) {
2319       // Add all of the arguments in their promoted form to the arg list.
2320       for (unsigned i = FT->getNumParams(); i != NumActualArgs; ++i, ++AI) {
2321         Type *PTy = getPromotedType((*AI)->getType());
2322         Value *NewArg = *AI;
2323         if (PTy != (*AI)->getType()) {
2324           // Must promote to pass through va_arg area!
2325           Instruction::CastOps opcode =
2326             CastInst::getCastOpcode(*AI, false, PTy, false);
2327           NewArg = Builder.CreateCast(opcode, *AI, PTy);
2328         }
2329         Args.push_back(NewArg);
2330 
2331         // Add any parameter attributes.
2332         ArgAttrs.push_back(CallerPAL.getParamAttributes(i));
2333       }
2334     }
2335   }
2336 
2337   AttributeSet FnAttrs = CallerPAL.getFnAttributes();
2338 
2339   if (NewRetTy->isVoidTy())
2340     Caller->setName("");   // Void type should not have a name.
2341 
2342   assert((ArgAttrs.size() == FT->getNumParams() || FT->isVarArg()) &&
2343          "missing argument attributes");
2344   AttributeList NewCallerPAL = AttributeList::get(
2345       Ctx, FnAttrs, AttributeSet::get(Ctx, RAttrs), ArgAttrs);
2346 
2347   SmallVector<OperandBundleDef, 1> OpBundles;
2348   Call.getOperandBundlesAsDefs(OpBundles);
2349 
2350   CallBase *NewCall;
2351   if (InvokeInst *II = dyn_cast<InvokeInst>(Caller)) {
2352     NewCall = Builder.CreateInvoke(Callee, II->getNormalDest(),
2353                                    II->getUnwindDest(), Args, OpBundles);
2354   } else if (CallBrInst *CBI = dyn_cast<CallBrInst>(Caller)) {
2355     NewCall = Builder.CreateCallBr(Callee, CBI->getDefaultDest(),
2356                                    CBI->getIndirectDests(), Args, OpBundles);
2357   } else {
2358     NewCall = Builder.CreateCall(Callee, Args, OpBundles);
2359     cast<CallInst>(NewCall)->setTailCallKind(
2360         cast<CallInst>(Caller)->getTailCallKind());
2361   }
2362   NewCall->takeName(Caller);
2363   NewCall->setCallingConv(Call.getCallingConv());
2364   NewCall->setAttributes(NewCallerPAL);
2365 
2366   // Preserve prof metadata if any.
2367   NewCall->copyMetadata(*Caller, {LLVMContext::MD_prof});
2368 
2369   // Insert a cast of the return type as necessary.
2370   Instruction *NC = NewCall;
2371   Value *NV = NC;
2372   if (OldRetTy != NV->getType() && !Caller->use_empty()) {
2373     if (!NV->getType()->isVoidTy()) {
2374       NV = NC = CastInst::CreateBitOrPointerCast(NC, OldRetTy);
2375       NC->setDebugLoc(Caller->getDebugLoc());
2376 
2377       // If this is an invoke/callbr instruction, we should insert it after the
2378       // first non-phi instruction in the normal successor block.
2379       if (InvokeInst *II = dyn_cast<InvokeInst>(Caller)) {
2380         BasicBlock::iterator I = II->getNormalDest()->getFirstInsertionPt();
2381         InsertNewInstBefore(NC, *I);
2382       } else if (CallBrInst *CBI = dyn_cast<CallBrInst>(Caller)) {
2383         BasicBlock::iterator I = CBI->getDefaultDest()->getFirstInsertionPt();
2384         InsertNewInstBefore(NC, *I);
2385       } else {
2386         // Otherwise, it's a call, just insert cast right after the call.
2387         InsertNewInstBefore(NC, *Caller);
2388       }
2389       Worklist.pushUsersToWorkList(*Caller);
2390     } else {
2391       NV = UndefValue::get(Caller->getType());
2392     }
2393   }
2394 
2395   if (!Caller->use_empty())
2396     replaceInstUsesWith(*Caller, NV);
2397   else if (Caller->hasValueHandle()) {
2398     if (OldRetTy == NV->getType())
2399       ValueHandleBase::ValueIsRAUWd(Caller, NV);
2400     else
2401       // We cannot call ValueIsRAUWd with a different type, and the
2402       // actual tracked value will disappear.
2403       ValueHandleBase::ValueIsDeleted(Caller);
2404   }
2405 
2406   eraseInstFromFunction(*Caller);
2407   return true;
2408 }
2409 
2410 /// Turn a call to a function created by init_trampoline / adjust_trampoline
2411 /// intrinsic pair into a direct call to the underlying function.
2412 Instruction *
2413 InstCombinerImpl::transformCallThroughTrampoline(CallBase &Call,
2414                                                  IntrinsicInst &Tramp) {
2415   Value *Callee = Call.getCalledOperand();
2416   Type *CalleeTy = Callee->getType();
2417   FunctionType *FTy = Call.getFunctionType();
2418   AttributeList Attrs = Call.getAttributes();
2419 
2420   // If the call already has the 'nest' attribute somewhere then give up -
2421   // otherwise 'nest' would occur twice after splicing in the chain.
2422   if (Attrs.hasAttrSomewhere(Attribute::Nest))
2423     return nullptr;
2424 
2425   Function *NestF = cast<Function>(Tramp.getArgOperand(1)->stripPointerCasts());
2426   FunctionType *NestFTy = NestF->getFunctionType();
2427 
2428   AttributeList NestAttrs = NestF->getAttributes();
2429   if (!NestAttrs.isEmpty()) {
2430     unsigned NestArgNo = 0;
2431     Type *NestTy = nullptr;
2432     AttributeSet NestAttr;
2433 
2434     // Look for a parameter marked with the 'nest' attribute.
2435     for (FunctionType::param_iterator I = NestFTy->param_begin(),
2436                                       E = NestFTy->param_end();
2437          I != E; ++NestArgNo, ++I) {
2438       AttributeSet AS = NestAttrs.getParamAttributes(NestArgNo);
2439       if (AS.hasAttribute(Attribute::Nest)) {
2440         // Record the parameter type and any other attributes.
2441         NestTy = *I;
2442         NestAttr = AS;
2443         break;
2444       }
2445     }
2446 
2447     if (NestTy) {
2448       std::vector<Value*> NewArgs;
2449       std::vector<AttributeSet> NewArgAttrs;
2450       NewArgs.reserve(Call.arg_size() + 1);
2451       NewArgAttrs.reserve(Call.arg_size());
2452 
2453       // Insert the nest argument into the call argument list, which may
2454       // mean appending it.  Likewise for attributes.
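           // Illustrative example (not from the original source): for a callee
           //   define i32 @f(i8* nest %chain, i32 %x)
           // reached through a trampoline of type i32 (i32), the chain value
           // recorded by llvm.init.trampoline is spliced in as the first
           // argument, giving a direct call:
           //   call i32 @f(i8* nest %chain.val, i32 %x)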
2455 
2456       {
2457         unsigned ArgNo = 0;
2458         auto I = Call.arg_begin(), E = Call.arg_end();
2459         do {
2460           if (ArgNo == NestArgNo) {
2461             // Add the chain argument and attributes.
2462             Value *NestVal = Tramp.getArgOperand(2);
2463             if (NestVal->getType() != NestTy)
2464               NestVal = Builder.CreateBitCast(NestVal, NestTy, "nest");
2465             NewArgs.push_back(NestVal);
2466             NewArgAttrs.push_back(NestAttr);
2467           }
2468 
2469           if (I == E)
2470             break;
2471 
2472           // Add the original argument and attributes.
2473           NewArgs.push_back(*I);
2474           NewArgAttrs.push_back(Attrs.getParamAttributes(ArgNo));
2475 
2476           ++ArgNo;
2477           ++I;
2478         } while (true);
2479       }
2480 
2481       // The trampoline may have been bitcast to a bogus type (FTy).
2482       // Handle this by synthesizing a new function type, equal to FTy
2483       // with the chain parameter inserted.
2484 
2485       std::vector<Type*> NewTypes;
2486       NewTypes.reserve(FTy->getNumParams()+1);
2487 
2488       // Insert the chain's type into the list of parameter types, which may
2489       // mean appending it.
2490       {
2491         unsigned ArgNo = 0;
2492         FunctionType::param_iterator I = FTy->param_begin(),
2493           E = FTy->param_end();
2494 
2495         do {
2496           if (ArgNo == NestArgNo)
2497             // Add the chain's type.
2498             NewTypes.push_back(NestTy);
2499 
2500           if (I == E)
2501             break;
2502 
2503           // Add the original type.
2504           NewTypes.push_back(*I);
2505 
2506           ++ArgNo;
2507           ++I;
2508         } while (true);
2509       }
2510 
2511       // Replace the trampoline call with a direct call.  Let the generic
2512       // code sort out any function type mismatches.
2513       FunctionType *NewFTy = FunctionType::get(FTy->getReturnType(), NewTypes,
2514                                                 FTy->isVarArg());
2515       Constant *NewCallee =
2516         NestF->getType() == PointerType::getUnqual(NewFTy) ?
2517         NestF : ConstantExpr::getBitCast(NestF,
2518                                          PointerType::getUnqual(NewFTy));
2519       AttributeList NewPAL =
2520           AttributeList::get(FTy->getContext(), Attrs.getFnAttributes(),
2521                              Attrs.getRetAttributes(), NewArgAttrs);
2522 
2523       SmallVector<OperandBundleDef, 1> OpBundles;
2524       Call.getOperandBundlesAsDefs(OpBundles);
2525 
2526       Instruction *NewCaller;
2527       if (InvokeInst *II = dyn_cast<InvokeInst>(&Call)) {
2528         NewCaller = InvokeInst::Create(NewFTy, NewCallee,
2529                                        II->getNormalDest(), II->getUnwindDest(),
2530                                        NewArgs, OpBundles);
2531         cast<InvokeInst>(NewCaller)->setCallingConv(II->getCallingConv());
2532         cast<InvokeInst>(NewCaller)->setAttributes(NewPAL);
2533       } else if (CallBrInst *CBI = dyn_cast<CallBrInst>(&Call)) {
2534         NewCaller =
2535             CallBrInst::Create(NewFTy, NewCallee, CBI->getDefaultDest(),
2536                                CBI->getIndirectDests(), NewArgs, OpBundles);
2537         cast<CallBrInst>(NewCaller)->setCallingConv(CBI->getCallingConv());
2538         cast<CallBrInst>(NewCaller)->setAttributes(NewPAL);
2539       } else {
2540         NewCaller = CallInst::Create(NewFTy, NewCallee, NewArgs, OpBundles);
2541         cast<CallInst>(NewCaller)->setTailCallKind(
2542             cast<CallInst>(Call).getTailCallKind());
2543         cast<CallInst>(NewCaller)->setCallingConv(
2544             cast<CallInst>(Call).getCallingConv());
2545         cast<CallInst>(NewCaller)->setAttributes(NewPAL);
2546       }
2547       NewCaller->setDebugLoc(Call.getDebugLoc());
2548 
2549       return NewCaller;
2550     }
2551   }
2552 
2553   // Replace the trampoline call with a direct call.  Since there is no 'nest'
2554   // parameter, there is no need to adjust the argument list.  Let the generic
2555   // code sort out any function type mismatches.
2556   Constant *NewCallee = ConstantExpr::getBitCast(NestF, CalleeTy);
2557   Call.setCalledFunction(FTy, NewCallee);
2558   return &Call;
2559 }
2560