//===- InstCombineCalls.cpp -----------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the visitCall and visitInvoke functions.
//
//===----------------------------------------------------------------------===//

#include "InstCombineInternal.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Statepoint.h"
#include "llvm/Transforms/Utils/BuildLibCalls.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/SimplifyLibCalls.h"
using namespace llvm;
using namespace PatternMatch;

#define DEBUG_TYPE "instcombine"

STATISTIC(NumSimplified, "Number of library calls simplified");

/// getPromotedType - Return the specified type promoted as it would be to pass
/// through a va_arg area.
static Type *getPromotedType(Type *Ty) {
  if (IntegerType* ITy = dyn_cast<IntegerType>(Ty)) {
    if (ITy->getBitWidth() < 32)
      return Type::getInt32Ty(Ty->getContext());
  }
  return Ty;
}

/// reduceToSingleValueType - Given an aggregate type which ultimately holds a
/// single scalar element, like {{{type}}} or [1 x type], return type.
static Type *reduceToSingleValueType(Type *T) {
  while (!T->isSingleValueType()) {
    if (StructType *STy = dyn_cast<StructType>(T)) {
      if (STy->getNumElements() == 1)
        T = STy->getElementType(0);
      else
        break;
    } else if (ArrayType *ATy = dyn_cast<ArrayType>(T)) {
      if (ATy->getNumElements() == 1)
        T = ATy->getElementType();
      else
        break;
    } else
      break;
  }

  return T;
}

Instruction *InstCombiner::SimplifyMemTransfer(MemIntrinsic *MI) {
  unsigned DstAlign = getKnownAlignment(MI->getArgOperand(0), DL, MI, AC, DT);
  unsigned SrcAlign = getKnownAlignment(MI->getArgOperand(1), DL, MI, AC, DT);
  unsigned MinAlign = std::min(DstAlign, SrcAlign);
  unsigned CopyAlign = MI->getAlignment();

  if (CopyAlign < MinAlign) {
    MI->setAlignment(ConstantInt::get(MI->getAlignmentType(), MinAlign, false));
    return MI;
  }

  // If MemCpyInst length is 1/2/4/8 bytes then replace memcpy with
  // load/store.
  ConstantInt *MemOpLength = dyn_cast<ConstantInt>(MI->getArgOperand(2));
  if (!MemOpLength) return nullptr;

  // Source and destination pointer types are always "i8*" for intrinsic.  See
  // if the size is something we can handle with a single primitive load/store.
  // A single load+store correctly handles overlapping memory in the memmove
  // case.
  uint64_t Size = MemOpLength->getLimitedValue();
  assert(Size && "0-sized memory transferring should be removed already.");

  if (Size > 8 || (Size&(Size-1)))
    return nullptr;  // If not 1/2/4/8 bytes, exit.

  // Use an integer load+store unless we can find something better.
  unsigned SrcAddrSp =
    cast<PointerType>(MI->getArgOperand(1)->getType())->getAddressSpace();
  unsigned DstAddrSp =
    cast<PointerType>(MI->getArgOperand(0)->getType())->getAddressSpace();

  IntegerType* IntType = IntegerType::get(MI->getContext(), Size<<3);
  Type *NewSrcPtrTy = PointerType::get(IntType, SrcAddrSp);
  Type *NewDstPtrTy = PointerType::get(IntType, DstAddrSp);

  // Memcpy forces the use of i8* for the source and destination.  That means
  // that if you're using memcpy to move one double around, you'll get a cast
  // from double* to i8*.  We'd much rather use a double load+store rather than
  // an i64 load+store, here because this improves the odds that the source or
  // dest address will be promotable.  See if we can find a better type than
  // the integer datatype.
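  //
  // For example (illustrative IR, assuming a constant 8-byte copy where both
  // pointers are bitcasts of double*):
  //   call void @llvm.memcpy.p0i8.p0i8.i64(i8* %d8, i8* %s8, i64 8,
  //                                        i32 8, i1 false)
  // becomes:
  //   %v = load double, double* %s, align 8
  //   store double %v, double* %d, align 8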
  Value *StrippedDest = MI->getArgOperand(0)->stripPointerCasts();
  MDNode *CopyMD = nullptr;
  if (StrippedDest != MI->getArgOperand(0)) {
    Type *SrcETy = cast<PointerType>(StrippedDest->getType())
                                    ->getElementType();
    if (SrcETy->isSized() && DL.getTypeStoreSize(SrcETy) == Size) {
      // The SrcETy might be something like {{{double}}} or [1 x double].  Rip
      // down through these levels if so.
      SrcETy = reduceToSingleValueType(SrcETy);

      if (SrcETy->isSingleValueType()) {
        NewSrcPtrTy = PointerType::get(SrcETy, SrcAddrSp);
        NewDstPtrTy = PointerType::get(SrcETy, DstAddrSp);

        // If the memcpy has metadata describing the members, see if we can
        // get the TBAA tag describing our copy.
        if (MDNode *M = MI->getMetadata(LLVMContext::MD_tbaa_struct)) {
          if (M->getNumOperands() == 3 && M->getOperand(0) &&
              mdconst::hasa<ConstantInt>(M->getOperand(0)) &&
              mdconst::extract<ConstantInt>(M->getOperand(0))->isNullValue() &&
              M->getOperand(1) &&
              mdconst::hasa<ConstantInt>(M->getOperand(1)) &&
              mdconst::extract<ConstantInt>(M->getOperand(1))->getValue() ==
              Size &&
              M->getOperand(2) && isa<MDNode>(M->getOperand(2)))
            CopyMD = cast<MDNode>(M->getOperand(2));
        }
      }
    }
  }

  // If the memcpy/memmove provides better alignment info than we can
  // infer, use it.
  SrcAlign = std::max(SrcAlign, CopyAlign);
  DstAlign = std::max(DstAlign, CopyAlign);

  Value *Src = Builder->CreateBitCast(MI->getArgOperand(1), NewSrcPtrTy);
  Value *Dest = Builder->CreateBitCast(MI->getArgOperand(0), NewDstPtrTy);
  LoadInst *L = Builder->CreateLoad(Src, MI->isVolatile());
  L->setAlignment(SrcAlign);
  if (CopyMD)
    L->setMetadata(LLVMContext::MD_tbaa, CopyMD);
  StoreInst *S = Builder->CreateStore(L, Dest, MI->isVolatile());
  S->setAlignment(DstAlign);
  if (CopyMD)
    S->setMetadata(LLVMContext::MD_tbaa, CopyMD);

  // Set the size of the copy to 0, it will be deleted on the next iteration.
  MI->setArgOperand(2, Constant::getNullValue(MemOpLength->getType()));
  return MI;
}

Instruction *InstCombiner::SimplifyMemSet(MemSetInst *MI) {
  unsigned Alignment = getKnownAlignment(MI->getDest(), DL, MI, AC, DT);
  if (MI->getAlignment() < Alignment) {
    MI->setAlignment(ConstantInt::get(MI->getAlignmentType(),
                                      Alignment, false));
    return MI;
  }

  // Extract the length and alignment and fill if they are constant.
  ConstantInt *LenC = dyn_cast<ConstantInt>(MI->getLength());
  ConstantInt *FillC = dyn_cast<ConstantInt>(MI->getValue());
  if (!LenC || !FillC || !FillC->getType()->isIntegerTy(8))
    return nullptr;
  uint64_t Len = LenC->getLimitedValue();
  Alignment = MI->getAlignment();
  assert(Len && "0-sized memory setting should be removed already.");

  // memset(s,c,n) -> store s, c   (for n=1,2,4,8)
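  //
  // For example (illustrative IR): a 4-byte memset of the value 1 becomes
  //   store i32 16843009, i32* %p    ; 16843009 == 0x01010101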
  if (Len <= 8 && isPowerOf2_32((uint32_t)Len)) {
    Type *ITy = IntegerType::get(MI->getContext(), Len*8);  // n=1 -> i8.

    Value *Dest = MI->getDest();
    unsigned DstAddrSp = cast<PointerType>(Dest->getType())->getAddressSpace();
    Type *NewDstPtrTy = PointerType::get(ITy, DstAddrSp);
    Dest = Builder->CreateBitCast(Dest, NewDstPtrTy);

    // Alignment 0 is identity for alignment 1 for memset, but not store.
    if (Alignment == 0) Alignment = 1;

    // Extract the fill value and store.
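    // Multiplying the byte by 0x0101010101010101 splats it across all eight
    // byte lanes (e.g. 0xAB -> 0xABABABABABABABAB); ConstantInt::get then
    // truncates the splat to the store width ITy.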
    uint64_t Fill = FillC->getZExtValue()*0x0101010101010101ULL;
    StoreInst *S = Builder->CreateStore(ConstantInt::get(ITy, Fill), Dest,
                                        MI->isVolatile());
    S->setAlignment(Alignment);

    // Set the size of the copy to 0, it will be deleted on the next iteration.
    MI->setLength(Constant::getNullValue(LenC->getType()));
    return MI;
  }

  return nullptr;
}

static Value *SimplifyX86immshift(const IntrinsicInst &II,
                                  InstCombiner::BuilderTy &Builder) {
  bool LogicalShift = false;
  bool ShiftLeft = false;

  switch (II.getIntrinsicID()) {
  default:
    return nullptr;
  case Intrinsic::x86_sse2_psra_d:
  case Intrinsic::x86_sse2_psra_w:
  case Intrinsic::x86_sse2_psrai_d:
  case Intrinsic::x86_sse2_psrai_w:
  case Intrinsic::x86_avx2_psra_d:
  case Intrinsic::x86_avx2_psra_w:
  case Intrinsic::x86_avx2_psrai_d:
  case Intrinsic::x86_avx2_psrai_w:
    LogicalShift = false; ShiftLeft = false;
    break;
  case Intrinsic::x86_sse2_psrl_d:
  case Intrinsic::x86_sse2_psrl_q:
  case Intrinsic::x86_sse2_psrl_w:
  case Intrinsic::x86_sse2_psrli_d:
  case Intrinsic::x86_sse2_psrli_q:
  case Intrinsic::x86_sse2_psrli_w:
  case Intrinsic::x86_avx2_psrl_d:
  case Intrinsic::x86_avx2_psrl_q:
  case Intrinsic::x86_avx2_psrl_w:
  case Intrinsic::x86_avx2_psrli_d:
  case Intrinsic::x86_avx2_psrli_q:
  case Intrinsic::x86_avx2_psrli_w:
    LogicalShift = true; ShiftLeft = false;
    break;
  case Intrinsic::x86_sse2_psll_d:
  case Intrinsic::x86_sse2_psll_q:
  case Intrinsic::x86_sse2_psll_w:
  case Intrinsic::x86_sse2_pslli_d:
  case Intrinsic::x86_sse2_pslli_q:
  case Intrinsic::x86_sse2_pslli_w:
  case Intrinsic::x86_avx2_psll_d:
  case Intrinsic::x86_avx2_psll_q:
  case Intrinsic::x86_avx2_psll_w:
  case Intrinsic::x86_avx2_pslli_d:
  case Intrinsic::x86_avx2_pslli_q:
  case Intrinsic::x86_avx2_pslli_w:
    LogicalShift = true; ShiftLeft = true;
    break;
  }
  assert((LogicalShift || !ShiftLeft) && "Only logical shifts can shift left");

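  // For example (illustrative IR, assuming a constant shift amount):
  //   %r = call <4 x i32> @llvm.x86.sse2.psrai.d(<4 x i32> %v, i32 3)
  // becomes:
  //   %r = ashr <4 x i32> %v, <i32 3, i32 3, i32 3, i32 3>
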
  // Simplify if count is constant.
  auto Arg1 = II.getArgOperand(1);
  auto CAZ = dyn_cast<ConstantAggregateZero>(Arg1);
  auto CDV = dyn_cast<ConstantDataVector>(Arg1);
  auto CInt = dyn_cast<ConstantInt>(Arg1);
  if (!CAZ && !CDV && !CInt)
    return nullptr;

  APInt Count(64, 0);
  if (CDV) {
    // SSE2/AVX2 uses all the first 64-bits of the 128-bit vector
    // operand to compute the shift amount.
    auto VT = cast<VectorType>(CDV->getType());
    unsigned BitWidth = VT->getElementType()->getPrimitiveSizeInBits();
    assert((64 % BitWidth) == 0 && "Unexpected packed shift size");
    unsigned NumSubElts = 64 / BitWidth;

    // Concatenate the sub-elements to create the 64-bit value.
    for (unsigned i = 0; i != NumSubElts; ++i) {
      unsigned SubEltIdx = (NumSubElts - 1) - i;
      auto SubElt = cast<ConstantInt>(CDV->getElementAsConstant(SubEltIdx));
      Count = Count.shl(BitWidth);
      Count |= SubElt->getValue().zextOrTrunc(64);
    }
  }
  else if (CInt)
    Count = CInt->getValue();

  auto Vec = II.getArgOperand(0);
  auto VT = cast<VectorType>(Vec->getType());
  auto SVT = VT->getElementType();
  unsigned VWidth = VT->getNumElements();
  unsigned BitWidth = SVT->getPrimitiveSizeInBits();

  // If shift-by-zero then just return the original value.
  if (Count == 0)
    return Vec;

  // Handle cases when Shift >= BitWidth.
  if (Count.uge(BitWidth)) {
    // If LogicalShift - just return zero.
    if (LogicalShift)
      return ConstantAggregateZero::get(VT);

    // If ArithmeticShift - clamp Shift to (BitWidth - 1).
    Count = APInt(64, BitWidth - 1);
  }

  // Get a constant vector of the same type as the first operand.
  auto ShiftAmt = ConstantInt::get(SVT, Count.zextOrTrunc(BitWidth));
  auto ShiftVec = Builder.CreateVectorSplat(VWidth, ShiftAmt);

  if (ShiftLeft)
    return Builder.CreateShl(Vec, ShiftVec);

  if (LogicalShift)
    return Builder.CreateLShr(Vec, ShiftVec);

  return Builder.CreateAShr(Vec, ShiftVec);
}

static Value *SimplifyX86extend(const IntrinsicInst &II,
                                InstCombiner::BuilderTy &Builder,
                                bool SignExtend) {
  VectorType *SrcTy = cast<VectorType>(II.getArgOperand(0)->getType());
  VectorType *DstTy = cast<VectorType>(II.getType());
  unsigned NumDstElts = DstTy->getNumElements();

  // Extract a subvector of the first NumDstElts lanes and sign/zero extend.
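  //
  // For example (illustrative IR):
  //   %r = call <4 x i32> @llvm.x86.sse41.pmovzxbd(<16 x i8> %v)
  // becomes:
  //   %s = shufflevector <16 x i8> %v, <16 x i8> undef,
  //                      <4 x i32> <i32 0, i32 1, i32 2, i32 3>
  //   %r = zext <4 x i8> %s to <4 x i32>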
  SmallVector<int, 8> ShuffleMask;
  for (int i = 0; i != (int)NumDstElts; ++i)
    ShuffleMask.push_back(i);

  Value *SV = Builder.CreateShuffleVector(II.getArgOperand(0),
                                          UndefValue::get(SrcTy), ShuffleMask);
  return SignExtend ? Builder.CreateSExt(SV, DstTy)
                    : Builder.CreateZExt(SV, DstTy);
}

static Value *SimplifyX86insertps(const IntrinsicInst &II,
                                  InstCombiner::BuilderTy &Builder) {
  if (auto *CInt = dyn_cast<ConstantInt>(II.getArgOperand(2))) {
    VectorType *VecTy = cast<VectorType>(II.getType());
    assert(VecTy->getNumElements() == 4 && "insertps with wrong vector type");

    // The immediate permute control byte looks like this:
    //    [3:0] - zero mask for each 32-bit lane
    //    [5:4] - select one 32-bit destination lane
    //    [7:6] - select one 32-bit source lane
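    //
    // For example, an immediate of 0x40 (zero mask 0, destination lane 0,
    // source lane 1) copies lane 1 of the second operand into lane 0 of the
    // first, i.e. (illustrative IR):
    //   shufflevector <4 x float> %a, <4 x float> %b,
    //                 <4 x i32> <i32 5, i32 1, i32 2, i32 3>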

    uint8_t Imm = CInt->getZExtValue();
    uint8_t ZMask = Imm & 0xf;
    uint8_t DestLane = (Imm >> 4) & 0x3;
    uint8_t SourceLane = (Imm >> 6) & 0x3;

    ConstantAggregateZero *ZeroVector = ConstantAggregateZero::get(VecTy);

    // If all zero mask bits are set, this was just a weird way to
    // generate a zero vector.
    if (ZMask == 0xf)
      return ZeroVector;

    // Initialize by passing all of the first source bits through.
    int ShuffleMask[4] = { 0, 1, 2, 3 };

    // We may replace the second operand with the zero vector.
    Value *V1 = II.getArgOperand(1);

    if (ZMask) {
      // If the zero mask is being used with a single input or the zero mask
      // overrides the destination lane, this is a shuffle with the zero vector.
      if ((II.getArgOperand(0) == II.getArgOperand(1)) ||
          (ZMask & (1 << DestLane))) {
        V1 = ZeroVector;
        // We may still move 32-bits of the first source vector from one lane
        // to another.
        ShuffleMask[DestLane] = SourceLane;
        // The zero mask may override the previous insert operation.
        for (unsigned i = 0; i < 4; ++i)
          if ((ZMask >> i) & 0x1)
            ShuffleMask[i] = i + 4;
      } else {
        // TODO: Model this case as 2 shuffles or a 'logical and' plus shuffle?
        return nullptr;
      }
    } else {
      // Replace the selected destination lane with the selected source lane.
      ShuffleMask[DestLane] = SourceLane + 4;
    }

    return Builder.CreateShuffleVector(II.getArgOperand(0), V1, ShuffleMask);
  }
  return nullptr;
}

/// Attempt to simplify SSE4A EXTRQ/EXTRQI instructions using constant folding
/// or conversion to a shuffle vector.
static Value *SimplifyX86extrq(IntrinsicInst &II, Value *Op0,
                               ConstantInt *CILength, ConstantInt *CIIndex,
                               InstCombiner::BuilderTy &Builder) {
  auto LowConstantHighUndef = [&](uint64_t Val) {
    Type *IntTy64 = Type::getInt64Ty(II.getContext());
    Constant *Args[] = {ConstantInt::get(IntTy64, Val),
                        UndefValue::get(IntTy64)};
    return ConstantVector::get(Args);
  };

  // See if we're dealing with constant values.
  Constant *C0 = dyn_cast<Constant>(Op0);
  ConstantInt *CI0 =
      C0 ? dyn_cast<ConstantInt>(C0->getAggregateElement((unsigned)0))
         : nullptr;

  // Attempt to constant fold.
  if (CILength && CIIndex) {
    // From AMD documentation: "The bit index and field length are each six
    // bits in length; other bits of the field are ignored."
    APInt APIndex = CIIndex->getValue().zextOrTrunc(6);
    APInt APLength = CILength->getValue().zextOrTrunc(6);

    unsigned Index = APIndex.getZExtValue();

    // From AMD documentation: "a value of zero in the field length is
    // defined as length of 64".
    unsigned Length = APLength == 0 ? 64 : APLength.getZExtValue();

    // From AMD documentation: "If the sum of the bit index + length field
    // is greater than 64, the results are undefined".
    unsigned End = Index + Length;

    // Note that both field index and field length are 8-bit quantities.
    // Since variables 'Index' and 'Length' are unsigned values
    // obtained from zero-extending field index and field length
    // respectively, their sum should never wrap around.
    if (End > 64)
      return UndefValue::get(II.getType());

    // If we are extracting whole bytes, we can convert this to a shuffle.
    // Lowering can recognize EXTRQI shuffle masks.
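    //
    // For example, EXTRQI with Length = 16 and Index = 8 becomes a byte
    // shuffle that moves source bytes 1-2 into result bytes 0-1, zeroes
    // result bytes 2-7, and leaves the upper eight result bytes undefined.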
    if ((Length % 8) == 0 && (Index % 8) == 0) {
      // Convert bit indices to byte indices.
      Length /= 8;
      Index /= 8;

      Type *IntTy8 = Type::getInt8Ty(II.getContext());
      Type *IntTy32 = Type::getInt32Ty(II.getContext());
      VectorType *ShufTy = VectorType::get(IntTy8, 16);

      SmallVector<Constant *, 16> ShuffleMask;
      for (int i = 0; i != (int)Length; ++i)
        ShuffleMask.push_back(
            Constant::getIntegerValue(IntTy32, APInt(32, i + Index)));
      for (int i = Length; i != 8; ++i)
        ShuffleMask.push_back(
            Constant::getIntegerValue(IntTy32, APInt(32, i + 16)));
      for (int i = 8; i != 16; ++i)
        ShuffleMask.push_back(UndefValue::get(IntTy32));

      Value *SV = Builder.CreateShuffleVector(
          Builder.CreateBitCast(Op0, ShufTy),
          ConstantAggregateZero::get(ShufTy), ConstantVector::get(ShuffleMask));
      return Builder.CreateBitCast(SV, II.getType());
    }

    // Constant Fold - shift Index'th bit to lowest position and mask off
    // Length bits.
    if (CI0) {
      APInt Elt = CI0->getValue();
      Elt = Elt.lshr(Index).zextOrTrunc(Length);
      return LowConstantHighUndef(Elt.getZExtValue());
    }

    // If we were an EXTRQ call, we'll save registers if we convert to EXTRQI.
    if (II.getIntrinsicID() == Intrinsic::x86_sse4a_extrq) {
      Value *Args[] = {Op0, CILength, CIIndex};
      Module *M = II.getModule();
      Value *F = Intrinsic::getDeclaration(M, Intrinsic::x86_sse4a_extrqi);
      return Builder.CreateCall(F, Args);
    }
  }

  // Constant Fold - extraction from zero is always {zero, undef}.
  if (CI0 && CI0->equalsInt(0))
    return LowConstantHighUndef(0);

  return nullptr;
}

/// Attempt to simplify SSE4A INSERTQ/INSERTQI instructions using constant
/// folding or conversion to a shuffle vector.
static Value *SimplifyX86insertq(IntrinsicInst &II, Value *Op0, Value *Op1,
                                 APInt APLength, APInt APIndex,
                                 InstCombiner::BuilderTy &Builder) {

  // From AMD documentation: "The bit index and field length are each six bits
  // in length; other bits of the field are ignored."
  APIndex = APIndex.zextOrTrunc(6);
  APLength = APLength.zextOrTrunc(6);

  // Attempt to constant fold.
  unsigned Index = APIndex.getZExtValue();

  // From AMD documentation: "a value of zero in the field length is
  // defined as length of 64".
  unsigned Length = APLength == 0 ? 64 : APLength.getZExtValue();

  // From AMD documentation: "If the sum of the bit index + length field
  // is greater than 64, the results are undefined".
  unsigned End = Index + Length;

  // Note that both field index and field length are 8-bit quantities.
  // Since variables 'Index' and 'Length' are unsigned values
  // obtained from zero-extending field index and field length
  // respectively, their sum should never wrap around.
  if (End > 64)
    return UndefValue::get(II.getType());

  // If we are inserting whole bytes, we can convert this to a shuffle.
  // Lowering can recognize INSERTQI shuffle masks.
  if ((Length % 8) == 0 && (Index % 8) == 0) {
    // Convert bit indices to byte indices.
    Length /= 8;
    Index /= 8;

    Type *IntTy8 = Type::getInt8Ty(II.getContext());
    Type *IntTy32 = Type::getInt32Ty(II.getContext());
    VectorType *ShufTy = VectorType::get(IntTy8, 16);

    SmallVector<Constant *, 16> ShuffleMask;
    for (int i = 0; i != (int)Index; ++i)
      ShuffleMask.push_back(Constant::getIntegerValue(IntTy32, APInt(32, i)));
    for (int i = 0; i != (int)Length; ++i)
      ShuffleMask.push_back(
          Constant::getIntegerValue(IntTy32, APInt(32, i + 16)));
    for (int i = Index + Length; i != 8; ++i)
      ShuffleMask.push_back(Constant::getIntegerValue(IntTy32, APInt(32, i)));
    for (int i = 8; i != 16; ++i)
      ShuffleMask.push_back(UndefValue::get(IntTy32));

    Value *SV = Builder.CreateShuffleVector(Builder.CreateBitCast(Op0, ShufTy),
                                            Builder.CreateBitCast(Op1, ShufTy),
                                            ConstantVector::get(ShuffleMask));
    return Builder.CreateBitCast(SV, II.getType());
  }

  // See if we're dealing with constant values.
  Constant *C0 = dyn_cast<Constant>(Op0);
  Constant *C1 = dyn_cast<Constant>(Op1);
  ConstantInt *CI00 =
      C0 ? dyn_cast<ConstantInt>(C0->getAggregateElement((unsigned)0))
         : nullptr;
  ConstantInt *CI10 =
      C1 ? dyn_cast<ConstantInt>(C1->getAggregateElement((unsigned)0))
         : nullptr;

  // Constant Fold - insert bottom Length bits starting at the Index'th bit.
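  // For example, with Length = 16 and Index = 8 the mask below is
  // 0x0000000000FFFF00: bits [23:8] of the first operand are replaced by
  // bits [15:0] of the second operand.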
  if (CI00 && CI10) {
    APInt V00 = CI00->getValue();
    APInt V10 = CI10->getValue();
    APInt Mask = APInt::getLowBitsSet(64, Length).shl(Index);
    V00 = V00 & ~Mask;
    V10 = V10.zextOrTrunc(Length).zextOrTrunc(64).shl(Index);
    APInt Val = V00 | V10;
    Type *IntTy64 = Type::getInt64Ty(II.getContext());
    Constant *Args[] = {ConstantInt::get(IntTy64, Val.getZExtValue()),
                        UndefValue::get(IntTy64)};
    return ConstantVector::get(Args);
  }

  // If we were an INSERTQ call, we'll save demanded elements if we convert to
  // INSERTQI.
  if (II.getIntrinsicID() == Intrinsic::x86_sse4a_insertq) {
    Type *IntTy8 = Type::getInt8Ty(II.getContext());
    Constant *CILength = ConstantInt::get(IntTy8, Length, false);
    Constant *CIIndex = ConstantInt::get(IntTy8, Index, false);

    Value *Args[] = {Op0, Op1, CILength, CIIndex};
    Module *M = II.getModule();
    Value *F = Intrinsic::getDeclaration(M, Intrinsic::x86_sse4a_insertqi);
    return Builder.CreateCall(F, Args);
  }

  return nullptr;
}

/// The shuffle mask for a perm2*128 selects any two halves of two 256-bit
/// source vectors, unless a zero bit is set. If a zero bit is set,
/// then ignore that half of the mask and clear that half of the vector.
static Value *SimplifyX86vperm2(const IntrinsicInst &II,
                                InstCombiner::BuilderTy &Builder) {
  if (auto *CInt = dyn_cast<ConstantInt>(II.getArgOperand(2))) {
    VectorType *VecTy = cast<VectorType>(II.getType());
    ConstantAggregateZero *ZeroVector = ConstantAggregateZero::get(VecTy);

    // The immediate permute control byte looks like this:
    //    [1:0] - select 128 bits from sources for low half of destination
    //    [2]   - ignore
    //    [3]   - zero low half of destination
    //    [5:4] - select 128 bits from sources for high half of destination
    //    [6]   - ignore
    //    [7]   - zero high half of destination
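    //
    // For example, on <4 x double> an immediate of 0x31 selects the high
    // half of each source, i.e. (illustrative IR):
    //   shufflevector <4 x double> %a, <4 x double> %b,
    //                 <4 x i32> <i32 2, i32 3, i32 6, i32 7>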

    uint8_t Imm = CInt->getZExtValue();

    bool LowHalfZero = Imm & 0x08;
    bool HighHalfZero = Imm & 0x80;

    // If both zero mask bits are set, this was just a weird way to
    // generate a zero vector.
    if (LowHalfZero && HighHalfZero)
      return ZeroVector;

    // If 0 or 1 zero mask bits are set, this is a simple shuffle.
    unsigned NumElts = VecTy->getNumElements();
    unsigned HalfSize = NumElts / 2;
    SmallVector<int, 8> ShuffleMask(NumElts);

    // The high bit of the selection field chooses the 1st or 2nd operand.
    bool LowInputSelect = Imm & 0x02;
    bool HighInputSelect = Imm & 0x20;

    // The low bit of the selection field chooses the low or high half
    // of the selected operand.
    bool LowHalfSelect = Imm & 0x01;
    bool HighHalfSelect = Imm & 0x10;

    // Determine which operand(s) are actually in use for this instruction.
    Value *V0 = LowInputSelect ? II.getArgOperand(1) : II.getArgOperand(0);
    Value *V1 = HighInputSelect ? II.getArgOperand(1) : II.getArgOperand(0);

    // If needed, replace operands based on zero mask.
    V0 = LowHalfZero ? ZeroVector : V0;
    V1 = HighHalfZero ? ZeroVector : V1;

    // Permute low half of result.
    unsigned StartIndex = LowHalfSelect ? HalfSize : 0;
    for (unsigned i = 0; i < HalfSize; ++i)
      ShuffleMask[i] = StartIndex + i;

    // Permute high half of result.
    StartIndex = HighHalfSelect ? HalfSize : 0;
    StartIndex += NumElts;
    for (unsigned i = 0; i < HalfSize; ++i)
      ShuffleMask[i + HalfSize] = StartIndex + i;

    return Builder.CreateShuffleVector(V0, V1, ShuffleMask);
  }
  return nullptr;
}

/// Decode XOP integer vector comparison intrinsics.
static Value *SimplifyX86vpcom(const IntrinsicInst &II,
                               InstCombiner::BuilderTy &Builder,
                               bool IsSigned) {
  if (auto *CInt = dyn_cast<ConstantInt>(II.getArgOperand(2))) {
    uint64_t Imm = CInt->getZExtValue() & 0x7;
    VectorType *VecTy = cast<VectorType>(II.getType());
    CmpInst::Predicate Pred = ICmpInst::BAD_ICMP_PREDICATE;

    switch (Imm) {
    case 0x0:
      Pred = IsSigned ? ICmpInst::ICMP_SLT : ICmpInst::ICMP_ULT;
      break;
    case 0x1:
      Pred = IsSigned ? ICmpInst::ICMP_SLE : ICmpInst::ICMP_ULE;
      break;
    case 0x2:
      Pred = IsSigned ? ICmpInst::ICMP_SGT : ICmpInst::ICMP_UGT;
      break;
    case 0x3:
      Pred = IsSigned ? ICmpInst::ICMP_SGE : ICmpInst::ICMP_UGE;
      break;
    case 0x4:
      Pred = ICmpInst::ICMP_EQ; break;
    case 0x5:
      Pred = ICmpInst::ICMP_NE; break;
    case 0x6:
      return ConstantInt::getSigned(VecTy, 0); // FALSE
    case 0x7:
      return ConstantInt::getSigned(VecTy, -1); // TRUE
    }
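
    // For example (illustrative IR): vpcomb with immediate 0x2 becomes
    //   %c = icmp sgt <16 x i8> %a, %b
    //   %r = sext <16 x i1> %c to <16 x i8>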
    if (Value *Cmp = Builder.CreateICmp(Pred, II.getArgOperand(0),
                                        II.getArgOperand(1)))
      return Builder.CreateSExtOrTrunc(Cmp, VecTy);
  }
  return nullptr;
}

/// visitCallInst - CallInst simplification.  This mostly only handles folding
/// of intrinsic instructions.  For normal calls, it allows visitCallSite to do
/// the heavy lifting.
///
Instruction *InstCombiner::visitCallInst(CallInst &CI) {
  auto Args = CI.arg_operands();
  if (Value *V = SimplifyCall(CI.getCalledValue(), Args.begin(), Args.end(), DL,
                              TLI, DT, AC))
    return ReplaceInstUsesWith(CI, V);

  if (isFreeCall(&CI, TLI))
    return visitFree(CI);

  // If the caller function is nounwind, mark the call as nounwind, even if the
  // callee isn't.
  if (CI.getParent()->getParent()->doesNotThrow() &&
      !CI.doesNotThrow()) {
    CI.setDoesNotThrow();
    return &CI;
  }

  IntrinsicInst *II = dyn_cast<IntrinsicInst>(&CI);
  if (!II) return visitCallSite(&CI);

  // Intrinsics cannot occur in an invoke, so handle them here instead of in
  // visitCallSite.
  if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(II)) {
    bool Changed = false;

    // memmove/cpy/set of zero bytes is a noop.
    if (Constant *NumBytes = dyn_cast<Constant>(MI->getLength())) {
      if (NumBytes->isNullValue())
        return EraseInstFromFunction(CI);

      if (ConstantInt *CI = dyn_cast<ConstantInt>(NumBytes))
        if (CI->getZExtValue() == 1) {
          // Replace the instruction with just byte operations.  We would
          // transform other cases to loads/stores, but we don't know if
          // alignment is sufficient.
        }
    }

    // No other transformations apply to volatile transfers.
    if (MI->isVolatile())
      return nullptr;

    // If we have a memmove and the source operation is a constant global,
    // then the source and dest pointers can't alias, so we can change this
    // into a call to memcpy.
    if (MemMoveInst *MMI = dyn_cast<MemMoveInst>(MI)) {
      if (GlobalVariable *GVSrc = dyn_cast<GlobalVariable>(MMI->getSource()))
        if (GVSrc->isConstant()) {
          Module *M = CI.getModule();
          Intrinsic::ID MemCpyID = Intrinsic::memcpy;
          Type *Tys[3] = { CI.getArgOperand(0)->getType(),
                           CI.getArgOperand(1)->getType(),
                           CI.getArgOperand(2)->getType() };
          CI.setCalledFunction(Intrinsic::getDeclaration(M, MemCpyID, Tys));
          Changed = true;
        }
    }

    if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(MI)) {
      // memmove(x,x,size) -> noop.
      if (MTI->getSource() == MTI->getDest())
        return EraseInstFromFunction(CI);
    }

    // If we can determine a pointer alignment that is bigger than currently
    // set, update the alignment.
    if (isa<MemTransferInst>(MI)) {
      if (Instruction *I = SimplifyMemTransfer(MI))
        return I;
    } else if (MemSetInst *MSI = dyn_cast<MemSetInst>(MI)) {
      if (Instruction *I = SimplifyMemSet(MSI))
        return I;
    }

    if (Changed) return II;
  }

  auto SimplifyDemandedVectorEltsLow = [this](Value *Op, unsigned Width,
                                              unsigned DemandedWidth) {
    APInt UndefElts(Width, 0);
    APInt DemandedElts = APInt::getLowBitsSet(Width, DemandedWidth);
    return SimplifyDemandedVectorElts(Op, DemandedElts, UndefElts);
  };

  switch (II->getIntrinsicID()) {
  default: break;
  case Intrinsic::objectsize: {
    uint64_t Size;
    if (getObjectSize(II->getArgOperand(0), Size, DL, TLI))
      return ReplaceInstUsesWith(CI, ConstantInt::get(CI.getType(), Size));
    return nullptr;
  }
  case Intrinsic::bswap: {
    Value *IIOperand = II->getArgOperand(0);
    Value *X = nullptr;

    // bswap(bswap(x)) -> x
    if (match(IIOperand, m_BSwap(m_Value(X))))
      return ReplaceInstUsesWith(CI, X);

    // bswap(trunc(bswap(x))) -> trunc(lshr(x, c))
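    // For example, with i32 %x truncated to i16, c is 32 - 16 = 16, so the
    // fold yields trunc(lshr(i32 %x, 16)) to i16.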
    if (match(IIOperand, m_Trunc(m_BSwap(m_Value(X))))) {
      unsigned C = X->getType()->getPrimitiveSizeInBits() -
        IIOperand->getType()->getPrimitiveSizeInBits();
      Value *CV = ConstantInt::get(X->getType(), C);
      Value *V = Builder->CreateLShr(X, CV);
      return new TruncInst(V, IIOperand->getType());
    }
    break;
  }

  case Intrinsic::bitreverse: {
    Value *IIOperand = II->getArgOperand(0);
    Value *X = nullptr;

    // bitreverse(bitreverse(x)) -> x
    if (match(IIOperand, m_Intrinsic<Intrinsic::bitreverse>(m_Value(X))))
      return ReplaceInstUsesWith(CI, X);
    break;
  }

  case Intrinsic::powi:
    if (ConstantInt *Power = dyn_cast<ConstantInt>(II->getArgOperand(1))) {
      // powi(x, 0) -> 1.0
      if (Power->isZero())
        return ReplaceInstUsesWith(CI, ConstantFP::get(CI.getType(), 1.0));
      // powi(x, 1) -> x
      if (Power->isOne())
        return ReplaceInstUsesWith(CI, II->getArgOperand(0));
      // powi(x, -1) -> 1/x
      if (Power->isAllOnesValue())
        return BinaryOperator::CreateFDiv(ConstantFP::get(CI.getType(), 1.0),
                                          II->getArgOperand(0));
    }
    break;
  case Intrinsic::cttz: {
    // If all bits below the first known one are known zero,
    // this value is constant.
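    // For example, if known-bits analysis shows the operand always has the
    // form 0b???01000 (bit 3 known one, bits 0-2 known zero), cttz folds to
    // the constant 3.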
    IntegerType *IT = dyn_cast<IntegerType>(II->getArgOperand(0)->getType());
    // FIXME: Try to simplify vectors of integers.
    if (!IT) break;
    uint32_t BitWidth = IT->getBitWidth();
    APInt KnownZero(BitWidth, 0);
    APInt KnownOne(BitWidth, 0);
    computeKnownBits(II->getArgOperand(0), KnownZero, KnownOne, 0, II);
    unsigned TrailingZeros = KnownOne.countTrailingZeros();
    APInt Mask(APInt::getLowBitsSet(BitWidth, TrailingZeros));
    if ((Mask & KnownZero) == Mask)
      return ReplaceInstUsesWith(CI, ConstantInt::get(IT,
                                 APInt(BitWidth, TrailingZeros)));

    }
    break;
  case Intrinsic::ctlz: {
    // If all bits above the first known one are known zero,
    // this value is constant.
    IntegerType *IT = dyn_cast<IntegerType>(II->getArgOperand(0)->getType());
    // FIXME: Try to simplify vectors of integers.
    if (!IT) break;
    uint32_t BitWidth = IT->getBitWidth();
    APInt KnownZero(BitWidth, 0);
    APInt KnownOne(BitWidth, 0);
    computeKnownBits(II->getArgOperand(0), KnownZero, KnownOne, 0, II);
    unsigned LeadingZeros = KnownOne.countLeadingZeros();
    APInt Mask(APInt::getHighBitsSet(BitWidth, LeadingZeros));
    if ((Mask & KnownZero) == Mask)
      return ReplaceInstUsesWith(CI, ConstantInt::get(IT,
                                 APInt(BitWidth, LeadingZeros)));

    }
    break;

  case Intrinsic::uadd_with_overflow:
  case Intrinsic::sadd_with_overflow:
  case Intrinsic::umul_with_overflow:
  case Intrinsic::smul_with_overflow:
    if (isa<Constant>(II->getArgOperand(0)) &&
        !isa<Constant>(II->getArgOperand(1))) {
      // Canonicalize constants into the RHS.
      Value *LHS = II->getArgOperand(0);
      II->setArgOperand(0, II->getArgOperand(1));
      II->setArgOperand(1, LHS);
      return II;
    }
    // fall through

  case Intrinsic::usub_with_overflow:
  case Intrinsic::ssub_with_overflow: {
    OverflowCheckFlavor OCF =
        IntrinsicIDToOverflowCheckFlavor(II->getIntrinsicID());
    assert(OCF != OCF_INVALID && "unexpected!");

    Value *OperationResult = nullptr;
    Constant *OverflowResult = nullptr;
    if (OptimizeOverflowCheck(OCF, II->getArgOperand(0), II->getArgOperand(1),
                              *II, OperationResult, OverflowResult))
      return CreateOverflowTuple(II, OperationResult, OverflowResult);

    break;
  }

  case Intrinsic::minnum:
  case Intrinsic::maxnum: {
    Value *Arg0 = II->getArgOperand(0);
    Value *Arg1 = II->getArgOperand(1);

    // fmin(x, x) -> x
    if (Arg0 == Arg1)
      return ReplaceInstUsesWith(CI, Arg0);

    const ConstantFP *C0 = dyn_cast<ConstantFP>(Arg0);
    const ConstantFP *C1 = dyn_cast<ConstantFP>(Arg1);

    // Canonicalize constants into the RHS.
    if (C0 && !C1) {
      II->setArgOperand(0, Arg1);
      II->setArgOperand(1, Arg0);
      return II;
    }

    // fmin(x, nan) -> x
    if (C1 && C1->isNaN())
      return ReplaceInstUsesWith(CI, Arg0);

    // Folding undef to the other operand is safe: if undef were NaN, we
    // would return the other value anyway, and the result can only be NaN
    // if both operands are.
    //
    // fmin(undef, x) -> x
    if (isa<UndefValue>(Arg0))
      return ReplaceInstUsesWith(CI, Arg1);

    // fmin(x, undef) -> x
    if (isa<UndefValue>(Arg1))
      return ReplaceInstUsesWith(CI, Arg0);

    Value *X = nullptr;
    Value *Y = nullptr;
    if (II->getIntrinsicID() == Intrinsic::minnum) {
      // fmin(x, fmin(x, y)) -> fmin(x, y)
      // fmin(y, fmin(x, y)) -> fmin(x, y)
      if (match(Arg1, m_FMin(m_Value(X), m_Value(Y)))) {
        if (Arg0 == X || Arg0 == Y)
          return ReplaceInstUsesWith(CI, Arg1);
      }

      // fmin(fmin(x, y), x) -> fmin(x, y)
      // fmin(fmin(x, y), y) -> fmin(x, y)
      if (match(Arg0, m_FMin(m_Value(X), m_Value(Y)))) {
        if (Arg1 == X || Arg1 == Y)
          return ReplaceInstUsesWith(CI, Arg0);
      }

      // TODO: fmin(nnan x, inf) -> x
      // TODO: fmin(nnan ninf x, flt_max) -> x
      if (C1 && C1->isInfinity()) {
        // fmin(x, -inf) -> -inf
        if (C1->isNegative())
          return ReplaceInstUsesWith(CI, Arg1);
      }
    } else {
      assert(II->getIntrinsicID() == Intrinsic::maxnum);
      // fmax(x, fmax(x, y)) -> fmax(x, y)
      // fmax(y, fmax(x, y)) -> fmax(x, y)
      if (match(Arg1, m_FMax(m_Value(X), m_Value(Y)))) {
        if (Arg0 == X || Arg0 == Y)
          return ReplaceInstUsesWith(CI, Arg1);
      }

      // fmax(fmax(x, y), x) -> fmax(x, y)
      // fmax(fmax(x, y), y) -> fmax(x, y)
      if (match(Arg0, m_FMax(m_Value(X), m_Value(Y)))) {
        if (Arg1 == X || Arg1 == Y)
          return ReplaceInstUsesWith(CI, Arg0);
      }

      // TODO: fmax(nnan x, -inf) -> x
      // TODO: fmax(nnan ninf x, -flt_max) -> x
      if (C1 && C1->isInfinity()) {
        // fmax(x, inf) -> inf
        if (!C1->isNegative())
          return ReplaceInstUsesWith(CI, Arg1);
      }
    }
    break;
  }
  case Intrinsic::ppc_altivec_lvx:
  case Intrinsic::ppc_altivec_lvxl:
    // Turn PPC lvx -> load if the pointer is known aligned.
    if (getOrEnforceKnownAlignment(II->getArgOperand(0), 16, DL, II, AC, DT) >=
        16) {
      Value *Ptr = Builder->CreateBitCast(II->getArgOperand(0),
                                          PointerType::getUnqual(II->getType()));
      return new LoadInst(Ptr);
    }
    break;
  case Intrinsic::ppc_vsx_lxvw4x:
  case Intrinsic::ppc_vsx_lxvd2x: {
    // Turn PPC VSX loads into normal loads.
    Value *Ptr = Builder->CreateBitCast(II->getArgOperand(0),
                                        PointerType::getUnqual(II->getType()));
    return new LoadInst(Ptr, Twine(""), false, 1);
  }
  case Intrinsic::ppc_altivec_stvx:
  case Intrinsic::ppc_altivec_stvxl:
    // Turn stvx -> store if the pointer is known aligned.
    if (getOrEnforceKnownAlignment(II->getArgOperand(1), 16, DL, II, AC, DT) >=
        16) {
      Type *OpPtrTy =
        PointerType::getUnqual(II->getArgOperand(0)->getType());
      Value *Ptr = Builder->CreateBitCast(II->getArgOperand(1), OpPtrTy);
      return new StoreInst(II->getArgOperand(0), Ptr);
    }
    break;
  case Intrinsic::ppc_vsx_stxvw4x:
  case Intrinsic::ppc_vsx_stxvd2x: {
    // Turn PPC VSX stores into normal stores.
    Type *OpPtrTy = PointerType::getUnqual(II->getArgOperand(0)->getType());
    Value *Ptr = Builder->CreateBitCast(II->getArgOperand(1), OpPtrTy);
    return new StoreInst(II->getArgOperand(0), Ptr, false, 1);
  }
  case Intrinsic::ppc_qpx_qvlfs:
    // Turn PPC QPX qvlfs -> load if the pointer is known aligned.
    if (getOrEnforceKnownAlignment(II->getArgOperand(0), 16, DL, II, AC, DT) >=
        16) {
      Type *VTy = VectorType::get(Builder->getFloatTy(),
                                  II->getType()->getVectorNumElements());
      Value *Ptr = Builder->CreateBitCast(II->getArgOperand(0),
                                          PointerType::getUnqual(VTy));
      Value *Load = Builder->CreateLoad(Ptr);
      return new FPExtInst(Load, II->getType());
    }
    break;
  case Intrinsic::ppc_qpx_qvlfd:
    // Turn PPC QPX qvlfd -> load if the pointer is known aligned.
    if (getOrEnforceKnownAlignment(II->getArgOperand(0), 32, DL, II, AC, DT) >=
        32) {
      Value *Ptr = Builder->CreateBitCast(II->getArgOperand(0),
                                          PointerType::getUnqual(II->getType()));
      return new LoadInst(Ptr);
    }
    break;
  case Intrinsic::ppc_qpx_qvstfs:
    // Turn PPC QPX qvstfs -> store if the pointer is known aligned.
    if (getOrEnforceKnownAlignment(II->getArgOperand(1), 16, DL, II, AC, DT) >=
        16) {
      Type *VTy = VectorType::get(Builder->getFloatTy(),
          II->getArgOperand(0)->getType()->getVectorNumElements());
      Value *TOp = Builder->CreateFPTrunc(II->getArgOperand(0), VTy);
      Type *OpPtrTy = PointerType::getUnqual(VTy);
      Value *Ptr = Builder->CreateBitCast(II->getArgOperand(1), OpPtrTy);
      return new StoreInst(TOp, Ptr);
    }
    break;
  case Intrinsic::ppc_qpx_qvstfd:
    // Turn PPC QPX qvstfd -> store if the pointer is known aligned.
    if (getOrEnforceKnownAlignment(II->getArgOperand(1), 32, DL, II, AC, DT) >=
        32) {
      Type *OpPtrTy =
        PointerType::getUnqual(II->getArgOperand(0)->getType());
      Value *Ptr = Builder->CreateBitCast(II->getArgOperand(1), OpPtrTy);
      return new StoreInst(II->getArgOperand(0), Ptr);
    }
    break;

  case Intrinsic::x86_sse_storeu_ps:
  case Intrinsic::x86_sse2_storeu_pd:
  case Intrinsic::x86_sse2_storeu_dq:
    // Turn X86 storeu -> store if the pointer is known aligned.
    if (getOrEnforceKnownAlignment(II->getArgOperand(0), 16, DL, II, AC, DT) >=
        16) {
      Type *OpPtrTy =
        PointerType::getUnqual(II->getArgOperand(1)->getType());
      Value *Ptr = Builder->CreateBitCast(II->getArgOperand(0), OpPtrTy);
      return new StoreInst(II->getArgOperand(1), Ptr);
    }
    break;

  case Intrinsic::x86_vcvtph2ps_128:
  case Intrinsic::x86_vcvtph2ps_256: {
    auto Arg = II->getArgOperand(0);
    auto ArgType = cast<VectorType>(Arg->getType());
    auto RetType = cast<VectorType>(II->getType());
    unsigned ArgWidth = ArgType->getNumElements();
    unsigned RetWidth = RetType->getNumElements();
    assert(RetWidth <= ArgWidth && "Unexpected input/return vector widths");
    assert(ArgType->isIntOrIntVectorTy() &&
           ArgType->getScalarSizeInBits() == 16 &&
           "CVTPH2PS input type should be 16-bit integer vector");
    assert(RetType->getScalarType()->isFloatTy() &&
           "CVTPH2PS output type should be 32-bit float vector");

    // Constant folding: Convert to generic half to single conversion.
    if (isa<ConstantAggregateZero>(Arg))
      return ReplaceInstUsesWith(*II, ConstantAggregateZero::get(RetType));

    if (isa<ConstantDataVector>(Arg)) {
      auto VectorHalfAsShorts = Arg;
      if (RetWidth < ArgWidth) {
        SmallVector<int, 8> SubVecMask;
        for (unsigned i = 0; i != RetWidth; ++i)
          SubVecMask.push_back((int)i);
        VectorHalfAsShorts = Builder->CreateShuffleVector(
            Arg, UndefValue::get(ArgType), SubVecMask);
      }

      auto VectorHalfType =
          VectorType::get(Type::getHalfTy(II->getContext()), RetWidth);
      auto VectorHalfs =
          Builder->CreateBitCast(VectorHalfAsShorts, VectorHalfType);
      auto VectorFloats = Builder->CreateFPExt(VectorHalfs, RetType);
      return ReplaceInstUsesWith(*II, VectorFloats);
    }

    // We only use the lowest lanes of the argument.
    if (Value *V = SimplifyDemandedVectorEltsLow(Arg, ArgWidth, RetWidth)) {
      II->setArgOperand(0, V);
      return II;
    }
    break;
  }

  case Intrinsic::x86_sse_cvtss2si:
  case Intrinsic::x86_sse_cvtss2si64:
  case Intrinsic::x86_sse_cvttss2si:
  case Intrinsic::x86_sse_cvttss2si64:
  case Intrinsic::x86_sse2_cvtsd2si:
  case Intrinsic::x86_sse2_cvtsd2si64:
  case Intrinsic::x86_sse2_cvttsd2si:
  case Intrinsic::x86_sse2_cvttsd2si64: {
    // These intrinsics only demand the 0th element of their input vectors. If
    // we can simplify the input based on that, do so now.
    Value *Arg = II->getArgOperand(0);
    unsigned VWidth = Arg->getType()->getVectorNumElements();
    if (Value *V = SimplifyDemandedVectorEltsLow(Arg, VWidth, 1)) {
      II->setArgOperand(0, V);
      return II;
    }
    break;
  }

  // Constant fold ashr( <A x Bi>, Ci ).
  // Constant fold lshr( <A x Bi>, Ci ).
  // Constant fold shl( <A x Bi>, Ci ).
  case Intrinsic::x86_sse2_psrai_d:
  case Intrinsic::x86_sse2_psrai_w:
  case Intrinsic::x86_avx2_psrai_d:
  case Intrinsic::x86_avx2_psrai_w:
  case Intrinsic::x86_sse2_psrli_d:
  case Intrinsic::x86_sse2_psrli_q:
  case Intrinsic::x86_sse2_psrli_w:
  case Intrinsic::x86_avx2_psrli_d:
  case Intrinsic::x86_avx2_psrli_q:
  case Intrinsic::x86_avx2_psrli_w:
  case Intrinsic::x86_sse2_pslli_d:
  case Intrinsic::x86_sse2_pslli_q:
  case Intrinsic::x86_sse2_pslli_w:
  case Intrinsic::x86_avx2_pslli_d:
  case Intrinsic::x86_avx2_pslli_q:
  case Intrinsic::x86_avx2_pslli_w:
    if (Value *V = SimplifyX86immshift(*II, *Builder))
      return ReplaceInstUsesWith(*II, V);
    break;

  case Intrinsic::x86_sse2_psra_d:
  case Intrinsic::x86_sse2_psra_w:
  case Intrinsic::x86_avx2_psra_d:
  case Intrinsic::x86_avx2_psra_w:
  case Intrinsic::x86_sse2_psrl_d:
  case Intrinsic::x86_sse2_psrl_q:
  case Intrinsic::x86_sse2_psrl_w:
  case Intrinsic::x86_avx2_psrl_d:
  case Intrinsic::x86_avx2_psrl_q:
  case Intrinsic::x86_avx2_psrl_w:
  case Intrinsic::x86_sse2_psll_d:
  case Intrinsic::x86_sse2_psll_q:
  case Intrinsic::x86_sse2_psll_w:
  case Intrinsic::x86_avx2_psll_d:
  case Intrinsic::x86_avx2_psll_q:
  case Intrinsic::x86_avx2_psll_w: {
    if (Value *V = SimplifyX86immshift(*II, *Builder))
      return ReplaceInstUsesWith(*II, V);

    // SSE2/AVX2 uses only the first 64-bits of the 128-bit vector
    // operand to compute the shift amount.
    Value *Arg1 = II->getArgOperand(1);
    assert(Arg1->getType()->getPrimitiveSizeInBits() == 128 &&
           "Unexpected packed shift size");
    unsigned VWidth = Arg1->getType()->getVectorNumElements();

    if (Value *V = SimplifyDemandedVectorEltsLow(Arg1, VWidth, VWidth / 2)) {
      II->setArgOperand(1, V);
      return II;
    }
    break;
  }

  case Intrinsic::x86_avx2_pmovsxbd:
  case Intrinsic::x86_avx2_pmovsxbq:
  case Intrinsic::x86_avx2_pmovsxbw:
  case Intrinsic::x86_avx2_pmovsxdq:
  case Intrinsic::x86_avx2_pmovsxwd:
  case Intrinsic::x86_avx2_pmovsxwq:
    if (Value *V = SimplifyX86extend(*II, *Builder, true))
      return ReplaceInstUsesWith(*II, V);
    break;

  case Intrinsic::x86_sse41_pmovzxbd:
  case Intrinsic::x86_sse41_pmovzxbq:
  case Intrinsic::x86_sse41_pmovzxbw:
  case Intrinsic::x86_sse41_pmovzxdq:
  case Intrinsic::x86_sse41_pmovzxwd:
  case Intrinsic::x86_sse41_pmovzxwq:
  case Intrinsic::x86_avx2_pmovzxbd:
  case Intrinsic::x86_avx2_pmovzxbq:
  case Intrinsic::x86_avx2_pmovzxbw:
  case Intrinsic::x86_avx2_pmovzxdq:
  case Intrinsic::x86_avx2_pmovzxwd:
  case Intrinsic::x86_avx2_pmovzxwq:
    if (Value *V = SimplifyX86extend(*II, *Builder, false))
      return ReplaceInstUsesWith(*II, V);
    break;

  case Intrinsic::x86_sse41_insertps:
    if (Value *V = SimplifyX86insertps(*II, *Builder))
      return ReplaceInstUsesWith(*II, V);
    break;

  case Intrinsic::x86_sse4a_extrq: {
    Value *Op0 = II->getArgOperand(0);
    Value *Op1 = II->getArgOperand(1);
    unsigned VWidth0 = Op0->getType()->getVectorNumElements();
    unsigned VWidth1 = Op1->getType()->getVectorNumElements();
    assert(Op0->getType()->getPrimitiveSizeInBits() == 128 &&
           Op1->getType()->getPrimitiveSizeInBits() == 128 && VWidth0 == 2 &&
           VWidth1 == 16 && "Unexpected operand sizes");

    // See if we're dealing with constant values.
    Constant *C1 = dyn_cast<Constant>(Op1);
    ConstantInt *CILength =
        C1 ? dyn_cast<ConstantInt>(C1->getAggregateElement((unsigned)0))
           : nullptr;
    ConstantInt *CIIndex =
        C1 ? dyn_cast<ConstantInt>(C1->getAggregateElement((unsigned)1))
           : nullptr;

    // Attempt to simplify to a constant, shuffle vector or EXTRQI call.
    if (Value *V = SimplifyX86extrq(*II, Op0, CILength, CIIndex, *Builder))
      return ReplaceInstUsesWith(*II, V);

    // EXTRQ only uses the lowest 64-bits of the first 128-bit vector
    // operands and the lowest 16-bits of the second.
    if (Value *V = SimplifyDemandedVectorEltsLow(Op0, VWidth0, 1)) {
      II->setArgOperand(0, V);
      return II;
    }
    if (Value *V = SimplifyDemandedVectorEltsLow(Op1, VWidth1, 2)) {
      II->setArgOperand(1, V);
      return II;
    }
    break;
  }

  case Intrinsic::x86_sse4a_extrqi: {
    // EXTRQI: Extract Length bits starting from Index. Zero pad the remaining
    // bits of the lower 64-bits. The upper 64-bits are undefined.
    Value *Op0 = II->getArgOperand(0);
    unsigned VWidth = Op0->getType()->getVectorNumElements();
    assert(Op0->getType()->getPrimitiveSizeInBits() == 128 && VWidth == 2 &&
           "Unexpected operand size");

    // See if we're dealing with constant values.
    ConstantInt *CILength = dyn_cast<ConstantInt>(II->getArgOperand(1));
    ConstantInt *CIIndex = dyn_cast<ConstantInt>(II->getArgOperand(2));

    // Attempt to simplify to a constant or shuffle vector.
    if (Value *V = SimplifyX86extrq(*II, Op0, CILength, CIIndex, *Builder))
      return ReplaceInstUsesWith(*II, V);

    // EXTRQI only uses the lowest 64-bits of the first 128-bit vector
    // operand.
    if (Value *V = SimplifyDemandedVectorEltsLow(Op0, VWidth, 1)) {
      II->setArgOperand(0, V);
      return II;
    }
    break;
  }

  case Intrinsic::x86_sse4a_insertq: {
    Value *Op0 = II->getArgOperand(0);
    Value *Op1 = II->getArgOperand(1);
    unsigned VWidth = Op0->getType()->getVectorNumElements();
    assert(Op0->getType()->getPrimitiveSizeInBits() == 128 &&
           Op1->getType()->getPrimitiveSizeInBits() == 128 && VWidth == 2 &&
           Op1->getType()->getVectorNumElements() == 2 &&
           "Unexpected operand size");

    // See if we're dealing with constant values.
    Constant *C1 = dyn_cast<Constant>(Op1);
    ConstantInt *CI11 =
        C1 ? dyn_cast<ConstantInt>(C1->getAggregateElement((unsigned)1))
           : nullptr;

    // Attempt to simplify to a constant, shuffle vector or INSERTQI call.
    if (CI11) {
      APInt V11 = CI11->getValue();
      APInt Len = V11.zextOrTrunc(6);
      APInt Idx = V11.lshr(8).zextOrTrunc(6);
      if (Value *V = SimplifyX86insertq(*II, Op0, Op1, Len, Idx, *Builder))
        return ReplaceInstUsesWith(*II, V);
    }

    // INSERTQ only uses the lowest 64-bits of the first 128-bit vector
    // operand.
    if (Value *V = SimplifyDemandedVectorEltsLow(Op0, VWidth, 1)) {
      II->setArgOperand(0, V);
      return II;
    }
    break;
  }

  case Intrinsic::x86_sse4a_insertqi: {
    // INSERTQI: Extract lowest Length bits from lower half of second source
    // and insert over first source starting at Index bit. The upper 64-bits
    // are undefined.
    Value *Op0 = II->getArgOperand(0);
    Value *Op1 = II->getArgOperand(1);
    unsigned VWidth0 = Op0->getType()->getVectorNumElements();
    unsigned VWidth1 = Op1->getType()->getVectorNumElements();
    assert(Op0->getType()->getPrimitiveSizeInBits() == 128 &&
           Op1->getType()->getPrimitiveSizeInBits() == 128 && VWidth0 == 2 &&
           VWidth1 == 2 && "Unexpected operand sizes");

    // See if we're dealing with constant values.
    ConstantInt *CILength = dyn_cast<ConstantInt>(II->getArgOperand(2));
    ConstantInt *CIIndex = dyn_cast<ConstantInt>(II->getArgOperand(3));

    // Attempt to simplify to a constant or shuffle vector.
    if (CILength && CIIndex) {
      APInt Len = CILength->getValue().zextOrTrunc(6);
      APInt Idx = CIIndex->getValue().zextOrTrunc(6);
      if (Value *V = SimplifyX86insertq(*II, Op0, Op1, Len, Idx, *Builder))
        return ReplaceInstUsesWith(*II, V);
    }

    // INSERTQI only uses the lowest 64-bits of the first two 128-bit vector
    // operands.
    if (Value *V = SimplifyDemandedVectorEltsLow(Op0, VWidth0, 1)) {
      II->setArgOperand(0, V);
      return II;
    }

    if (Value *V = SimplifyDemandedVectorEltsLow(Op1, VWidth1, 1)) {
      II->setArgOperand(1, V);
      return II;
    }
    break;
  }

  case Intrinsic::x86_sse41_pblendvb:
  case Intrinsic::x86_sse41_blendvps:
  case Intrinsic::x86_sse41_blendvpd:
  case Intrinsic::x86_avx_blendv_ps_256:
  case Intrinsic::x86_avx_blendv_pd_256:
  case Intrinsic::x86_avx2_pblendvb: {
    // Convert blendv* to vector selects if the mask is constant.
    // This optimization is convoluted because the intrinsic is defined as
    // getting a vector of floats or doubles for the ps and pd versions.
    // FIXME: That should be changed.
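    //
    // For example (illustrative IR): blendvps with a constant mask whose
    // lanes have sign bits <0,1,0,1> becomes
    //   select <4 x i1> <i1 false, i1 true, i1 false, i1 true>,
    //          <4 x float> %op1, <4 x float> %op0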

    Value *Op0 = II->getArgOperand(0);
    Value *Op1 = II->getArgOperand(1);
    Value *Mask = II->getArgOperand(2);

    // fold (blend A, A, Mask) -> A
    if (Op0 == Op1)
      return ReplaceInstUsesWith(CI, Op0);

    // Zero Mask - select 1st argument.
    if (isa<ConstantAggregateZero>(Mask))
      return ReplaceInstUsesWith(CI, Op0);

    // Constant Mask - select 1st/2nd argument lane based on top bit of mask.
    if (auto C = dyn_cast<ConstantDataVector>(Mask)) {
      auto Tyi1 = Builder->getInt1Ty();
      auto SelectorType = cast<VectorType>(Mask->getType());
      auto EltTy = SelectorType->getElementType();
      unsigned Size = SelectorType->getNumElements();
      unsigned BitWidth =
          EltTy->isFloatTy()
              ? 32
              : (EltTy->isDoubleTy() ? 64 : EltTy->getIntegerBitWidth());
      assert((BitWidth == 64 || BitWidth == 32 || BitWidth == 8) &&
             "Wrong arguments for variable blend intrinsic");
      SmallVector<Constant *, 32> Selectors;
      for (unsigned I = 0; I < Size; ++I) {
        // The intrinsics only read the top bit.
        uint64_t Selector;
        if (BitWidth == 8)
          Selector = C->getElementAsInteger(I);
        else
          Selector = C->getElementAsAPFloat(I).bitcastToAPInt().getZExtValue();
        Selectors.push_back(ConstantInt::get(Tyi1, Selector >> (BitWidth - 1)));
      }
      auto NewSelector = ConstantVector::get(Selectors);
      return SelectInst::Create(NewSelector, Op1, Op0, "blendv");
    }
    break;
  }

  case Intrinsic::x86_ssse3_pshuf_b_128:
  case Intrinsic::x86_avx2_pshuf_b: {
    // Turn pshufb(V1,mask) -> shuffle(V1,Zero,mask) if mask is a constant.
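    // Each mask byte selects a source byte with its low 4 bits; if the mask
    // byte's sign bit is set, the result byte is zero. For example, a mask
    // byte of 0x83 produces 0, while 0x03 selects source byte 3.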
    auto *V = II->getArgOperand(1);
    auto *VTy = cast<VectorType>(V->getType());
    unsigned NumElts = VTy->getNumElements();
    assert((NumElts == 16 || NumElts == 32) &&
           "Unexpected number of elements in shuffle mask!");
    // Initialize the resulting shuffle mask to all zeroes.
    uint32_t Indexes[32] = {0};

    if (auto *Mask = dyn_cast<ConstantDataVector>(V)) {
      // Each byte in the shuffle control mask forms an index to permute the
      // corresponding byte in the destination operand.
      for (unsigned I = 0; I < NumElts; ++I) {
        int8_t Index = Mask->getElementAsInteger(I);
        // If the most significant bit (bit[7]) of each byte of the shuffle
        // control mask is set, then zero is written in the result byte.
        // The zero vector is in the right-hand side of the resulting
        // shufflevector.

        // The value of each index is the least significant 4 bits of the
        // shuffle control byte.
        Indexes[I] = (Index < 0) ? NumElts : Index & 0xF;
      }
    } else if (!isa<ConstantAggregateZero>(V))
      break;

    // The value of each index for the high 128-bit lane is the least
    // significant 4 bits of the respective shuffle control byte.
    for (unsigned I = 16; I < NumElts; ++I)
      Indexes[I] += I & 0xF0;

    auto NewC = ConstantDataVector::get(V->getContext(),
                                        makeArrayRef(Indexes, NumElts));
    auto V1 = II->getArgOperand(0);
    auto V2 = Constant::getNullValue(II->getType());
    auto Shuffle = Builder->CreateShuffleVector(V1, V2, NewC);
    return ReplaceInstUsesWith(CI, Shuffle);
  }

  case Intrinsic::x86_avx_vpermilvar_ps:
  case Intrinsic::x86_avx_vpermilvar_ps_256:
  case Intrinsic::x86_avx_vpermilvar_pd:
  case Intrinsic::x86_avx_vpermilvar_pd_256: {
    // Convert vpermil* to shufflevector if the mask is constant.
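    //
    // For example (illustrative IR): vpermilvar.ps with the constant mask
    // <3, 2, 1, 0> becomes
    //   shufflevector <4 x float> %v, <4 x float> undef,
    //                 <4 x i32> <i32 3, i32 2, i32 1, i32 0>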
1435 Value *V = II->getArgOperand(1);
1436 unsigned Size = cast<VectorType>(V->getType())->getNumElements();
1437 assert(Size == 8 || Size == 4 || Size == 2);
1438 uint32_t Indexes[8];
1439 if (auto C = dyn_cast<ConstantDataVector>(V)) {
1440 // The intrinsics only read one or two bits, clear the rest.
1441 for (unsigned I = 0; I < Size; ++I) {
1442 uint32_t Index = C->getElementAsInteger(I) & 0x3;
1443 if (II->getIntrinsicID() == Intrinsic::x86_avx_vpermilvar_pd ||
1444 II->getIntrinsicID() == Intrinsic::x86_avx_vpermilvar_pd_256)
1445 Index >>= 1;
1446 Indexes[I] = Index;
1447 }
1448 } else if (isa<ConstantAggregateZero>(V)) {
1449 for (unsigned I = 0; I < Size; ++I)
1450 Indexes[I] = 0;
1451 } else {
1452 break;
1453 }
1454 // The _256 variants are a bit trickier since the mask bits always index
1455 // into the corresponding 128 half. In order to convert to a generic
1456 // shuffle, we have to make that explicit.
1457 if (II->getIntrinsicID() == Intrinsic::x86_avx_vpermilvar_ps_256 ||
1458 II->getIntrinsicID() == Intrinsic::x86_avx_vpermilvar_pd_256) {
1459 for (unsigned I = Size / 2; I < Size; ++I)
1460 Indexes[I] += Size / 2;
1461 }
1462 auto NewC =
1463 ConstantDataVector::get(V->getContext(), makeArrayRef(Indexes, Size));
1464 auto V1 = II->getArgOperand(0);
1465 auto V2 = UndefValue::get(V1->getType());
1466 auto Shuffle = Builder->CreateShuffleVector(V1, V2, NewC);
1467 return ReplaceInstUsesWith(CI, Shuffle);
1468 }

  case Intrinsic::x86_avx_vperm2f128_pd_256:
  case Intrinsic::x86_avx_vperm2f128_ps_256:
  case Intrinsic::x86_avx_vperm2f128_si_256:
  case Intrinsic::x86_avx2_vperm2i128:
    if (Value *V = SimplifyX86vperm2(*II, *Builder))
      return ReplaceInstUsesWith(*II, V);
    break;

  case Intrinsic::x86_xop_vpcomb:
  case Intrinsic::x86_xop_vpcomd:
  case Intrinsic::x86_xop_vpcomq:
  case Intrinsic::x86_xop_vpcomw:
    if (Value *V = SimplifyX86vpcom(*II, *Builder, true))
      return ReplaceInstUsesWith(*II, V);
    break;

  case Intrinsic::x86_xop_vpcomub:
  case Intrinsic::x86_xop_vpcomud:
  case Intrinsic::x86_xop_vpcomuq:
  case Intrinsic::x86_xop_vpcomuw:
    if (Value *V = SimplifyX86vpcom(*II, *Builder, false))
      return ReplaceInstUsesWith(*II, V);
    break;

  case Intrinsic::ppc_altivec_vperm:
    // Turn vperm(V1,V2,mask) -> shuffle(V1,V2,mask) if mask is a constant.
    // Note that ppc_altivec_vperm has a big-endian bias, so when creating
    // a vector shuffle for little endian, we must undo the transformation
    // performed on vec_perm in altivec.h. That is, we must complement
    // the permutation mask with respect to 31 and reverse the order of
    // V1 and V2.
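    // Illustrative consequence (a sketch): on little endian, a mask byte of 0
    // selects byte 31 of the (V2,V1) concatenation, which is why the loop
    // below flips each index (31 - Idx) and swaps the roles of V1 and V2.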
    if (Constant *Mask = dyn_cast<Constant>(II->getArgOperand(2))) {
      assert(Mask->getType()->getVectorNumElements() == 16 &&
             "Bad type for intrinsic!");

      // Check that all of the elements are integer constants or undefs.
      bool AllEltsOk = true;
      for (unsigned i = 0; i != 16; ++i) {
        Constant *Elt = Mask->getAggregateElement(i);
        if (!Elt || !(isa<ConstantInt>(Elt) || isa<UndefValue>(Elt))) {
          AllEltsOk = false;
          break;
        }
      }

      if (AllEltsOk) {
        // Cast the input vectors to byte vectors.
        Value *Op0 = Builder->CreateBitCast(II->getArgOperand(0),
                                            Mask->getType());
        Value *Op1 = Builder->CreateBitCast(II->getArgOperand(1),
                                            Mask->getType());
        Value *Result = UndefValue::get(Op0->getType());

        // Only extract each element once.
        Value *ExtractedElts[32];
        memset(ExtractedElts, 0, sizeof(ExtractedElts));

        for (unsigned i = 0; i != 16; ++i) {
          if (isa<UndefValue>(Mask->getAggregateElement(i)))
            continue;
          unsigned Idx =
            cast<ConstantInt>(Mask->getAggregateElement(i))->getZExtValue();
          Idx &= 31;  // Match the hardware behavior.
          if (DL.isLittleEndian())
            Idx = 31 - Idx;

          if (!ExtractedElts[Idx]) {
            Value *Op0ToUse = (DL.isLittleEndian()) ? Op1 : Op0;
            Value *Op1ToUse = (DL.isLittleEndian()) ? Op0 : Op1;
            ExtractedElts[Idx] =
              Builder->CreateExtractElement(Idx < 16 ? Op0ToUse : Op1ToUse,
                                            Builder->getInt32(Idx&15));
          }

          // Insert this value into the result vector.
          Result = Builder->CreateInsertElement(Result, ExtractedElts[Idx],
                                                Builder->getInt32(i));
        }
        return CastInst::Create(Instruction::BitCast, Result, CI.getType());
      }
    }
    break;

  case Intrinsic::arm_neon_vld1:
  case Intrinsic::arm_neon_vld2:
  case Intrinsic::arm_neon_vld3:
  case Intrinsic::arm_neon_vld4:
  case Intrinsic::arm_neon_vld2lane:
  case Intrinsic::arm_neon_vld3lane:
  case Intrinsic::arm_neon_vld4lane:
  case Intrinsic::arm_neon_vst1:
  case Intrinsic::arm_neon_vst2:
  case Intrinsic::arm_neon_vst3:
  case Intrinsic::arm_neon_vst4:
  case Intrinsic::arm_neon_vst2lane:
  case Intrinsic::arm_neon_vst3lane:
  case Intrinsic::arm_neon_vst4lane: {
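    // If the pointer is known to be more aligned than the intrinsic's
    // alignment argument claims, raise the argument (the trailing i32
    // operand) to the proven alignment.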
    unsigned MemAlign = getKnownAlignment(II->getArgOperand(0), DL, II, AC, DT);
    unsigned AlignArg = II->getNumArgOperands() - 1;
    ConstantInt *IntrAlign = dyn_cast<ConstantInt>(II->getArgOperand(AlignArg));
    if (IntrAlign && IntrAlign->getZExtValue() < MemAlign) {
      II->setArgOperand(AlignArg,
                        ConstantInt::get(Type::getInt32Ty(II->getContext()),
                                         MemAlign, false));
      return II;
    }
    break;
  }

  case Intrinsic::arm_neon_vmulls:
  case Intrinsic::arm_neon_vmullu:
  case Intrinsic::aarch64_neon_smull:
  case Intrinsic::aarch64_neon_umull: {
    Value *Arg0 = II->getArgOperand(0);
    Value *Arg1 = II->getArgOperand(1);

    // Handle mul by zero first:
    if (isa<ConstantAggregateZero>(Arg0) || isa<ConstantAggregateZero>(Arg1)) {
      return ReplaceInstUsesWith(CI, ConstantAggregateZero::get(II->getType()));
    }

    // Check for constant LHS & RHS - in this case we just simplify.
    bool Zext = (II->getIntrinsicID() == Intrinsic::arm_neon_vmullu ||
                 II->getIntrinsicID() == Intrinsic::aarch64_neon_umull);
    VectorType *NewVT = cast<VectorType>(II->getType());
    if (Constant *CV0 = dyn_cast<Constant>(Arg0)) {
      if (Constant *CV1 = dyn_cast<Constant>(Arg1)) {
        CV0 = ConstantExpr::getIntegerCast(CV0, NewVT, /*isSigned=*/!Zext);
        CV1 = ConstantExpr::getIntegerCast(CV1, NewVT, /*isSigned=*/!Zext);

        return ReplaceInstUsesWith(CI, ConstantExpr::getMul(CV0, CV1));
      }

      // Couldn't simplify - canonicalize constant to the RHS.
      std::swap(Arg0, Arg1);
    }

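    // A multiply by a splat of one reduces to the widening cast of the other
    // operand, e.g. (illustrative): vmulls(%x, splat(1)) is just sext %x to
    // the result type (zext for the unsigned variants), as handled below.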
    // Handle mul by one:
    if (Constant *CV1 = dyn_cast<Constant>(Arg1))
      if (ConstantInt *Splat =
            dyn_cast_or_null<ConstantInt>(CV1->getSplatValue()))
        if (Splat->isOne())
          return CastInst::CreateIntegerCast(Arg0, II->getType(),
                                             /*isSigned=*/!Zext);

    break;
  }

  case Intrinsic::AMDGPU_rcp: {
    if (const ConstantFP *C = dyn_cast<ConstantFP>(II->getArgOperand(0))) {
      const APFloat &ArgVal = C->getValueAPF();
      APFloat Val(ArgVal.getSemantics(), 1.0);
      APFloat::opStatus Status = Val.divide(ArgVal,
                                            APFloat::rmNearestTiesToEven);
      // Only do this if it was exact and therefore not dependent on the
      // rounding mode.
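      // e.g. (illustrative): rcp(2.0) folds to 0.5 since 1.0/2.0 is exact,
      // while rcp(3.0) is left alone because 1.0/3.0 is inexact.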
      if (Status == APFloat::opOK)
        return ReplaceInstUsesWith(CI, ConstantFP::get(II->getContext(), Val));
    }

    break;
  }
  case Intrinsic::stackrestore: {
    // If the save is right next to the restore, remove the restore. This can
    // happen when variable allocas are DCE'd.
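    // Illustrative pattern (a sketch):
    //   %sp = call i8* @llvm.stacksave()
    //   call void @llvm.stackrestore(i8* %sp)   ; adjacent, so removable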
    if (IntrinsicInst *SS = dyn_cast<IntrinsicInst>(II->getArgOperand(0))) {
      if (SS->getIntrinsicID() == Intrinsic::stacksave) {
        if (&*++SS->getIterator() == II)
          return EraseInstFromFunction(CI);
      }
    }

    // Scan down this block to see if there is another stack restore in the
    // same block without an intervening call/alloca.
    BasicBlock::iterator BI(II);
    TerminatorInst *TI = II->getParent()->getTerminator();
    bool CannotRemove = false;
    for (++BI; &*BI != TI; ++BI) {
      if (isa<AllocaInst>(BI)) {
        CannotRemove = true;
        break;
      }
      if (CallInst *BCI = dyn_cast<CallInst>(BI)) {
        if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(BCI)) {
          // If there is a stackrestore below this one, remove this one.
          if (II->getIntrinsicID() == Intrinsic::stackrestore)
            return EraseInstFromFunction(CI);
          // Otherwise, ignore the intrinsic.
        } else {
          // If we found a non-intrinsic call, we can't remove the stack
          // restore.
          CannotRemove = true;
          break;
        }
      }
    }

    // If the stack restore is in a return or resume block and there are no
    // allocas or calls between the restore and the return, nuke the restore.
    if (!CannotRemove && (isa<ReturnInst>(TI) || isa<ResumeInst>(TI)))
      return EraseInstFromFunction(CI);
    break;
  }
  case Intrinsic::lifetime_start: {
    // Remove trivially empty lifetime_start/end ranges, i.e. a start
    // immediately followed by an end (ignoring debuginfo or other
    // lifetime markers in between).
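    // Illustrative pattern (a sketch):
    //   call void @llvm.lifetime.start(i64 8, i8* %p)
    //   call void @llvm.lifetime.end(i64 8, i8* %p)
    // The range is empty, so both markers can be erased together.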
    BasicBlock::iterator BI = II->getIterator(), BE = II->getParent()->end();
    for (++BI; BI != BE; ++BI) {
      if (IntrinsicInst *LTE = dyn_cast<IntrinsicInst>(BI)) {
        if (isa<DbgInfoIntrinsic>(LTE) ||
            LTE->getIntrinsicID() == Intrinsic::lifetime_start)
          continue;
        if (LTE->getIntrinsicID() == Intrinsic::lifetime_end) {
          if (II->getOperand(0) == LTE->getOperand(0) &&
              II->getOperand(1) == LTE->getOperand(1)) {
            EraseInstFromFunction(*LTE);
            return EraseInstFromFunction(*II);
          }
          continue;
        }
      }
      break;
    }
    break;
  }
  case Intrinsic::assume: {
    // Canonicalize assume(a && b) -> assume(a); assume(b);
    // Note: New assumption intrinsics created here are registered by
    // the InstCombineIRInserter object.
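    // Illustrative IR (a sketch):
    //   %c = and i1 %a, %b
    //   call void @llvm.assume(i1 %c)
    // ->
    //   call void @llvm.assume(i1 %a)
    //   call void @llvm.assume(i1 %b)
    // so each conjunct feeds ValueTracking on its own.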
    Value *IIOperand = II->getArgOperand(0), *A, *B,
          *AssumeIntrinsic = II->getCalledValue();
    if (match(IIOperand, m_And(m_Value(A), m_Value(B)))) {
      Builder->CreateCall(AssumeIntrinsic, A, II->getName());
      Builder->CreateCall(AssumeIntrinsic, B, II->getName());
      return EraseInstFromFunction(*II);
    }
    // assume(!(a || b)) -> assume(!a); assume(!b);
    if (match(IIOperand, m_Not(m_Or(m_Value(A), m_Value(B))))) {
      Builder->CreateCall(AssumeIntrinsic, Builder->CreateNot(A),
                          II->getName());
      Builder->CreateCall(AssumeIntrinsic, Builder->CreateNot(B),
                          II->getName());
      return EraseInstFromFunction(*II);
    }

    // assume( (load addr) != null ) -> add 'nonnull' metadata to load
    // (if assume is valid at the load)
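    // Illustrative IR (a sketch):
    //   %v = load i32*, i32** %p
    //   %c = icmp ne i32* %v, null
    //   call void @llvm.assume(i1 %c)
    // The assume is erased and the load gains !nonnull metadata.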
    if (ICmpInst* ICmp = dyn_cast<ICmpInst>(IIOperand)) {
      Value *LHS = ICmp->getOperand(0);
      Value *RHS = ICmp->getOperand(1);
      if (ICmpInst::ICMP_NE == ICmp->getPredicate() &&
          isa<LoadInst>(LHS) &&
          isa<Constant>(RHS) &&
          RHS->getType()->isPointerTy() &&
          cast<Constant>(RHS)->isNullValue()) {
        LoadInst* LI = cast<LoadInst>(LHS);
        if (isValidAssumeForContext(II, LI, DT)) {
          MDNode *MD = MDNode::get(II->getContext(), None);
          LI->setMetadata(LLVMContext::MD_nonnull, MD);
          return EraseInstFromFunction(*II);
        }
      }
      // TODO: apply nonnull return attributes to calls and invokes
      // TODO: apply range metadata for range check patterns?
    }
    // If there is a dominating assume with the same condition as this one,
    // then this one is redundant, and should be removed.
    APInt KnownZero(1, 0), KnownOne(1, 0);
    computeKnownBits(IIOperand, KnownZero, KnownOne, 0, II);
    if (KnownOne.isAllOnesValue())
      return EraseInstFromFunction(*II);

    break;
  }
  case Intrinsic::experimental_gc_relocate: {
    // Translate facts known about a pointer before relocating into
    // facts about the relocate value, while being careful to
    // preserve relocation semantics.
    GCRelocateOperands Operands(II);
    Value *DerivedPtr = Operands.getDerivedPtr();
    auto *GCRelocateType = cast<PointerType>(II->getType());

    // Remove the relocation if unused; note that this check is required
    // to prevent the cases below from looping forever.
    if (II->use_empty())
      return EraseInstFromFunction(*II);

    // Undef is undef, even after relocation.
    // TODO: provide a hook for this in GCStrategy. This is clearly legal for
    // most practical collectors, but there was discussion in the review thread
    // about whether it was legal for all possible collectors.
    if (isa<UndefValue>(DerivedPtr)) {
      // gc_relocate is uncasted. Use undef of gc_relocate's type to
      // replace it.
      return ReplaceInstUsesWith(*II, UndefValue::get(GCRelocateType));
    }

    // The relocation of null will be null for almost any collector.
    // TODO: provide a hook for this in GCStrategy. There might be some weird
    // collector this property does not hold for.
    if (isa<ConstantPointerNull>(DerivedPtr)) {
      // gc_relocate is uncasted. Use a null pointer of gc_relocate's type to
      // replace it.
      return ReplaceInstUsesWith(*II, ConstantPointerNull::get(GCRelocateType));
    }

    // isKnownNonNull -> nonnull attribute
    if (isKnownNonNullAt(DerivedPtr, II, DT, TLI))
      II->addAttribute(AttributeSet::ReturnIndex, Attribute::NonNull);

    // isDereferenceablePointer -> deref attribute
    if (isDereferenceablePointer(DerivedPtr, DL)) {
      if (Argument *A = dyn_cast<Argument>(DerivedPtr)) {
        uint64_t Bytes = A->getDereferenceableBytes();
        II->addDereferenceableAttr(AttributeSet::ReturnIndex, Bytes);
      }
    }

    // TODO: bitcast(relocate(p)) -> relocate(bitcast(p))
    // Canonicalize on the type from the uses to the defs

    // TODO: relocate((gep p, C, C2, ...)) -> gep(relocate(p), C, C2, ...)
  }
  }

  return visitCallSite(II);
}

// InvokeInst simplification
//
Instruction *InstCombiner::visitInvokeInst(InvokeInst &II) {
  return visitCallSite(&II);
}

/// isSafeToEliminateVarargsCast - If this cast does not affect the value
/// passed through the varargs area, we can eliminate the use of the cast.
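/// Illustrative case (a sketch): passing "bitcast (i32* %p to i8*)" as a
/// variadic argument can use %p directly, since pointer bitcasts are lossless
/// and the bits passed through the va_arg area are identical.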
static bool isSafeToEliminateVarargsCast(const CallSite CS,
                                         const DataLayout &DL,
                                         const CastInst *const CI,
                                         const int ix) {
  if (!CI->isLosslessCast())
    return false;

  // If this is a GC intrinsic, avoid munging types. We need types for
  // statepoint reconstruction in SelectionDAG.
  // TODO: This is probably something which should be expanded to all
  // intrinsics since the entire point of intrinsics is that
  // they are understandable by the optimizer.
  if (isStatepoint(CS) || isGCRelocate(CS) || isGCResult(CS))
    return false;

  // The size of ByVal or InAlloca arguments is derived from the type, so we
  // can't change to a type with a different size. If the size were
  // passed explicitly we could avoid this check.
  if (!CS.isByValOrInAllocaArgument(ix))
    return true;

  Type* SrcTy =
    cast<PointerType>(CI->getOperand(0)->getType())->getElementType();
  Type* DstTy = cast<PointerType>(CI->getType())->getElementType();
  if (!SrcTy->isSized() || !DstTy->isSized())
    return false;
  if (DL.getTypeAllocSize(SrcTy) != DL.getTypeAllocSize(DstTy))
    return false;
  return true;
}

// Try to fold some different types of calls here.
// Currently we're only working with the checking functions, memcpy_chk,
// mempcpy_chk, memmove_chk, memset_chk, strcpy_chk, stpcpy_chk, strncpy_chk,
// strcat_chk and strncat_chk.
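// Illustrative fold (a sketch): a call to __memcpy_chk(dst, src, n, objsize)
// where objsize is provably large enough simplifies to a plain memcpy via
// LibCallSimplifier below.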
Instruction *InstCombiner::tryOptimizeCall(CallInst *CI) {
  if (!CI->getCalledFunction()) return nullptr;

  auto InstCombineRAUW = [this](Instruction *From, Value *With) {
    ReplaceInstUsesWith(*From, With);
  };
  LibCallSimplifier Simplifier(DL, TLI, InstCombineRAUW);
  if (Value *With = Simplifier.optimizeCall(CI)) {
    ++NumSimplified;
    return CI->use_empty() ? CI : ReplaceInstUsesWith(*CI, With);
  }

  return nullptr;
}

static IntrinsicInst *FindInitTrampolineFromAlloca(Value *TrampMem) {
  // Strip off at most one level of pointer casts, looking for an alloca. This
  // is good enough in practice and simpler than handling any number of casts.
  Value *Underlying = TrampMem->stripPointerCasts();
  if (Underlying != TrampMem &&
      (!Underlying->hasOneUse() || Underlying->user_back() != TrampMem))
    return nullptr;
  if (!isa<AllocaInst>(Underlying))
    return nullptr;

  IntrinsicInst *InitTrampoline = nullptr;
  for (User *U : TrampMem->users()) {
    IntrinsicInst *II = dyn_cast<IntrinsicInst>(U);
    if (!II)
      return nullptr;
    if (II->getIntrinsicID() == Intrinsic::init_trampoline) {
      if (InitTrampoline)
        // More than one init_trampoline writes to this value. Give up.
        return nullptr;
      InitTrampoline = II;
      continue;
    }
    if (II->getIntrinsicID() == Intrinsic::adjust_trampoline)
      // Allow any number of calls to adjust.trampoline.
      continue;
    return nullptr;
  }

  // No call to init.trampoline found.
  if (!InitTrampoline)
    return nullptr;

  // Check that the alloca is being used in the expected way.
  if (InitTrampoline->getOperand(0) != TrampMem)
    return nullptr;

  return InitTrampoline;
}

static IntrinsicInst *FindInitTrampolineFromBB(IntrinsicInst *AdjustTramp,
                                               Value *TrampMem) {
  // Visit all the previous instructions in the basic block, and try to find
  // an init.trampoline which has a direct path to the adjust.trampoline.
  for (BasicBlock::iterator I = AdjustTramp->getIterator(),
                            E = AdjustTramp->getParent()->begin();
       I != E;) {
    Instruction *Inst = &*--I;
    if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst))
      if (II->getIntrinsicID() == Intrinsic::init_trampoline &&
          II->getOperand(0) == TrampMem)
        return II;
    if (Inst->mayWriteToMemory())
      return nullptr;
  }
  return nullptr;
}

// Given a call to llvm.adjust.trampoline, find and return the corresponding
// call to llvm.init.trampoline if the call to the trampoline can be optimized
// to a direct call to a function. Otherwise return NULL.
//
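// Illustrative shape (a sketch): for
//   %t = call i8* @llvm.adjust.trampoline(i8* %mem)
//   %fp = bitcast i8* %t to void (i32)*
//   call void %fp(i32 0)
// the callee is walked back from %fp to the llvm.init.trampoline that filled
// in %mem.
//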
static IntrinsicInst *FindInitTrampoline(Value *Callee) {
  Callee = Callee->stripPointerCasts();
  IntrinsicInst *AdjustTramp = dyn_cast<IntrinsicInst>(Callee);
  if (!AdjustTramp ||
      AdjustTramp->getIntrinsicID() != Intrinsic::adjust_trampoline)
    return nullptr;

  Value *TrampMem = AdjustTramp->getOperand(0);

  if (IntrinsicInst *IT = FindInitTrampolineFromAlloca(TrampMem))
    return IT;
  if (IntrinsicInst *IT = FindInitTrampolineFromBB(AdjustTramp, TrampMem))
    return IT;
  return nullptr;
}

// visitCallSite - Improvements for call and invoke instructions.
//
Instruction *InstCombiner::visitCallSite(CallSite CS) {

  if (isAllocLikeFn(CS.getInstruction(), TLI))
    return visitAllocSite(*CS.getInstruction());

  bool Changed = false;

  // Mark any parameters that are known to be non-null with the nonnull
  // attribute. This is helpful for inlining calls to functions with null
  // checks on their arguments.
  SmallVector<unsigned, 4> Indices;
  unsigned ArgNo = 0;

  for (Value *V : CS.args()) {
    if (V->getType()->isPointerTy() &&
        !CS.paramHasAttr(ArgNo + 1, Attribute::NonNull) &&
        isKnownNonNullAt(V, CS.getInstruction(), DT, TLI))
      Indices.push_back(ArgNo + 1);
    ArgNo++;
  }

  assert(ArgNo == CS.arg_size() && "sanity check");

  if (!Indices.empty()) {
    AttributeSet AS = CS.getAttributes();
    LLVMContext &Ctx = CS.getInstruction()->getContext();
    AS = AS.addAttribute(Ctx, Indices,
                         Attribute::get(Ctx, Attribute::NonNull));
    CS.setAttributes(AS);
    Changed = true;
  }

  // If the callee is a pointer to a function, attempt to move any casts to the
  // arguments of the call/invoke.
  Value *Callee = CS.getCalledValue();
  if (!isa<Function>(Callee) && transformConstExprCastCall(CS))
    return nullptr;

  if (Function *CalleeF = dyn_cast<Function>(Callee))
    // If the call and callee calling conventions don't match, this call must
    // be unreachable, as the call is undefined.
    if (CalleeF->getCallingConv() != CS.getCallingConv() &&
        // Only do this for calls to a function with a body. A prototype may
        // not actually end up matching the implementation's calling conv for a
        // variety of reasons (e.g. it may be written in assembly).
        !CalleeF->isDeclaration()) {
      Instruction *OldCall = CS.getInstruction();
      new StoreInst(ConstantInt::getTrue(Callee->getContext()),
                    UndefValue::get(Type::getInt1PtrTy(Callee->getContext())),
                    OldCall);
      // If OldCall does not return void then replaceAllUsesWith undef.
      // This allows ValueHandlers and custom metadata to adjust themselves.
      if (!OldCall->getType()->isVoidTy())
        ReplaceInstUsesWith(*OldCall, UndefValue::get(OldCall->getType()));
      if (isa<CallInst>(OldCall))
        return EraseInstFromFunction(*OldCall);

      // We cannot remove an invoke because it would change the CFG; just
      // change the callee to a null pointer.
      cast<InvokeInst>(OldCall)->setCalledFunction(
                                    Constant::getNullValue(CalleeF->getType()));
      return nullptr;
    }

  if (isa<ConstantPointerNull>(Callee) || isa<UndefValue>(Callee)) {
    // If CS does not return void then replaceAllUsesWith undef.
    // This allows ValueHandlers and custom metadata to adjust themselves.
    if (!CS.getInstruction()->getType()->isVoidTy())
      ReplaceInstUsesWith(*CS.getInstruction(),
                          UndefValue::get(CS.getInstruction()->getType()));

    if (isa<InvokeInst>(CS.getInstruction())) {
      // Can't remove an invoke because we cannot change the CFG.
      return nullptr;
    }

    // This instruction is not reachable, just remove it. We insert a store to
    // undef so that we know that this code is not reachable, despite the fact
    // that we can't modify the CFG here.
    new StoreInst(ConstantInt::getTrue(Callee->getContext()),
                  UndefValue::get(Type::getInt1PtrTy(Callee->getContext())),
                  CS.getInstruction());

    return EraseInstFromFunction(*CS.getInstruction());
  }

  if (IntrinsicInst *II = FindInitTrampoline(Callee))
    return transformCallThroughTrampoline(CS, II);

  PointerType *PTy = cast<PointerType>(Callee->getType());
  FunctionType *FTy = cast<FunctionType>(PTy->getElementType());
  if (FTy->isVarArg()) {
    int ix = FTy->getNumParams();
    // See if we can optimize any arguments passed through the varargs area of
    // the call.
    for (CallSite::arg_iterator I = CS.arg_begin() + FTy->getNumParams(),
           E = CS.arg_end(); I != E; ++I, ++ix) {
      CastInst *CI = dyn_cast<CastInst>(*I);
      if (CI && isSafeToEliminateVarargsCast(CS, DL, CI, ix)) {
        *I = CI->getOperand(0);
        Changed = true;
      }
    }
  }

  if (isa<InlineAsm>(Callee) && !CS.doesNotThrow()) {
    // Inline asm calls cannot throw - mark them 'nounwind'.
    CS.setDoesNotThrow();
    Changed = true;
  }

  // Try to optimize the call if possible; we require DataLayout for most of
  // this. None of these calls are seen as possibly dead so go ahead and
  // delete the instruction now.
  if (CallInst *CI = dyn_cast<CallInst>(CS.getInstruction())) {
    Instruction *I = tryOptimizeCall(CI);
    // If we changed something, return the result; otherwise fall through.
    if (I) return EraseInstFromFunction(*I);
  }

  return Changed ? CS.getInstruction() : nullptr;
}

// transformConstExprCastCall - If the callee is a constexpr cast of a function,
// attempt to move the cast to the arguments of the call/invoke.
//
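// Illustrative transform (a sketch):
//   %r = call i8* bitcast (i32* (i32*)* @f to i8* (i8*)*)(i8* %p)
// becomes
//   %q = bitcast i8* %p to i32*
//   %s = call i32* @f(i32* %q)
//   %r = bitcast i32* %s to i8*
//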
bool InstCombiner::transformConstExprCastCall(CallSite CS) {
  Function *Callee =
    dyn_cast<Function>(CS.getCalledValue()->stripPointerCasts());
  if (!Callee)
    return false;
  // The prototypes of thunks are a lie; don't try to directly call such
  // functions.
  if (Callee->hasFnAttribute("thunk"))
    return false;
  Instruction *Caller = CS.getInstruction();
  const AttributeSet &CallerPAL = CS.getAttributes();

  // Okay, this is a cast from a function to a different type. Unless doing so
  // would cause a type conversion of one of our arguments, change this call to
  // be a direct call with arguments casted to the appropriate types.
  //
  FunctionType *FT = Callee->getFunctionType();
  Type *OldRetTy = Caller->getType();
  Type *NewRetTy = FT->getReturnType();

  // Check to see if we are changing the return type...
  if (OldRetTy != NewRetTy) {

    if (NewRetTy->isStructTy())
      return false; // TODO: Handle multiple return values.

    if (!CastInst::isBitOrNoopPointerCastable(NewRetTy, OldRetTy, DL)) {
      if (Callee->isDeclaration())
        return false;   // Cannot transform this return value.

      if (!Caller->use_empty() &&
          // void -> non-void is handled specially
          !NewRetTy->isVoidTy())
        return false;   // Cannot transform this return value.
    }

    if (!CallerPAL.isEmpty() && !Caller->use_empty()) {
      AttrBuilder RAttrs(CallerPAL, AttributeSet::ReturnIndex);
      if (RAttrs.overlaps(AttributeFuncs::typeIncompatible(NewRetTy)))
        return false;   // Attribute not compatible with transformed value.
    }

    // If the callsite is an invoke instruction, and the return value is used
    // by a PHI node in a successor, we cannot change the return type of the
    // call because there is no place to put the cast instruction (without
    // breaking the critical edge). Bail out in this case.
    if (!Caller->use_empty())
      if (InvokeInst *II = dyn_cast<InvokeInst>(Caller))
        for (User *U : II->users())
          if (PHINode *PN = dyn_cast<PHINode>(U))
            if (PN->getParent() == II->getNormalDest() ||
                PN->getParent() == II->getUnwindDest())
              return false;
  }

  unsigned NumActualArgs = CS.arg_size();
  unsigned NumCommonArgs = std::min(FT->getNumParams(), NumActualArgs);

  // Prevent us turning:
  // declare void @takes_i32_inalloca(i32* inalloca)
  //  call void bitcast (void (i32*)* @takes_i32_inalloca to void (i32)*)(i32 0)
  //
  // into:
  //  call void @takes_i32_inalloca(i32* null)
  //
  // Similarly, avoid folding away bitcasts of byval calls.
  if (Callee->getAttributes().hasAttrSomewhere(Attribute::InAlloca) ||
      Callee->getAttributes().hasAttrSomewhere(Attribute::ByVal))
    return false;

  CallSite::arg_iterator AI = CS.arg_begin();
  for (unsigned i = 0, e = NumCommonArgs; i != e; ++i, ++AI) {
    Type *ParamTy = FT->getParamType(i);
    Type *ActTy = (*AI)->getType();

    if (!CastInst::isBitOrNoopPointerCastable(ActTy, ParamTy, DL))
      return false;   // Cannot transform this parameter value.

    if (AttrBuilder(CallerPAL.getParamAttributes(i + 1), i + 1).
          overlaps(AttributeFuncs::typeIncompatible(ParamTy)))
      return false;   // Attribute not compatible with transformed value.

    if (CS.isInAllocaArgument(i))
      return false;   // Cannot transform to and from inalloca.

    // If the parameter is passed as a byval argument, then we have to have a
    // sized type and the sized type has to have the same size as the old type.
    if (ParamTy != ActTy &&
        CallerPAL.getParamAttributes(i + 1).hasAttribute(i + 1,
                                                         Attribute::ByVal)) {
      PointerType *ParamPTy = dyn_cast<PointerType>(ParamTy);
      if (!ParamPTy || !ParamPTy->getElementType()->isSized())
        return false;

      Type *CurElTy = ActTy->getPointerElementType();
      if (DL.getTypeAllocSize(CurElTy) !=
          DL.getTypeAllocSize(ParamPTy->getElementType()))
        return false;
    }
  }

  if (Callee->isDeclaration()) {
    // Do not delete arguments unless we have a function body.
    if (FT->getNumParams() < NumActualArgs && !FT->isVarArg())
      return false;

    // If the callee is just a declaration, don't change the varargsness of the
    // call. We don't want to introduce a varargs call where one doesn't
    // already exist.
    PointerType *APTy = cast<PointerType>(CS.getCalledValue()->getType());
    if (FT->isVarArg()!=cast<FunctionType>(APTy->getElementType())->isVarArg())
      return false;

    // If both the callee and the cast type are varargs, we still have to make
    // sure the number of fixed parameters is the same or we have the same
    // ABI issues as if we introduce a varargs call.
    if (FT->isVarArg() &&
        cast<FunctionType>(APTy->getElementType())->isVarArg() &&
        FT->getNumParams() !=
        cast<FunctionType>(APTy->getElementType())->getNumParams())
      return false;
  }

  if (FT->getNumParams() < NumActualArgs && FT->isVarArg() &&
      !CallerPAL.isEmpty())
    // In this case we have more arguments than the new function type, but we
    // won't be dropping them. Check that these extra arguments have attributes
    // that are compatible with being a vararg call argument.
    for (unsigned i = CallerPAL.getNumSlots(); i; --i) {
      unsigned Index = CallerPAL.getSlotIndex(i - 1);
      if (Index <= FT->getNumParams())
        break;

      // Check if it has an attribute that's incompatible with varargs.
      AttributeSet PAttrs = CallerPAL.getSlotAttributes(i - 1);
      if (PAttrs.hasAttribute(Index, Attribute::StructRet))
        return false;
    }

  // Okay, we decided that this is a safe thing to do: go ahead and start
  // inserting cast instructions as necessary.
  std::vector<Value*> Args;
  Args.reserve(NumActualArgs);
  SmallVector<AttributeSet, 8> attrVec;
  attrVec.reserve(NumCommonArgs);

  // Get any return attributes.
  AttrBuilder RAttrs(CallerPAL, AttributeSet::ReturnIndex);

  // If the return value is not being used, the type may not be compatible
  // with the existing attributes. Wipe out any problematic attributes.
  RAttrs.remove(AttributeFuncs::typeIncompatible(NewRetTy));

  // Add the new return attributes.
  if (RAttrs.hasAttributes())
    attrVec.push_back(AttributeSet::get(Caller->getContext(),
                                        AttributeSet::ReturnIndex, RAttrs));

  AI = CS.arg_begin();
  for (unsigned i = 0; i != NumCommonArgs; ++i, ++AI) {
    Type *ParamTy = FT->getParamType(i);

    if ((*AI)->getType() == ParamTy) {
      Args.push_back(*AI);
    } else {
      Args.push_back(Builder->CreateBitOrPointerCast(*AI, ParamTy));
    }

    // Add any parameter attributes.
    AttrBuilder PAttrs(CallerPAL.getParamAttributes(i + 1), i + 1);
    if (PAttrs.hasAttributes())
      attrVec.push_back(AttributeSet::get(Caller->getContext(), i + 1,
                                          PAttrs));
  }

  // If the function takes more arguments than the call was taking, add them
  // now.
  for (unsigned i = NumCommonArgs; i != FT->getNumParams(); ++i)
    Args.push_back(Constant::getNullValue(FT->getParamType(i)));

  // If we are removing arguments to the function, emit an obnoxious warning.
  if (FT->getNumParams() < NumActualArgs) {
    // TODO: if (!FT->isVarArg()) this call may be unreachable. PR14722
    if (FT->isVarArg()) {
      // Add all of the arguments in their promoted form to the arg list.
      for (unsigned i = FT->getNumParams(); i != NumActualArgs; ++i, ++AI) {
        Type *PTy = getPromotedType((*AI)->getType());
        if (PTy != (*AI)->getType()) {
          // Must promote to pass through va_arg area!
          Instruction::CastOps opcode =
            CastInst::getCastOpcode(*AI, false, PTy, false);
          Args.push_back(Builder->CreateCast(opcode, *AI, PTy));
        } else {
          Args.push_back(*AI);
        }

        // Add any parameter attributes.
        AttrBuilder PAttrs(CallerPAL.getParamAttributes(i + 1), i + 1);
        if (PAttrs.hasAttributes())
          attrVec.push_back(AttributeSet::get(FT->getContext(), i + 1,
                                              PAttrs));
      }
    }
  }

  AttributeSet FnAttrs = CallerPAL.getFnAttributes();
  if (CallerPAL.hasAttributes(AttributeSet::FunctionIndex))
    attrVec.push_back(AttributeSet::get(Callee->getContext(), FnAttrs));

  if (NewRetTy->isVoidTy())
    Caller->setName("");   // Void type should not have a name.

  const AttributeSet &NewCallerPAL = AttributeSet::get(Callee->getContext(),
                                                       attrVec);

  SmallVector<OperandBundleDef, 1> OpBundles;
  CS.getOperandBundlesAsDefs(OpBundles);

  Instruction *NC;
  if (InvokeInst *II = dyn_cast<InvokeInst>(Caller)) {
    NC = Builder->CreateInvoke(Callee, II->getNormalDest(), II->getUnwindDest(),
                               Args, OpBundles);
    NC->takeName(II);
    cast<InvokeInst>(NC)->setCallingConv(II->getCallingConv());
    cast<InvokeInst>(NC)->setAttributes(NewCallerPAL);
  } else {
    CallInst *CI = cast<CallInst>(Caller);
    NC = Builder->CreateCall(Callee, Args, OpBundles);
    NC->takeName(CI);
    if (CI->isTailCall())
      cast<CallInst>(NC)->setTailCall();
    cast<CallInst>(NC)->setCallingConv(CI->getCallingConv());
    cast<CallInst>(NC)->setAttributes(NewCallerPAL);
  }

  // Insert a cast of the return type as necessary.
  Value *NV = NC;
  if (OldRetTy != NV->getType() && !Caller->use_empty()) {
    if (!NV->getType()->isVoidTy()) {
      NV = NC = CastInst::CreateBitOrPointerCast(NC, OldRetTy);
      NC->setDebugLoc(Caller->getDebugLoc());

      // If this is an invoke instruction, we should insert it after the first
      // non-phi instruction in the normal successor block.
      if (InvokeInst *II = dyn_cast<InvokeInst>(Caller)) {
        BasicBlock::iterator I = II->getNormalDest()->getFirstInsertionPt();
        InsertNewInstBefore(NC, *I);
      } else {
        // Otherwise, it's a call; just insert the cast right after the call.
        InsertNewInstBefore(NC, *Caller);
      }
      Worklist.AddUsersToWorkList(*Caller);
    } else {
      NV = UndefValue::get(Caller->getType());
    }
  }

  if (!Caller->use_empty())
    ReplaceInstUsesWith(*Caller, NV);
  else if (Caller->hasValueHandle()) {
    if (OldRetTy == NV->getType())
      ValueHandleBase::ValueIsRAUWd(Caller, NV);
    else
      // We cannot call ValueIsRAUWd with a different type, and the
      // actual tracked value will disappear.
      ValueHandleBase::ValueIsDeleted(Caller);
  }

  EraseInstFromFunction(*Caller);
  return true;
}

// transformCallThroughTrampoline - Turn a call to a function created by
// init_trampoline / adjust_trampoline intrinsic pair into a direct call to the
// underlying function.
//
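// Illustrative shape (a sketch): given
//   call void @llvm.init.trampoline(i8* %mem, i8* bitcast (...@f...), i8* %ch)
//   %t = call i8* @llvm.adjust.trampoline(i8* %mem)
// a call through %t becomes a direct call to @f with %ch spliced into the
// argument list as the 'nest' parameter.
//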
Instruction *
InstCombiner::transformCallThroughTrampoline(CallSite CS,
                                             IntrinsicInst *Tramp) {
  Value *Callee = CS.getCalledValue();
  PointerType *PTy = cast<PointerType>(Callee->getType());
  FunctionType *FTy = cast<FunctionType>(PTy->getElementType());
  const AttributeSet &Attrs = CS.getAttributes();

  // If the call already has the 'nest' attribute somewhere then give up -
  // otherwise 'nest' would occur twice after splicing in the chain.
  if (Attrs.hasAttrSomewhere(Attribute::Nest))
    return nullptr;

  assert(Tramp &&
         "transformCallThroughTrampoline called with incorrect CallSite.");

  Function *NestF =
    cast<Function>(Tramp->getArgOperand(1)->stripPointerCasts());
  PointerType *NestFPTy = cast<PointerType>(NestF->getType());
  FunctionType *NestFTy = cast<FunctionType>(NestFPTy->getElementType());

  const AttributeSet &NestAttrs = NestF->getAttributes();
  if (!NestAttrs.isEmpty()) {
    unsigned NestIdx = 1;
    Type *NestTy = nullptr;
    AttributeSet NestAttr;

    // Look for a parameter marked with the 'nest' attribute.
    for (FunctionType::param_iterator I = NestFTy->param_begin(),
           E = NestFTy->param_end(); I != E; ++NestIdx, ++I)
      if (NestAttrs.hasAttribute(NestIdx, Attribute::Nest)) {
        // Record the parameter type and any other attributes.
        NestTy = *I;
        NestAttr = NestAttrs.getParamAttributes(NestIdx);
        break;
      }

    if (NestTy) {
      Instruction *Caller = CS.getInstruction();
      std::vector<Value*> NewArgs;
      NewArgs.reserve(CS.arg_size() + 1);

      SmallVector<AttributeSet, 8> NewAttrs;
      NewAttrs.reserve(Attrs.getNumSlots() + 1);

      // Insert the nest argument into the call argument list, which may
      // mean appending it. Likewise for attributes.

      // Add any result attributes.
      if (Attrs.hasAttributes(AttributeSet::ReturnIndex))
        NewAttrs.push_back(AttributeSet::get(Caller->getContext(),
                                             Attrs.getRetAttributes()));

      {
        unsigned Idx = 1;
        CallSite::arg_iterator I = CS.arg_begin(), E = CS.arg_end();
        do {
          if (Idx == NestIdx) {
            // Add the chain argument and attributes.
            Value *NestVal = Tramp->getArgOperand(2);
            if (NestVal->getType() != NestTy)
              NestVal = Builder->CreateBitCast(NestVal, NestTy, "nest");
            NewArgs.push_back(NestVal);
            NewAttrs.push_back(AttributeSet::get(Caller->getContext(),
                                                 NestAttr));
          }

          if (I == E)
            break;

          // Add the original argument and attributes.
          NewArgs.push_back(*I);
          AttributeSet Attr = Attrs.getParamAttributes(Idx);
          if (Attr.hasAttributes(Idx)) {
            AttrBuilder B(Attr, Idx);
            NewAttrs.push_back(AttributeSet::get(Caller->getContext(),
                                                 Idx + (Idx >= NestIdx), B));
          }

          ++Idx;
          ++I;
        } while (1);
      }

      // Add any function attributes.
      if (Attrs.hasAttributes(AttributeSet::FunctionIndex))
        NewAttrs.push_back(AttributeSet::get(FTy->getContext(),
                                             Attrs.getFnAttributes()));

      // The trampoline may have been bitcast to a bogus type (FTy).
      // Handle this by synthesizing a new function type, equal to FTy
      // with the chain parameter inserted.

      std::vector<Type*> NewTypes;
      NewTypes.reserve(FTy->getNumParams()+1);

      // Insert the chain's type into the list of parameter types, which may
      // mean appending it.
      {
        unsigned Idx = 1;
        FunctionType::param_iterator I = FTy->param_begin(),
          E = FTy->param_end();

        do {
          if (Idx == NestIdx)
            // Add the chain's type.
            NewTypes.push_back(NestTy);

          if (I == E)
            break;

          // Add the original type.
          NewTypes.push_back(*I);

          ++Idx;
          ++I;
        } while (1);
      }

      // Replace the trampoline call with a direct call. Let the generic
      // code sort out any function type mismatches.
      FunctionType *NewFTy = FunctionType::get(FTy->getReturnType(), NewTypes,
                                               FTy->isVarArg());
      Constant *NewCallee =
        NestF->getType() == PointerType::getUnqual(NewFTy) ?
        NestF : ConstantExpr::getBitCast(NestF,
                                         PointerType::getUnqual(NewFTy));
      const AttributeSet &NewPAL =
          AttributeSet::get(FTy->getContext(), NewAttrs);

      Instruction *NewCaller;
      if (InvokeInst *II = dyn_cast<InvokeInst>(Caller)) {
        NewCaller = InvokeInst::Create(NewCallee,
                                       II->getNormalDest(), II->getUnwindDest(),
                                       NewArgs);
        cast<InvokeInst>(NewCaller)->setCallingConv(II->getCallingConv());
        cast<InvokeInst>(NewCaller)->setAttributes(NewPAL);
      } else {
        NewCaller = CallInst::Create(NewCallee, NewArgs);
        if (cast<CallInst>(Caller)->isTailCall())
          cast<CallInst>(NewCaller)->setTailCall();
        cast<CallInst>(NewCaller)->
          setCallingConv(cast<CallInst>(Caller)->getCallingConv());
        cast<CallInst>(NewCaller)->setAttributes(NewPAL);
      }

      return NewCaller;
    }
  }

  // Replace the trampoline call with a direct call. Since there is no 'nest'
  // parameter, there is no need to adjust the argument list. Let the generic
  // code sort out any function type mismatches.
  Constant *NewCallee =
    NestF->getType() == PTy ? NestF :
    ConstantExpr::getBitCast(NestF, PTy);
  CS.setCalledFunction(NewCallee);
  return CS.getInstruction();
}