1 //===-- ConstantFolding.cpp - Fold instructions into constants ------------===//
2 //
3 // The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 // This file defines routines for folding instructions into constants.
11 //
12 // Also, to supplement the basic IR ConstantExpr simplifications,
13 // this file defines some additional folding routines that can make use of
14 // DataLayout information. These functions cannot go in IR due to library
15 // dependency issues.
16 //
17 //===----------------------------------------------------------------------===//
18
19 #include "llvm/Analysis/ConstantFolding.h"
20 #include "llvm/ADT/STLExtras.h"
21 #include "llvm/ADT/SmallPtrSet.h"
22 #include "llvm/ADT/SmallVector.h"
23 #include "llvm/ADT/StringMap.h"
24 #include "llvm/Analysis/TargetLibraryInfo.h"
25 #include "llvm/Analysis/ValueTracking.h"
26 #include "llvm/Config/config.h"
27 #include "llvm/IR/Constants.h"
28 #include "llvm/IR/DataLayout.h"
29 #include "llvm/IR/DerivedTypes.h"
30 #include "llvm/IR/Function.h"
31 #include "llvm/IR/GetElementPtrTypeIterator.h"
32 #include "llvm/IR/GlobalVariable.h"
33 #include "llvm/IR/Instructions.h"
34 #include "llvm/IR/Intrinsics.h"
35 #include "llvm/IR/Operator.h"
36 #include "llvm/Support/ErrorHandling.h"
37 #include "llvm/Support/MathExtras.h"
38 #include <cassert>
39 #include <cerrno>
40 #include <cfenv>
41 #include <cmath>
42 #include <limits>
43
44 using namespace llvm;
45
46 namespace {
47
48 //===----------------------------------------------------------------------===//
49 // Constant Folding internal helper functions
50 //===----------------------------------------------------------------------===//
51
52 /// Constant fold bitcast, symbolically evaluating it with DataLayout.
53 /// This always returns a non-null constant, but it may be a
54 /// ConstantExpr if unfoldable.
55 Constant *FoldBitCast(Constant *C, Type *DestTy, const DataLayout &DL) {
56 // Catch the obvious splat cases.
57 if (C->isNullValue() && !DestTy->isX86_MMXTy())
58 return Constant::getNullValue(DestTy);
59 if (C->isAllOnesValue() && !DestTy->isX86_MMXTy() &&
60 !DestTy->isPtrOrPtrVectorTy()) // Don't get ones for ptr types!
61 return Constant::getAllOnesValue(DestTy);
62
63 // Handle a vector->integer cast.
64 if (auto *IT = dyn_cast<IntegerType>(DestTy)) {
65 auto *VTy = dyn_cast<VectorType>(C->getType());
66 if (!VTy)
67 return ConstantExpr::getBitCast(C, DestTy);
68
69 unsigned NumSrcElts = VTy->getNumElements();
70 Type *SrcEltTy = VTy->getElementType();
71
72     // If the vector is a vector of floating-point values, convert it to a
73     // vector of integers to simplify things.
74 if (SrcEltTy->isFloatingPointTy()) {
75 unsigned FPWidth = SrcEltTy->getPrimitiveSizeInBits();
76 Type *SrcIVTy =
77 VectorType::get(IntegerType::get(C->getContext(), FPWidth), NumSrcElts);
78 // Ask IR to do the conversion now that #elts line up.
79 C = ConstantExpr::getBitCast(C, SrcIVTy);
80 }
81
82 // Now that we know that the input value is a vector of integers, just shift
83 // and insert them into our result.
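    // For example (little-endian), bitcast <2 x i16> <i16 1, i16 2> to i32 folds
    // to i32 0x00020001; on a big-endian target the same cast folds to i32
    // 0x00010002.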
84 unsigned BitShift = DL.getTypeSizeInBits(SrcEltTy);
85 APInt Result(IT->getBitWidth(), 0);
86 for (unsigned i = 0; i != NumSrcElts; ++i) {
87 Constant *Element;
88 if (DL.isLittleEndian())
89 Element = C->getAggregateElement(NumSrcElts-i-1);
90 else
91 Element = C->getAggregateElement(i);
92
93 auto *ElementCI = dyn_cast_or_null<ConstantInt>(Element);
94 if (!ElementCI)
95 return ConstantExpr::getBitCast(C, DestTy);
96
97 Result <<= BitShift;
98 Result |= ElementCI->getValue().zextOrSelf(IT->getBitWidth());
99 }
100
101 return ConstantInt::get(IT, Result);
102 }
103
104 // The code below only handles casts to vectors currently.
105 auto *DestVTy = dyn_cast<VectorType>(DestTy);
106 if (!DestVTy)
107 return ConstantExpr::getBitCast(C, DestTy);
108
109 // If this is a scalar -> vector cast, convert the input into a <1 x scalar>
110 // vector so the code below can handle it uniformly.
111 if (isa<ConstantFP>(C) || isa<ConstantInt>(C)) {
112 Constant *Ops = C; // don't take the address of C!
113 return FoldBitCast(ConstantVector::get(Ops), DestTy, DL);
114 }
115
116 // If this is a bitcast from constant vector -> vector, fold it.
117 if (!isa<ConstantDataVector>(C) && !isa<ConstantVector>(C))
118 return ConstantExpr::getBitCast(C, DestTy);
119
120 // If the element types match, IR can fold it.
121 unsigned NumDstElt = DestVTy->getNumElements();
122 unsigned NumSrcElt = C->getType()->getVectorNumElements();
123 if (NumDstElt == NumSrcElt)
124 return ConstantExpr::getBitCast(C, DestTy);
125
126 Type *SrcEltTy = C->getType()->getVectorElementType();
127 Type *DstEltTy = DestVTy->getElementType();
128
129 // Otherwise, we're changing the number of elements in a vector, which
130 // requires endianness information to do the right thing. For example,
131 // bitcast (<2 x i64> <i64 0, i64 1> to <4 x i32>)
132 // folds to (little endian):
133 // <4 x i32> <i32 0, i32 0, i32 1, i32 0>
134 // and to (big endian):
135 // <4 x i32> <i32 0, i32 0, i32 0, i32 1>
136
137   // First things first. We only want to think about integers here, so if
138 // we have something in FP form, recast it as integer.
139 if (DstEltTy->isFloatingPointTy()) {
140     // Fold to a vector of integers with the same size as our FP type.
141 unsigned FPWidth = DstEltTy->getPrimitiveSizeInBits();
142 Type *DestIVTy =
143 VectorType::get(IntegerType::get(C->getContext(), FPWidth), NumDstElt);
144 // Recursively handle this integer conversion, if possible.
145 C = FoldBitCast(C, DestIVTy, DL);
146
147 // Finally, IR can handle this now that #elts line up.
148 return ConstantExpr::getBitCast(C, DestTy);
149 }
150
151 // Okay, we know the destination is integer, if the input is FP, convert
152 // it to integer first.
153 if (SrcEltTy->isFloatingPointTy()) {
154 unsigned FPWidth = SrcEltTy->getPrimitiveSizeInBits();
155 Type *SrcIVTy =
156 VectorType::get(IntegerType::get(C->getContext(), FPWidth), NumSrcElt);
157 // Ask IR to do the conversion now that #elts line up.
158 C = ConstantExpr::getBitCast(C, SrcIVTy);
159 // If IR wasn't able to fold it, bail out.
160 if (!isa<ConstantVector>(C) && // FIXME: Remove ConstantVector.
161 !isa<ConstantDataVector>(C))
162 return C;
163 }
164
165 // Now we know that the input and output vectors are both integer vectors
166 // of the same size, and that their #elements is not the same. Do the
167 // conversion here, which depends on whether the input or output has
168 // more elements.
169 bool isLittleEndian = DL.isLittleEndian();
170
171 SmallVector<Constant*, 32> Result;
172 if (NumDstElt < NumSrcElt) {
173 // Handle: bitcast (<4 x i32> <i32 0, i32 1, i32 2, i32 3> to <2 x i64>)
174 Constant *Zero = Constant::getNullValue(DstEltTy);
175 unsigned Ratio = NumSrcElt/NumDstElt;
176 unsigned SrcBitSize = SrcEltTy->getPrimitiveSizeInBits();
177 unsigned SrcElt = 0;
178 for (unsigned i = 0; i != NumDstElt; ++i) {
179 // Build each element of the result.
180 Constant *Elt = Zero;
181 unsigned ShiftAmt = isLittleEndian ? 0 : SrcBitSize*(Ratio-1);
182 for (unsigned j = 0; j != Ratio; ++j) {
183 Constant *Src = dyn_cast<ConstantInt>(C->getAggregateElement(SrcElt++));
184 if (!Src) // Reject constantexpr elements.
185 return ConstantExpr::getBitCast(C, DestTy);
186
187 // Zero extend the element to the right size.
188 Src = ConstantExpr::getZExt(Src, Elt->getType());
189
190 // Shift it to the right place, depending on endianness.
191 Src = ConstantExpr::getShl(Src,
192 ConstantInt::get(Src->getType(), ShiftAmt));
193 ShiftAmt += isLittleEndian ? SrcBitSize : -SrcBitSize;
194
195 // Mix it in.
196 Elt = ConstantExpr::getOr(Elt, Src);
197 }
198 Result.push_back(Elt);
199 }
200 return ConstantVector::get(Result);
201 }
202
203 // Handle: bitcast (<2 x i64> <i64 0, i64 1> to <4 x i32>)
204 unsigned Ratio = NumDstElt/NumSrcElt;
205 unsigned DstBitSize = DL.getTypeSizeInBits(DstEltTy);
206
207 // Loop over each source value, expanding into multiple results.
208 for (unsigned i = 0; i != NumSrcElt; ++i) {
209 auto *Src = dyn_cast<ConstantInt>(C->getAggregateElement(i));
210 if (!Src) // Reject constantexpr elements.
211 return ConstantExpr::getBitCast(C, DestTy);
212
213 unsigned ShiftAmt = isLittleEndian ? 0 : DstBitSize*(Ratio-1);
214 for (unsigned j = 0; j != Ratio; ++j) {
215 // Shift the piece of the value into the right place, depending on
216 // endianness.
217 Constant *Elt = ConstantExpr::getLShr(Src,
218 ConstantInt::get(Src->getType(), ShiftAmt));
219 ShiftAmt += isLittleEndian ? DstBitSize : -DstBitSize;
220
221 // Truncate the element to an integer with the same pointer size and
222       // convert the element back to a pointer using an inttoptr.
223 if (DstEltTy->isPointerTy()) {
224 IntegerType *DstIntTy = Type::getIntNTy(C->getContext(), DstBitSize);
225 Constant *CE = ConstantExpr::getTrunc(Elt, DstIntTy);
226 Result.push_back(ConstantExpr::getIntToPtr(CE, DstEltTy));
227 continue;
228 }
229
230 // Truncate and remember this piece.
231 Result.push_back(ConstantExpr::getTrunc(Elt, DstEltTy));
232 }
233 }
234
235 return ConstantVector::get(Result);
236 }
237
238 } // end anonymous namespace
239
240 /// If this constant is a constant offset from a global, return the global and
241 /// the constant. Because of constantexprs, this function is recursive.
242 bool llvm::IsConstantOffsetFromGlobal(Constant *C, GlobalValue *&GV,
243 APInt &Offset, const DataLayout &DL) {
244 // Trivial case, constant is the global.
245 if ((GV = dyn_cast<GlobalValue>(C))) {
246 unsigned BitWidth = DL.getPointerTypeSizeInBits(GV->getType());
247 Offset = APInt(BitWidth, 0);
248 return true;
249 }
250
251 // Otherwise, if this isn't a constant expr, bail out.
252 auto *CE = dyn_cast<ConstantExpr>(C);
253 if (!CE) return false;
254
255 // Look through ptr->int and ptr->ptr casts.
256 if (CE->getOpcode() == Instruction::PtrToInt ||
257 CE->getOpcode() == Instruction::BitCast)
258 return IsConstantOffsetFromGlobal(CE->getOperand(0), GV, Offset, DL);
259
260 // i32* getelementptr ([5 x i32]* @a, i32 0, i32 5)
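  // For the GEP in the comment above, for example, GV would be set to @a and
  // Offset to 20 (index 5 times 4 bytes per i32 element).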
261 auto *GEP = dyn_cast<GEPOperator>(CE);
262 if (!GEP)
263 return false;
264
265 unsigned BitWidth = DL.getPointerTypeSizeInBits(GEP->getType());
266 APInt TmpOffset(BitWidth, 0);
267
268 // If the base isn't a global+constant, we aren't either.
269 if (!IsConstantOffsetFromGlobal(CE->getOperand(0), GV, TmpOffset, DL))
270 return false;
271
272 // Otherwise, add any offset that our operands provide.
273 if (!GEP->accumulateConstantOffset(DL, TmpOffset))
274 return false;
275
276 Offset = TmpOffset;
277 return true;
278 }
279
280 namespace {
281
282 /// Recursive helper to read bits out of a global. C is the constant being copied
283 /// out of. ByteOffset is an offset into C. CurPtr is the pointer to copy
284 /// results into and BytesLeft is the number of bytes left in
285 /// the CurPtr buffer. DL is the DataLayout.
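/// For example, reading a little-endian i32 0x01020304 at ByteOffset 0 writes
/// the bytes 0x04, 0x03, 0x02, 0x01 into CurPtr.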
286 bool ReadDataFromGlobal(Constant *C, uint64_t ByteOffset, unsigned char *CurPtr,
287 unsigned BytesLeft, const DataLayout &DL) {
288 assert(ByteOffset <= DL.getTypeAllocSize(C->getType()) &&
289 "Out of range access");
290
291 // If this element is zero or undefined, we can just return since *CurPtr is
292 // zero initialized.
293 if (isa<ConstantAggregateZero>(C) || isa<UndefValue>(C))
294 return true;
295
296 if (auto *CI = dyn_cast<ConstantInt>(C)) {
297 if (CI->getBitWidth() > 64 ||
298 (CI->getBitWidth() & 7) != 0)
299 return false;
300
301 uint64_t Val = CI->getZExtValue();
302 unsigned IntBytes = unsigned(CI->getBitWidth()/8);
303
304 for (unsigned i = 0; i != BytesLeft && ByteOffset != IntBytes; ++i) {
305 int n = ByteOffset;
306 if (!DL.isLittleEndian())
307 n = IntBytes - n - 1;
308 CurPtr[i] = (unsigned char)(Val >> (n * 8));
309 ++ByteOffset;
310 }
311 return true;
312 }
313
314 if (auto *CFP = dyn_cast<ConstantFP>(C)) {
315 if (CFP->getType()->isDoubleTy()) {
316 C = FoldBitCast(C, Type::getInt64Ty(C->getContext()), DL);
317 return ReadDataFromGlobal(C, ByteOffset, CurPtr, BytesLeft, DL);
318 }
319 if (CFP->getType()->isFloatTy()){
320 C = FoldBitCast(C, Type::getInt32Ty(C->getContext()), DL);
321 return ReadDataFromGlobal(C, ByteOffset, CurPtr, BytesLeft, DL);
322 }
323 if (CFP->getType()->isHalfTy()){
324 C = FoldBitCast(C, Type::getInt16Ty(C->getContext()), DL);
325 return ReadDataFromGlobal(C, ByteOffset, CurPtr, BytesLeft, DL);
326 }
327 return false;
328 }
329
330 if (auto *CS = dyn_cast<ConstantStruct>(C)) {
331 const StructLayout *SL = DL.getStructLayout(CS->getType());
332 unsigned Index = SL->getElementContainingOffset(ByteOffset);
333 uint64_t CurEltOffset = SL->getElementOffset(Index);
334 ByteOffset -= CurEltOffset;
335
336 while (1) {
337 // If the element access is to the element itself and not to tail padding,
338 // read the bytes from the element.
339 uint64_t EltSize = DL.getTypeAllocSize(CS->getOperand(Index)->getType());
340
341 if (ByteOffset < EltSize &&
342 !ReadDataFromGlobal(CS->getOperand(Index), ByteOffset, CurPtr,
343 BytesLeft, DL))
344 return false;
345
346 ++Index;
347
348       // Check to see if we read from the last struct element; if so, we're done.
349 if (Index == CS->getType()->getNumElements())
350 return true;
351
352 // If we read all of the bytes we needed from this element we're done.
353 uint64_t NextEltOffset = SL->getElementOffset(Index);
354
355 if (BytesLeft <= NextEltOffset - CurEltOffset - ByteOffset)
356 return true;
357
358 // Move to the next element of the struct.
359 CurPtr += NextEltOffset - CurEltOffset - ByteOffset;
360 BytesLeft -= NextEltOffset - CurEltOffset - ByteOffset;
361 ByteOffset = 0;
362 CurEltOffset = NextEltOffset;
363 }
364 // not reached.
365 }
366
367 if (isa<ConstantArray>(C) || isa<ConstantVector>(C) ||
368 isa<ConstantDataSequential>(C)) {
369 Type *EltTy = C->getType()->getSequentialElementType();
370 uint64_t EltSize = DL.getTypeAllocSize(EltTy);
371 uint64_t Index = ByteOffset / EltSize;
372 uint64_t Offset = ByteOffset - Index * EltSize;
373 uint64_t NumElts;
374 if (auto *AT = dyn_cast<ArrayType>(C->getType()))
375 NumElts = AT->getNumElements();
376 else
377 NumElts = C->getType()->getVectorNumElements();
378
379 for (; Index != NumElts; ++Index) {
380 if (!ReadDataFromGlobal(C->getAggregateElement(Index), Offset, CurPtr,
381 BytesLeft, DL))
382 return false;
383
384 uint64_t BytesWritten = EltSize - Offset;
385 assert(BytesWritten <= EltSize && "Not indexing into this element?");
386 if (BytesWritten >= BytesLeft)
387 return true;
388
389 Offset = 0;
390 BytesLeft -= BytesWritten;
391 CurPtr += BytesWritten;
392 }
393 return true;
394 }
395
396 if (auto *CE = dyn_cast<ConstantExpr>(C)) {
397 if (CE->getOpcode() == Instruction::IntToPtr &&
398 CE->getOperand(0)->getType() == DL.getIntPtrType(CE->getType())) {
399 return ReadDataFromGlobal(CE->getOperand(0), ByteOffset, CurPtr,
400 BytesLeft, DL);
401 }
402 }
403
404 // Otherwise, unknown initializer type.
405 return false;
406 }
407
408 Constant *FoldReinterpretLoadFromConstPtr(Constant *C, Type *LoadTy,
409 const DataLayout &DL) {
410 auto *PTy = cast<PointerType>(C->getType());
411 auto *IntType = dyn_cast<IntegerType>(LoadTy);
412
413 // If this isn't an integer load we can't fold it directly.
414 if (!IntType) {
415 unsigned AS = PTy->getAddressSpace();
416
417 // If this is a float/double load, we can try folding it as an int32/64 load
418 // and then bitcast the result. This can be useful for union cases. Note
419     // that address spaces don't matter here since we are not actually creating
420     // a new load.
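    // For example, a float load from a global whose initializer is i32
    // 1065353216 (0x3F800000) can be folded to float 1.000000e+00 by loading it
    // as an i32 and bitcasting the result.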
421 Type *MapTy;
422 if (LoadTy->isHalfTy())
423 MapTy = Type::getInt16Ty(C->getContext());
424 else if (LoadTy->isFloatTy())
425 MapTy = Type::getInt32Ty(C->getContext());
426 else if (LoadTy->isDoubleTy())
427 MapTy = Type::getInt64Ty(C->getContext());
428 else if (LoadTy->isVectorTy()) {
429 MapTy = PointerType::getIntNTy(C->getContext(),
430 DL.getTypeAllocSizeInBits(LoadTy));
431 } else
432 return nullptr;
433
434 C = FoldBitCast(C, MapTy->getPointerTo(AS), DL);
435 if (Constant *Res = FoldReinterpretLoadFromConstPtr(C, MapTy, DL))
436 return FoldBitCast(Res, LoadTy, DL);
437 return nullptr;
438 }
439
440 unsigned BytesLoaded = (IntType->getBitWidth() + 7) / 8;
441 if (BytesLoaded > 32 || BytesLoaded == 0)
442 return nullptr;
443
444 GlobalValue *GVal;
445 APInt OffsetAI;
446 if (!IsConstantOffsetFromGlobal(C, GVal, OffsetAI, DL))
447 return nullptr;
448
449 auto *GV = dyn_cast<GlobalVariable>(GVal);
450 if (!GV || !GV->isConstant() || !GV->hasDefinitiveInitializer() ||
451 !GV->getInitializer()->getType()->isSized())
452 return nullptr;
453
454 int64_t Offset = OffsetAI.getSExtValue();
455 int64_t InitializerSize = DL.getTypeAllocSize(GV->getInitializer()->getType());
456
457   // If we're loading entirely before the start of the constant, the result is undefined.
458 if (Offset + BytesLoaded <= 0)
459 return UndefValue::get(IntType);
460
461   // If we're loading entirely past the end of the constant, the result is undefined.
462 if (Offset >= InitializerSize)
463 return UndefValue::get(IntType);
464
465 unsigned char RawBytes[32] = {0};
466 unsigned char *CurPtr = RawBytes;
467 unsigned BytesLeft = BytesLoaded;
468
469 // If we're loading off the beginning of the global, some bytes may be valid.
470 if (Offset < 0) {
471 CurPtr += -Offset;
472 BytesLeft += Offset;
473 Offset = 0;
474 }
475
476 if (!ReadDataFromGlobal(GV->getInitializer(), Offset, CurPtr, BytesLeft, DL))
477 return nullptr;
478
479 APInt ResultVal = APInt(IntType->getBitWidth(), 0);
480 if (DL.isLittleEndian()) {
481 ResultVal = RawBytes[BytesLoaded - 1];
482 for (unsigned i = 1; i != BytesLoaded; ++i) {
483 ResultVal <<= 8;
484 ResultVal |= RawBytes[BytesLoaded - 1 - i];
485 }
486 } else {
487 ResultVal = RawBytes[0];
488 for (unsigned i = 1; i != BytesLoaded; ++i) {
489 ResultVal <<= 8;
490 ResultVal |= RawBytes[i];
491 }
492 }
493
494 return ConstantInt::get(IntType->getContext(), ResultVal);
495 }
496
497 Constant *ConstantFoldLoadThroughBitcast(ConstantExpr *CE, Type *DestTy,
498 const DataLayout &DL) {
499 auto *SrcPtr = CE->getOperand(0);
500 auto *SrcPtrTy = dyn_cast<PointerType>(SrcPtr->getType());
501 if (!SrcPtrTy)
502 return nullptr;
503 Type *SrcTy = SrcPtrTy->getPointerElementType();
504
505 Constant *C = ConstantFoldLoadFromConstPtr(SrcPtr, SrcTy, DL);
506 if (!C)
507 return nullptr;
508
509 do {
510 Type *SrcTy = C->getType();
511
512 // If the type sizes are the same and a cast is legal, just directly
513 // cast the constant.
514 if (DL.getTypeSizeInBits(DestTy) == DL.getTypeSizeInBits(SrcTy)) {
515 Instruction::CastOps Cast = Instruction::BitCast;
516 // If we are going from a pointer to int or vice versa, we spell the cast
517 // differently.
518 if (SrcTy->isIntegerTy() && DestTy->isPointerTy())
519 Cast = Instruction::IntToPtr;
520 else if (SrcTy->isPointerTy() && DestTy->isIntegerTy())
521 Cast = Instruction::PtrToInt;
522
523 if (CastInst::castIsValid(Cast, C, DestTy))
524 return ConstantExpr::getCast(Cast, C, DestTy);
525 }
526
527 // If this isn't an aggregate type, there is nothing we can do to drill down
528 // and find a bitcastable constant.
529 if (!SrcTy->isAggregateType())
530 return nullptr;
531
532 // We're simulating a load through a pointer that was bitcast to point to
533 // a different type, so we can try to walk down through the initial
534     // elements of an aggregate to see if some part of the aggregate is
535 // castable to implement the "load" semantic model.
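    // For example, an i32 load through a bitcast of a pointer to a constant of
    // type { i32, i32 } can be folded to the struct's first element.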
536 C = C->getAggregateElement(0u);
537 } while (C);
538
539 return nullptr;
540 }
541
542 } // end anonymous namespace
543
544 Constant *llvm::ConstantFoldLoadFromConstPtr(Constant *C, Type *Ty,
545 const DataLayout &DL) {
546 // First, try the easy cases:
547 if (auto *GV = dyn_cast<GlobalVariable>(C))
548 if (GV->isConstant() && GV->hasDefinitiveInitializer())
549 return GV->getInitializer();
550
551 if (auto *GA = dyn_cast<GlobalAlias>(C))
552 if (GA->getAliasee() && !GA->isInterposable())
553 return ConstantFoldLoadFromConstPtr(GA->getAliasee(), Ty, DL);
554
555 // If the loaded value isn't a constant expr, we can't handle it.
556 auto *CE = dyn_cast<ConstantExpr>(C);
557 if (!CE)
558 return nullptr;
559
560 if (CE->getOpcode() == Instruction::GetElementPtr) {
561 if (auto *GV = dyn_cast<GlobalVariable>(CE->getOperand(0))) {
562 if (GV->isConstant() && GV->hasDefinitiveInitializer()) {
563 if (Constant *V =
564 ConstantFoldLoadThroughGEPConstantExpr(GV->getInitializer(), CE))
565 return V;
566 }
567 }
568 }
569
570 if (CE->getOpcode() == Instruction::BitCast)
571 if (Constant *LoadedC = ConstantFoldLoadThroughBitcast(CE, Ty, DL))
572 return LoadedC;
573
574   // Instead of loading a constant C string, use the corresponding integer value
575   // directly if the string length is small enough.
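  // For example, on a little-endian target, an i32 load from a constant global
  // holding c"abc\00" folds to i32 0x00636261 ('a' in the low byte).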
576 StringRef Str;
577 if (getConstantStringInfo(CE, Str) && !Str.empty()) {
578 size_t StrLen = Str.size();
579 unsigned NumBits = Ty->getPrimitiveSizeInBits();
580 // Replace load with immediate integer if the result is an integer or fp
581 // value.
582 if ((NumBits >> 3) == StrLen + 1 && (NumBits & 7) == 0 &&
583 (isa<IntegerType>(Ty) || Ty->isFloatingPointTy())) {
584 APInt StrVal(NumBits, 0);
585 APInt SingleChar(NumBits, 0);
586 if (DL.isLittleEndian()) {
587 for (unsigned char C : reverse(Str.bytes())) {
588 SingleChar = static_cast<uint64_t>(C);
589 StrVal = (StrVal << 8) | SingleChar;
590 }
591 } else {
592 for (unsigned char C : Str.bytes()) {
593 SingleChar = static_cast<uint64_t>(C);
594 StrVal = (StrVal << 8) | SingleChar;
595 }
596 // Append NULL at the end.
597 SingleChar = 0;
598 StrVal = (StrVal << 8) | SingleChar;
599 }
600
601 Constant *Res = ConstantInt::get(CE->getContext(), StrVal);
602 if (Ty->isFloatingPointTy())
603 Res = ConstantExpr::getBitCast(Res, Ty);
604 return Res;
605 }
606 }
607
608 // If this load comes from anywhere in a constant global, and if the global
609 // is all undef or zero, we know what it loads.
610 if (auto *GV = dyn_cast<GlobalVariable>(GetUnderlyingObject(CE, DL))) {
611 if (GV->isConstant() && GV->hasDefinitiveInitializer()) {
612 if (GV->getInitializer()->isNullValue())
613 return Constant::getNullValue(Ty);
614 if (isa<UndefValue>(GV->getInitializer()))
615 return UndefValue::get(Ty);
616 }
617 }
618
619 // Try hard to fold loads from bitcasted strange and non-type-safe things.
620 return FoldReinterpretLoadFromConstPtr(CE, Ty, DL);
621 }
622
623 namespace {
624
625 Constant *ConstantFoldLoadInst(const LoadInst *LI, const DataLayout &DL) {
626 if (LI->isVolatile()) return nullptr;
627
628 if (auto *C = dyn_cast<Constant>(LI->getOperand(0)))
629 return ConstantFoldLoadFromConstPtr(C, LI->getType(), DL);
630
631 return nullptr;
632 }
633
634 /// One of Op0/Op1 is a constant expression.
635 /// Attempt to symbolically evaluate the result of a binary operator merging
636 /// these together. If target data info is available, it is provided as DL,
637 /// otherwise DL is null.
638 Constant *SymbolicallyEvaluateBinop(unsigned Opc, Constant *Op0, Constant *Op1,
639 const DataLayout &DL) {
640 // SROA
641
642 // Fold (and 0xffffffff00000000, (shl x, 32)) -> shl.
643 // Fold (lshr (or X, Y), 32) -> (lshr [X/Y], 32) if one doesn't contribute
644 // bits.
645
646 if (Opc == Instruction::And) {
647 unsigned BitWidth = DL.getTypeSizeInBits(Op0->getType()->getScalarType());
648 APInt KnownZero0(BitWidth, 0), KnownOne0(BitWidth, 0);
649 APInt KnownZero1(BitWidth, 0), KnownOne1(BitWidth, 0);
650 computeKnownBits(Op0, KnownZero0, KnownOne0, DL);
651 computeKnownBits(Op1, KnownZero1, KnownOne1, DL);
652 if ((KnownOne1 | KnownZero0).isAllOnesValue()) {
653 // All the bits of Op0 that the 'and' could be masking are already zero.
654 return Op0;
655 }
656 if ((KnownOne0 | KnownZero1).isAllOnesValue()) {
657 // All the bits of Op1 that the 'and' could be masking are already zero.
658 return Op1;
659 }
660
661 APInt KnownZero = KnownZero0 | KnownZero1;
662 APInt KnownOne = KnownOne0 & KnownOne1;
663 if ((KnownZero | KnownOne).isAllOnesValue()) {
664 return ConstantInt::get(Op0->getType(), KnownOne);
665 }
666 }
667
668 // If the constant expr is something like &A[123] - &A[4].f, fold this into a
669 // constant. This happens frequently when iterating over a global array.
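  // For example, &A[3] - &A[1] over an array of i32 folds to the constant 8.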
670 if (Opc == Instruction::Sub) {
671 GlobalValue *GV1, *GV2;
672 APInt Offs1, Offs2;
673
674 if (IsConstantOffsetFromGlobal(Op0, GV1, Offs1, DL))
675 if (IsConstantOffsetFromGlobal(Op1, GV2, Offs2, DL) && GV1 == GV2) {
676 unsigned OpSize = DL.getTypeSizeInBits(Op0->getType());
677
678 // (&GV+C1) - (&GV+C2) -> C1-C2, pointer arithmetic cannot overflow.
679         // PtrToInt may change the bit width, so we have to convert to the right
680         // size first.
681 return ConstantInt::get(Op0->getType(), Offs1.zextOrTrunc(OpSize) -
682 Offs2.zextOrTrunc(OpSize));
683 }
684 }
685
686 return nullptr;
687 }
688
689 /// If array indices are not pointer-sized integers, explicitly cast them so
690 /// that they aren't implicitly casted by the getelementptr.
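/// For example, on a target with 64-bit pointers, an i32 array index is
/// sign-extended to i64 here so that the getelementptr does not do it implicitly.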
691 Constant *CastGEPIndices(Type *SrcElemTy, ArrayRef<Constant *> Ops,
692 Type *ResultTy, const DataLayout &DL,
693 const TargetLibraryInfo *TLI) {
694 Type *IntPtrTy = DL.getIntPtrType(ResultTy);
695
696 bool Any = false;
697 SmallVector<Constant*, 32> NewIdxs;
698 for (unsigned i = 1, e = Ops.size(); i != e; ++i) {
699 if ((i == 1 ||
700 !isa<StructType>(GetElementPtrInst::getIndexedType(SrcElemTy,
701 Ops.slice(1, i - 1)))) &&
702 Ops[i]->getType() != IntPtrTy) {
703 Any = true;
704 NewIdxs.push_back(ConstantExpr::getCast(CastInst::getCastOpcode(Ops[i],
705 true,
706 IntPtrTy,
707 true),
708 Ops[i], IntPtrTy));
709 } else
710 NewIdxs.push_back(Ops[i]);
711 }
712
713 if (!Any)
714 return nullptr;
715
716 Constant *C = ConstantExpr::getGetElementPtr(SrcElemTy, Ops[0], NewIdxs);
717 if (auto *CE = dyn_cast<ConstantExpr>(C)) {
718 if (Constant *Folded = ConstantFoldConstantExpression(CE, DL, TLI))
719 C = Folded;
720 }
721
722 return C;
723 }
724
725 /// Strip the pointer casts, but preserve the address space information.
726 Constant* StripPtrCastKeepAS(Constant* Ptr, Type *&ElemTy) {
727 assert(Ptr->getType()->isPointerTy() && "Not a pointer type");
728 auto *OldPtrTy = cast<PointerType>(Ptr->getType());
729 Ptr = Ptr->stripPointerCasts();
730 auto *NewPtrTy = cast<PointerType>(Ptr->getType());
731
732 ElemTy = NewPtrTy->getPointerElementType();
733
734 // Preserve the address space number of the pointer.
735 if (NewPtrTy->getAddressSpace() != OldPtrTy->getAddressSpace()) {
736 NewPtrTy = ElemTy->getPointerTo(OldPtrTy->getAddressSpace());
737 Ptr = ConstantExpr::getPointerCast(Ptr, NewPtrTy);
738 }
739 return Ptr;
740 }
741
742 /// If we can symbolically evaluate the GEP constant expression, do so.
743 Constant *SymbolicallyEvaluateGEP(const GEPOperator *GEP,
744 ArrayRef<Constant *> Ops,
745 const DataLayout &DL,
746 const TargetLibraryInfo *TLI) {
747 Type *SrcElemTy = GEP->getSourceElementType();
748 Type *ResElemTy = GEP->getResultElementType();
749 Type *ResTy = GEP->getType();
750 if (!SrcElemTy->isSized())
751 return nullptr;
752
753 if (Constant *C = CastGEPIndices(SrcElemTy, Ops, ResTy, DL, TLI))
754 return C;
755
756 Constant *Ptr = Ops[0];
757 if (!Ptr->getType()->isPointerTy())
758 return nullptr;
759
760 Type *IntPtrTy = DL.getIntPtrType(Ptr->getType());
761
762 // If this is a constant expr gep that is effectively computing an
763 // "offsetof", fold it into 'cast int Size to T*' instead of 'gep 0, 0, 12'
764 for (unsigned i = 1, e = Ops.size(); i != e; ++i)
765 if (!isa<ConstantInt>(Ops[i])) {
766
767 // If this is "gep i8* Ptr, (sub 0, V)", fold this as:
768 // "inttoptr (sub (ptrtoint Ptr), V)"
769 if (Ops.size() == 2 && ResElemTy->isIntegerTy(8)) {
770 auto *CE = dyn_cast<ConstantExpr>(Ops[1]);
771 assert((!CE || CE->getType() == IntPtrTy) &&
772 "CastGEPIndices didn't canonicalize index types!");
773 if (CE && CE->getOpcode() == Instruction::Sub &&
774 CE->getOperand(0)->isNullValue()) {
775 Constant *Res = ConstantExpr::getPtrToInt(Ptr, CE->getType());
776 Res = ConstantExpr::getSub(Res, CE->getOperand(1));
777 Res = ConstantExpr::getIntToPtr(Res, ResTy);
778 if (auto *ResCE = dyn_cast<ConstantExpr>(Res))
779 Res = ConstantFoldConstantExpression(ResCE, DL, TLI);
780 return Res;
781 }
782 }
783 return nullptr;
784 }
785
786 unsigned BitWidth = DL.getTypeSizeInBits(IntPtrTy);
787 APInt Offset =
788 APInt(BitWidth,
789 DL.getIndexedOffsetInType(
790 SrcElemTy,
791 makeArrayRef((Value * const *)Ops.data() + 1, Ops.size() - 1)));
792 Ptr = StripPtrCastKeepAS(Ptr, SrcElemTy);
793
794 // If this is a GEP of a GEP, fold it all into a single GEP.
795 while (auto *GEP = dyn_cast<GEPOperator>(Ptr)) {
796 SmallVector<Value *, 4> NestedOps(GEP->op_begin() + 1, GEP->op_end());
797
798     // Do not try to incorporate the sub-GEP if some index is not a number.
799 bool AllConstantInt = true;
800 for (Value *NestedOp : NestedOps)
801 if (!isa<ConstantInt>(NestedOp)) {
802 AllConstantInt = false;
803 break;
804 }
805 if (!AllConstantInt)
806 break;
807
808 Ptr = cast<Constant>(GEP->getOperand(0));
809 SrcElemTy = GEP->getSourceElementType();
810 Offset += APInt(BitWidth, DL.getIndexedOffsetInType(SrcElemTy, NestedOps));
811 Ptr = StripPtrCastKeepAS(Ptr, SrcElemTy);
812 }
813
814 // If the base value for this address is a literal integer value, fold the
815   // getelementptr to the resulting integer value cast to the pointer type.
816 APInt BasePtr(BitWidth, 0);
817 if (auto *CE = dyn_cast<ConstantExpr>(Ptr)) {
818 if (CE->getOpcode() == Instruction::IntToPtr) {
819 if (auto *Base = dyn_cast<ConstantInt>(CE->getOperand(0)))
820 BasePtr = Base->getValue().zextOrTrunc(BitWidth);
821 }
822 }
823
824 if (Ptr->isNullValue() || BasePtr != 0) {
825 Constant *C = ConstantInt::get(Ptr->getContext(), Offset + BasePtr);
826 return ConstantExpr::getIntToPtr(C, ResTy);
827 }
828
829 // Otherwise form a regular getelementptr. Recompute the indices so that
830 // we eliminate over-indexing of the notional static type array bounds.
831 // This makes it easy to determine if the getelementptr is "inbounds".
832 // Also, this helps GlobalOpt do SROA on GlobalVariables.
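  // For example, gep [10 x i32], [10 x i32]* @A, i64 0, i64 12 (48 bytes in) is
  // re-formed here as gep [10 x i32], [10 x i32]* @A, i64 1, i64 2.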
833 Type *Ty = Ptr->getType();
834 assert(Ty->isPointerTy() && "Forming regular GEP of non-pointer type");
835 SmallVector<Constant *, 32> NewIdxs;
836
837 do {
838 if (!Ty->isStructTy()) {
839 if (Ty->isPointerTy()) {
840 // The only pointer indexing we'll do is on the first index of the GEP.
841 if (!NewIdxs.empty())
842 break;
843
844 Ty = SrcElemTy;
845
846 // Only handle pointers to sized types, not pointers to functions.
847 if (!Ty->isSized())
848 return nullptr;
849 } else if (auto *ATy = dyn_cast<SequentialType>(Ty)) {
850 Ty = ATy->getElementType();
851 } else {
852 // We've reached some non-indexable type.
853 break;
854 }
855
856 // Determine which element of the array the offset points into.
857 APInt ElemSize(BitWidth, DL.getTypeAllocSize(Ty));
858 if (ElemSize == 0) {
859 // The element size is 0. This may be [0 x Ty]*, so just use a zero
860 // index for this level and proceed to the next level to see if it can
861 // accommodate the offset.
862 NewIdxs.push_back(ConstantInt::get(IntPtrTy, 0));
863 } else {
864         // The element size is non-zero; divide the offset by the element
865         // size (rounding down) to compute the index at this level.
866 bool Overflow;
867 APInt NewIdx = Offset.sdiv_ov(ElemSize, Overflow);
868 if (Overflow)
869 break;
870 Offset -= NewIdx * ElemSize;
871 NewIdxs.push_back(ConstantInt::get(IntPtrTy, NewIdx));
872 }
873 } else {
874 auto *STy = cast<StructType>(Ty);
875 // If we end up with an offset that isn't valid for this struct type, we
876 // can't re-form this GEP in a regular form, so bail out. The pointer
877 // operand likely went through casts that are necessary to make the GEP
878 // sensible.
879 const StructLayout &SL = *DL.getStructLayout(STy);
880 if (Offset.isNegative() || Offset.uge(SL.getSizeInBytes()))
881 break;
882
883 // Determine which field of the struct the offset points into. The
884 // getZExtValue is fine as we've already ensured that the offset is
885 // within the range representable by the StructLayout API.
886 unsigned ElIdx = SL.getElementContainingOffset(Offset.getZExtValue());
887 NewIdxs.push_back(ConstantInt::get(Type::getInt32Ty(Ty->getContext()),
888 ElIdx));
889 Offset -= APInt(BitWidth, SL.getElementOffset(ElIdx));
890 Ty = STy->getTypeAtIndex(ElIdx);
891 }
892 } while (Ty != ResElemTy);
893
894 // If we haven't used up the entire offset by descending the static
895 // type, then the offset is pointing into the middle of an indivisible
896 // member, so we can't simplify it.
897 if (Offset != 0)
898 return nullptr;
899
900 // Create a GEP.
901 Constant *C = ConstantExpr::getGetElementPtr(SrcElemTy, Ptr, NewIdxs);
902 assert(C->getType()->getPointerElementType() == Ty &&
903 "Computed GetElementPtr has unexpected type!");
904
905 // If we ended up indexing a member with a type that doesn't match
906 // the type of what the original indices indexed, add a cast.
907 if (Ty != ResElemTy)
908 C = FoldBitCast(C, ResTy, DL);
909
910 return C;
911 }
912
913 /// Attempt to constant fold an instruction with the
914 /// specified opcode and operands. If successful, the constant result is
915 /// returned, if not, null is returned. Note that this function can fail when
916 /// attempting to fold instructions like loads and stores, which have no
917 /// constant expression form.
918 ///
919 /// TODO: This function neither utilizes nor preserves nsw/nuw/inbounds/etc
920 /// information, due to only being passed an opcode and operands. Constant
921 /// folding using this function strips this information.
922 ///
923 Constant *ConstantFoldInstOperandsImpl(const Value *InstOrCE, Type *DestTy,
924 unsigned Opcode,
925 ArrayRef<Constant *> Ops,
926 const DataLayout &DL,
927 const TargetLibraryInfo *TLI) {
928 // Handle easy binops first.
929 if (Instruction::isBinaryOp(Opcode))
930 return ConstantFoldBinaryOpOperands(Opcode, Ops[0], Ops[1], DL);
931
932 if (Instruction::isCast(Opcode))
933 return ConstantFoldCastOperand(Opcode, Ops[0], DestTy, DL);
934
935 if (auto *GEP = dyn_cast<GEPOperator>(InstOrCE)) {
936 if (Constant *C = SymbolicallyEvaluateGEP(GEP, Ops, DL, TLI))
937 return C;
938
939 return ConstantExpr::getGetElementPtr(GEP->getSourceElementType(),
940 Ops[0], Ops.slice(1));
941 }
942
943 switch (Opcode) {
944 default: return nullptr;
945 case Instruction::ICmp:
946 case Instruction::FCmp: llvm_unreachable("Invalid for compares");
947 case Instruction::Call:
948 if (auto *F = dyn_cast<Function>(Ops.back()))
949 if (canConstantFoldCallTo(F))
950 return ConstantFoldCall(F, Ops.slice(0, Ops.size() - 1), TLI);
951 return nullptr;
952 case Instruction::Select:
953 return ConstantExpr::getSelect(Ops[0], Ops[1], Ops[2]);
954 case Instruction::ExtractElement:
955 return ConstantExpr::getExtractElement(Ops[0], Ops[1]);
956 case Instruction::InsertElement:
957 return ConstantExpr::getInsertElement(Ops[0], Ops[1], Ops[2]);
958 case Instruction::ShuffleVector:
959 return ConstantExpr::getShuffleVector(Ops[0], Ops[1], Ops[2]);
960 }
961 }
962
963 } // end anonymous namespace
964
965 //===----------------------------------------------------------------------===//
966 // Constant Folding public APIs
967 //===----------------------------------------------------------------------===//
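//
// A typical usage pattern (a minimal sketch): attempt to fold an instruction
// and, if a constant comes back, replace the instruction's uses with it.
//   if (Constant *C = ConstantFoldInstruction(I, DL, TLI))
//     I->replaceAllUsesWith(C);
//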
968
969 Constant *llvm::ConstantFoldInstruction(Instruction *I, const DataLayout &DL,
970 const TargetLibraryInfo *TLI) {
971 // Handle PHI nodes quickly here...
972 if (auto *PN = dyn_cast<PHINode>(I)) {
973 Constant *CommonValue = nullptr;
974
975 for (Value *Incoming : PN->incoming_values()) {
976 // If the incoming value is undef then skip it. Note that while we could
977 // skip the value if it is equal to the phi node itself we choose not to
978 // because that would break the rule that constant folding only applies if
979 // all operands are constants.
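      // For example, a PHI whose incoming values are {undef, i32 7, i32 7}
      // folds to i32 7, while one with incoming values {i32 7, i32 8} does not.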
980 if (isa<UndefValue>(Incoming))
981 continue;
982 // If the incoming value is not a constant, then give up.
983 auto *C = dyn_cast<Constant>(Incoming);
984 if (!C)
985 return nullptr;
986 // Fold the PHI's operands.
987 if (auto *NewC = dyn_cast<ConstantExpr>(C))
988 C = ConstantFoldConstantExpression(NewC, DL, TLI);
989 // If the incoming value is a different constant to
990 // the one we saw previously, then give up.
991 if (CommonValue && C != CommonValue)
992 return nullptr;
993 CommonValue = C;
994 }
995
996
997 // If we reach here, all incoming values are the same constant or undef.
998 return CommonValue ? CommonValue : UndefValue::get(PN->getType());
999 }
1000
1001 // Scan the operand list, checking to see if they are all constants, if so,
1002 // hand off to ConstantFoldInstOperandsImpl.
1003 if (!all_of(I->operands(), [](Use &U) { return isa<Constant>(U); }))
1004 return nullptr;
1005
1006 SmallVector<Constant *, 8> Ops;
1007 for (const Use &OpU : I->operands()) {
1008 auto *Op = cast<Constant>(&OpU);
1009 // Fold the Instruction's operands.
1010 if (auto *NewCE = dyn_cast<ConstantExpr>(Op))
1011 Op = ConstantFoldConstantExpression(NewCE, DL, TLI);
1012
1013 Ops.push_back(Op);
1014 }
1015
1016 if (const auto *CI = dyn_cast<CmpInst>(I))
1017 return ConstantFoldCompareInstOperands(CI->getPredicate(), Ops[0], Ops[1],
1018 DL, TLI);
1019
1020 if (const auto *LI = dyn_cast<LoadInst>(I))
1021 return ConstantFoldLoadInst(LI, DL);
1022
1023 if (auto *IVI = dyn_cast<InsertValueInst>(I)) {
1024 return ConstantExpr::getInsertValue(
1025 cast<Constant>(IVI->getAggregateOperand()),
1026 cast<Constant>(IVI->getInsertedValueOperand()),
1027 IVI->getIndices());
1028 }
1029
1030 if (auto *EVI = dyn_cast<ExtractValueInst>(I)) {
1031 return ConstantExpr::getExtractValue(
1032 cast<Constant>(EVI->getAggregateOperand()),
1033 EVI->getIndices());
1034 }
1035
1036 return ConstantFoldInstOperands(I, Ops, DL, TLI);
1037 }
1038
1039 namespace {
1040
1041 Constant *
1042 ConstantFoldConstantExpressionImpl(const ConstantExpr *CE, const DataLayout &DL,
1043 const TargetLibraryInfo *TLI,
1044 SmallPtrSetImpl<ConstantExpr *> &FoldedOps) {
1045 SmallVector<Constant *, 8> Ops;
1046 for (const Use &NewU : CE->operands()) {
1047 auto *NewC = cast<Constant>(&NewU);
1048 // Recursively fold the ConstantExpr's operands. If we have already folded
1049 // a ConstantExpr, we don't have to process it again.
1050 if (auto *NewCE = dyn_cast<ConstantExpr>(NewC)) {
1051 if (FoldedOps.insert(NewCE).second)
1052 NewC = ConstantFoldConstantExpressionImpl(NewCE, DL, TLI, FoldedOps);
1053 }
1054 Ops.push_back(NewC);
1055 }
1056
1057 if (CE->isCompare())
1058 return ConstantFoldCompareInstOperands(CE->getPredicate(), Ops[0], Ops[1],
1059 DL, TLI);
1060
1061 return ConstantFoldInstOperandsImpl(CE, CE->getType(), CE->getOpcode(), Ops,
1062 DL, TLI);
1063 }
1064
1065 } // end anonymous namespace
1066
1067 Constant *llvm::ConstantFoldConstantExpression(const ConstantExpr *CE,
1068 const DataLayout &DL,
1069 const TargetLibraryInfo *TLI) {
1070 SmallPtrSet<ConstantExpr *, 4> FoldedOps;
1071 return ConstantFoldConstantExpressionImpl(CE, DL, TLI, FoldedOps);
1072 }
1073
1074 Constant *llvm::ConstantFoldInstOperands(Instruction *I,
1075 ArrayRef<Constant *> Ops,
1076 const DataLayout &DL,
1077 const TargetLibraryInfo *TLI) {
1078 return ConstantFoldInstOperandsImpl(I, I->getType(), I->getOpcode(), Ops, DL,
1079 TLI);
1080 }
1081
1082 Constant *llvm::ConstantFoldInstOperands(unsigned Opcode, Type *DestTy,
1083 ArrayRef<Constant *> Ops,
1084 const DataLayout &DL,
1085 const TargetLibraryInfo *TLI) {
1086 assert(Opcode != Instruction::GetElementPtr && "Invalid for GEPs");
1087 return ConstantFoldInstOperandsImpl(nullptr, DestTy, Opcode, Ops, DL, TLI);
1088 }
1089
1090 Constant *llvm::ConstantFoldCompareInstOperands(unsigned Predicate,
1091 Constant *Ops0, Constant *Ops1,
1092 const DataLayout &DL,
1093 const TargetLibraryInfo *TLI) {
1094 // fold: icmp (inttoptr x), null -> icmp x, 0
1095 // fold: icmp (ptrtoint x), 0 -> icmp x, null
1096 // fold: icmp (inttoptr x), (inttoptr y) -> icmp trunc/zext x, trunc/zext y
1097 // fold: icmp (ptrtoint x), (ptrtoint y) -> icmp x, y
1098 //
1099   // FIXME: The following comment is out of date and the DataLayout is here now.
1100 // ConstantExpr::getCompare cannot do this, because it doesn't have DL
1101 // around to know if bit truncation is happening.
1102 if (auto *CE0 = dyn_cast<ConstantExpr>(Ops0)) {
1103 if (Ops1->isNullValue()) {
1104 if (CE0->getOpcode() == Instruction::IntToPtr) {
1105 Type *IntPtrTy = DL.getIntPtrType(CE0->getType());
1106 // Convert the integer value to the right size to ensure we get the
1107 // proper extension or truncation.
1108 Constant *C = ConstantExpr::getIntegerCast(CE0->getOperand(0),
1109 IntPtrTy, false);
1110 Constant *Null = Constant::getNullValue(C->getType());
1111 return ConstantFoldCompareInstOperands(Predicate, C, Null, DL, TLI);
1112 }
1113
1114       // Only do this transformation if the int is pointer-sized; otherwise
1115       // there is a truncation or extension that we aren't modeling.
1116 if (CE0->getOpcode() == Instruction::PtrToInt) {
1117 Type *IntPtrTy = DL.getIntPtrType(CE0->getOperand(0)->getType());
1118 if (CE0->getType() == IntPtrTy) {
1119 Constant *C = CE0->getOperand(0);
1120 Constant *Null = Constant::getNullValue(C->getType());
1121 return ConstantFoldCompareInstOperands(Predicate, C, Null, DL, TLI);
1122 }
1123 }
1124 }
1125
1126 if (auto *CE1 = dyn_cast<ConstantExpr>(Ops1)) {
1127 if (CE0->getOpcode() == CE1->getOpcode()) {
1128 if (CE0->getOpcode() == Instruction::IntToPtr) {
1129 Type *IntPtrTy = DL.getIntPtrType(CE0->getType());
1130
1131 // Convert the integer value to the right size to ensure we get the
1132 // proper extension or truncation.
1133 Constant *C0 = ConstantExpr::getIntegerCast(CE0->getOperand(0),
1134 IntPtrTy, false);
1135 Constant *C1 = ConstantExpr::getIntegerCast(CE1->getOperand(0),
1136 IntPtrTy, false);
1137 return ConstantFoldCompareInstOperands(Predicate, C0, C1, DL, TLI);
1138 }
1139
1140         // Only do this transformation if the int is pointer-sized; otherwise
1141         // there is a truncation or extension that we aren't modeling.
1142 if (CE0->getOpcode() == Instruction::PtrToInt) {
1143 Type *IntPtrTy = DL.getIntPtrType(CE0->getOperand(0)->getType());
1144 if (CE0->getType() == IntPtrTy &&
1145 CE0->getOperand(0)->getType() == CE1->getOperand(0)->getType()) {
1146 return ConstantFoldCompareInstOperands(
1147 Predicate, CE0->getOperand(0), CE1->getOperand(0), DL, TLI);
1148 }
1149 }
1150 }
1151 }
1152
1153 // icmp eq (or x, y), 0 -> (icmp eq x, 0) & (icmp eq y, 0)
1154 // icmp ne (or x, y), 0 -> (icmp ne x, 0) | (icmp ne y, 0)
1155 if ((Predicate == ICmpInst::ICMP_EQ || Predicate == ICmpInst::ICMP_NE) &&
1156 CE0->getOpcode() == Instruction::Or && Ops1->isNullValue()) {
1157 Constant *LHS = ConstantFoldCompareInstOperands(
1158 Predicate, CE0->getOperand(0), Ops1, DL, TLI);
1159 Constant *RHS = ConstantFoldCompareInstOperands(
1160 Predicate, CE0->getOperand(1), Ops1, DL, TLI);
1161 unsigned OpC =
1162 Predicate == ICmpInst::ICMP_EQ ? Instruction::And : Instruction::Or;
1163 return ConstantFoldBinaryOpOperands(OpC, LHS, RHS, DL);
1164 }
1165 }
1166
1167 return ConstantExpr::getCompare(Predicate, Ops0, Ops1);
1168 }
1169
1170 Constant *llvm::ConstantFoldBinaryOpOperands(unsigned Opcode, Constant *LHS,
1171 Constant *RHS,
1172 const DataLayout &DL) {
1173 assert(Instruction::isBinaryOp(Opcode));
1174 if (isa<ConstantExpr>(LHS) || isa<ConstantExpr>(RHS))
1175 if (Constant *C = SymbolicallyEvaluateBinop(Opcode, LHS, RHS, DL))
1176 return C;
1177
1178 return ConstantExpr::get(Opcode, LHS, RHS);
1179 }
1180
1181 Constant *llvm::ConstantFoldCastOperand(unsigned Opcode, Constant *C,
1182 Type *DestTy, const DataLayout &DL) {
1183 assert(Instruction::isCast(Opcode));
1184 switch (Opcode) {
1185 default:
1186 llvm_unreachable("Missing case");
1187 case Instruction::PtrToInt:
1188     // If the input is an inttoptr, eliminate the pair. This requires knowing
1189 // the width of a pointer, so it can't be done in ConstantExpr::getCast.
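    // For example, ptrtoint (inttoptr (i64 123 to i8*)) back to i64 folds to
    // i64 123; if the pointer is narrower than the source integer, the value is
    // first masked down to the pointer width.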
1190 if (auto *CE = dyn_cast<ConstantExpr>(C)) {
1191 if (CE->getOpcode() == Instruction::IntToPtr) {
1192 Constant *Input = CE->getOperand(0);
1193 unsigned InWidth = Input->getType()->getScalarSizeInBits();
1194 unsigned PtrWidth = DL.getPointerTypeSizeInBits(CE->getType());
1195 if (PtrWidth < InWidth) {
1196 Constant *Mask =
1197 ConstantInt::get(CE->getContext(),
1198 APInt::getLowBitsSet(InWidth, PtrWidth));
1199 Input = ConstantExpr::getAnd(Input, Mask);
1200 }
1201 // Do a zext or trunc to get to the dest size.
1202 return ConstantExpr::getIntegerCast(Input, DestTy, false);
1203 }
1204 }
1205 return ConstantExpr::getCast(Opcode, C, DestTy);
1206 case Instruction::IntToPtr:
1207 // If the input is a ptrtoint, turn the pair into a ptr to ptr bitcast if
1208 // the int size is >= the ptr size and the address spaces are the same.
1209 // This requires knowing the width of a pointer, so it can't be done in
1210 // ConstantExpr::getCast.
1211 if (auto *CE = dyn_cast<ConstantExpr>(C)) {
1212 if (CE->getOpcode() == Instruction::PtrToInt) {
1213 Constant *SrcPtr = CE->getOperand(0);
1214 unsigned SrcPtrSize = DL.getPointerTypeSizeInBits(SrcPtr->getType());
1215 unsigned MidIntSize = CE->getType()->getScalarSizeInBits();
1216
1217 if (MidIntSize >= SrcPtrSize) {
1218 unsigned SrcAS = SrcPtr->getType()->getPointerAddressSpace();
1219 if (SrcAS == DestTy->getPointerAddressSpace())
1220 return FoldBitCast(CE->getOperand(0), DestTy, DL);
1221 }
1222 }
1223 }
1224
1225 return ConstantExpr::getCast(Opcode, C, DestTy);
1226 case Instruction::Trunc:
1227 case Instruction::ZExt:
1228 case Instruction::SExt:
1229 case Instruction::FPTrunc:
1230 case Instruction::FPExt:
1231 case Instruction::UIToFP:
1232 case Instruction::SIToFP:
1233 case Instruction::FPToUI:
1234 case Instruction::FPToSI:
1235 case Instruction::AddrSpaceCast:
1236 return ConstantExpr::getCast(Opcode, C, DestTy);
1237 case Instruction::BitCast:
1238 return FoldBitCast(C, DestTy, DL);
1239 }
1240 }
1241
1242 Constant *llvm::ConstantFoldLoadThroughGEPConstantExpr(Constant *C,
1243 ConstantExpr *CE) {
1244 if (!CE->getOperand(1)->isNullValue())
1245 return nullptr; // Do not allow stepping over the value!
1246
1247 // Loop over all of the operands, tracking down which value we are
1248 // addressing.
1249 for (unsigned i = 2, e = CE->getNumOperands(); i != e; ++i) {
1250 C = C->getAggregateElement(CE->getOperand(i));
1251 if (!C)
1252 return nullptr;
1253 }
1254 return C;
1255 }
1256
1257 Constant *
1258 llvm::ConstantFoldLoadThroughGEPIndices(Constant *C,
1259 ArrayRef<Constant *> Indices) {
1260 // Loop over all of the operands, tracking down which value we are
1261 // addressing.
1262 for (Constant *Index : Indices) {
1263 C = C->getAggregateElement(Index);
1264 if (!C)
1265 return nullptr;
1266 }
1267 return C;
1268 }
1269
1270 //===----------------------------------------------------------------------===//
1271 // Constant Folding for Calls
1272 //
1273
1274 bool llvm::canConstantFoldCallTo(const Function *F) {
1275 switch (F->getIntrinsicID()) {
1276 case Intrinsic::fabs:
1277 case Intrinsic::minnum:
1278 case Intrinsic::maxnum:
1279 case Intrinsic::log:
1280 case Intrinsic::log2:
1281 case Intrinsic::log10:
1282 case Intrinsic::exp:
1283 case Intrinsic::exp2:
1284 case Intrinsic::floor:
1285 case Intrinsic::ceil:
1286 case Intrinsic::sqrt:
1287 case Intrinsic::sin:
1288 case Intrinsic::cos:
1289 case Intrinsic::trunc:
1290 case Intrinsic::rint:
1291 case Intrinsic::nearbyint:
1292 case Intrinsic::pow:
1293 case Intrinsic::powi:
1294 case Intrinsic::bswap:
1295 case Intrinsic::ctpop:
1296 case Intrinsic::ctlz:
1297 case Intrinsic::cttz:
1298 case Intrinsic::fma:
1299 case Intrinsic::fmuladd:
1300 case Intrinsic::copysign:
1301 case Intrinsic::round:
1302 case Intrinsic::masked_load:
1303 case Intrinsic::sadd_with_overflow:
1304 case Intrinsic::uadd_with_overflow:
1305 case Intrinsic::ssub_with_overflow:
1306 case Intrinsic::usub_with_overflow:
1307 case Intrinsic::smul_with_overflow:
1308 case Intrinsic::umul_with_overflow:
1309 case Intrinsic::convert_from_fp16:
1310 case Intrinsic::convert_to_fp16:
1311 case Intrinsic::bitreverse:
1312 case Intrinsic::x86_sse_cvtss2si:
1313 case Intrinsic::x86_sse_cvtss2si64:
1314 case Intrinsic::x86_sse_cvttss2si:
1315 case Intrinsic::x86_sse_cvttss2si64:
1316 case Intrinsic::x86_sse2_cvtsd2si:
1317 case Intrinsic::x86_sse2_cvtsd2si64:
1318 case Intrinsic::x86_sse2_cvttsd2si:
1319 case Intrinsic::x86_sse2_cvttsd2si64:
1320 return true;
1321 default:
1322 return false;
1323 case 0: break;
1324 }
1325
1326 if (!F->hasName())
1327 return false;
1328 StringRef Name = F->getName();
1329
1330 // In these cases, the check of the length is required. We don't want to
1331 // return true for a name like "cos\0blah" which strcmp would return equal to
1332 // "cos", but has length 8.
1333 switch (Name[0]) {
1334 default:
1335 return false;
1336 case 'a':
1337 return Name == "acos" || Name == "asin" || Name == "atan" ||
1338 Name == "atan2" || Name == "acosf" || Name == "asinf" ||
1339 Name == "atanf" || Name == "atan2f";
1340 case 'c':
1341 return Name == "ceil" || Name == "cos" || Name == "cosh" ||
1342 Name == "ceilf" || Name == "cosf" || Name == "coshf";
1343 case 'e':
1344 return Name == "exp" || Name == "exp2" || Name == "expf" || Name == "exp2f";
1345 case 'f':
1346 return Name == "fabs" || Name == "floor" || Name == "fmod" ||
1347 Name == "fabsf" || Name == "floorf" || Name == "fmodf";
1348 case 'l':
1349 return Name == "log" || Name == "log10" || Name == "logf" ||
1350 Name == "log10f";
1351 case 'p':
1352 return Name == "pow" || Name == "powf";
1353 case 's':
1354 return Name == "sin" || Name == "sinh" || Name == "sqrt" ||
1355 Name == "sinf" || Name == "sinhf" || Name == "sqrtf";
1356 case 't':
1357 return Name == "tan" || Name == "tanh" || Name == "tanf" || Name == "tanhf";
1358 }
1359 }
1360
1361 namespace {
1362
1363 Constant *GetConstantFoldFPValue(double V, Type *Ty) {
1364 if (Ty->isHalfTy()) {
1365 APFloat APF(V);
1366 bool unused;
1367 APF.convert(APFloat::IEEEhalf, APFloat::rmNearestTiesToEven, &unused);
1368 return ConstantFP::get(Ty->getContext(), APF);
1369 }
1370 if (Ty->isFloatTy())
1371 return ConstantFP::get(Ty->getContext(), APFloat((float)V));
1372 if (Ty->isDoubleTy())
1373 return ConstantFP::get(Ty->getContext(), APFloat(V));
1374 llvm_unreachable("Can only constant fold half/float/double");
1375 }
1376
1377 /// Clear the floating-point exception state.
1378 inline void llvm_fenv_clearexcept() {
1379 #if defined(HAVE_FENV_H) && HAVE_DECL_FE_ALL_EXCEPT
1380 feclearexcept(FE_ALL_EXCEPT);
1381 #endif
1382 errno = 0;
1383 }
1384
1385 /// Test if a floating-point exception was raised.
1386 inline bool llvm_fenv_testexcept() {
1387 int errno_val = errno;
1388 if (errno_val == ERANGE || errno_val == EDOM)
1389 return true;
1390 #if defined(HAVE_FENV_H) && HAVE_DECL_FE_ALL_EXCEPT && HAVE_DECL_FE_INEXACT
1391 if (fetestexcept(FE_ALL_EXCEPT & ~FE_INEXACT))
1392 return true;
1393 #endif
1394 return false;
1395 }
1396
1397 Constant *ConstantFoldFP(double (*NativeFP)(double), double V, Type *Ty) {
1398 llvm_fenv_clearexcept();
1399 V = NativeFP(V);
1400 if (llvm_fenv_testexcept()) {
1401 llvm_fenv_clearexcept();
1402 return nullptr;
1403 }
1404
1405 return GetConstantFoldFPValue(V, Ty);
1406 }
1407
1408 Constant *ConstantFoldBinaryFP(double (*NativeFP)(double, double), double V,
1409 double W, Type *Ty) {
1410 llvm_fenv_clearexcept();
1411 V = NativeFP(V, W);
1412 if (llvm_fenv_testexcept()) {
1413 llvm_fenv_clearexcept();
1414 return nullptr;
1415 }
1416
1417 return GetConstantFoldFPValue(V, Ty);
1418 }
1419
1420 /// Attempt to fold an SSE floating point to integer conversion of a constant
1421 /// floating point. If roundTowardZero is false, the default IEEE rounding is
1422 /// used (toward nearest, ties to even). This matches the behavior of the
1423 /// non-truncating SSE instructions in the default rounding mode. The desired
1424 /// integer type Ty is used to select how many bits are available for the
1425 /// result. Returns null if the conversion cannot be performed, otherwise
1426 /// returns the Constant value resulting from the conversion.
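/// For example, a truncating conversion of 3.7 folds to 3, while the
/// non-truncating conversion (round to nearest, ties to even) folds to 4.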
1427 Constant *ConstantFoldConvertToInt(const APFloat &Val, bool roundTowardZero,
1428 Type *Ty) {
1429   // All of these conversion intrinsics form an integer of at most 64 bits.
1430 unsigned ResultWidth = Ty->getIntegerBitWidth();
1431 assert(ResultWidth <= 64 &&
1432 "Can only constant fold conversions to 64 and 32 bit ints");
1433
1434 uint64_t UIntVal;
1435 bool isExact = false;
1436 APFloat::roundingMode mode = roundTowardZero? APFloat::rmTowardZero
1437 : APFloat::rmNearestTiesToEven;
1438 APFloat::opStatus status = Val.convertToInteger(&UIntVal, ResultWidth,
1439 /*isSigned=*/true, mode,
1440 &isExact);
1441 if (status != APFloat::opOK && status != APFloat::opInexact)
1442 return nullptr;
1443 return ConstantInt::get(Ty, UIntVal, /*isSigned=*/true);
1444 }
1445
1446 double getValueAsDouble(ConstantFP *Op) {
1447 Type *Ty = Op->getType();
1448
1449 if (Ty->isFloatTy())
1450 return Op->getValueAPF().convertToFloat();
1451
1452 if (Ty->isDoubleTy())
1453 return Op->getValueAPF().convertToDouble();
1454
1455 bool unused;
1456 APFloat APF = Op->getValueAPF();
1457 APF.convert(APFloat::IEEEdouble, APFloat::rmNearestTiesToEven, &unused);
1458 return APF.convertToDouble();
1459 }
1460
1461 Constant *ConstantFoldScalarCall(StringRef Name, unsigned IntrinsicID, Type *Ty,
1462 ArrayRef<Constant *> Operands,
1463 const TargetLibraryInfo *TLI) {
1464 if (Operands.size() == 1) {
1465 if (isa<UndefValue>(Operands[0])) {
1466 // cosine(arg) is between -1 and 1. cosine(invalid arg) is NaN
1467 if (IntrinsicID == Intrinsic::cos)
1468 return Constant::getNullValue(Ty);
1469 }
1470 if (auto *Op = dyn_cast<ConstantFP>(Operands[0])) {
1471 if (IntrinsicID == Intrinsic::convert_to_fp16) {
1472 APFloat Val(Op->getValueAPF());
1473
1474 bool lost = false;
1475 Val.convert(APFloat::IEEEhalf, APFloat::rmNearestTiesToEven, &lost);
1476
1477 return ConstantInt::get(Ty->getContext(), Val.bitcastToAPInt());
1478 }
1479
1480 if (!Ty->isHalfTy() && !Ty->isFloatTy() && !Ty->isDoubleTy())
1481 return nullptr;
1482
1483 if (IntrinsicID == Intrinsic::round) {
1484 APFloat V = Op->getValueAPF();
1485 V.roundToIntegral(APFloat::rmNearestTiesToAway);
1486 return ConstantFP::get(Ty->getContext(), V);
1487 }
1488
1489 if (IntrinsicID == Intrinsic::floor) {
1490 APFloat V = Op->getValueAPF();
1491 V.roundToIntegral(APFloat::rmTowardNegative);
1492 return ConstantFP::get(Ty->getContext(), V);
1493 }
1494
1495 if (IntrinsicID == Intrinsic::ceil) {
1496 APFloat V = Op->getValueAPF();
1497 V.roundToIntegral(APFloat::rmTowardPositive);
1498 return ConstantFP::get(Ty->getContext(), V);
1499 }
1500
1501 if (IntrinsicID == Intrinsic::trunc) {
1502 APFloat V = Op->getValueAPF();
1503 V.roundToIntegral(APFloat::rmTowardZero);
1504 return ConstantFP::get(Ty->getContext(), V);
1505 }
1506
1507 if (IntrinsicID == Intrinsic::rint) {
1508 APFloat V = Op->getValueAPF();
1509 V.roundToIntegral(APFloat::rmNearestTiesToEven);
1510 return ConstantFP::get(Ty->getContext(), V);
1511 }
1512
1513 if (IntrinsicID == Intrinsic::nearbyint) {
1514 APFloat V = Op->getValueAPF();
1515 V.roundToIntegral(APFloat::rmNearestTiesToEven);
1516 return ConstantFP::get(Ty->getContext(), V);
1517 }
1518
      // We only fold functions with finite arguments. Folding NaN and inf is
      // likely to be aborted with an exception anyway, and some host libms
      // have known errors raising exceptions.
      if (Op->getValueAPF().isNaN() || Op->getValueAPF().isInfinity())
        return nullptr;

      // Currently APFloat versions of these functions do not exist, so we use
      // the host native double versions. Float versions are not called
      // directly but for all these it is true (float)(f((double)arg)) ==
      // f(arg). Long double not supported yet.
      double V = getValueAsDouble(Op);

      switch (IntrinsicID) {
      default: break;
      case Intrinsic::fabs:
        return ConstantFoldFP(fabs, V, Ty);
      case Intrinsic::log2:
        return ConstantFoldFP(Log2, V, Ty);
      case Intrinsic::log:
        return ConstantFoldFP(log, V, Ty);
      case Intrinsic::log10:
        return ConstantFoldFP(log10, V, Ty);
      case Intrinsic::exp:
        return ConstantFoldFP(exp, V, Ty);
      case Intrinsic::exp2:
        return ConstantFoldFP(exp2, V, Ty);
      case Intrinsic::sin:
        return ConstantFoldFP(sin, V, Ty);
      case Intrinsic::cos:
        return ConstantFoldFP(cos, V, Ty);
      }

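      // The remaining unary cases fold calls to host math library functions,
      // which requires TargetLibraryInfo to confirm the function is available.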
      if (!TLI)
        return nullptr;

      switch (Name[0]) {
      case 'a':
        if ((Name == "acos" && TLI->has(LibFunc::acos)) ||
            (Name == "acosf" && TLI->has(LibFunc::acosf)))
          return ConstantFoldFP(acos, V, Ty);
        else if ((Name == "asin" && TLI->has(LibFunc::asin)) ||
                 (Name == "asinf" && TLI->has(LibFunc::asinf)))
          return ConstantFoldFP(asin, V, Ty);
        else if ((Name == "atan" && TLI->has(LibFunc::atan)) ||
                 (Name == "atanf" && TLI->has(LibFunc::atanf)))
          return ConstantFoldFP(atan, V, Ty);
        break;
      case 'c':
        if ((Name == "ceil" && TLI->has(LibFunc::ceil)) ||
            (Name == "ceilf" && TLI->has(LibFunc::ceilf)))
          return ConstantFoldFP(ceil, V, Ty);
        else if ((Name == "cos" && TLI->has(LibFunc::cos)) ||
                 (Name == "cosf" && TLI->has(LibFunc::cosf)))
          return ConstantFoldFP(cos, V, Ty);
        else if ((Name == "cosh" && TLI->has(LibFunc::cosh)) ||
                 (Name == "coshf" && TLI->has(LibFunc::coshf)))
          return ConstantFoldFP(cosh, V, Ty);
        break;
      case 'e':
        if ((Name == "exp" && TLI->has(LibFunc::exp)) ||
            (Name == "expf" && TLI->has(LibFunc::expf)))
          return ConstantFoldFP(exp, V, Ty);
        if ((Name == "exp2" && TLI->has(LibFunc::exp2)) ||
            (Name == "exp2f" && TLI->has(LibFunc::exp2f)))
          // Constant fold exp2(x) as pow(2,x) in case the host doesn't have a
          // C99 library.
          return ConstantFoldBinaryFP(pow, 2.0, V, Ty);
        break;
      case 'f':
        if ((Name == "fabs" && TLI->has(LibFunc::fabs)) ||
            (Name == "fabsf" && TLI->has(LibFunc::fabsf)))
          return ConstantFoldFP(fabs, V, Ty);
        else if ((Name == "floor" && TLI->has(LibFunc::floor)) ||
                 (Name == "floorf" && TLI->has(LibFunc::floorf)))
          return ConstantFoldFP(floor, V, Ty);
        break;
      case 'l':
        if ((Name == "log" && V > 0 && TLI->has(LibFunc::log)) ||
            (Name == "logf" && V > 0 && TLI->has(LibFunc::logf)))
          return ConstantFoldFP(log, V, Ty);
        else if ((Name == "log10" && V > 0 && TLI->has(LibFunc::log10)) ||
                 (Name == "log10f" && V > 0 && TLI->has(LibFunc::log10f)))
          return ConstantFoldFP(log10, V, Ty);
        else if (IntrinsicID == Intrinsic::sqrt &&
                 (Ty->isHalfTy() || Ty->isFloatTy() || Ty->isDoubleTy())) {
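          // Note: llvm.sqrt.* intrinsic calls reach this 'l' case because
          // their function names begin with "llvm.".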
          if (V >= -0.0)
            return ConstantFoldFP(sqrt, V, Ty);
          else {
            // Unlike the sqrt definitions in C/C++, POSIX, and IEEE-754 - which
            // all guarantee or favor returning NaN - the square root of a
            // negative number is not defined for the LLVM sqrt intrinsic.
            // This is because the intrinsic should only be emitted in place of
            // libm's sqrt function when using "no-nans-fp-math".
            return UndefValue::get(Ty);
          }
        }
        break;
      case 's':
        if ((Name == "sin" && TLI->has(LibFunc::sin)) ||
            (Name == "sinf" && TLI->has(LibFunc::sinf)))
          return ConstantFoldFP(sin, V, Ty);
        else if ((Name == "sinh" && TLI->has(LibFunc::sinh)) ||
                 (Name == "sinhf" && TLI->has(LibFunc::sinhf)))
          return ConstantFoldFP(sinh, V, Ty);
        else if ((Name == "sqrt" && V >= 0 && TLI->has(LibFunc::sqrt)) ||
                 (Name == "sqrtf" && V >= 0 && TLI->has(LibFunc::sqrtf)))
          return ConstantFoldFP(sqrt, V, Ty);
        break;
      case 't':
        if ((Name == "tan" && TLI->has(LibFunc::tan)) ||
            (Name == "tanf" && TLI->has(LibFunc::tanf)))
          return ConstantFoldFP(tan, V, Ty);
        else if ((Name == "tanh" && TLI->has(LibFunc::tanh)) ||
                 (Name == "tanhf" && TLI->has(LibFunc::tanhf)))
          return ConstantFoldFP(tanh, V, Ty);
        break;
      default:
        break;
      }
      return nullptr;
    }

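    // Integer intrinsics with a single constant integer operand.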
    if (auto *Op = dyn_cast<ConstantInt>(Operands[0])) {
      switch (IntrinsicID) {
      case Intrinsic::bswap:
        return ConstantInt::get(Ty->getContext(), Op->getValue().byteSwap());
      case Intrinsic::ctpop:
        return ConstantInt::get(Ty, Op->getValue().countPopulation());
      case Intrinsic::bitreverse:
        return ConstantInt::get(Ty->getContext(), Op->getValue().reverseBits());
      case Intrinsic::convert_from_fp16: {
        APFloat Val(APFloat::IEEEhalf, Op->getValue());

        bool lost = false;
        APFloat::opStatus status = Val.convert(
            Ty->getFltSemantics(), APFloat::rmNearestTiesToEven, &lost);

        // Conversion is always precise.
        (void)status;
        assert(status == APFloat::opOK && !lost &&
               "Precision lost during fp16 constfolding");

        return ConstantFP::get(Ty->getContext(), Val);
      }
      default:
        return nullptr;
      }
    }

    // Support ConstantVector in case we have an Undef in the top.
    if (isa<ConstantVector>(Operands[0]) ||
        isa<ConstantDataVector>(Operands[0])) {
      auto *Op = cast<Constant>(Operands[0]);
      switch (IntrinsicID) {
      default: break;
      case Intrinsic::x86_sse_cvtss2si:
      case Intrinsic::x86_sse_cvtss2si64:
      case Intrinsic::x86_sse2_cvtsd2si:
      case Intrinsic::x86_sse2_cvtsd2si64:
        if (ConstantFP *FPOp =
                dyn_cast_or_null<ConstantFP>(Op->getAggregateElement(0U)))
          return ConstantFoldConvertToInt(FPOp->getValueAPF(),
                                          /*roundTowardZero=*/false, Ty);
        break; // Don't fall through to the truncating cases below.
      case Intrinsic::x86_sse_cvttss2si:
      case Intrinsic::x86_sse_cvttss2si64:
      case Intrinsic::x86_sse2_cvttsd2si:
      case Intrinsic::x86_sse2_cvttsd2si64:
        if (ConstantFP *FPOp =
                dyn_cast_or_null<ConstantFP>(Op->getAggregateElement(0U)))
          return ConstantFoldConvertToInt(FPOp->getValueAPF(),
                                          /*roundTowardZero=*/true, Ty);
      }
    }

    if (isa<UndefValue>(Operands[0])) {
      if (IntrinsicID == Intrinsic::bswap)
        return Operands[0];
      return nullptr;
    }

    return nullptr;
  }

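  // Calls with two constant operands: floating-point intrinsics and libcalls,
  // powi, and the integer intrinsics handled further below.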
  if (Operands.size() == 2) {
    if (auto *Op1 = dyn_cast<ConstantFP>(Operands[0])) {
      if (!Ty->isHalfTy() && !Ty->isFloatTy() && !Ty->isDoubleTy())
        return nullptr;
      double Op1V = getValueAsDouble(Op1);

      if (auto *Op2 = dyn_cast<ConstantFP>(Operands[1])) {
        if (Op2->getType() != Op1->getType())
          return nullptr;

        double Op2V = getValueAsDouble(Op2);
        if (IntrinsicID == Intrinsic::pow) {
          return ConstantFoldBinaryFP(pow, Op1V, Op2V, Ty);
        }
        if (IntrinsicID == Intrinsic::copysign) {
          APFloat V1 = Op1->getValueAPF();
          const APFloat &V2 = Op2->getValueAPF();
          V1.copySign(V2);
          return ConstantFP::get(Ty->getContext(), V1);
        }

        if (IntrinsicID == Intrinsic::minnum) {
          const APFloat &C1 = Op1->getValueAPF();
          const APFloat &C2 = Op2->getValueAPF();
          return ConstantFP::get(Ty->getContext(), minnum(C1, C2));
        }

        if (IntrinsicID == Intrinsic::maxnum) {
          const APFloat &C1 = Op1->getValueAPF();
          const APFloat &C2 = Op2->getValueAPF();
          return ConstantFP::get(Ty->getContext(), maxnum(C1, C2));
        }

        if (!TLI)
          return nullptr;
        if ((Name == "pow" && TLI->has(LibFunc::pow)) ||
            (Name == "powf" && TLI->has(LibFunc::powf)))
          return ConstantFoldBinaryFP(pow, Op1V, Op2V, Ty);
        if ((Name == "fmod" && TLI->has(LibFunc::fmod)) ||
            (Name == "fmodf" && TLI->has(LibFunc::fmodf)))
          return ConstantFoldBinaryFP(fmod, Op1V, Op2V, Ty);
        if ((Name == "atan2" && TLI->has(LibFunc::atan2)) ||
            (Name == "atan2f" && TLI->has(LibFunc::atan2f)))
          return ConstantFoldBinaryFP(atan2, Op1V, Op2V, Ty);
      } else if (auto *Op2C = dyn_cast<ConstantInt>(Operands[1])) {
        if (IntrinsicID == Intrinsic::powi && Ty->isHalfTy())
          return ConstantFP::get(Ty->getContext(),
                                 APFloat((float)std::pow((float)Op1V,
                                                         (int)Op2C->getZExtValue())));
        if (IntrinsicID == Intrinsic::powi && Ty->isFloatTy())
          return ConstantFP::get(Ty->getContext(),
                                 APFloat((float)std::pow((float)Op1V,
                                                         (int)Op2C->getZExtValue())));
        if (IntrinsicID == Intrinsic::powi && Ty->isDoubleTy())
          return ConstantFP::get(Ty->getContext(),
                                 APFloat((double)std::pow((double)Op1V,
                                                          (int)Op2C->getZExtValue())));
      }
      return nullptr;
    }

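    // Two constant integer operands: arithmetic-with-overflow intrinsics and
    // the count-zeros intrinsics, whose second operand is the is_zero_undef
    // flag.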
    if (auto *Op1 = dyn_cast<ConstantInt>(Operands[0])) {
      if (auto *Op2 = dyn_cast<ConstantInt>(Operands[1])) {
        switch (IntrinsicID) {
        default: break;
        case Intrinsic::sadd_with_overflow:
        case Intrinsic::uadd_with_overflow:
        case Intrinsic::ssub_with_overflow:
        case Intrinsic::usub_with_overflow:
        case Intrinsic::smul_with_overflow:
        case Intrinsic::umul_with_overflow: {
          APInt Res;
          bool Overflow;
          switch (IntrinsicID) {
          default: llvm_unreachable("Invalid case");
          case Intrinsic::sadd_with_overflow:
            Res = Op1->getValue().sadd_ov(Op2->getValue(), Overflow);
            break;
          case Intrinsic::uadd_with_overflow:
            Res = Op1->getValue().uadd_ov(Op2->getValue(), Overflow);
            break;
          case Intrinsic::ssub_with_overflow:
            Res = Op1->getValue().ssub_ov(Op2->getValue(), Overflow);
            break;
          case Intrinsic::usub_with_overflow:
            Res = Op1->getValue().usub_ov(Op2->getValue(), Overflow);
            break;
          case Intrinsic::smul_with_overflow:
            Res = Op1->getValue().smul_ov(Op2->getValue(), Overflow);
            break;
          case Intrinsic::umul_with_overflow:
            Res = Op1->getValue().umul_ov(Op2->getValue(), Overflow);
            break;
          }
          Constant *Ops[] = {
            ConstantInt::get(Ty->getContext(), Res),
            ConstantInt::get(Type::getInt1Ty(Ty->getContext()), Overflow)
          };
          return ConstantStruct::get(cast<StructType>(Ty), Ops);
        }
        case Intrinsic::cttz:
          if (Op2->isOne() && Op1->isZero()) // cttz(0, 1) is undef.
            return UndefValue::get(Ty);
          return ConstantInt::get(Ty, Op1->getValue().countTrailingZeros());
        case Intrinsic::ctlz:
          if (Op2->isOne() && Op1->isZero()) // ctlz(0, 1) is undef.
            return UndefValue::get(Ty);
          return ConstantInt::get(Ty, Op1->getValue().countLeadingZeros());
        }
      }

      return nullptr;
    }
    return nullptr;
  }

  if (Operands.size() != 3)
    return nullptr;

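  // Three constant floating-point operands: fma and fmuladd.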
  if (const auto *Op1 = dyn_cast<ConstantFP>(Operands[0])) {
    if (const auto *Op2 = dyn_cast<ConstantFP>(Operands[1])) {
      if (const auto *Op3 = dyn_cast<ConstantFP>(Operands[2])) {
        switch (IntrinsicID) {
        default: break;
        case Intrinsic::fma:
        case Intrinsic::fmuladd: {
          APFloat V = Op1->getValueAPF();
          APFloat::opStatus s = V.fusedMultiplyAdd(Op2->getValueAPF(),
                                                   Op3->getValueAPF(),
                                                   APFloat::rmNearestTiesToEven);
          if (s != APFloat::opInvalidOp)
            return ConstantFP::get(Ty->getContext(), V);

          return nullptr;
        }
        }
      }
    }
  }

  return nullptr;
}

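/// Try to fold a call that returns a vector, either by folding a masked load
/// directly or by folding the call lane-by-lane via ConstantFoldScalarCall.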
Constant *ConstantFoldVectorCall(StringRef Name, unsigned IntrinsicID,
                                 VectorType *VTy, ArrayRef<Constant *> Operands,
                                 const DataLayout &DL,
                                 const TargetLibraryInfo *TLI) {
  SmallVector<Constant *, 4> Result(VTy->getNumElements());
  SmallVector<Constant *, 4> Lane(Operands.size());
  Type *Ty = VTy->getElementType();

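  // For a masked load of a constant pointer, build the result element-wise:
  // lanes with a true mask take the loaded element, lanes with a false mask
  // take the passthru element.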
  if (IntrinsicID == Intrinsic::masked_load) {
    auto *SrcPtr = Operands[0];
    auto *Mask = Operands[2];
    auto *Passthru = Operands[3];

    Constant *VecData = ConstantFoldLoadFromConstPtr(SrcPtr, VTy, DL);

    SmallVector<Constant *, 32> NewElements;
    for (unsigned I = 0, E = VTy->getNumElements(); I != E; ++I) {
      auto *MaskElt = Mask->getAggregateElement(I);
      if (!MaskElt)
        break;
      auto *PassthruElt = Passthru->getAggregateElement(I);
      auto *VecElt = VecData ? VecData->getAggregateElement(I) : nullptr;
      if (isa<UndefValue>(MaskElt)) {
        if (PassthruElt)
          NewElements.push_back(PassthruElt);
        else if (VecElt)
          NewElements.push_back(VecElt);
        else
          return nullptr;
        // This lane is handled; skip the null/one mask checks below.
        continue;
      }
      if (MaskElt->isNullValue()) {
        if (!PassthruElt)
          return nullptr;
        NewElements.push_back(PassthruElt);
      } else if (MaskElt->isOneValue()) {
        if (!VecElt)
          return nullptr;
        NewElements.push_back(VecElt);
      } else {
        return nullptr;
      }
    }
    if (NewElements.size() != VTy->getNumElements())
      return nullptr;
    return ConstantVector::get(NewElements);
  }

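  // Otherwise fold lane-by-lane: gather the I-th element of every operand and
  // fold the resulting scalar call.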
  for (unsigned I = 0, E = VTy->getNumElements(); I != E; ++I) {
    // Gather a column of constants.
    for (unsigned J = 0, JE = Operands.size(); J != JE; ++J) {
      Constant *Agg = Operands[J]->getAggregateElement(I);
      if (!Agg)
        return nullptr;

      Lane[J] = Agg;
    }

    // Use the regular scalar folding to simplify this column.
    Constant *Folded = ConstantFoldScalarCall(Name, IntrinsicID, Ty, Lane, TLI);
    if (!Folded)
      return nullptr;
    Result[I] = Folded;
  }

  return ConstantVector::get(Result);
}

} // end anonymous namespace

Constant *
llvm::ConstantFoldCall(Function *F, ArrayRef<Constant *> Operands,
                       const TargetLibraryInfo *TLI) {
  if (!F->hasName())
    return nullptr;
  StringRef Name = F->getName();

  Type *Ty = F->getReturnType();

  if (auto *VTy = dyn_cast<VectorType>(Ty))
    return ConstantFoldVectorCall(Name, F->getIntrinsicID(), VTy, Operands,
                                  F->getParent()->getDataLayout(), TLI);

  return ConstantFoldScalarCall(Name, F->getIntrinsicID(), Ty, Operands, TLI);
}