//===-- Value.cpp - Implement the Value class -----------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the Value, ValueHandle, and User classes.
//
//===----------------------------------------------------------------------===//

#include "llvm/IR/Value.h"
#include "LLVMContextImpl.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/DerivedUser.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/Statepoint.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/IR/ValueSymbolTable.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/ManagedStatic.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>

using namespace llvm;

static cl::opt<unsigned> NonGlobalValueMaxNameSize(
    "non-global-value-max-name-size", cl::Hidden, cl::init(1024),
    cl::desc("Maximum size for the name of non-global values."));
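// Example (illustrative): because this option is cl::Hidden it only shows up
// under -help-hidden, but it can still be passed to any cl::opt-aware tool,
// e.g.:
//   opt -non-global-value-max-name-size=64 -S input.ll -o out.ll
// Names longer than the limit are truncated in Value::setNameImpl() below;
// global value names are never capped.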

//===----------------------------------------------------------------------===//
//                                Value Class
//===----------------------------------------------------------------------===//
static inline Type *checkType(Type *Ty) {
  assert(Ty && "Value defined with a null type: Error!");
  return Ty;
}

Value::Value(Type *ty, unsigned scid)
    : VTy(checkType(ty)), UseList(nullptr), SubclassID(scid), HasValueHandle(0),
      SubclassOptionalData(0), SubclassData(0), NumUserOperands(0),
      IsUsedByMD(false), HasName(false), HasMetadata(false) {
  static_assert(ConstantFirstVal == 0, "!(SubclassID < ConstantFirstVal)");
  // FIXME: Why isn't this in the subclass gunk??
  // Note, we cannot call isa<CallInst> before the CallInst has been
  // constructed.
  if (SubclassID == Instruction::Call || SubclassID == Instruction::Invoke ||
      SubclassID == Instruction::CallBr)
    assert((VTy->isFirstClassType() || VTy->isVoidTy() || VTy->isStructTy()) &&
           "invalid CallInst type!");
  else if (SubclassID != BasicBlockVal &&
           (/*SubclassID < ConstantFirstVal ||*/ SubclassID > ConstantLastVal))
    assert((VTy->isFirstClassType() || VTy->isVoidTy()) &&
           "Cannot create non-first-class values except for constants!");
  static_assert(sizeof(Value) == 2 * sizeof(void *) + 2 * sizeof(unsigned),
                "Value too big");
}

Value::~Value() {
  // Notify all ValueHandles (if present) that this value is going away.
  if (HasValueHandle)
    ValueHandleBase::ValueIsDeleted(this);
  if (isUsedByMetadata())
    ValueAsMetadata::handleDeletion(this);

  // Remove associated metadata from context.
  if (HasMetadata)
    clearMetadata();

#ifndef NDEBUG // Only in -g mode...
  // Check to make sure that there are no uses of this value that are still
  // around when the value is destroyed. If there are, then we have a dangling
  // reference and something is wrong. This code is here to print out where
  // the value is still being referenced.
  //
  // Note that use_empty() cannot be called here, as it eventually downcasts
  // 'this' to GlobalValue (derived class of Value), but GlobalValue has already
  // been destructed, so accessing it is UB.
  //
  if (!materialized_use_empty()) {
    dbgs() << "While deleting: " << *VTy << " %" << getName() << "\n";
    for (auto *U : users())
      dbgs() << "Use still stuck around after Def is destroyed:" << *U << "\n";
  }
#endif
  assert(materialized_use_empty() && "Uses remain when a value is destroyed!");

  // If this value is named, destroy the name. This should not be in a symtab
  // at this point.
  destroyValueName();
}

void Value::deleteValue() {
  switch (getValueID()) {
#define HANDLE_VALUE(Name)                                                     \
  case Value::Name##Val:                                                       \
    delete static_cast<Name *>(this);                                          \
    break;
#define HANDLE_MEMORY_VALUE(Name)                                              \
  case Value::Name##Val:                                                       \
    static_cast<DerivedUser *>(this)->DeleteValue(                             \
        static_cast<DerivedUser *>(this));                                     \
    break;
#define HANDLE_CONSTANT(Name)                                                  \
  case Value::Name##Val:                                                       \
    llvm_unreachable("constants should be destroyed with destroyConstant");    \
    break;
#define HANDLE_INSTRUCTION(Name) /* nothing */
#include "llvm/IR/Value.def"

#define HANDLE_INST(N, OPC, CLASS)                                             \
  case Value::InstructionVal + Instruction::OPC:                               \
    delete static_cast<CLASS *>(this);                                         \
    break;
#define HANDLE_USER_INST(N, OPC, CLASS)
#include "llvm/IR/Instruction.def"

  default:
    llvm_unreachable("attempting to delete unknown value kind");
  }
}

void Value::destroyValueName() {
  ValueName *Name = getValueName();
  if (Name) {
    MallocAllocator Allocator;
    Name->Destroy(Allocator);
  }
  setValueName(nullptr);
}

bool Value::hasNUses(unsigned N) const {
  return hasNItems(use_begin(), use_end(), N);
}

bool Value::hasNUsesOrMore(unsigned N) const {
  return hasNItemsOrMore(use_begin(), use_end(), N);
}

bool Value::hasOneUser() const {
  if (use_empty())
    return false;
  if (hasOneUse())
    return true;
  return std::equal(++user_begin(), user_end(), user_begin());
}

static bool isUnDroppableUser(const User *U) { return !U->isDroppable(); }

Use *Value::getSingleUndroppableUse() {
  Use *Result = nullptr;
  for (Use &U : uses()) {
    if (!U.getUser()->isDroppable()) {
      if (Result)
        return nullptr;
      Result = &U;
    }
  }
  return Result;
}

bool Value::hasNUndroppableUses(unsigned int N) const {
  return hasNItems(user_begin(), user_end(), N, isUnDroppableUser);
}

bool Value::hasNUndroppableUsesOrMore(unsigned int N) const {
  return hasNItemsOrMore(user_begin(), user_end(), N, isUnDroppableUser);
}

void Value::dropDroppableUses(
    llvm::function_ref<bool(const Use *)> ShouldDrop) {
  SmallVector<Use *, 8> ToBeEdited;
  for (Use &U : uses())
    if (U.getUser()->isDroppable() && ShouldDrop(&U))
      ToBeEdited.push_back(&U);
  for (Use *U : ToBeEdited)
    dropDroppableUse(*U);
}

void Value::dropDroppableUsesIn(User &Usr) {
  assert(Usr.isDroppable() && "Expected a droppable user!");
  for (Use &UsrOp : Usr.operands()) {
    if (UsrOp.get() == this)
      dropDroppableUse(UsrOp);
  }
}

void Value::dropDroppableUse(Use &U) {
  U.removeFromList();
  if (auto *Assume = dyn_cast<IntrinsicInst>(U.getUser())) {
    assert(Assume->getIntrinsicID() == Intrinsic::assume);
    unsigned OpNo = U.getOperandNo();
    if (OpNo == 0)
      U.set(ConstantInt::getTrue(Assume->getContext()));
    else {
      U.set(UndefValue::get(U.get()->getType()));
      CallInst::BundleOpInfo &BOI = Assume->getBundleOpInfoForOperand(OpNo);
      BOI.Tag = Assume->getContext().pImpl->getOrInsertBundleTag("ignore");
    }
    return;
  }

  llvm_unreachable("unknown droppable use");
}
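
// Example (illustrative; the IR shapes below are assumed, not taken from a
// test): dropping a droppable use that is an llvm.assume operand-bundle
// argument rewrites the bundle rather than deleting the call, e.g.
//   call void @llvm.assume(i1 true) ["align"(i32* %p, i64 8)]
// becomes, once the use of %p is dropped,
//   call void @llvm.assume(i1 true) ["ignore"(i32* undef, i64 8)]
// whereas dropping the use at operand 0 (the condition) simply replaces it
// with 'i1 true'.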

bool Value::isUsedInBasicBlock(const BasicBlock *BB) const {
  // This can be computed either by scanning the instructions in BB, or by
  // scanning the use list of this Value. Both lists can be very long, but
  // usually one is quite short.
  //
  // Scan both lists simultaneously until one is exhausted. This limits the
  // search to the shorter list.
  BasicBlock::const_iterator BI = BB->begin(), BE = BB->end();
  const_user_iterator UI = user_begin(), UE = user_end();
  for (; BI != BE && UI != UE; ++BI, ++UI) {
    // Scan basic block: Check if this Value is used by the instruction at BI.
    if (is_contained(BI->operands(), this))
      return true;
    // Scan use list: Check if the use at UI is in BB.
    const auto *User = dyn_cast<Instruction>(*UI);
    if (User && User->getParent() == BB)
      return true;
  }
  return false;
}

unsigned Value::getNumUses() const {
  return (unsigned)std::distance(use_begin(), use_end());
}

static bool getSymTab(Value *V, ValueSymbolTable *&ST) {
  ST = nullptr;
  if (Instruction *I = dyn_cast<Instruction>(V)) {
    if (BasicBlock *P = I->getParent())
      if (Function *PP = P->getParent())
        ST = PP->getValueSymbolTable();
  } else if (BasicBlock *BB = dyn_cast<BasicBlock>(V)) {
    if (Function *P = BB->getParent())
      ST = P->getValueSymbolTable();
  } else if (GlobalValue *GV = dyn_cast<GlobalValue>(V)) {
    if (Module *P = GV->getParent())
      ST = &P->getValueSymbolTable();
  } else if (Argument *A = dyn_cast<Argument>(V)) {
    if (Function *P = A->getParent())
      ST = P->getValueSymbolTable();
  } else {
    assert(isa<Constant>(V) && "Unknown value type!");
    return true; // no name is settable for this.
  }
  return false;
}

ValueName *Value::getValueName() const {
  if (!HasName) return nullptr;

  LLVMContext &Ctx = getContext();
  auto I = Ctx.pImpl->ValueNames.find(this);
  assert(I != Ctx.pImpl->ValueNames.end() &&
         "No name entry found!");

  return I->second;
}

void Value::setValueName(ValueName *VN) {
  LLVMContext &Ctx = getContext();

  assert(HasName == Ctx.pImpl->ValueNames.count(this) &&
         "HasName bit out of sync!");

  if (!VN) {
    if (HasName)
      Ctx.pImpl->ValueNames.erase(this);
    HasName = false;
    return;
  }

  HasName = true;
  Ctx.pImpl->ValueNames[this] = VN;
}

StringRef Value::getName() const {
  // Make sure the empty string is still a C string. For historical reasons,
  // some clients want to call .data() on the result and expect it to be null
  // terminated.
  if (!hasName())
    return StringRef("", 0);
  return getValueName()->getKey();
}
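
// Example usage (illustrative): even for an unnamed value the StringRef
// returned above points at a NUL-terminated buffer, so legacy callers that do
//   const char *CStr = V->getName().data();
// can hand the result to C APIs expecting a C string.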

void Value::setNameImpl(const Twine &NewName) {
  // Fast-path: LLVMContext can be set to strip out non-GlobalValue names
  if (getContext().shouldDiscardValueNames() && !isa<GlobalValue>(this))
    return;

  // Fast path for common IRBuilder case of setName("") when there is no name.
  if (NewName.isTriviallyEmpty() && !hasName())
    return;

  SmallString<256> NameData;
  StringRef NameRef = NewName.toStringRef(NameData);
  assert(NameRef.find_first_of(0) == StringRef::npos &&
         "Null bytes are not allowed in names");

  // Name isn't changing?
  if (getName() == NameRef)
    return;

  // Cap the size of non-GlobalValue names.
  if (NameRef.size() > NonGlobalValueMaxNameSize && !isa<GlobalValue>(this))
    NameRef =
        NameRef.substr(0, std::max(1u, (unsigned)NonGlobalValueMaxNameSize));

  assert(!getType()->isVoidTy() && "Cannot assign a name to void values!");

  // Get the symbol table to update for this object.
  ValueSymbolTable *ST;
  if (getSymTab(this, ST))
    return; // Cannot set a name on this value (e.g. constant).

  if (!ST) { // No symbol table to update? Just do the change.
    if (NameRef.empty()) {
      // Free the name for this value.
      destroyValueName();
      return;
    }

    // NOTE: Could optimize for the case where the name is shrinking, to avoid
    // deallocating and then reallocating.
    destroyValueName();

    // Create the new name.
    MallocAllocator Allocator;
    setValueName(ValueName::Create(NameRef, Allocator));
    getValueName()->setValue(this);
    return;
  }

  // NOTE: Could optimize for the case where the name is shrinking, to avoid
  // deallocating and then reallocating.
  if (hasName()) {
    // Remove old name.
    ST->removeValueName(getValueName());
    destroyValueName();

    if (NameRef.empty())
      return;
  }

  // Name is changing to something new.
  setValueName(ST->createValueName(NameRef, this));
}

void Value::setName(const Twine &NewName) {
  setNameImpl(NewName);
  if (Function *F = dyn_cast<Function>(this))
    F->recalculateIntrinsicID();
}

void Value::takeName(Value *V) {
  ValueSymbolTable *ST = nullptr;
  // If this value has a name, drop it.
  if (hasName()) {
    // Get the symtab this is in.
    if (getSymTab(this, ST)) {
      // We can't set a name on this value, but we need to clear V's name if
      // it has one.
      if (V->hasName()) V->setName("");
      return; // Cannot set a name on this value (e.g. constant).
    }

    // Remove old name.
    if (ST)
      ST->removeValueName(getValueName());
    destroyValueName();
  }

  // Now we know that this has no name.

  // If V has no name either, we're done.
  if (!V->hasName()) return;

  // Get this's symtab if we didn't before.
  if (!ST) {
    if (getSymTab(this, ST)) {
      // Clear V's name.
      V->setName("");
      return; // Cannot set a name on this value (e.g. constant).
    }
  }

  // Get V's ST, this should always succeed, because V has a name.
  ValueSymbolTable *VST;
  bool Failure = getSymTab(V, VST);
  assert(!Failure && "V has a name, so it should have a ST!"); (void)Failure;

  // If these values are both in the same symtab, we can do this very fast.
  // This works even if both values have no symtab yet.
  if (ST == VST) {
    // Take the name!
    setValueName(V->getValueName());
    V->setValueName(nullptr);
    getValueName()->setValue(this);
    return;
  }

  // Otherwise, things are slightly more complex. Remove V's name from VST and
  // then reinsert it into ST.

  if (VST)
    VST->removeValueName(V->getValueName());
  setValueName(V->getValueName());
  V->setValueName(nullptr);
  getValueName()->setValue(this);

  if (ST)
    ST->reinsertValue(this);
}

void Value::assertModuleIsMaterializedImpl() const {
#ifndef NDEBUG
  const GlobalValue *GV = dyn_cast<GlobalValue>(this);
  if (!GV)
    return;
  const Module *M = GV->getParent();
  if (!M)
    return;
  assert(M->isMaterialized());
#endif
}

#ifndef NDEBUG
static bool contains(SmallPtrSetImpl<ConstantExpr *> &Cache, ConstantExpr *Expr,
                     Constant *C) {
  if (!Cache.insert(Expr).second)
    return false;

  for (auto &O : Expr->operands()) {
    if (O == C)
      return true;
    auto *CE = dyn_cast<ConstantExpr>(O);
    if (!CE)
      continue;
    if (contains(Cache, CE, C))
      return true;
  }
  return false;
}

static bool contains(Value *Expr, Value *V) {
  if (Expr == V)
    return true;

  auto *C = dyn_cast<Constant>(V);
  if (!C)
    return false;

  auto *CE = dyn_cast<ConstantExpr>(Expr);
  if (!CE)
    return false;

  SmallPtrSet<ConstantExpr *, 4> Cache;
  return contains(Cache, CE, C);
}
#endif // NDEBUG

void Value::doRAUW(Value *New, ReplaceMetadataUses ReplaceMetaUses) {
  assert(New && "Value::replaceAllUsesWith(<null>) is invalid!");
  assert(!contains(New, this) &&
         "this->replaceAllUsesWith(expr(this)) is NOT valid!");
  assert(New->getType() == getType() &&
         "replaceAllUses of value with new value of different type!");

  // Notify all ValueHandles (if present) that this value is going away.
  if (HasValueHandle)
    ValueHandleBase::ValueIsRAUWd(this, New);
  if (ReplaceMetaUses == ReplaceMetadataUses::Yes && isUsedByMetadata())
    ValueAsMetadata::handleRAUW(this, New);

  while (!materialized_use_empty()) {
    Use &U = *UseList;
    // Must handle Constants specially, we cannot call replaceUsesOfWith on a
    // constant because they are uniqued.
    if (auto *C = dyn_cast<Constant>(U.getUser())) {
      if (!isa<GlobalValue>(C)) {
        C->handleOperandChange(this, New);
        continue;
      }
    }

    U.set(New);
  }

  if (BasicBlock *BB = dyn_cast<BasicBlock>(this))
    BB->replaceSuccessorsPhiUsesWith(cast<BasicBlock>(New));
}

void Value::replaceAllUsesWith(Value *New) {
  doRAUW(New, ReplaceMetadataUses::Yes);
}

void Value::replaceNonMetadataUsesWith(Value *New) {
  doRAUW(New, ReplaceMetadataUses::No);
}

// Like replaceAllUsesWith except it does not handle constants or basic blocks.
// This routine leaves uses within BB.
void Value::replaceUsesOutsideBlock(Value *New, BasicBlock *BB) {
  assert(New && "Value::replaceUsesOutsideBlock(<null>, BB) is invalid!");
  assert(!contains(New, this) &&
         "this->replaceUsesOutsideBlock(expr(this), BB) is NOT valid!");
  assert(New->getType() == getType() &&
         "replaceUses of value with new value of different type!");
  assert(BB && "Basic block that may contain a use of 'New' must be defined\n");

  replaceUsesWithIf(New, [BB](Use &U) {
    auto *I = dyn_cast<Instruction>(U.getUser());
    // Don't replace if it's an instruction in the BB basic block.
    return !I || I->getParent() != BB;
  });
}
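
// Usage sketch (hypothetical names): when a value is rewritten everywhere but
// must keep its original uses inside one block, a pass can do
//   OrigVal->replaceUsesOutsideBlock(NewVal, &TheBB);
// which rewrites every use of OrigVal except those in instructions that live
// in TheBB.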

namespace {
// Various metrics for how much to strip off of pointers.
enum PointerStripKind {
  PSK_ZeroIndices,
  PSK_ZeroIndicesAndAliases,
  PSK_ZeroIndicesSameRepresentation,
  PSK_ZeroIndicesAndInvariantGroups,
  PSK_InBoundsConstantIndices,
  PSK_InBounds
};

template <PointerStripKind StripKind> static void NoopCallback(const Value *) {}

template <PointerStripKind StripKind>
static const Value *stripPointerCastsAndOffsets(
    const Value *V,
    function_ref<void(const Value *)> Func = NoopCallback<StripKind>) {
  if (!V->getType()->isPointerTy())
    return V;

  // Even though we don't look through PHI nodes, we could be called on an
  // instruction in an unreachable block, which may be on a cycle.
  SmallPtrSet<const Value *, 4> Visited;

  Visited.insert(V);
  do {
    Func(V);
    if (auto *GEP = dyn_cast<GEPOperator>(V)) {
      switch (StripKind) {
      case PSK_ZeroIndices:
      case PSK_ZeroIndicesAndAliases:
      case PSK_ZeroIndicesSameRepresentation:
      case PSK_ZeroIndicesAndInvariantGroups:
        if (!GEP->hasAllZeroIndices())
          return V;
        break;
      case PSK_InBoundsConstantIndices:
        if (!GEP->hasAllConstantIndices())
          return V;
        LLVM_FALLTHROUGH;
      case PSK_InBounds:
        if (!GEP->isInBounds())
          return V;
        break;
      }
      V = GEP->getPointerOperand();
    } else if (Operator::getOpcode(V) == Instruction::BitCast) {
      V = cast<Operator>(V)->getOperand(0);
      if (!V->getType()->isPointerTy())
        return V;
    } else if (StripKind != PSK_ZeroIndicesSameRepresentation &&
               Operator::getOpcode(V) == Instruction::AddrSpaceCast) {
      // TODO: If we know an address space cast will not change the
      // representation we could look through it here as well.
      V = cast<Operator>(V)->getOperand(0);
    } else if (StripKind == PSK_ZeroIndicesAndAliases && isa<GlobalAlias>(V)) {
      V = cast<GlobalAlias>(V)->getAliasee();
    } else {
      if (const auto *Call = dyn_cast<CallBase>(V)) {
        if (const Value *RV = Call->getReturnedArgOperand()) {
          V = RV;
          continue;
        }
        // The result of launder.invariant.group must alias its argument, but
        // it can't be marked with the returned attribute; that's why it needs
        // to be special-cased here.
        if (StripKind == PSK_ZeroIndicesAndInvariantGroups &&
            (Call->getIntrinsicID() == Intrinsic::launder_invariant_group ||
             Call->getIntrinsicID() == Intrinsic::strip_invariant_group)) {
          V = Call->getArgOperand(0);
          continue;
        }
      }
      return V;
    }
    assert(V->getType()->isPointerTy() && "Unexpected operand type!");
  } while (Visited.insert(V).second);

  return V;
}
} // end anonymous namespace

const Value *Value::stripPointerCasts() const {
  return stripPointerCastsAndOffsets<PSK_ZeroIndices>(this);
}

const Value *Value::stripPointerCastsAndAliases() const {
  return stripPointerCastsAndOffsets<PSK_ZeroIndicesAndAliases>(this);
}

const Value *Value::stripPointerCastsSameRepresentation() const {
  return stripPointerCastsAndOffsets<PSK_ZeroIndicesSameRepresentation>(this);
}

const Value *Value::stripInBoundsConstantOffsets() const {
  return stripPointerCastsAndOffsets<PSK_InBoundsConstantIndices>(this);
}

const Value *Value::stripPointerCastsAndInvariantGroups() const {
  return stripPointerCastsAndOffsets<PSK_ZeroIndicesAndInvariantGroups>(this);
}

const Value *Value::stripAndAccumulateConstantOffsets(
    const DataLayout &DL, APInt &Offset, bool AllowNonInbounds,
    function_ref<bool(Value &, APInt &)> ExternalAnalysis) const {
  if (!getType()->isPtrOrPtrVectorTy())
    return this;

  unsigned BitWidth = Offset.getBitWidth();
  assert(BitWidth == DL.getIndexTypeSizeInBits(getType()) &&
         "The offset bit width does not match the DL specification.");

  // Even though we don't look through PHI nodes, we could be called on an
  // instruction in an unreachable block, which may be on a cycle.
  SmallPtrSet<const Value *, 4> Visited;
  Visited.insert(this);
  const Value *V = this;
  do {
    if (auto *GEP = dyn_cast<GEPOperator>(V)) {
      // If in-bounds was requested, we do not strip non-in-bounds GEPs.
      if (!AllowNonInbounds && !GEP->isInBounds())
        return V;

      // If one of the values we have visited is an addrspacecast, then
      // the pointer type of this GEP may be different from the type
      // of the Ptr parameter which was passed to this function. This
      // means when we construct GEPOffset, we need to use the size
      // of GEP's pointer type rather than the size of the original
      // pointer type.
      APInt GEPOffset(DL.getIndexTypeSizeInBits(V->getType()), 0);
      if (!GEP->accumulateConstantOffset(DL, GEPOffset, ExternalAnalysis))
        return V;

      // Stop traversal if the pointer offset wouldn't fit in the bit-width
      // provided by the Offset argument. This can happen due to AddrSpaceCast
      // stripping.
      if (GEPOffset.getMinSignedBits() > BitWidth)
        return V;

      // External Analysis can return a result higher/lower than the value
      // represents. We need to detect overflow/underflow.
      APInt GEPOffsetST = GEPOffset.sextOrTrunc(BitWidth);
      if (!ExternalAnalysis) {
        Offset += GEPOffsetST;
      } else {
        bool Overflow = false;
        APInt OldOffset = Offset;
        Offset = Offset.sadd_ov(GEPOffsetST, Overflow);
        if (Overflow) {
          Offset = OldOffset;
          return V;
        }
      }
      V = GEP->getPointerOperand();
    } else if (Operator::getOpcode(V) == Instruction::BitCast ||
               Operator::getOpcode(V) == Instruction::AddrSpaceCast) {
      V = cast<Operator>(V)->getOperand(0);
    } else if (auto *GA = dyn_cast<GlobalAlias>(V)) {
      if (!GA->isInterposable())
        V = GA->getAliasee();
    } else if (const auto *Call = dyn_cast<CallBase>(V)) {
      if (const Value *RV = Call->getReturnedArgOperand())
        V = RV;
    }
    assert(V->getType()->isPtrOrPtrVectorTy() && "Unexpected operand type!");
  } while (Visited.insert(V).second);

  return V;
}
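
// Usage sketch (assumed caller-side setup): the Offset argument must be
// pre-sized to the index width of the pointer's address space, as the
// assertion above enforces, e.g.
//   APInt Offset(DL.getIndexTypeSizeInBits(Ptr->getType()), 0);
//   const Value *Base = Ptr->stripAndAccumulateConstantOffsets(
//       DL, Offset, /*AllowNonInbounds=*/true);
//   // Base plus Offset (in bytes) now describes the original pointer Ptr.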

const Value *
Value::stripInBoundsOffsets(function_ref<void(const Value *)> Func) const {
  return stripPointerCastsAndOffsets<PSK_InBounds>(this, Func);
}

uint64_t Value::getPointerDereferenceableBytes(const DataLayout &DL,
                                               bool &CanBeNull) const {
  assert(getType()->isPointerTy() && "must be pointer");

  uint64_t DerefBytes = 0;
  CanBeNull = false;
  if (const Argument *A = dyn_cast<Argument>(this)) {
    DerefBytes = A->getDereferenceableBytes();
    if (DerefBytes == 0) {
      // Handle byval/byref/inalloca/preallocated arguments
      if (Type *ArgMemTy = A->getPointeeInMemoryValueType()) {
        if (ArgMemTy->isSized()) {
          // FIXME: Why isn't this the type alloc size?
          DerefBytes = DL.getTypeStoreSize(ArgMemTy).getKnownMinSize();
        }
      }
    }

    if (DerefBytes == 0) {
      DerefBytes = A->getDereferenceableOrNullBytes();
      CanBeNull = true;
    }
  } else if (const auto *Call = dyn_cast<CallBase>(this)) {
    DerefBytes = Call->getDereferenceableBytes(AttributeList::ReturnIndex);
    if (DerefBytes == 0) {
      DerefBytes =
          Call->getDereferenceableOrNullBytes(AttributeList::ReturnIndex);
      CanBeNull = true;
    }
  } else if (const LoadInst *LI = dyn_cast<LoadInst>(this)) {
    if (MDNode *MD = LI->getMetadata(LLVMContext::MD_dereferenceable)) {
      ConstantInt *CI = mdconst::extract<ConstantInt>(MD->getOperand(0));
      DerefBytes = CI->getLimitedValue();
    }
    if (DerefBytes == 0) {
      if (MDNode *MD =
              LI->getMetadata(LLVMContext::MD_dereferenceable_or_null)) {
        ConstantInt *CI = mdconst::extract<ConstantInt>(MD->getOperand(0));
        DerefBytes = CI->getLimitedValue();
      }
      CanBeNull = true;
    }
  } else if (auto *IP = dyn_cast<IntToPtrInst>(this)) {
    if (MDNode *MD = IP->getMetadata(LLVMContext::MD_dereferenceable)) {
      ConstantInt *CI = mdconst::extract<ConstantInt>(MD->getOperand(0));
      DerefBytes = CI->getLimitedValue();
    }
    if (DerefBytes == 0) {
      if (MDNode *MD =
              IP->getMetadata(LLVMContext::MD_dereferenceable_or_null)) {
        ConstantInt *CI = mdconst::extract<ConstantInt>(MD->getOperand(0));
        DerefBytes = CI->getLimitedValue();
      }
      CanBeNull = true;
    }
  } else if (auto *AI = dyn_cast<AllocaInst>(this)) {
    if (!AI->isArrayAllocation()) {
      DerefBytes =
          DL.getTypeStoreSize(AI->getAllocatedType()).getKnownMinSize();
      CanBeNull = false;
    }
  } else if (auto *GV = dyn_cast<GlobalVariable>(this)) {
    if (GV->getValueType()->isSized() && !GV->hasExternalWeakLinkage()) {
      // TODO: Don't outright reject hasExternalWeakLinkage but set the
      // CanBeNull flag.
      DerefBytes = DL.getTypeStoreSize(GV->getValueType()).getFixedSize();
      CanBeNull = false;
    }
  }
  return DerefBytes;
}
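
// Usage sketch (hypothetical caller): the CanBeNull out-parameter must be
// consulted before relying on the byte count for speculation, e.g.
//   bool CanBeNull;
//   uint64_t Bytes = Ptr->getPointerDereferenceableBytes(DL, CanBeNull);
//   if (Bytes >= NeededBytes && (!CanBeNull || NullPointerIsValidHere))
//     ... // the first NeededBytes bytes are known dereferenceable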

Align Value::getPointerAlignment(const DataLayout &DL) const {
  assert(getType()->isPointerTy() && "must be pointer");
  if (auto *GO = dyn_cast<GlobalObject>(this)) {
    if (isa<Function>(GO)) {
      Align FunctionPtrAlign = DL.getFunctionPtrAlign().valueOrOne();
      switch (DL.getFunctionPtrAlignType()) {
      case DataLayout::FunctionPtrAlignType::Independent:
        return FunctionPtrAlign;
      case DataLayout::FunctionPtrAlignType::MultipleOfFunctionAlign:
        return std::max(FunctionPtrAlign, GO->getAlign().valueOrOne());
      }
      llvm_unreachable("Unhandled FunctionPtrAlignType");
    }
    const MaybeAlign Alignment(GO->getAlignment());
    if (!Alignment) {
      if (auto *GVar = dyn_cast<GlobalVariable>(GO)) {
        Type *ObjectType = GVar->getValueType();
        if (ObjectType->isSized()) {
          // If the object is defined in the current Module, we'll be giving
          // it the preferred alignment. Otherwise, we have to assume that it
          // may only have the minimum ABI alignment.
          if (GVar->isStrongDefinitionForLinker())
            return DL.getPreferredAlign(GVar);
          else
            return DL.getABITypeAlign(ObjectType);
        }
      }
    }
    return Alignment.valueOrOne();
  } else if (const Argument *A = dyn_cast<Argument>(this)) {
    const MaybeAlign Alignment = A->getParamAlign();
    if (!Alignment && A->hasStructRetAttr()) {
      // An sret parameter has at least the ABI alignment of the return type.
      Type *EltTy = A->getParamStructRetType();
      if (EltTy->isSized())
        return DL.getABITypeAlign(EltTy);
    }
    return Alignment.valueOrOne();
  } else if (const AllocaInst *AI = dyn_cast<AllocaInst>(this)) {
    return AI->getAlign();
  } else if (const auto *Call = dyn_cast<CallBase>(this)) {
    MaybeAlign Alignment = Call->getRetAlign();
    if (!Alignment && Call->getCalledFunction())
      Alignment = Call->getCalledFunction()->getAttributes().getRetAlignment();
    return Alignment.valueOrOne();
  } else if (const LoadInst *LI = dyn_cast<LoadInst>(this)) {
    if (MDNode *MD = LI->getMetadata(LLVMContext::MD_align)) {
      ConstantInt *CI = mdconst::extract<ConstantInt>(MD->getOperand(0));
      return Align(CI->getLimitedValue());
    }
  } else if (auto *CstPtr = dyn_cast<Constant>(this)) {
    if (auto *CstInt = dyn_cast_or_null<ConstantInt>(ConstantExpr::getPtrToInt(
            const_cast<Constant *>(CstPtr), DL.getIntPtrType(getType()),
            /*OnlyIfReduced=*/true))) {
      size_t TrailingZeros = CstInt->getValue().countTrailingZeros();
      // While the actual alignment may be large, elsewhere we have
      // an arbitrary upper alignment limit, so let's clamp to it.
      return Align(TrailingZeros < Value::MaxAlignmentExponent
                       ? uint64_t(1) << TrailingZeros
                       : Value::MaximumAlignment);
    }
  }
  return Align(1);
}

const Value *Value::DoPHITranslation(const BasicBlock *CurBB,
                                     const BasicBlock *PredBB) const {
  auto *PN = dyn_cast<PHINode>(this);
  if (PN && PN->getParent() == CurBB)
    return PN->getIncomingValueForBlock(PredBB);
  return this;
}

LLVMContext &Value::getContext() const { return VTy->getContext(); }

void Value::reverseUseList() {
  if (!UseList || !UseList->Next)
    // No need to reverse 0 or 1 uses.
    return;

  Use *Head = UseList;
  Use *Current = UseList->Next;
  Head->Next = nullptr;
  while (Current) {
    Use *Next = Current->Next;
    Current->Next = Head;
    Head->Prev = &Current->Next;
    Head = Current;
    Current = Next;
  }
  UseList = Head;
  Head->Prev = &UseList;
}

bool Value::isSwiftError() const {
  auto *Arg = dyn_cast<Argument>(this);
  if (Arg)
    return Arg->hasSwiftErrorAttr();
  auto *Alloca = dyn_cast<AllocaInst>(this);
  if (!Alloca)
    return false;
  return Alloca->isSwiftError();
}

//===----------------------------------------------------------------------===//
//                             ValueHandleBase Class
//===----------------------------------------------------------------------===//

void ValueHandleBase::AddToExistingUseList(ValueHandleBase **List) {
  assert(List && "Handle list is null?");

  // Splice ourselves into the list.
  Next = *List;
  *List = this;
  setPrevPtr(List);
  if (Next) {
    Next->setPrevPtr(&Next);
    assert(getValPtr() == Next->getValPtr() && "Added to wrong list?");
  }
}

void ValueHandleBase::AddToExistingUseListAfter(ValueHandleBase *List) {
  assert(List && "Must insert after existing node");

  Next = List->Next;
  setPrevPtr(&List->Next);
  List->Next = this;
  if (Next)
    Next->setPrevPtr(&Next);
}

void ValueHandleBase::AddToUseList() {
  assert(getValPtr() && "Null pointer doesn't have a use list!");

  LLVMContextImpl *pImpl = getValPtr()->getContext().pImpl;

  if (getValPtr()->HasValueHandle) {
    // If this value already has a ValueHandle, then it must be in the
    // ValueHandles map already.
    ValueHandleBase *&Entry = pImpl->ValueHandles[getValPtr()];
    assert(Entry && "Value doesn't have any handles?");
    AddToExistingUseList(&Entry);
    return;
  }

  // Ok, it doesn't have any handles yet, so we must insert it into the
  // DenseMap. However, doing this insertion could cause the DenseMap to
  // reallocate itself, which would invalidate all of the PrevP pointers that
  // point into the old table. Handle this by checking for reallocation and
  // updating the stale pointers only if needed.
  DenseMap<Value*, ValueHandleBase*> &Handles = pImpl->ValueHandles;
  const void *OldBucketPtr = Handles.getPointerIntoBucketsArray();

  ValueHandleBase *&Entry = Handles[getValPtr()];
  assert(!Entry && "Value really did already have handles?");
  AddToExistingUseList(&Entry);
  getValPtr()->HasValueHandle = true;

  // If reallocation didn't happen or if this was the first insertion, don't
  // walk the table.
  if (Handles.isPointerIntoBucketsArray(OldBucketPtr) ||
      Handles.size() == 1) {
    return;
  }

  // Okay, reallocation did happen. Fix the Prev Pointers.
  for (DenseMap<Value*, ValueHandleBase*>::iterator I = Handles.begin(),
       E = Handles.end(); I != E; ++I) {
    assert(I->second && I->first == I->second->getValPtr() &&
           "List invariant broken!");
    I->second->setPrevPtr(&I->second);
  }
}

void ValueHandleBase::RemoveFromUseList() {
  assert(getValPtr() && getValPtr()->HasValueHandle &&
         "Pointer doesn't have a use list!");

  // Unlink this from its use list.
  ValueHandleBase **PrevPtr = getPrevPtr();
  assert(*PrevPtr == this && "List invariant broken");

  *PrevPtr = Next;
  if (Next) {
    assert(Next->getPrevPtr() == &Next && "List invariant broken");
    Next->setPrevPtr(PrevPtr);
    return;
  }

  // If the Next pointer was null, then it is possible that this was the last
  // ValueHandle watching VP. If so, delete its entry from the ValueHandles
  // map.
  LLVMContextImpl *pImpl = getValPtr()->getContext().pImpl;
  DenseMap<Value*, ValueHandleBase*> &Handles = pImpl->ValueHandles;
  if (Handles.isPointerIntoBucketsArray(PrevPtr)) {
    Handles.erase(getValPtr());
    getValPtr()->HasValueHandle = false;
  }
}

void ValueHandleBase::ValueIsDeleted(Value *V) {
  assert(V->HasValueHandle && "Should only be called if ValueHandles present");

  // Get the linked list base, which is guaranteed to exist since the
  // HasValueHandle flag is set.
  LLVMContextImpl *pImpl = V->getContext().pImpl;
  ValueHandleBase *Entry = pImpl->ValueHandles[V];
  assert(Entry && "Value bit set but no entries exist");

  // We use a local ValueHandleBase as an iterator so that ValueHandles can add
  // and remove themselves from the list without breaking our iteration. This
  // is not really an AssertingVH; we just have to give ValueHandleBase a kind.
  // Note that we deliberately do not support the case when dropping a value
  // handle results in a new value handle being permanently added to the list
  // (as might occur in theory for CallbackVH's): the new value handle will not
  // be processed and the checking code will mete out righteous punishment if
  // the handle is still present once we have finished processing all the other
  // value handles (it is fine to momentarily add then remove a value handle).
  for (ValueHandleBase Iterator(Assert, *Entry); Entry; Entry = Iterator.Next) {
    Iterator.RemoveFromUseList();
    Iterator.AddToExistingUseListAfter(Entry);
    assert(Entry->Next == &Iterator && "Loop invariant broken.");

    switch (Entry->getKind()) {
    case Assert:
      break;
    case Weak:
    case WeakTracking:
      // WeakTracking and Weak just go to null, which unlinks them
      // from the list.
      Entry->operator=(nullptr);
      break;
    case Callback:
      // Forward to the subclass's implementation.
      static_cast<CallbackVH*>(Entry)->deleted();
      break;
    }
  }

  // All callbacks, weak references, and assertingVHs should be dropped by now.
  if (V->HasValueHandle) {
#ifndef NDEBUG // Only in +Asserts mode...
    dbgs() << "While deleting: " << *V->getType() << " %" << V->getName()
           << "\n";
    if (pImpl->ValueHandles[V]->getKind() == Assert)
      llvm_unreachable("An asserting value handle still pointed to this"
                       " value!");

#endif
    llvm_unreachable("All references to V were not removed?");
  }
}

void ValueHandleBase::ValueIsRAUWd(Value *Old, Value *New) {
  assert(Old->HasValueHandle && "Should only be called if ValueHandles present");
  assert(Old != New && "Changing value into itself!");
  assert(Old->getType() == New->getType() &&
         "replaceAllUses of value with new value of different type!");

  // Get the linked list base, which is guaranteed to exist since the
  // HasValueHandle flag is set.
  LLVMContextImpl *pImpl = Old->getContext().pImpl;
  ValueHandleBase *Entry = pImpl->ValueHandles[Old];

  assert(Entry && "Value bit set but no entries exist");

  // We use a local ValueHandleBase as an iterator so that
  // ValueHandles can add and remove themselves from the list without
  // breaking our iteration. This is not really an AssertingVH; we
  // just have to give ValueHandleBase some kind.
  for (ValueHandleBase Iterator(Assert, *Entry); Entry; Entry = Iterator.Next) {
    Iterator.RemoveFromUseList();
    Iterator.AddToExistingUseListAfter(Entry);
    assert(Entry->Next == &Iterator && "Loop invariant broken.");

    switch (Entry->getKind()) {
    case Assert:
    case Weak:
      // Asserting and Weak handles do not follow RAUW implicitly.
      break;
    case WeakTracking:
      // Weak goes to the new value, which will unlink it from Old's list.
      Entry->operator=(New);
      break;
    case Callback:
      // Forward to the subclass's implementation.
      static_cast<CallbackVH*>(Entry)->allUsesReplacedWith(New);
      break;
    }
  }

#ifndef NDEBUG
  // If any new weak value handles were added while processing the
  // list, then complain about it now.
  if (Old->HasValueHandle)
    for (Entry = pImpl->ValueHandles[Old]; Entry; Entry = Entry->Next)
      switch (Entry->getKind()) {
      case WeakTracking:
        dbgs() << "After RAUW from " << *Old->getType() << " %"
               << Old->getName() << " to " << *New->getType() << " %"
               << New->getName() << "\n";
        llvm_unreachable(
            "A weak tracking value handle still pointed to the old value!\n");
      default:
        break;
      }
#endif
}

// Pin the vtable to this file.
void CallbackVH::anchor() {}