//===-- Verifier.cpp - Implement the Module Verifier -----------------------==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the function verifier interface, which can be used for
// some sanity checking of input to the system.
//
// Note that this does not provide full `Java style' security and verifications;
// instead it just tries to ensure that code is well-formed.
//
// * Both of a binary operator's parameters are of the same type
// * Verify that the indices of mem access instructions match other operands
// * Verify that arithmetic and other things are only performed on first-class
//   types. Verify, e.g., that shifts & logicals only happen on integrals
// * All of the constants in a switch statement are of the correct type
// * The code is in valid SSA form
// * It should be illegal to put a label into any other type (like a structure)
//   or to return one. [except constant arrays!]
// * Only phi nodes can be self referential: 'add i32 %0, %0 ; <int>:0' is bad
// * PHI nodes must have an entry for each predecessor, with no extras.
// * PHI nodes must be the first thing in a basic block, all grouped together
// * PHI nodes must have at least one entry
// * All basic blocks should only end with terminator insts, not contain them
// * The entry node to a function must not have predecessors
// * All Instructions must be embedded into a basic block
// * Functions cannot take a void-typed parameter
// * Verify that a function's argument list agrees with its declared type.
// * It is illegal to specify a name for a void value.
// * It is illegal to have an internal global value with no initializer
// * It is illegal to have a ret instruction that returns a value that does not
//   agree with the function return value type.
// * Function call argument types match the function prototype
// * A landing pad is defined by a landingpad instruction, and can be jumped to
//   only by the unwind edge of an invoke instruction.
// * A landingpad instruction must be the first non-PHI instruction in the
//   block.
// * Landingpad instructions must be in a function with a personality function.
// * All other things that are tested by asserts spread about the code...
//
//===----------------------------------------------------------------------===//
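//
// A minimal usage sketch (illustrative; not part of the original file):
// clients normally reach this verifier through the free functions declared in
// llvm/IR/Verifier.h rather than by instantiating the Verifier class defined
// below.
//
//   #include "llvm/IR/Verifier.h"
//   #include "llvm/Support/raw_ostream.h"
//
//   // Both return true when the IR is broken; diagnostics go to the stream.
//   bool ModuleBroken = llvm::verifyModule(M, &llvm::errs());
//   bool FunctionBroken = llvm::verifyFunction(F, &llvm::errs());
//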
45
46 #include "llvm/IR/Verifier.h"
47 #include "llvm/ADT/APFloat.h"
48 #include "llvm/ADT/APInt.h"
49 #include "llvm/ADT/ArrayRef.h"
50 #include "llvm/ADT/DenseMap.h"
51 #include "llvm/ADT/MapVector.h"
52 #include "llvm/ADT/Optional.h"
53 #include "llvm/ADT/STLExtras.h"
54 #include "llvm/ADT/SmallPtrSet.h"
55 #include "llvm/ADT/SmallSet.h"
56 #include "llvm/ADT/SmallVector.h"
57 #include "llvm/ADT/StringExtras.h"
58 #include "llvm/ADT/StringMap.h"
59 #include "llvm/ADT/StringRef.h"
60 #include "llvm/ADT/Twine.h"
61 #include "llvm/ADT/ilist.h"
62 #include "llvm/BinaryFormat/Dwarf.h"
63 #include "llvm/IR/Argument.h"
64 #include "llvm/IR/Attributes.h"
65 #include "llvm/IR/BasicBlock.h"
66 #include "llvm/IR/CFG.h"
67 #include "llvm/IR/CallingConv.h"
68 #include "llvm/IR/Comdat.h"
69 #include "llvm/IR/Constant.h"
70 #include "llvm/IR/ConstantRange.h"
71 #include "llvm/IR/Constants.h"
72 #include "llvm/IR/DataLayout.h"
73 #include "llvm/IR/DebugInfo.h"
74 #include "llvm/IR/DebugInfoMetadata.h"
75 #include "llvm/IR/DebugLoc.h"
76 #include "llvm/IR/DerivedTypes.h"
77 #include "llvm/IR/Dominators.h"
78 #include "llvm/IR/Function.h"
79 #include "llvm/IR/GlobalAlias.h"
80 #include "llvm/IR/GlobalValue.h"
81 #include "llvm/IR/GlobalVariable.h"
82 #include "llvm/IR/InlineAsm.h"
83 #include "llvm/IR/InstVisitor.h"
84 #include "llvm/IR/InstrTypes.h"
85 #include "llvm/IR/Instruction.h"
86 #include "llvm/IR/Instructions.h"
87 #include "llvm/IR/IntrinsicInst.h"
88 #include "llvm/IR/Intrinsics.h"
89 #include "llvm/IR/IntrinsicsWebAssembly.h"
90 #include "llvm/IR/LLVMContext.h"
91 #include "llvm/IR/Metadata.h"
92 #include "llvm/IR/Module.h"
93 #include "llvm/IR/ModuleSlotTracker.h"
94 #include "llvm/IR/PassManager.h"
95 #include "llvm/IR/Statepoint.h"
96 #include "llvm/IR/Type.h"
97 #include "llvm/IR/Use.h"
98 #include "llvm/IR/User.h"
99 #include "llvm/IR/Value.h"
100 #include "llvm/InitializePasses.h"
101 #include "llvm/Pass.h"
102 #include "llvm/Support/AtomicOrdering.h"
103 #include "llvm/Support/Casting.h"
104 #include "llvm/Support/CommandLine.h"
105 #include "llvm/Support/Debug.h"
106 #include "llvm/Support/ErrorHandling.h"
107 #include "llvm/Support/MathExtras.h"
108 #include "llvm/Support/raw_ostream.h"
109 #include <algorithm>
110 #include <cassert>
111 #include <cstdint>
112 #include <memory>
113 #include <string>
114 #include <utility>
115
116 using namespace llvm;
117
118 namespace llvm {
119
120 struct VerifierSupport {
121 raw_ostream *OS;
122 const Module &M;
123 ModuleSlotTracker MST;
124 Triple TT;
125 const DataLayout &DL;
126 LLVMContext &Context;
127
128 /// Track the brokenness of the module while recursively visiting.
129 bool Broken = false;
130 /// Broken debug info can be "recovered" from by stripping the debug info.
131 bool BrokenDebugInfo = false;
132 /// Whether to treat broken debug info as an error.
133 bool TreatBrokenDebugInfoAsError = true;
134
  explicit VerifierSupport(raw_ostream *OS, const Module &M)
136 : OS(OS), M(M), MST(&M), TT(M.getTargetTriple()), DL(M.getDataLayout()),
137 Context(M.getContext()) {}
138
139 private:
  void Write(const Module *M) {
141 *OS << "; ModuleID = '" << M->getModuleIdentifier() << "'\n";
142 }
143
  void Write(const Value *V) {
145 if (V)
146 Write(*V);
147 }
148
  void Write(const Value &V) {
150 if (isa<Instruction>(V)) {
151 V.print(*OS, MST);
152 *OS << '\n';
153 } else {
154 V.printAsOperand(*OS, true, MST);
155 *OS << '\n';
156 }
157 }
158
  void Write(const Metadata *MD) {
160 if (!MD)
161 return;
162 MD->print(*OS, MST, &M);
163 *OS << '\n';
164 }
165
  template <class T> void Write(const MDTupleTypedArrayWrapper<T> &MD) {
167 Write(MD.get());
168 }
169
  void Write(const NamedMDNode *NMD) {
171 if (!NMD)
172 return;
173 NMD->print(*OS, MST);
174 *OS << '\n';
175 }
176
  void Write(Type *T) {
178 if (!T)
179 return;
180 *OS << ' ' << *T;
181 }
182
  void Write(const Comdat *C) {
184 if (!C)
185 return;
186 *OS << *C;
187 }
188
  void Write(const APInt *AI) {
190 if (!AI)
191 return;
192 *OS << *AI << '\n';
193 }
194
  void Write(const unsigned i) { *OS << i << '\n'; }
196
  template <typename T> void Write(ArrayRef<T> Vs) {
198 for (const T &V : Vs)
199 Write(V);
200 }
201
202 template <typename T1, typename... Ts>
  void WriteTs(const T1 &V1, const Ts &... Vs) {
204 Write(V1);
205 WriteTs(Vs...);
206 }
207
  template <typename... Ts> void WriteTs() {}
209
210 public:
  /// A check failed, so print out the condition and the message.
212 ///
213 /// This provides a nice place to put a breakpoint if you want to see why
214 /// something is not correct.
  void CheckFailed(const Twine &Message) {
216 if (OS)
217 *OS << Message << '\n';
218 Broken = true;
219 }
220
221 /// A check failed (with values to print).
222 ///
223 /// This calls the Message-only version so that the above is easier to set a
224 /// breakpoint on.
225 template <typename T1, typename... Ts>
  void CheckFailed(const Twine &Message, const T1 &V1, const Ts &... Vs) {
227 CheckFailed(Message);
228 if (OS)
229 WriteTs(V1, Vs...);
230 }
231
232 /// A debug info check failed.
  void DebugInfoCheckFailed(const Twine &Message) {
234 if (OS)
235 *OS << Message << '\n';
236 Broken |= TreatBrokenDebugInfoAsError;
237 BrokenDebugInfo = true;
238 }
239
240 /// A debug info check failed (with values to print).
241 template <typename T1, typename... Ts>
  void DebugInfoCheckFailed(const Twine &Message, const T1 &V1,
243 const Ts &... Vs) {
244 DebugInfoCheckFailed(Message);
245 if (OS)
246 WriteTs(V1, Vs...);
247 }
248 };
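// Illustrative note (not part of the original file): a failed check such as
//
//   CheckFailed("Alias and aliasee types should match!", &GA);
//
// first prints the message through the Twine-only overload above (a convenient
// place for a breakpoint), then forwards &GA through WriteTs() so that the
// matching Write() overload -- here Write(const Value *) -- dumps the
// offending value to the output stream.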
249
250 } // namespace llvm
251
252 namespace {
253
254 class Verifier : public InstVisitor<Verifier>, VerifierSupport {
255 friend class InstVisitor<Verifier>;
256
257 DominatorTree DT;
258
259 /// When verifying a basic block, keep track of all of the
260 /// instructions we have seen so far.
261 ///
262 /// This allows us to do efficient dominance checks for the case when an
263 /// instruction has an operand that is an instruction in the same block.
264 SmallPtrSet<Instruction *, 16> InstsInThisBlock;
265
266 /// Keep track of the metadata nodes that have been checked already.
267 SmallPtrSet<const Metadata *, 32> MDNodes;
268
269 /// Keep track which DISubprogram is attached to which function.
270 DenseMap<const DISubprogram *, const Function *> DISubprogramAttachments;
271
272 /// Track all DICompileUnits visited.
273 SmallPtrSet<const Metadata *, 2> CUVisited;
274
275 /// The result type for a landingpad.
276 Type *LandingPadResultTy;
277
278 /// Whether we've seen a call to @llvm.localescape in this function
279 /// already.
280 bool SawFrameEscape;
281
282 /// Whether the current function has a DISubprogram attached to it.
283 bool HasDebugInfo = false;
284
285 /// Whether source was present on the first DIFile encountered in each CU.
286 DenseMap<const DICompileUnit *, bool> HasSourceDebugInfo;
287
288 /// Stores the count of how many objects were passed to llvm.localescape for a
289 /// given function and the largest index passed to llvm.localrecover.
290 DenseMap<Function *, std::pair<unsigned, unsigned>> FrameEscapeInfo;
291
292 // Maps catchswitches and cleanuppads that unwind to siblings to the
293 // terminators that indicate the unwind, used to detect cycles therein.
294 MapVector<Instruction *, Instruction *> SiblingFuncletInfo;
295
296 /// Cache of constants visited in search of ConstantExprs.
297 SmallPtrSet<const Constant *, 32> ConstantExprVisited;
298
299 /// Cache of declarations of the llvm.experimental.deoptimize.<ty> intrinsic.
300 SmallVector<const Function *, 4> DeoptimizeDeclarations;
301
302 // Verify that this GlobalValue is only used in this module.
303 // This map is used to avoid visiting uses twice. We can arrive at a user
304 // twice, if they have multiple operands. In particular for very large
305 // constant expressions, we can arrive at a particular user many times.
306 SmallPtrSet<const Value *, 32> GlobalValueVisited;
307
308 // Keeps track of duplicate function argument debug info.
309 SmallVector<const DILocalVariable *, 16> DebugFnArgs;
310
311 TBAAVerifier TBAAVerifyHelper;
312
313 void checkAtomicMemAccessSize(Type *Ty, const Instruction *I);
314
315 public:
  explicit Verifier(raw_ostream *OS, bool ShouldTreatBrokenDebugInfoAsError,
317 const Module &M)
318 : VerifierSupport(OS, M), LandingPadResultTy(nullptr),
319 SawFrameEscape(false), TBAAVerifyHelper(this) {
320 TreatBrokenDebugInfoAsError = ShouldTreatBrokenDebugInfoAsError;
321 }
322
  bool hasBrokenDebugInfo() const { return BrokenDebugInfo; }
324
  bool verify(const Function &F) {
326 assert(F.getParent() == &M &&
327 "An instance of this class only works with a specific module!");
328
329 // First ensure the function is well-enough formed to compute dominance
330 // information, and directly compute a dominance tree. We don't rely on the
331 // pass manager to provide this as it isolates us from a potentially
332 // out-of-date dominator tree and makes it significantly more complex to run
333 // this code outside of a pass manager.
334 // FIXME: It's really gross that we have to cast away constness here.
335 if (!F.empty())
336 DT.recalculate(const_cast<Function &>(F));
337
338 for (const BasicBlock &BB : F) {
339 if (!BB.empty() && BB.back().isTerminator())
340 continue;
341
342 if (OS) {
343 *OS << "Basic Block in function '" << F.getName()
344 << "' does not have terminator!\n";
345 BB.printAsOperand(*OS, true, MST);
346 *OS << "\n";
347 }
348 return false;
349 }
350
351 Broken = false;
352 // FIXME: We strip const here because the inst visitor strips const.
353 visit(const_cast<Function &>(F));
354 verifySiblingFuncletUnwinds();
355 InstsInThisBlock.clear();
356 DebugFnArgs.clear();
357 LandingPadResultTy = nullptr;
358 SawFrameEscape = false;
359 SiblingFuncletInfo.clear();
360
361 return !Broken;
362 }
363
364 /// Verify the module that this instance of \c Verifier was initialized with.
  bool verify() {
366 Broken = false;
367
368 // Collect all declarations of the llvm.experimental.deoptimize intrinsic.
369 for (const Function &F : M)
370 if (F.getIntrinsicID() == Intrinsic::experimental_deoptimize)
371 DeoptimizeDeclarations.push_back(&F);
372
373 // Now that we've visited every function, verify that we never asked to
374 // recover a frame index that wasn't escaped.
375 verifyFrameRecoverIndices();
376 for (const GlobalVariable &GV : M.globals())
377 visitGlobalVariable(GV);
378
379 for (const GlobalAlias &GA : M.aliases())
380 visitGlobalAlias(GA);
381
382 for (const NamedMDNode &NMD : M.named_metadata())
383 visitNamedMDNode(NMD);
384
385 for (const StringMapEntry<Comdat> &SMEC : M.getComdatSymbolTable())
386 visitComdat(SMEC.getValue());
387
388 visitModuleFlags(M);
389 visitModuleIdents(M);
390 visitModuleCommandLines(M);
391
392 verifyCompileUnits();
393
394 verifyDeoptimizeCallingConvs();
395 DISubprogramAttachments.clear();
396 return !Broken;
397 }
398
399 private:
400 // Verification methods...
401 void visitGlobalValue(const GlobalValue &GV);
402 void visitGlobalVariable(const GlobalVariable &GV);
403 void visitGlobalAlias(const GlobalAlias &GA);
404 void visitAliaseeSubExpr(const GlobalAlias &A, const Constant &C);
405 void visitAliaseeSubExpr(SmallPtrSetImpl<const GlobalAlias *> &Visited,
406 const GlobalAlias &A, const Constant &C);
407 void visitNamedMDNode(const NamedMDNode &NMD);
408 void visitMDNode(const MDNode &MD);
409 void visitMetadataAsValue(const MetadataAsValue &MD, Function *F);
410 void visitValueAsMetadata(const ValueAsMetadata &MD, Function *F);
411 void visitComdat(const Comdat &C);
412 void visitModuleIdents(const Module &M);
413 void visitModuleCommandLines(const Module &M);
414 void visitModuleFlags(const Module &M);
415 void visitModuleFlag(const MDNode *Op,
416 DenseMap<const MDString *, const MDNode *> &SeenIDs,
417 SmallVectorImpl<const MDNode *> &Requirements);
418 void visitModuleFlagCGProfileEntry(const MDOperand &MDO);
419 void visitFunction(const Function &F);
420 void visitBasicBlock(BasicBlock &BB);
421 void visitRangeMetadata(Instruction &I, MDNode *Range, Type *Ty);
422 void visitDereferenceableMetadata(Instruction &I, MDNode *MD);
423 void visitProfMetadata(Instruction &I, MDNode *MD);
424
425 template <class Ty> bool isValidMetadataArray(const MDTuple &N);
426 #define HANDLE_SPECIALIZED_MDNODE_LEAF(CLASS) void visit##CLASS(const CLASS &N);
427 #include "llvm/IR/Metadata.def"
428 void visitDIScope(const DIScope &N);
429 void visitDIVariable(const DIVariable &N);
430 void visitDILexicalBlockBase(const DILexicalBlockBase &N);
431 void visitDITemplateParameter(const DITemplateParameter &N);
432
433 void visitTemplateParams(const MDNode &N, const Metadata &RawParams);
434
435 // InstVisitor overrides...
436 using InstVisitor<Verifier>::visit;
437 void visit(Instruction &I);
438
439 void visitTruncInst(TruncInst &I);
440 void visitZExtInst(ZExtInst &I);
441 void visitSExtInst(SExtInst &I);
442 void visitFPTruncInst(FPTruncInst &I);
443 void visitFPExtInst(FPExtInst &I);
444 void visitFPToUIInst(FPToUIInst &I);
445 void visitFPToSIInst(FPToSIInst &I);
446 void visitUIToFPInst(UIToFPInst &I);
447 void visitSIToFPInst(SIToFPInst &I);
448 void visitIntToPtrInst(IntToPtrInst &I);
449 void visitPtrToIntInst(PtrToIntInst &I);
450 void visitBitCastInst(BitCastInst &I);
451 void visitAddrSpaceCastInst(AddrSpaceCastInst &I);
452 void visitPHINode(PHINode &PN);
453 void visitCallBase(CallBase &Call);
454 void visitUnaryOperator(UnaryOperator &U);
455 void visitBinaryOperator(BinaryOperator &B);
456 void visitICmpInst(ICmpInst &IC);
457 void visitFCmpInst(FCmpInst &FC);
458 void visitExtractElementInst(ExtractElementInst &EI);
459 void visitInsertElementInst(InsertElementInst &EI);
460 void visitShuffleVectorInst(ShuffleVectorInst &EI);
  void visitVAArgInst(VAArgInst &VAA) { visitInstruction(VAA); }
462 void visitCallInst(CallInst &CI);
463 void visitInvokeInst(InvokeInst &II);
464 void visitGetElementPtrInst(GetElementPtrInst &GEP);
465 void visitLoadInst(LoadInst &LI);
466 void visitStoreInst(StoreInst &SI);
467 void verifyDominatesUse(Instruction &I, unsigned i);
468 void visitInstruction(Instruction &I);
469 void visitTerminator(Instruction &I);
470 void visitBranchInst(BranchInst &BI);
471 void visitReturnInst(ReturnInst &RI);
472 void visitSwitchInst(SwitchInst &SI);
473 void visitIndirectBrInst(IndirectBrInst &BI);
474 void visitCallBrInst(CallBrInst &CBI);
475 void visitSelectInst(SelectInst &SI);
476 void visitUserOp1(Instruction &I);
  void visitUserOp2(Instruction &I) { visitUserOp1(I); }
478 void visitIntrinsicCall(Intrinsic::ID ID, CallBase &Call);
479 void visitConstrainedFPIntrinsic(ConstrainedFPIntrinsic &FPI);
480 void visitDbgIntrinsic(StringRef Kind, DbgVariableIntrinsic &DII);
481 void visitDbgLabelIntrinsic(StringRef Kind, DbgLabelInst &DLI);
482 void visitAtomicCmpXchgInst(AtomicCmpXchgInst &CXI);
483 void visitAtomicRMWInst(AtomicRMWInst &RMWI);
484 void visitFenceInst(FenceInst &FI);
485 void visitAllocaInst(AllocaInst &AI);
486 void visitExtractValueInst(ExtractValueInst &EVI);
487 void visitInsertValueInst(InsertValueInst &IVI);
488 void visitEHPadPredecessors(Instruction &I);
489 void visitLandingPadInst(LandingPadInst &LPI);
490 void visitResumeInst(ResumeInst &RI);
491 void visitCatchPadInst(CatchPadInst &CPI);
492 void visitCatchReturnInst(CatchReturnInst &CatchReturn);
493 void visitCleanupPadInst(CleanupPadInst &CPI);
494 void visitFuncletPadInst(FuncletPadInst &FPI);
495 void visitCatchSwitchInst(CatchSwitchInst &CatchSwitch);
496 void visitCleanupReturnInst(CleanupReturnInst &CRI);
497
498 void verifySwiftErrorCall(CallBase &Call, const Value *SwiftErrorVal);
499 void verifySwiftErrorValue(const Value *SwiftErrorVal);
500 void verifyMustTailCall(CallInst &CI);
501 bool performTypeCheck(Intrinsic::ID ID, Function *F, Type *Ty, int VT,
502 unsigned ArgNo, std::string &Suffix);
503 bool verifyAttributeCount(AttributeList Attrs, unsigned Params);
504 void verifyAttributeTypes(AttributeSet Attrs, bool IsFunction,
505 const Value *V);
506 void verifyParameterAttrs(AttributeSet Attrs, Type *Ty, const Value *V);
507 void verifyFunctionAttrs(FunctionType *FT, AttributeList Attrs,
508 const Value *V, bool IsIntrinsic);
509 void verifyFunctionMetadata(ArrayRef<std::pair<unsigned, MDNode *>> MDs);
510
511 void visitConstantExprsRecursively(const Constant *EntryC);
512 void visitConstantExpr(const ConstantExpr *CE);
513 void verifyStatepoint(const CallBase &Call);
514 void verifyFrameRecoverIndices();
515 void verifySiblingFuncletUnwinds();
516
517 void verifyFragmentExpression(const DbgVariableIntrinsic &I);
518 template <typename ValueOrMetadata>
519 void verifyFragmentExpression(const DIVariable &V,
520 DIExpression::FragmentInfo Fragment,
521 ValueOrMetadata *Desc);
522 void verifyFnArgs(const DbgVariableIntrinsic &I);
523 void verifyNotEntryValue(const DbgVariableIntrinsic &I);
524
525 /// Module-level debug info verification...
526 void verifyCompileUnits();
527
528 /// Module-level verification that all @llvm.experimental.deoptimize
529 /// declarations share the same calling convention.
530 void verifyDeoptimizeCallingConvs();
531
532 /// Verify all-or-nothing property of DIFile source attribute within a CU.
533 void verifySourceDebugInfo(const DICompileUnit &U, const DIFile &F);
534 };
535
536 } // end anonymous namespace
537
/// We know that cond should be true; if not, print an error message.
539 #define Assert(C, ...) \
540 do { if (!(C)) { CheckFailed(__VA_ARGS__); return; } } while (false)
541
/// We know that a debug info condition should be true; if not, print
543 /// an error message.
544 #define AssertDI(C, ...) \
545 do { if (!(C)) { DebugInfoCheckFailed(__VA_ARGS__); return; } } while (false)
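// Example (mirrors a check that appears later in this file): inside a visitor,
//
//   Assert(GV.getAlignment() <= Value::MaximumAlignment,
//          "huge alignment values are unsupported", &GV);
//
// records the failure through CheckFailed(...) and then returns from the
// enclosing visit method, so verification does not keep descending into an
// already-broken construct.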
546
void Verifier::visit(Instruction &I) {
548 for (unsigned i = 0, e = I.getNumOperands(); i != e; ++i)
549 Assert(I.getOperand(i) != nullptr, "Operand is null", &I);
550 InstVisitor<Verifier>::visit(I);
551 }
552
553 // Helper to recursively iterate over indirect users. By
554 // returning false, the callback can ask to stop recursing
555 // further.
static void forEachUser(const Value *User,
557 SmallPtrSet<const Value *, 32> &Visited,
558 llvm::function_ref<bool(const Value *)> Callback) {
559 if (!Visited.insert(User).second)
560 return;
561 for (const Value *TheNextUser : User->materialized_users())
562 if (Callback(TheNextUser))
563 forEachUser(TheNextUser, Visited, Callback);
564 }
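// Usage sketch: visitGlobalValue() below calls this with a lambda that
// classifies each user, returning false for Instructions and Functions (the
// walk stops there) and true otherwise, so recursion continues only through
// constants and other intermediate users.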
565
void Verifier::visitGlobalValue(const GlobalValue &GV) {
567 Assert(!GV.isDeclaration() || GV.hasValidDeclarationLinkage(),
568 "Global is external, but doesn't have external or weak linkage!", &GV);
569
570 Assert(GV.getAlignment() <= Value::MaximumAlignment,
571 "huge alignment values are unsupported", &GV);
572 Assert(!GV.hasAppendingLinkage() || isa<GlobalVariable>(GV),
573 "Only global variables can have appending linkage!", &GV);
574
575 if (GV.hasAppendingLinkage()) {
576 const GlobalVariable *GVar = dyn_cast<GlobalVariable>(&GV);
577 Assert(GVar && GVar->getValueType()->isArrayTy(),
578 "Only global arrays can have appending linkage!", GVar);
579 }
580
581 if (GV.isDeclarationForLinker())
582 Assert(!GV.hasComdat(), "Declaration may not be in a Comdat!", &GV);
583
584 if (GV.hasDLLImportStorageClass()) {
585 Assert(!GV.isDSOLocal(),
586 "GlobalValue with DLLImport Storage is dso_local!", &GV);
587
588 Assert((GV.isDeclaration() && GV.hasExternalLinkage()) ||
589 GV.hasAvailableExternallyLinkage(),
590 "Global is marked as dllimport, but not external", &GV);
591 }
592
593 if (GV.hasLocalLinkage())
594 Assert(GV.isDSOLocal(),
595 "GlobalValue with private or internal linkage must be dso_local!",
596 &GV);
597
598 if (!GV.hasDefaultVisibility() && !GV.hasExternalWeakLinkage())
599 Assert(GV.isDSOLocal(),
600 "GlobalValue with non default visibility must be dso_local!", &GV);
601
602 forEachUser(&GV, GlobalValueVisited, [&](const Value *V) -> bool {
603 if (const Instruction *I = dyn_cast<Instruction>(V)) {
604 if (!I->getParent() || !I->getParent()->getParent())
605 CheckFailed("Global is referenced by parentless instruction!", &GV, &M,
606 I);
607 else if (I->getParent()->getParent()->getParent() != &M)
608 CheckFailed("Global is referenced in a different module!", &GV, &M, I,
609 I->getParent()->getParent(),
610 I->getParent()->getParent()->getParent());
611 return false;
612 } else if (const Function *F = dyn_cast<Function>(V)) {
613 if (F->getParent() != &M)
614 CheckFailed("Global is used by function in a different module", &GV, &M,
615 F, F->getParent());
616 return false;
617 }
618 return true;
619 });
620 }
621
void Verifier::visitGlobalVariable(const GlobalVariable &GV) {
623 if (GV.hasInitializer()) {
624 Assert(GV.getInitializer()->getType() == GV.getValueType(),
625 "Global variable initializer type does not match global "
626 "variable type!",
627 &GV);
628 // If the global has common linkage, it must have a zero initializer and
629 // cannot be constant.
630 if (GV.hasCommonLinkage()) {
631 Assert(GV.getInitializer()->isNullValue(),
632 "'common' global must have a zero initializer!", &GV);
633 Assert(!GV.isConstant(), "'common' global may not be marked constant!",
634 &GV);
635 Assert(!GV.hasComdat(), "'common' global may not be in a Comdat!", &GV);
636 }
637 }
638
639 if (GV.hasName() && (GV.getName() == "llvm.global_ctors" ||
640 GV.getName() == "llvm.global_dtors")) {
641 Assert(!GV.hasInitializer() || GV.hasAppendingLinkage(),
642 "invalid linkage for intrinsic global variable", &GV);
643 // Don't worry about emitting an error for it not being an array,
644 // visitGlobalValue will complain on appending non-array.
645 if (ArrayType *ATy = dyn_cast<ArrayType>(GV.getValueType())) {
646 StructType *STy = dyn_cast<StructType>(ATy->getElementType());
647 PointerType *FuncPtrTy =
648 FunctionType::get(Type::getVoidTy(Context), false)->
649 getPointerTo(DL.getProgramAddressSpace());
650 Assert(STy &&
651 (STy->getNumElements() == 2 || STy->getNumElements() == 3) &&
652 STy->getTypeAtIndex(0u)->isIntegerTy(32) &&
653 STy->getTypeAtIndex(1) == FuncPtrTy,
654 "wrong type for intrinsic global variable", &GV);
655 Assert(STy->getNumElements() == 3,
656 "the third field of the element type is mandatory, "
657 "specify i8* null to migrate from the obsoleted 2-field form");
658 Type *ETy = STy->getTypeAtIndex(2);
659 Assert(ETy->isPointerTy() &&
660 cast<PointerType>(ETy)->getElementType()->isIntegerTy(8),
661 "wrong type for intrinsic global variable", &GV);
662 }
663 }
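  // Illustrative IR satisfying the checks above (typed-pointer syntax): the
  // third i8* field is mandatory; specify "i8* null" when there is no
  // associated data.
  //
  //   @llvm.global_ctors = appending global [1 x { i32, void ()*, i8* }]
  //       [{ i32, void ()*, i8* } { i32 65535, void ()* @ctor, i8* null }]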
664
665 if (GV.hasName() && (GV.getName() == "llvm.used" ||
666 GV.getName() == "llvm.compiler.used")) {
667 Assert(!GV.hasInitializer() || GV.hasAppendingLinkage(),
668 "invalid linkage for intrinsic global variable", &GV);
669 Type *GVType = GV.getValueType();
670 if (ArrayType *ATy = dyn_cast<ArrayType>(GVType)) {
671 PointerType *PTy = dyn_cast<PointerType>(ATy->getElementType());
672 Assert(PTy, "wrong type for intrinsic global variable", &GV);
673 if (GV.hasInitializer()) {
674 const Constant *Init = GV.getInitializer();
675 const ConstantArray *InitArray = dyn_cast<ConstantArray>(Init);
        Assert(InitArray, "wrong initializer for intrinsic global variable",
677 Init);
678 for (Value *Op : InitArray->operands()) {
679 Value *V = Op->stripPointerCasts();
680 Assert(isa<GlobalVariable>(V) || isa<Function>(V) ||
681 isa<GlobalAlias>(V),
682 "invalid llvm.used member", V);
683 Assert(V->hasName(), "members of llvm.used must be named", V);
684 }
685 }
686 }
687 }
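  // Illustrative IR accepted by the checks above: an appending array of
  // pointers whose (cast-stripped) elements are named globals, functions, or
  // aliases.
  //
  //   @llvm.used = appending global [1 x i8*]
  //       [i8* bitcast (void ()* @keep_me to i8*)], section "llvm.metadata"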
688
689 // Visit any debug info attachments.
690 SmallVector<MDNode *, 1> MDs;
691 GV.getMetadata(LLVMContext::MD_dbg, MDs);
692 for (auto *MD : MDs) {
693 if (auto *GVE = dyn_cast<DIGlobalVariableExpression>(MD))
694 visitDIGlobalVariableExpression(*GVE);
695 else
696 AssertDI(false, "!dbg attachment of global variable must be a "
697 "DIGlobalVariableExpression");
698 }
699
700 // Scalable vectors cannot be global variables, since we don't know
701 // the runtime size. If the global is a struct or an array containing
702 // scalable vectors, that will be caught by the isValidElementType methods
703 // in StructType or ArrayType instead.
704 if (auto *VTy = dyn_cast<VectorType>(GV.getValueType()))
705 Assert(!VTy->isScalable(), "Globals cannot contain scalable vectors", &GV);
706
707 if (!GV.hasInitializer()) {
708 visitGlobalValue(GV);
709 return;
710 }
711
712 // Walk any aggregate initializers looking for bitcasts between address spaces
713 visitConstantExprsRecursively(GV.getInitializer());
714
715 visitGlobalValue(GV);
716 }
717
void Verifier::visitAliaseeSubExpr(const GlobalAlias &GA, const Constant &C) {
719 SmallPtrSet<const GlobalAlias*, 4> Visited;
720 Visited.insert(&GA);
721 visitAliaseeSubExpr(Visited, GA, C);
722 }
723
void Verifier::visitAliaseeSubExpr(SmallPtrSetImpl<const GlobalAlias*> &Visited,
725 const GlobalAlias &GA, const Constant &C) {
726 if (const auto *GV = dyn_cast<GlobalValue>(&C)) {
727 Assert(!GV->isDeclarationForLinker(), "Alias must point to a definition",
728 &GA);
729
730 if (const auto *GA2 = dyn_cast<GlobalAlias>(GV)) {
731 Assert(Visited.insert(GA2).second, "Aliases cannot form a cycle", &GA);
732
733 Assert(!GA2->isInterposable(), "Alias cannot point to an interposable alias",
734 &GA);
735 } else {
736 // Only continue verifying subexpressions of GlobalAliases.
737 // Do not recurse into global initializers.
738 return;
739 }
740 }
741
742 if (const auto *CE = dyn_cast<ConstantExpr>(&C))
743 visitConstantExprsRecursively(CE);
744
745 for (const Use &U : C.operands()) {
746 Value *V = &*U;
747 if (const auto *GA2 = dyn_cast<GlobalAlias>(V))
748 visitAliaseeSubExpr(Visited, GA, *GA2->getAliasee());
749 else if (const auto *C2 = dyn_cast<Constant>(V))
750 visitAliaseeSubExpr(Visited, GA, *C2);
751 }
752 }
753
void Verifier::visitGlobalAlias(const GlobalAlias &GA) {
755 Assert(GlobalAlias::isValidLinkage(GA.getLinkage()),
756 "Alias should have private, internal, linkonce, weak, linkonce_odr, "
757 "weak_odr, or external linkage!",
758 &GA);
759 const Constant *Aliasee = GA.getAliasee();
760 Assert(Aliasee, "Aliasee cannot be NULL!", &GA);
761 Assert(GA.getType() == Aliasee->getType(),
762 "Alias and aliasee types should match!", &GA);
763
764 Assert(isa<GlobalValue>(Aliasee) || isa<ConstantExpr>(Aliasee),
765 "Aliasee should be either GlobalValue or ConstantExpr", &GA);
766
767 visitAliaseeSubExpr(GA, *Aliasee);
768
769 visitGlobalValue(GA);
770 }
771
void Verifier::visitNamedMDNode(const NamedMDNode &NMD) {
773 // There used to be various other llvm.dbg.* nodes, but we don't support
774 // upgrading them and we want to reserve the namespace for future uses.
775 if (NMD.getName().startswith("llvm.dbg."))
776 AssertDI(NMD.getName() == "llvm.dbg.cu",
777 "unrecognized named metadata node in the llvm.dbg namespace",
778 &NMD);
779 for (const MDNode *MD : NMD.operands()) {
780 if (NMD.getName() == "llvm.dbg.cu")
781 AssertDI(MD && isa<DICompileUnit>(MD), "invalid compile unit", &NMD, MD);
782
783 if (!MD)
784 continue;
785
786 visitMDNode(*MD);
787 }
788 }
789
void Verifier::visitMDNode(const MDNode &MD) {
791 // Only visit each node once. Metadata can be mutually recursive, so this
792 // avoids infinite recursion here, as well as being an optimization.
793 if (!MDNodes.insert(&MD).second)
794 return;
795
796 switch (MD.getMetadataID()) {
797 default:
798 llvm_unreachable("Invalid MDNode subclass");
799 case Metadata::MDTupleKind:
800 break;
801 #define HANDLE_SPECIALIZED_MDNODE_LEAF(CLASS) \
802 case Metadata::CLASS##Kind: \
803 visit##CLASS(cast<CLASS>(MD)); \
804 break;
805 #include "llvm/IR/Metadata.def"
806 }
807
808 for (const Metadata *Op : MD.operands()) {
809 if (!Op)
810 continue;
811 Assert(!isa<LocalAsMetadata>(Op), "Invalid operand for global metadata!",
812 &MD, Op);
813 if (auto *N = dyn_cast<MDNode>(Op)) {
814 visitMDNode(*N);
815 continue;
816 }
817 if (auto *V = dyn_cast<ValueAsMetadata>(Op)) {
818 visitValueAsMetadata(*V, nullptr);
819 continue;
820 }
821 }
822
823 // Check these last, so we diagnose problems in operands first.
824 Assert(!MD.isTemporary(), "Expected no forward declarations!", &MD);
825 Assert(MD.isResolved(), "All nodes should be resolved!", &MD);
826 }
827
void Verifier::visitValueAsMetadata(const ValueAsMetadata &MD, Function *F) {
829 Assert(MD.getValue(), "Expected valid value", &MD);
830 Assert(!MD.getValue()->getType()->isMetadataTy(),
831 "Unexpected metadata round-trip through values", &MD, MD.getValue());
832
833 auto *L = dyn_cast<LocalAsMetadata>(&MD);
834 if (!L)
835 return;
836
837 Assert(F, "function-local metadata used outside a function", L);
838
839 // If this was an instruction, bb, or argument, verify that it is in the
840 // function that we expect.
841 Function *ActualF = nullptr;
842 if (Instruction *I = dyn_cast<Instruction>(L->getValue())) {
843 Assert(I->getParent(), "function-local metadata not in basic block", L, I);
844 ActualF = I->getParent()->getParent();
845 } else if (BasicBlock *BB = dyn_cast<BasicBlock>(L->getValue()))
846 ActualF = BB->getParent();
847 else if (Argument *A = dyn_cast<Argument>(L->getValue()))
848 ActualF = A->getParent();
849 assert(ActualF && "Unimplemented function local metadata case!");
850
851 Assert(ActualF == F, "function-local metadata used in wrong function", L);
852 }
853
void Verifier::visitMetadataAsValue(const MetadataAsValue &MDV, Function *F) {
855 Metadata *MD = MDV.getMetadata();
856 if (auto *N = dyn_cast<MDNode>(MD)) {
857 visitMDNode(*N);
858 return;
859 }
860
861 // Only visit each node once. Metadata can be mutually recursive, so this
862 // avoids infinite recursion here, as well as being an optimization.
863 if (!MDNodes.insert(MD).second)
864 return;
865
866 if (auto *V = dyn_cast<ValueAsMetadata>(MD))
867 visitValueAsMetadata(*V, F);
868 }
869
static bool isType(const Metadata *MD) { return !MD || isa<DIType>(MD); }
static bool isScope(const Metadata *MD) { return !MD || isa<DIScope>(MD); }
static bool isDINode(const Metadata *MD) { return !MD || isa<DINode>(MD); }
873
void Verifier::visitDILocation(const DILocation &N) {
875 AssertDI(N.getRawScope() && isa<DILocalScope>(N.getRawScope()),
876 "location requires a valid scope", &N, N.getRawScope());
877 if (auto *IA = N.getRawInlinedAt())
878 AssertDI(isa<DILocation>(IA), "inlined-at should be a location", &N, IA);
879 if (auto *SP = dyn_cast<DISubprogram>(N.getRawScope()))
880 AssertDI(SP->isDefinition(), "scope points into the type hierarchy", &N);
881 }
882
void Verifier::visitGenericDINode(const GenericDINode &N) {
884 AssertDI(N.getTag(), "invalid tag", &N);
885 }
886
void Verifier::visitDIScope(const DIScope &N) {
888 if (auto *F = N.getRawFile())
889 AssertDI(isa<DIFile>(F), "invalid file", &N, F);
890 }
891
void Verifier::visitDISubrange(const DISubrange &N) {
893 AssertDI(N.getTag() == dwarf::DW_TAG_subrange_type, "invalid tag", &N);
894 auto Count = N.getCount();
895 AssertDI(Count, "Count must either be a signed constant or a DIVariable",
896 &N);
897 AssertDI(!Count.is<ConstantInt*>() ||
898 Count.get<ConstantInt*>()->getSExtValue() >= -1,
899 "invalid subrange count", &N);
900 }
901
void Verifier::visitDIEnumerator(const DIEnumerator &N) {
903 AssertDI(N.getTag() == dwarf::DW_TAG_enumerator, "invalid tag", &N);
904 }
905
void Verifier::visitDIBasicType(const DIBasicType &N) {
907 AssertDI(N.getTag() == dwarf::DW_TAG_base_type ||
908 N.getTag() == dwarf::DW_TAG_unspecified_type,
909 "invalid tag", &N);
910 AssertDI(!(N.isBigEndian() && N.isLittleEndian()) ,
911 "has conflicting flags", &N);
912 }
913
void Verifier::visitDIDerivedType(const DIDerivedType &N) {
915 // Common scope checks.
916 visitDIScope(N);
917
918 AssertDI(N.getTag() == dwarf::DW_TAG_typedef ||
919 N.getTag() == dwarf::DW_TAG_pointer_type ||
920 N.getTag() == dwarf::DW_TAG_ptr_to_member_type ||
921 N.getTag() == dwarf::DW_TAG_reference_type ||
922 N.getTag() == dwarf::DW_TAG_rvalue_reference_type ||
923 N.getTag() == dwarf::DW_TAG_const_type ||
924 N.getTag() == dwarf::DW_TAG_volatile_type ||
925 N.getTag() == dwarf::DW_TAG_restrict_type ||
926 N.getTag() == dwarf::DW_TAG_atomic_type ||
927 N.getTag() == dwarf::DW_TAG_member ||
928 N.getTag() == dwarf::DW_TAG_inheritance ||
929 N.getTag() == dwarf::DW_TAG_friend,
930 "invalid tag", &N);
931 if (N.getTag() == dwarf::DW_TAG_ptr_to_member_type) {
932 AssertDI(isType(N.getRawExtraData()), "invalid pointer to member type", &N,
933 N.getRawExtraData());
934 }
935
936 AssertDI(isScope(N.getRawScope()), "invalid scope", &N, N.getRawScope());
937 AssertDI(isType(N.getRawBaseType()), "invalid base type", &N,
938 N.getRawBaseType());
939
940 if (N.getDWARFAddressSpace()) {
941 AssertDI(N.getTag() == dwarf::DW_TAG_pointer_type ||
942 N.getTag() == dwarf::DW_TAG_reference_type ||
943 N.getTag() == dwarf::DW_TAG_rvalue_reference_type,
944 "DWARF address space only applies to pointer or reference types",
945 &N);
946 }
947 }
948
949 /// Detect mutually exclusive flags.
static bool hasConflictingReferenceFlags(unsigned Flags) {
951 return ((Flags & DINode::FlagLValueReference) &&
952 (Flags & DINode::FlagRValueReference)) ||
953 ((Flags & DINode::FlagTypePassByValue) &&
954 (Flags & DINode::FlagTypePassByReference));
955 }
956
void Verifier::visitTemplateParams(const MDNode &N, const Metadata &RawParams) {
958 auto *Params = dyn_cast<MDTuple>(&RawParams);
959 AssertDI(Params, "invalid template params", &N, &RawParams);
960 for (Metadata *Op : Params->operands()) {
961 AssertDI(Op && isa<DITemplateParameter>(Op), "invalid template parameter",
962 &N, Params, Op);
963 }
964 }
965
void Verifier::visitDICompositeType(const DICompositeType &N) {
967 // Common scope checks.
968 visitDIScope(N);
969
970 AssertDI(N.getTag() == dwarf::DW_TAG_array_type ||
971 N.getTag() == dwarf::DW_TAG_structure_type ||
972 N.getTag() == dwarf::DW_TAG_union_type ||
973 N.getTag() == dwarf::DW_TAG_enumeration_type ||
974 N.getTag() == dwarf::DW_TAG_class_type ||
975 N.getTag() == dwarf::DW_TAG_variant_part,
976 "invalid tag", &N);
977
978 AssertDI(isScope(N.getRawScope()), "invalid scope", &N, N.getRawScope());
979 AssertDI(isType(N.getRawBaseType()), "invalid base type", &N,
980 N.getRawBaseType());
981
982 AssertDI(!N.getRawElements() || isa<MDTuple>(N.getRawElements()),
983 "invalid composite elements", &N, N.getRawElements());
984 AssertDI(isType(N.getRawVTableHolder()), "invalid vtable holder", &N,
985 N.getRawVTableHolder());
986 AssertDI(!hasConflictingReferenceFlags(N.getFlags()),
987 "invalid reference flags", &N);
988 unsigned DIBlockByRefStruct = 1 << 4;
989 AssertDI((N.getFlags() & DIBlockByRefStruct) == 0,
990 "DIBlockByRefStruct on DICompositeType is no longer supported", &N);
991
992 if (N.isVector()) {
993 const DINodeArray Elements = N.getElements();
994 AssertDI(Elements.size() == 1 &&
995 Elements[0]->getTag() == dwarf::DW_TAG_subrange_type,
996 "invalid vector, expected one element of type subrange", &N);
997 }
998
999 if (auto *Params = N.getRawTemplateParams())
1000 visitTemplateParams(N, *Params);
1001
1002 if (N.getTag() == dwarf::DW_TAG_class_type ||
1003 N.getTag() == dwarf::DW_TAG_union_type) {
1004 AssertDI(N.getFile() && !N.getFile()->getFilename().empty(),
1005 "class/union requires a filename", &N, N.getFile());
1006 }
1007
1008 if (auto *D = N.getRawDiscriminator()) {
1009 AssertDI(isa<DIDerivedType>(D) && N.getTag() == dwarf::DW_TAG_variant_part,
1010 "discriminator can only appear on variant part");
1011 }
1012 }
1013
void Verifier::visitDISubroutineType(const DISubroutineType &N) {
1015 AssertDI(N.getTag() == dwarf::DW_TAG_subroutine_type, "invalid tag", &N);
1016 if (auto *Types = N.getRawTypeArray()) {
1017 AssertDI(isa<MDTuple>(Types), "invalid composite elements", &N, Types);
1018 for (Metadata *Ty : N.getTypeArray()->operands()) {
1019 AssertDI(isType(Ty), "invalid subroutine type ref", &N, Types, Ty);
1020 }
1021 }
1022 AssertDI(!hasConflictingReferenceFlags(N.getFlags()),
1023 "invalid reference flags", &N);
1024 }
1025
void Verifier::visitDIFile(const DIFile &N) {
1027 AssertDI(N.getTag() == dwarf::DW_TAG_file_type, "invalid tag", &N);
1028 Optional<DIFile::ChecksumInfo<StringRef>> Checksum = N.getChecksum();
1029 if (Checksum) {
1030 AssertDI(Checksum->Kind <= DIFile::ChecksumKind::CSK_Last,
1031 "invalid checksum kind", &N);
1032 size_t Size;
1033 switch (Checksum->Kind) {
1034 case DIFile::CSK_MD5:
1035 Size = 32;
1036 break;
1037 case DIFile::CSK_SHA1:
1038 Size = 40;
1039 break;
1040 }
1041 AssertDI(Checksum->Value.size() == Size, "invalid checksum length", &N);
1042 AssertDI(Checksum->Value.find_if_not(llvm::isHexDigit) == StringRef::npos,
1043 "invalid checksum", &N);
1044 }
1045 }
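// Illustrative metadata accepted by visitDIFile(): an MD5 checksum must be 32
// hex digits and a SHA1 checksum 40 (the value below is a placeholder, not a
// real digest).
//
//   !DIFile(filename: "a.c", directory: "/tmp",
//           checksumkind: CSK_MD5,
//           checksum: "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa")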
1046
void Verifier::visitDICompileUnit(const DICompileUnit &N) {
1048 AssertDI(N.isDistinct(), "compile units must be distinct", &N);
1049 AssertDI(N.getTag() == dwarf::DW_TAG_compile_unit, "invalid tag", &N);
1050
1051 // Don't bother verifying the compilation directory or producer string
1052 // as those could be empty.
1053 AssertDI(N.getRawFile() && isa<DIFile>(N.getRawFile()), "invalid file", &N,
1054 N.getRawFile());
1055 AssertDI(!N.getFile()->getFilename().empty(), "invalid filename", &N,
1056 N.getFile());
1057
1058 verifySourceDebugInfo(N, *N.getFile());
1059
1060 AssertDI((N.getEmissionKind() <= DICompileUnit::LastEmissionKind),
1061 "invalid emission kind", &N);
1062
1063 if (auto *Array = N.getRawEnumTypes()) {
1064 AssertDI(isa<MDTuple>(Array), "invalid enum list", &N, Array);
1065 for (Metadata *Op : N.getEnumTypes()->operands()) {
1066 auto *Enum = dyn_cast_or_null<DICompositeType>(Op);
1067 AssertDI(Enum && Enum->getTag() == dwarf::DW_TAG_enumeration_type,
1068 "invalid enum type", &N, N.getEnumTypes(), Op);
1069 }
1070 }
1071 if (auto *Array = N.getRawRetainedTypes()) {
1072 AssertDI(isa<MDTuple>(Array), "invalid retained type list", &N, Array);
1073 for (Metadata *Op : N.getRetainedTypes()->operands()) {
1074 AssertDI(Op && (isa<DIType>(Op) ||
1075 (isa<DISubprogram>(Op) &&
1076 !cast<DISubprogram>(Op)->isDefinition())),
1077 "invalid retained type", &N, Op);
1078 }
1079 }
1080 if (auto *Array = N.getRawGlobalVariables()) {
1081 AssertDI(isa<MDTuple>(Array), "invalid global variable list", &N, Array);
1082 for (Metadata *Op : N.getGlobalVariables()->operands()) {
1083 AssertDI(Op && (isa<DIGlobalVariableExpression>(Op)),
1084 "invalid global variable ref", &N, Op);
1085 }
1086 }
1087 if (auto *Array = N.getRawImportedEntities()) {
1088 AssertDI(isa<MDTuple>(Array), "invalid imported entity list", &N, Array);
1089 for (Metadata *Op : N.getImportedEntities()->operands()) {
1090 AssertDI(Op && isa<DIImportedEntity>(Op), "invalid imported entity ref",
1091 &N, Op);
1092 }
1093 }
1094 if (auto *Array = N.getRawMacros()) {
1095 AssertDI(isa<MDTuple>(Array), "invalid macro list", &N, Array);
1096 for (Metadata *Op : N.getMacros()->operands()) {
1097 AssertDI(Op && isa<DIMacroNode>(Op), "invalid macro ref", &N, Op);
1098 }
1099 }
1100 CUVisited.insert(&N);
1101 }
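// Illustrative metadata satisfying the checks above: a compile unit must be
// distinct, name a file with a non-empty filename, and appear in the
// !llvm.dbg.cu named metadata (cross-checked later by verifyCompileUnits()).
//
//   !llvm.dbg.cu = !{!0}
//   !0 = distinct !DICompileUnit(language: DW_LANG_C99, file: !1,
//                                emissionKind: FullDebug)
//   !1 = !DIFile(filename: "a.c", directory: "/tmp")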
1102
void Verifier::visitDISubprogram(const DISubprogram &N) {
1104 AssertDI(N.getTag() == dwarf::DW_TAG_subprogram, "invalid tag", &N);
1105 AssertDI(isScope(N.getRawScope()), "invalid scope", &N, N.getRawScope());
1106 if (auto *F = N.getRawFile())
1107 AssertDI(isa<DIFile>(F), "invalid file", &N, F);
1108 else
1109 AssertDI(N.getLine() == 0, "line specified with no file", &N, N.getLine());
1110 if (auto *T = N.getRawType())
1111 AssertDI(isa<DISubroutineType>(T), "invalid subroutine type", &N, T);
1112 AssertDI(isType(N.getRawContainingType()), "invalid containing type", &N,
1113 N.getRawContainingType());
1114 if (auto *Params = N.getRawTemplateParams())
1115 visitTemplateParams(N, *Params);
1116 if (auto *S = N.getRawDeclaration())
1117 AssertDI(isa<DISubprogram>(S) && !cast<DISubprogram>(S)->isDefinition(),
1118 "invalid subprogram declaration", &N, S);
1119 if (auto *RawNode = N.getRawRetainedNodes()) {
1120 auto *Node = dyn_cast<MDTuple>(RawNode);
1121 AssertDI(Node, "invalid retained nodes list", &N, RawNode);
1122 for (Metadata *Op : Node->operands()) {
1123 AssertDI(Op && (isa<DILocalVariable>(Op) || isa<DILabel>(Op)),
1124 "invalid retained nodes, expected DILocalVariable or DILabel",
1125 &N, Node, Op);
1126 }
1127 }
1128 AssertDI(!hasConflictingReferenceFlags(N.getFlags()),
1129 "invalid reference flags", &N);
1130
1131 auto *Unit = N.getRawUnit();
1132 if (N.isDefinition()) {
1133 // Subprogram definitions (not part of the type hierarchy).
1134 AssertDI(N.isDistinct(), "subprogram definitions must be distinct", &N);
1135 AssertDI(Unit, "subprogram definitions must have a compile unit", &N);
1136 AssertDI(isa<DICompileUnit>(Unit), "invalid unit type", &N, Unit);
1137 if (N.getFile())
1138 verifySourceDebugInfo(*N.getUnit(), *N.getFile());
1139 } else {
1140 // Subprogram declarations (part of the type hierarchy).
1141 AssertDI(!Unit, "subprogram declarations must not have a compile unit", &N);
1142 }
1143
1144 if (auto *RawThrownTypes = N.getRawThrownTypes()) {
1145 auto *ThrownTypes = dyn_cast<MDTuple>(RawThrownTypes);
1146 AssertDI(ThrownTypes, "invalid thrown types list", &N, RawThrownTypes);
1147 for (Metadata *Op : ThrownTypes->operands())
1148 AssertDI(Op && isa<DIType>(Op), "invalid thrown type", &N, ThrownTypes,
1149 Op);
1150 }
1151
1152 if (N.areAllCallsDescribed())
1153 AssertDI(N.isDefinition(),
1154 "DIFlagAllCallsDescribed must be attached to a definition");
1155 }
1156
void Verifier::visitDILexicalBlockBase(const DILexicalBlockBase &N) {
1158 AssertDI(N.getTag() == dwarf::DW_TAG_lexical_block, "invalid tag", &N);
1159 AssertDI(N.getRawScope() && isa<DILocalScope>(N.getRawScope()),
1160 "invalid local scope", &N, N.getRawScope());
1161 if (auto *SP = dyn_cast<DISubprogram>(N.getRawScope()))
1162 AssertDI(SP->isDefinition(), "scope points into the type hierarchy", &N);
1163 }
1164
void Verifier::visitDILexicalBlock(const DILexicalBlock &N) {
1166 visitDILexicalBlockBase(N);
1167
1168 AssertDI(N.getLine() || !N.getColumn(),
1169 "cannot have column info without line info", &N);
1170 }
1171
void Verifier::visitDILexicalBlockFile(const DILexicalBlockFile &N) {
1173 visitDILexicalBlockBase(N);
1174 }
1175
void Verifier::visitDICommonBlock(const DICommonBlock &N) {
1177 AssertDI(N.getTag() == dwarf::DW_TAG_common_block, "invalid tag", &N);
1178 if (auto *S = N.getRawScope())
1179 AssertDI(isa<DIScope>(S), "invalid scope ref", &N, S);
1180 if (auto *S = N.getRawDecl())
1181 AssertDI(isa<DIGlobalVariable>(S), "invalid declaration", &N, S);
1182 }
1183
void Verifier::visitDINamespace(const DINamespace &N) {
1185 AssertDI(N.getTag() == dwarf::DW_TAG_namespace, "invalid tag", &N);
1186 if (auto *S = N.getRawScope())
1187 AssertDI(isa<DIScope>(S), "invalid scope ref", &N, S);
1188 }
1189
void Verifier::visitDIMacro(const DIMacro &N) {
1191 AssertDI(N.getMacinfoType() == dwarf::DW_MACINFO_define ||
1192 N.getMacinfoType() == dwarf::DW_MACINFO_undef,
1193 "invalid macinfo type", &N);
1194 AssertDI(!N.getName().empty(), "anonymous macro", &N);
1195 if (!N.getValue().empty()) {
1196 assert(N.getValue().data()[0] != ' ' && "Macro value has a space prefix");
1197 }
1198 }
1199
void Verifier::visitDIMacroFile(const DIMacroFile &N) {
1201 AssertDI(N.getMacinfoType() == dwarf::DW_MACINFO_start_file,
1202 "invalid macinfo type", &N);
1203 if (auto *F = N.getRawFile())
1204 AssertDI(isa<DIFile>(F), "invalid file", &N, F);
1205
1206 if (auto *Array = N.getRawElements()) {
1207 AssertDI(isa<MDTuple>(Array), "invalid macro list", &N, Array);
1208 for (Metadata *Op : N.getElements()->operands()) {
1209 AssertDI(Op && isa<DIMacroNode>(Op), "invalid macro ref", &N, Op);
1210 }
1211 }
1212 }
1213
void Verifier::visitDIModule(const DIModule &N) {
1215 AssertDI(N.getTag() == dwarf::DW_TAG_module, "invalid tag", &N);
1216 AssertDI(!N.getName().empty(), "anonymous module", &N);
1217 }
1218
void Verifier::visitDITemplateParameter(const DITemplateParameter &N) {
1220 AssertDI(isType(N.getRawType()), "invalid type ref", &N, N.getRawType());
1221 }
1222
void Verifier::visitDITemplateTypeParameter(const DITemplateTypeParameter &N) {
1224 visitDITemplateParameter(N);
1225
1226 AssertDI(N.getTag() == dwarf::DW_TAG_template_type_parameter, "invalid tag",
1227 &N);
1228 }
1229
void Verifier::visitDITemplateValueParameter(
1231 const DITemplateValueParameter &N) {
1232 visitDITemplateParameter(N);
1233
1234 AssertDI(N.getTag() == dwarf::DW_TAG_template_value_parameter ||
1235 N.getTag() == dwarf::DW_TAG_GNU_template_template_param ||
1236 N.getTag() == dwarf::DW_TAG_GNU_template_parameter_pack,
1237 "invalid tag", &N);
1238 }
1239
void Verifier::visitDIVariable(const DIVariable &N) {
1241 if (auto *S = N.getRawScope())
1242 AssertDI(isa<DIScope>(S), "invalid scope", &N, S);
1243 if (auto *F = N.getRawFile())
1244 AssertDI(isa<DIFile>(F), "invalid file", &N, F);
1245 }
1246
void Verifier::visitDIGlobalVariable(const DIGlobalVariable &N) {
1248 // Checks common to all variables.
1249 visitDIVariable(N);
1250
1251 AssertDI(N.getTag() == dwarf::DW_TAG_variable, "invalid tag", &N);
1252 AssertDI(isType(N.getRawType()), "invalid type ref", &N, N.getRawType());
1253 AssertDI(N.getType(), "missing global variable type", &N);
1254 if (auto *Member = N.getRawStaticDataMemberDeclaration()) {
1255 AssertDI(isa<DIDerivedType>(Member),
1256 "invalid static data member declaration", &N, Member);
1257 }
1258 }
1259
void Verifier::visitDILocalVariable(const DILocalVariable &N) {
1261 // Checks common to all variables.
1262 visitDIVariable(N);
1263
1264 AssertDI(isType(N.getRawType()), "invalid type ref", &N, N.getRawType());
1265 AssertDI(N.getTag() == dwarf::DW_TAG_variable, "invalid tag", &N);
1266 AssertDI(N.getRawScope() && isa<DILocalScope>(N.getRawScope()),
1267 "local variable requires a valid scope", &N, N.getRawScope());
1268 if (auto Ty = N.getType())
1269 AssertDI(!isa<DISubroutineType>(Ty), "invalid type", &N, N.getType());
1270 }
1271
void Verifier::visitDILabel(const DILabel &N) {
1273 if (auto *S = N.getRawScope())
1274 AssertDI(isa<DIScope>(S), "invalid scope", &N, S);
1275 if (auto *F = N.getRawFile())
1276 AssertDI(isa<DIFile>(F), "invalid file", &N, F);
1277
1278 AssertDI(N.getTag() == dwarf::DW_TAG_label, "invalid tag", &N);
1279 AssertDI(N.getRawScope() && isa<DILocalScope>(N.getRawScope()),
1280 "label requires a valid scope", &N, N.getRawScope());
1281 }
1282
void Verifier::visitDIExpression(const DIExpression &N) {
1284 AssertDI(N.isValid(), "invalid expression", &N);
1285 }
1286
void Verifier::visitDIGlobalVariableExpression(
1288 const DIGlobalVariableExpression &GVE) {
1289 AssertDI(GVE.getVariable(), "missing variable");
1290 if (auto *Var = GVE.getVariable())
1291 visitDIGlobalVariable(*Var);
1292 if (auto *Expr = GVE.getExpression()) {
1293 visitDIExpression(*Expr);
1294 if (auto Fragment = Expr->getFragmentInfo())
1295 verifyFragmentExpression(*GVE.getVariable(), *Fragment, &GVE);
1296 }
1297 }
1298
void Verifier::visitDIObjCProperty(const DIObjCProperty &N) {
1300 AssertDI(N.getTag() == dwarf::DW_TAG_APPLE_property, "invalid tag", &N);
1301 if (auto *T = N.getRawType())
1302 AssertDI(isType(T), "invalid type ref", &N, T);
1303 if (auto *F = N.getRawFile())
1304 AssertDI(isa<DIFile>(F), "invalid file", &N, F);
1305 }
1306
void Verifier::visitDIImportedEntity(const DIImportedEntity &N) {
1308 AssertDI(N.getTag() == dwarf::DW_TAG_imported_module ||
1309 N.getTag() == dwarf::DW_TAG_imported_declaration,
1310 "invalid tag", &N);
1311 if (auto *S = N.getRawScope())
1312 AssertDI(isa<DIScope>(S), "invalid scope for imported entity", &N, S);
1313 AssertDI(isDINode(N.getRawEntity()), "invalid imported entity", &N,
1314 N.getRawEntity());
1315 }
1316
void Verifier::visitComdat(const Comdat &C) {
1318 // In COFF the Module is invalid if the GlobalValue has private linkage.
1319 // Entities with private linkage don't have entries in the symbol table.
1320 if (TT.isOSBinFormatCOFF())
1321 if (const GlobalValue *GV = M.getNamedValue(C.getName()))
1322 Assert(!GV->hasPrivateLinkage(),
1323 "comdat global value has private linkage", GV);
1324 }
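// Illustrative IR for the COFF restriction above: on COFF targets the check
// rejects a comdat whose same-named global value has private linkage, e.g.
//
//   $foo = comdat any
//   @foo = private global i32 0, comdat   ; invalid on COFF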
1325
void Verifier::visitModuleIdents(const Module &M) {
1327 const NamedMDNode *Idents = M.getNamedMetadata("llvm.ident");
1328 if (!Idents)
1329 return;
1330
  // llvm.ident takes a list of metadata entries, each holding a single string.
1332 // Scan each llvm.ident entry and make sure that this requirement is met.
1333 for (const MDNode *N : Idents->operands()) {
1334 Assert(N->getNumOperands() == 1,
1335 "incorrect number of operands in llvm.ident metadata", N);
1336 Assert(dyn_cast_or_null<MDString>(N->getOperand(0)),
1337 ("invalid value for llvm.ident metadata entry operand"
1338 "(the operand should be a string)"),
1339 N->getOperand(0));
1340 }
1341 }
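// Illustrative input for the check above:
//
//   !llvm.ident = !{!0}
//   !0 = !{!"clang version 10.0.0"}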
1342
void Verifier::visitModuleCommandLines(const Module &M) {
1344 const NamedMDNode *CommandLines = M.getNamedMetadata("llvm.commandline");
1345 if (!CommandLines)
1346 return;
1347
  // llvm.commandline takes a list of metadata entries. Each entry has only one
1349 // string. Scan each llvm.commandline entry and make sure that this
1350 // requirement is met.
1351 for (const MDNode *N : CommandLines->operands()) {
1352 Assert(N->getNumOperands() == 1,
1353 "incorrect number of operands in llvm.commandline metadata", N);
1354 Assert(dyn_cast_or_null<MDString>(N->getOperand(0)),
1355 ("invalid value for llvm.commandline metadata entry operand"
1356 "(the operand should be a string)"),
1357 N->getOperand(0));
1358 }
1359 }
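// Illustrative input for the check above:
//
//   !llvm.commandline = !{!0}
//   !0 = !{!"clang -g -O2 a.c"}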
1360
void Verifier::visitModuleFlags(const Module &M) {
1362 const NamedMDNode *Flags = M.getModuleFlagsMetadata();
1363 if (!Flags) return;
1364
1365 // Scan each flag, and track the flags and requirements.
1366 DenseMap<const MDString*, const MDNode*> SeenIDs;
1367 SmallVector<const MDNode*, 16> Requirements;
1368 for (const MDNode *MDN : Flags->operands())
1369 visitModuleFlag(MDN, SeenIDs, Requirements);
1370
1371 // Validate that the requirements in the module are valid.
1372 for (const MDNode *Requirement : Requirements) {
1373 const MDString *Flag = cast<MDString>(Requirement->getOperand(0));
1374 const Metadata *ReqValue = Requirement->getOperand(1);
1375
1376 const MDNode *Op = SeenIDs.lookup(Flag);
1377 if (!Op) {
1378 CheckFailed("invalid requirement on flag, flag is not present in module",
1379 Flag);
1380 continue;
1381 }
1382
1383 if (Op->getOperand(2) != ReqValue) {
1384 CheckFailed(("invalid requirement on flag, "
1385 "flag does not have the required value"),
1386 Flag);
1387 continue;
1388 }
1389 }
1390 }
1391
1392 void
Verifier::visitModuleFlag(const MDNode *Op,
1394 DenseMap<const MDString *, const MDNode *> &SeenIDs,
1395 SmallVectorImpl<const MDNode *> &Requirements) {
1396 // Each module flag should have three arguments, the merge behavior (a
1397 // constant int), the flag ID (an MDString), and the value.
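// A typical well-formed flag therefore looks like (the value shown is just an
// example):
//   !0 = !{i32 1, !"wchar_size", i32 4}   ; behavior Error(1), flag ID, value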
1398 Assert(Op->getNumOperands() == 3,
1399 "incorrect number of operands in module flag", Op);
1400 Module::ModFlagBehavior MFB;
1401 if (!Module::isValidModFlagBehavior(Op->getOperand(0), MFB)) {
1402 Assert(
1403 mdconst::dyn_extract_or_null<ConstantInt>(Op->getOperand(0)),
1404 "invalid behavior operand in module flag (expected constant integer)",
1405 Op->getOperand(0));
1406 Assert(false,
1407 "invalid behavior operand in module flag (unexpected constant)",
1408 Op->getOperand(0));
1409 }
1410 MDString *ID = dyn_cast_or_null<MDString>(Op->getOperand(1));
1411 Assert(ID, "invalid ID operand in module flag (expected metadata string)",
1412 Op->getOperand(1));
1413
1414 // Sanity check the values for behaviors with additional requirements.
1415 switch (MFB) {
1416 case Module::Error:
1417 case Module::Warning:
1418 case Module::Override:
1419 // These behavior types accept any value.
1420 break;
1421
1422 case Module::Max: {
1423 Assert(mdconst::dyn_extract_or_null<ConstantInt>(Op->getOperand(2)),
1424 "invalid value for 'max' module flag (expected constant integer)",
1425 Op->getOperand(2));
1426 break;
1427 }
1428
1429 case Module::Require: {
1430 // The value should itself be an MDNode with two operands, a flag ID (an
1431 // MDString), and a value.
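// Illustrative shape (flag names are hypothetical):
//   !0 = !{i32 3, !"needs-foo", !{!"foo", i32 1}}
// i.e. behavior Require(3) with a (flag, required-value) pair as the value.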
1432 MDNode *Value = dyn_cast<MDNode>(Op->getOperand(2));
1433 Assert(Value && Value->getNumOperands() == 2,
1434 "invalid value for 'require' module flag (expected metadata pair)",
1435 Op->getOperand(2));
1436 Assert(isa<MDString>(Value->getOperand(0)),
1437 ("invalid value for 'require' module flag "
1438 "(first value operand should be a string)"),
1439 Value->getOperand(0));
1440
1441 // Append it to the list of requirements, to check once all module flags are
1442 // scanned.
1443 Requirements.push_back(Value);
1444 break;
1445 }
1446
1447 case Module::Append:
1448 case Module::AppendUnique: {
1449 // These behavior types require the operand be an MDNode.
1450 Assert(isa<MDNode>(Op->getOperand(2)),
1451 "invalid value for 'append'-type module flag "
1452 "(expected a metadata node)",
1453 Op->getOperand(2));
1454 break;
1455 }
1456 }
1457
1458 // Unless this is a 'require' flag, check the ID is unique.
1459 if (MFB != Module::Require) {
1460 bool Inserted = SeenIDs.insert(std::make_pair(ID, Op)).second;
1461 Assert(Inserted,
1462 "module flag identifiers must be unique (or of 'require' type)", ID);
1463 }
1464
1465 if (ID->getString() == "wchar_size") {
1466 ConstantInt *Value
1467 = mdconst::dyn_extract_or_null<ConstantInt>(Op->getOperand(2));
1468 Assert(Value, "wchar_size metadata requires constant integer argument");
1469 }
1470
1471 if (ID->getString() == "Linker Options") {
1472 // If the llvm.linker.options named metadata exists, we assume that the
1473 // bitcode reader has upgraded the module flag. Otherwise the flag might
1474 // have been created by a client directly.
1475 Assert(M.getNamedMetadata("llvm.linker.options"),
1476 "'Linker Options' named metadata no longer supported");
1477 }
1478
1479 if (ID->getString() == "CG Profile") {
1480 for (const MDOperand &MDO : cast<MDNode>(Op->getOperand(2))->operands())
1481 visitModuleFlagCGProfileEntry(MDO);
1482 }
1483 }
1484
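// A "CG Profile" module flag holds call-graph profile entries. Each entry is
// roughly of the form (functions and count are illustrative):
//   !{void ()* @caller, void ()* @callee, i64 2048}
// i.e. an MDNode triple of caller, callee and an integer edge count, which is
// what visitModuleFlagCGProfileEntry checks below.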
1485 void Verifier::visitModuleFlagCGProfileEntry(const MDOperand &MDO) {
1486 auto CheckFunction = [&](const MDOperand &FuncMDO) {
1487 if (!FuncMDO)
1488 return;
1489 auto F = dyn_cast<ValueAsMetadata>(FuncMDO);
1490 Assert(F && isa<Function>(F->getValue()), "expected a Function or null",
1491 FuncMDO);
1492 };
1493 auto Node = dyn_cast_or_null<MDNode>(MDO);
1494 Assert(Node && Node->getNumOperands() == 3, "expected a MDNode triple", MDO);
1495 CheckFunction(Node->getOperand(0));
1496 CheckFunction(Node->getOperand(1));
1497 auto Count = dyn_cast_or_null<ConstantAsMetadata>(Node->getOperand(2));
1498 Assert(Count && Count->getType()->isIntegerTy(),
1499 "expected an integer constant", Node->getOperand(2));
1500 }
1501
1502 /// Return true if this attribute kind only applies to functions.
1503 static bool isFuncOnlyAttr(Attribute::AttrKind Kind) {
1504 switch (Kind) {
1505 case Attribute::NoReturn:
1506 case Attribute::NoSync:
1507 case Attribute::WillReturn:
1508 case Attribute::NoCfCheck:
1509 case Attribute::NoUnwind:
1510 case Attribute::NoInline:
1511 case Attribute::AlwaysInline:
1512 case Attribute::OptimizeForSize:
1513 case Attribute::StackProtect:
1514 case Attribute::StackProtectReq:
1515 case Attribute::StackProtectStrong:
1516 case Attribute::SafeStack:
1517 case Attribute::ShadowCallStack:
1518 case Attribute::NoRedZone:
1519 case Attribute::NoImplicitFloat:
1520 case Attribute::Naked:
1521 case Attribute::InlineHint:
1522 case Attribute::StackAlignment:
1523 case Attribute::UWTable:
1524 case Attribute::NonLazyBind:
1525 case Attribute::ReturnsTwice:
1526 case Attribute::SanitizeAddress:
1527 case Attribute::SanitizeHWAddress:
1528 case Attribute::SanitizeMemTag:
1529 case Attribute::SanitizeThread:
1530 case Attribute::SanitizeMemory:
1531 case Attribute::MinSize:
1532 case Attribute::NoDuplicate:
1533 case Attribute::Builtin:
1534 case Attribute::NoBuiltin:
1535 case Attribute::Cold:
1536 case Attribute::OptForFuzzing:
1537 case Attribute::OptimizeNone:
1538 case Attribute::JumpTable:
1539 case Attribute::Convergent:
1540 case Attribute::ArgMemOnly:
1541 case Attribute::NoRecurse:
1542 case Attribute::InaccessibleMemOnly:
1543 case Attribute::InaccessibleMemOrArgMemOnly:
1544 case Attribute::AllocSize:
1545 case Attribute::SpeculativeLoadHardening:
1546 case Attribute::Speculatable:
1547 case Attribute::StrictFP:
1548 return true;
1549 default:
1550 break;
1551 }
1552 return false;
1553 }
1554
1555 /// Return true if this is a function attribute that can also appear on
1556 /// arguments.
1557 static bool isFuncOrArgAttr(Attribute::AttrKind Kind) {
1558 return Kind == Attribute::ReadOnly || Kind == Attribute::WriteOnly ||
1559 Kind == Attribute::ReadNone || Kind == Attribute::NoFree;
1560 }
1561
1562 void Verifier::verifyAttributeTypes(AttributeSet Attrs, bool IsFunction,
1563 const Value *V) {
1564 for (Attribute A : Attrs) {
1565 if (A.isStringAttribute())
1566 continue;
1567
1568 if (isFuncOnlyAttr(A.getKindAsEnum())) {
1569 if (!IsFunction) {
1570 CheckFailed("Attribute '" + A.getAsString() +
1571 "' only applies to functions!",
1572 V);
1573 return;
1574 }
1575 } else if (IsFunction && !isFuncOrArgAttr(A.getKindAsEnum())) {
1576 CheckFailed("Attribute '" + A.getAsString() +
1577 "' does not apply to functions!",
1578 V);
1579 return;
1580 }
1581 }
1582 }
1583
1584 // verifyParameterAttrs - Check the given attributes for an argument or return
1585 // value of the specified type. The value V is printed in error messages.
1586 void Verifier::verifyParameterAttrs(AttributeSet Attrs, Type *Ty,
1587 const Value *V) {
1588 if (!Attrs.hasAttributes())
1589 return;
1590
1591 verifyAttributeTypes(Attrs, /*IsFunction=*/false, V);
1592
1593 if (Attrs.hasAttribute(Attribute::ImmArg)) {
1594 Assert(Attrs.getNumAttributes() == 1,
1595 "Attribute 'immarg' is incompatible with other attributes", V);
1596 }
1597
1598 // Check for mutually incompatible attributes. Only inreg is compatible with
1599 // sret.
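// For example, a parameter may legitimately be both 'sret' and 'inreg' (they
// are counted once below), whereas e.g. 'byval' together with 'nest' is
// rejected.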
1600 unsigned AttrCount = 0;
1601 AttrCount += Attrs.hasAttribute(Attribute::ByVal);
1602 AttrCount += Attrs.hasAttribute(Attribute::InAlloca);
1603 AttrCount += Attrs.hasAttribute(Attribute::StructRet) ||
1604 Attrs.hasAttribute(Attribute::InReg);
1605 AttrCount += Attrs.hasAttribute(Attribute::Nest);
1606 Assert(AttrCount <= 1, "Attributes 'byval', 'inalloca', 'inreg', 'nest', "
1607 "and 'sret' are incompatible!",
1608 V);
1609
1610 Assert(!(Attrs.hasAttribute(Attribute::InAlloca) &&
1611 Attrs.hasAttribute(Attribute::ReadOnly)),
1612 "Attributes "
1613 "'inalloca and readonly' are incompatible!",
1614 V);
1615
1616 Assert(!(Attrs.hasAttribute(Attribute::StructRet) &&
1617 Attrs.hasAttribute(Attribute::Returned)),
1618 "Attributes "
1619 "'sret and returned' are incompatible!",
1620 V);
1621
1622 Assert(!(Attrs.hasAttribute(Attribute::ZExt) &&
1623 Attrs.hasAttribute(Attribute::SExt)),
1624 "Attributes "
1625 "'zeroext and signext' are incompatible!",
1626 V);
1627
1628 Assert(!(Attrs.hasAttribute(Attribute::ReadNone) &&
1629 Attrs.hasAttribute(Attribute::ReadOnly)),
1630 "Attributes "
1631 "'readnone and readonly' are incompatible!",
1632 V);
1633
1634 Assert(!(Attrs.hasAttribute(Attribute::ReadNone) &&
1635 Attrs.hasAttribute(Attribute::WriteOnly)),
1636 "Attributes "
1637 "'readnone and writeonly' are incompatible!",
1638 V);
1639
1640 Assert(!(Attrs.hasAttribute(Attribute::ReadOnly) &&
1641 Attrs.hasAttribute(Attribute::WriteOnly)),
1642 "Attributes "
1643 "'readonly and writeonly' are incompatible!",
1644 V);
1645
1646 Assert(!(Attrs.hasAttribute(Attribute::NoInline) &&
1647 Attrs.hasAttribute(Attribute::AlwaysInline)),
1648 "Attributes "
1649 "'noinline and alwaysinline' are incompatible!",
1650 V);
1651
1652 if (Attrs.hasAttribute(Attribute::ByVal) && Attrs.getByValType()) {
1653 Assert(Attrs.getByValType() == cast<PointerType>(Ty)->getElementType(),
1654 "Attribute 'byval' type does not match parameter!", V);
1655 }
1656
1657 AttrBuilder IncompatibleAttrs = AttributeFuncs::typeIncompatible(Ty);
1658 Assert(!AttrBuilder(Attrs).overlaps(IncompatibleAttrs),
1659 "Wrong types for attribute: " +
1660 AttributeSet::get(Context, IncompatibleAttrs).getAsString(),
1661 V);
1662
1663 if (PointerType *PTy = dyn_cast<PointerType>(Ty)) {
1664 SmallPtrSet<Type*, 4> Visited;
1665 if (!PTy->getElementType()->isSized(&Visited)) {
1666 Assert(!Attrs.hasAttribute(Attribute::ByVal) &&
1667 !Attrs.hasAttribute(Attribute::InAlloca),
1668 "Attributes 'byval' and 'inalloca' do not support unsized types!",
1669 V);
1670 }
1671 if (!isa<PointerType>(PTy->getElementType()))
1672 Assert(!Attrs.hasAttribute(Attribute::SwiftError),
1673 "Attribute 'swifterror' only applies to parameters "
1674 "with pointer to pointer type!",
1675 V);
1676 } else {
1677 Assert(!Attrs.hasAttribute(Attribute::ByVal),
1678 "Attribute 'byval' only applies to parameters with pointer type!",
1679 V);
1680 Assert(!Attrs.hasAttribute(Attribute::SwiftError),
1681 "Attribute 'swifterror' only applies to parameters "
1682 "with pointer type!",
1683 V);
1684 }
1685 }
1686
1687 // Check parameter attributes against a function type.
1688 // The value V is printed in error messages.
1689 void Verifier::verifyFunctionAttrs(FunctionType *FT, AttributeList Attrs,
1690 const Value *V, bool IsIntrinsic) {
1691 if (Attrs.isEmpty())
1692 return;
1693
1694 bool SawNest = false;
1695 bool SawReturned = false;
1696 bool SawSRet = false;
1697 bool SawSwiftSelf = false;
1698 bool SawSwiftError = false;
1699
1700 // Verify return value attributes.
1701 AttributeSet RetAttrs = Attrs.getRetAttributes();
1702 Assert((!RetAttrs.hasAttribute(Attribute::ByVal) &&
1703 !RetAttrs.hasAttribute(Attribute::Nest) &&
1704 !RetAttrs.hasAttribute(Attribute::StructRet) &&
1705 !RetAttrs.hasAttribute(Attribute::NoCapture) &&
1706 !RetAttrs.hasAttribute(Attribute::NoFree) &&
1707 !RetAttrs.hasAttribute(Attribute::Returned) &&
1708 !RetAttrs.hasAttribute(Attribute::InAlloca) &&
1709 !RetAttrs.hasAttribute(Attribute::SwiftSelf) &&
1710 !RetAttrs.hasAttribute(Attribute::SwiftError)),
1711 "Attributes 'byval', 'inalloca', 'nest', 'sret', 'nocapture', 'nofree', "
1712 "'returned', 'swiftself', and 'swifterror' do not apply to return "
1713 "values!",
1714 V);
1715 Assert((!RetAttrs.hasAttribute(Attribute::ReadOnly) &&
1716 !RetAttrs.hasAttribute(Attribute::WriteOnly) &&
1717 !RetAttrs.hasAttribute(Attribute::ReadNone)),
1718 "Attribute '" + RetAttrs.getAsString() +
1719 "' does not apply to function returns",
1720 V);
1721 verifyParameterAttrs(RetAttrs, FT->getReturnType(), V);
1722
1723 // Verify parameter attributes.
1724 for (unsigned i = 0, e = FT->getNumParams(); i != e; ++i) {
1725 Type *Ty = FT->getParamType(i);
1726 AttributeSet ArgAttrs = Attrs.getParamAttributes(i);
1727
1728 if (!IsIntrinsic) {
1729 Assert(!ArgAttrs.hasAttribute(Attribute::ImmArg),
1730 "immarg attribute only applies to intrinsics", V);
1731 }
1732
1733 verifyParameterAttrs(ArgAttrs, Ty, V);
1734
1735 if (ArgAttrs.hasAttribute(Attribute::Nest)) {
1736 Assert(!SawNest, "More than one parameter has attribute nest!", V);
1737 SawNest = true;
1738 }
1739
1740 if (ArgAttrs.hasAttribute(Attribute::Returned)) {
1741 Assert(!SawReturned, "More than one parameter has attribute returned!",
1742 V);
1743 Assert(Ty->canLosslesslyBitCastTo(FT->getReturnType()),
1744 "Incompatible argument and return types for 'returned' attribute",
1745 V);
1746 SawReturned = true;
1747 }
1748
1749 if (ArgAttrs.hasAttribute(Attribute::StructRet)) {
1750 Assert(!SawSRet, "Cannot have multiple 'sret' parameters!", V);
1751 Assert(i == 0 || i == 1,
1752 "Attribute 'sret' is not on first or second parameter!", V);
1753 SawSRet = true;
1754 }
1755
1756 if (ArgAttrs.hasAttribute(Attribute::SwiftSelf)) {
1757 Assert(!SawSwiftSelf, "Cannot have multiple 'swiftself' parameters!", V);
1758 SawSwiftSelf = true;
1759 }
1760
1761 if (ArgAttrs.hasAttribute(Attribute::SwiftError)) {
1762 Assert(!SawSwiftError, "Cannot have multiple 'swifterror' parameters!",
1763 V);
1764 SawSwiftError = true;
1765 }
1766
1767 if (ArgAttrs.hasAttribute(Attribute::InAlloca)) {
1768 Assert(i == FT->getNumParams() - 1,
1769 "inalloca isn't on the last parameter!", V);
1770 }
1771 }
1772
1773 if (!Attrs.hasAttributes(AttributeList::FunctionIndex))
1774 return;
1775
1776 verifyAttributeTypes(Attrs.getFnAttributes(), /*IsFunction=*/true, V);
1777
1778 Assert(!(Attrs.hasFnAttribute(Attribute::ReadNone) &&
1779 Attrs.hasFnAttribute(Attribute::ReadOnly)),
1780 "Attributes 'readnone and readonly' are incompatible!", V);
1781
1782 Assert(!(Attrs.hasFnAttribute(Attribute::ReadNone) &&
1783 Attrs.hasFnAttribute(Attribute::WriteOnly)),
1784 "Attributes 'readnone and writeonly' are incompatible!", V);
1785
1786 Assert(!(Attrs.hasFnAttribute(Attribute::ReadOnly) &&
1787 Attrs.hasFnAttribute(Attribute::WriteOnly)),
1788 "Attributes 'readonly and writeonly' are incompatible!", V);
1789
1790 Assert(!(Attrs.hasFnAttribute(Attribute::ReadNone) &&
1791 Attrs.hasFnAttribute(Attribute::InaccessibleMemOrArgMemOnly)),
1792 "Attributes 'readnone and inaccessiblemem_or_argmemonly' are "
1793 "incompatible!",
1794 V);
1795
1796 Assert(!(Attrs.hasFnAttribute(Attribute::ReadNone) &&
1797 Attrs.hasFnAttribute(Attribute::InaccessibleMemOnly)),
1798 "Attributes 'readnone and inaccessiblememonly' are incompatible!", V);
1799
1800 Assert(!(Attrs.hasFnAttribute(Attribute::NoInline) &&
1801 Attrs.hasFnAttribute(Attribute::AlwaysInline)),
1802 "Attributes 'noinline and alwaysinline' are incompatible!", V);
1803
1804 if (Attrs.hasFnAttribute(Attribute::OptimizeNone)) {
1805 Assert(Attrs.hasFnAttribute(Attribute::NoInline),
1806 "Attribute 'optnone' requires 'noinline'!", V);
1807
1808 Assert(!Attrs.hasFnAttribute(Attribute::OptimizeForSize),
1809 "Attributes 'optsize and optnone' are incompatible!", V);
1810
1811 Assert(!Attrs.hasFnAttribute(Attribute::MinSize),
1812 "Attributes 'minsize and optnone' are incompatible!", V);
1813 }
1814
1815 if (Attrs.hasFnAttribute(Attribute::JumpTable)) {
1816 const GlobalValue *GV = cast<GlobalValue>(V);
1817 Assert(GV->hasGlobalUnnamedAddr(),
1818 "Attribute 'jumptable' requires 'unnamed_addr'", V);
1819 }
1820
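// For illustration, 'allocsize' names the parameter(s) that determine the
// allocation size (the declarations below are hypothetical):
//   declare i8* @my_malloc(i64) allocsize(0)         ; size = arg 0
//   declare i8* @my_calloc(i64, i64) allocsize(0, 1) ; size = arg 0 * arg 1
// Each referenced parameter must exist and be an integer, as checked below.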
1821 if (Attrs.hasFnAttribute(Attribute::AllocSize)) {
1822 std::pair<unsigned, Optional<unsigned>> Args =
1823 Attrs.getAllocSizeArgs(AttributeList::FunctionIndex);
1824
1825 auto CheckParam = [&](StringRef Name, unsigned ParamNo) {
1826 if (ParamNo >= FT->getNumParams()) {
1827 CheckFailed("'allocsize' " + Name + " argument is out of bounds", V);
1828 return false;
1829 }
1830
1831 if (!FT->getParamType(ParamNo)->isIntegerTy()) {
1832 CheckFailed("'allocsize' " + Name +
1833 " argument must refer to an integer parameter",
1834 V);
1835 return false;
1836 }
1837
1838 return true;
1839 };
1840
1841 if (!CheckParam("element size", Args.first))
1842 return;
1843
1844 if (Args.second && !CheckParam("number of elements", *Args.second))
1845 return;
1846 }
1847
1848 if (Attrs.hasFnAttribute("frame-pointer")) {
1849 StringRef FP = Attrs.getAttribute(AttributeList::FunctionIndex,
1850 "frame-pointer").getValueAsString();
1851 if (FP != "all" && FP != "non-leaf" && FP != "none")
1852 CheckFailed("invalid value for 'frame-pointer' attribute: " + FP, V);
1853 }
1854
1855 if (Attrs.hasFnAttribute("patchable-function-prefix")) {
1856 StringRef S = Attrs
1857 .getAttribute(AttributeList::FunctionIndex,
1858 "patchable-function-prefix")
1859 .getValueAsString();
1860 unsigned N;
1861 if (S.getAsInteger(10, N))
1862 CheckFailed(
1863 "\"patchable-function-prefix\" takes an unsigned integer: " + S, V);
1864 }
1865 if (Attrs.hasFnAttribute("patchable-function-entry")) {
1866 StringRef S = Attrs
1867 .getAttribute(AttributeList::FunctionIndex,
1868 "patchable-function-entry")
1869 .getValueAsString();
1870 unsigned N;
1871 if (S.getAsInteger(10, N))
1872 CheckFailed(
1873 "\"patchable-function-entry\" takes an unsigned integer: " + S, V);
1874 }
1875 }
1876
1877 void Verifier::verifyFunctionMetadata(
1878 ArrayRef<std::pair<unsigned, MDNode *>> MDs) {
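// For illustration, a typical function-level !prof attachment looks like
// (the count is only an example):
//   define void @f() !prof !0 { ... }
//   !0 = !{!"function_entry_count", i64 1000}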
1879 for (const auto &Pair : MDs) {
1880 if (Pair.first == LLVMContext::MD_prof) {
1881 MDNode *MD = Pair.second;
1882 Assert(MD->getNumOperands() >= 2,
1883 "!prof annotations should have at least 2 operands", MD);
1884
1885 // Check first operand.
1886 Assert(MD->getOperand(0) != nullptr, "first operand should not be null",
1887 MD);
1888 Assert(isa<MDString>(MD->getOperand(0)),
1889 "expected string with name of the !prof annotation", MD);
1890 MDString *MDS = cast<MDString>(MD->getOperand(0));
1891 StringRef ProfName = MDS->getString();
1892 Assert(ProfName.equals("function_entry_count") ||
1893 ProfName.equals("synthetic_function_entry_count"),
1894 "first operand should be 'function_entry_count'"
1895 " or 'synthetic_function_entry_count'",
1896 MD);
1897
1898 // Check second operand.
1899 Assert(MD->getOperand(1) != nullptr, "second operand should not be null",
1900 MD);
1901 Assert(isa<ConstantAsMetadata>(MD->getOperand(1)),
1902 "expected integer argument to function_entry_count", MD);
1903 }
1904 }
1905 }
1906
1907 void Verifier::visitConstantExprsRecursively(const Constant *EntryC) {
1908 if (!ConstantExprVisited.insert(EntryC).second)
1909 return;
1910
1911 SmallVector<const Constant *, 16> Stack;
1912 Stack.push_back(EntryC);
1913
1914 while (!Stack.empty()) {
1915 const Constant *C = Stack.pop_back_val();
1916
1917 // Check this constant expression.
1918 if (const auto *CE = dyn_cast<ConstantExpr>(C))
1919 visitConstantExpr(CE);
1920
1921 if (const auto *GV = dyn_cast<GlobalValue>(C)) {
1922 // Global Values get visited separately, but we do need to make sure
1923 // that the global value is in the correct module
1924 Assert(GV->getParent() == &M, "Referencing global in another module!",
1925 EntryC, &M, GV, GV->getParent());
1926 continue;
1927 }
1928
1929 // Visit all sub-expressions.
1930 for (const Use &U : C->operands()) {
1931 const auto *OpC = dyn_cast<Constant>(U);
1932 if (!OpC)
1933 continue;
1934 if (!ConstantExprVisited.insert(OpC).second)
1935 continue;
1936 Stack.push_back(OpC);
1937 }
1938 }
1939 }
1940
1941 void Verifier::visitConstantExpr(const ConstantExpr *CE) {
1942 if (CE->getOpcode() == Instruction::BitCast)
1943 Assert(CastInst::castIsValid(Instruction::BitCast, CE->getOperand(0),
1944 CE->getType()),
1945 "Invalid bitcast", CE);
1946
1947 if (CE->getOpcode() == Instruction::IntToPtr ||
1948 CE->getOpcode() == Instruction::PtrToInt) {
1949 auto *PtrTy = CE->getOpcode() == Instruction::IntToPtr
1950 ? CE->getType()
1951 : CE->getOperand(0)->getType();
1952 StringRef Msg = CE->getOpcode() == Instruction::IntToPtr
1953 ? "inttoptr not supported for non-integral pointers"
1954 : "ptrtoint not supported for non-integral pointers";
1955 Assert(
1956 !DL.isNonIntegralPointerType(cast<PointerType>(PtrTy->getScalarType())),
1957 Msg);
1958 }
1959 }
1960
1961 bool Verifier::verifyAttributeCount(AttributeList Attrs, unsigned Params) {
1962 // There shouldn't be more attribute sets than there are parameters plus the
1963 // function and return value.
1964 return Attrs.getNumAttrSets() <= Params + 2;
1965 }
1966
1967 /// Verify that statepoint intrinsic is well formed.
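/// The operand layout assumed by the checks below is, roughly:
///   arg 0:  statepoint id
///   arg 1:  i32 number of patch bytes
///   arg 2:  the wrapped callee (a function pointer)
///   arg 3:  i32 number of call arguments
///   arg 4:  i64 flags
///   arg 5+: the wrapped call's arguments, then the transition-argument count
///           and arguments, then the deopt-argument count and arguments
///           (gc pointers may follow).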
1968 void Verifier::verifyStatepoint(const CallBase &Call) {
1969 assert(Call.getCalledFunction() &&
1970 Call.getCalledFunction()->getIntrinsicID() ==
1971 Intrinsic::experimental_gc_statepoint);
1972
1973 Assert(!Call.doesNotAccessMemory() && !Call.onlyReadsMemory() &&
1974 !Call.onlyAccessesArgMemory(),
1975 "gc.statepoint must read and write all memory to preserve "
1976 "reordering restrictions required by safepoint semantics",
1977 Call);
1978
1979 const int64_t NumPatchBytes =
1980 cast<ConstantInt>(Call.getArgOperand(1))->getSExtValue();
1981 assert(isInt<32>(NumPatchBytes) && "NumPatchBytesV is an i32!");
1982 Assert(NumPatchBytes >= 0,
1983 "gc.statepoint number of patchable bytes must be "
1984 "non-negative",
1985 Call);
1986
1987 const Value *Target = Call.getArgOperand(2);
1988 auto *PT = dyn_cast<PointerType>(Target->getType());
1989 Assert(PT && PT->getElementType()->isFunctionTy(),
1990 "gc.statepoint callee must be of function pointer type", Call, Target);
1991 FunctionType *TargetFuncType = cast<FunctionType>(PT->getElementType());
1992
1993 const int NumCallArgs = cast<ConstantInt>(Call.getArgOperand(3))->getZExtValue();
1994 Assert(NumCallArgs >= 0,
1995 "gc.statepoint number of arguments to underlying call "
1996 "must be non-negative",
1997 Call);
1998 const int NumParams = (int)TargetFuncType->getNumParams();
1999 if (TargetFuncType->isVarArg()) {
2000 Assert(NumCallArgs >= NumParams,
2001 "gc.statepoint mismatch in number of vararg call args", Call);
2002
2003 // TODO: Remove this limitation
2004 Assert(TargetFuncType->getReturnType()->isVoidTy(),
2005 "gc.statepoint doesn't support wrapping non-void "
2006 "vararg functions yet",
2007 Call);
2008 } else
2009 Assert(NumCallArgs == NumParams,
2010 "gc.statepoint mismatch in number of call args", Call);
2011
2012 const uint64_t Flags
2013 = cast<ConstantInt>(Call.getArgOperand(4))->getZExtValue();
2014 Assert((Flags & ~(uint64_t)StatepointFlags::MaskAll) == 0,
2015 "unknown flag used in gc.statepoint flags argument", Call);
2016
2017 // Verify that the types of the call parameter arguments match
2018 // the type of the wrapped callee.
2019 AttributeList Attrs = Call.getAttributes();
2020 for (int i = 0; i < NumParams; i++) {
2021 Type *ParamType = TargetFuncType->getParamType(i);
2022 Type *ArgType = Call.getArgOperand(5 + i)->getType();
2023 Assert(ArgType == ParamType,
2024 "gc.statepoint call argument does not match wrapped "
2025 "function type",
2026 Call);
2027
2028 if (TargetFuncType->isVarArg()) {
2029 AttributeSet ArgAttrs = Attrs.getParamAttributes(5 + i);
2030 Assert(!ArgAttrs.hasAttribute(Attribute::StructRet),
2031 "Attribute 'sret' cannot be used for vararg call arguments!",
2032 Call);
2033 }
2034 }
2035
2036 const int EndCallArgsInx = 4 + NumCallArgs;
2037
2038 const Value *NumTransitionArgsV = Call.getArgOperand(EndCallArgsInx + 1);
2039 Assert(isa<ConstantInt>(NumTransitionArgsV),
2040 "gc.statepoint number of transition arguments "
2041 "must be constant integer",
2042 Call);
2043 const int NumTransitionArgs =
2044 cast<ConstantInt>(NumTransitionArgsV)->getZExtValue();
2045 Assert(NumTransitionArgs >= 0,
2046 "gc.statepoint number of transition arguments must be non-negative", Call);
2047 const int EndTransitionArgsInx = EndCallArgsInx + 1 + NumTransitionArgs;
2048
2049 const Value *NumDeoptArgsV = Call.getArgOperand(EndTransitionArgsInx + 1);
2050 Assert(isa<ConstantInt>(NumDeoptArgsV),
2051 "gc.statepoint number of deoptimization arguments "
2052 "must be constant integer",
2053 Call);
2054 const int NumDeoptArgs = cast<ConstantInt>(NumDeoptArgsV)->getZExtValue();
2055 Assert(NumDeoptArgs >= 0,
2056 "gc.statepoint number of deoptimization arguments "
2057 "must be non-negative",
2058 Call);
2059
2060 const int ExpectedNumArgs =
2061 7 + NumCallArgs + NumTransitionArgs + NumDeoptArgs;
2062 Assert(ExpectedNumArgs <= (int)Call.arg_size(),
2063 "gc.statepoint too few arguments according to length fields", Call);
2064
2065 // Check that the only uses of this gc.statepoint are gc.result or
2066 // gc.relocate calls which are tied to this statepoint and thus part
2067 // of the same statepoint sequence
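// Illustrative shape of a legal use (mangled intrinsic suffixes elided):
//   %tok = call token @llvm.experimental.gc.statepoint...(...)
//   %res = call i32 @llvm.experimental.gc.result...(token %tok)
//   %rel = call i8 addrspace(1)* @llvm.experimental.gc.relocate...(token %tok, ...)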
2068 for (const User *U : Call.users()) {
2069 const CallInst *UserCall = dyn_cast<const CallInst>(U);
2070 Assert(UserCall, "illegal use of statepoint token", Call, U);
2071 if (!UserCall)
2072 continue;
2073 Assert(isa<GCRelocateInst>(UserCall) || isa<GCResultInst>(UserCall),
2074 "gc.result or gc.relocate are the only value uses "
2075 "of a gc.statepoint",
2076 Call, U);
2077 if (isa<GCResultInst>(UserCall)) {
2078 Assert(UserCall->getArgOperand(0) == &Call,
2079 "gc.result connected to wrong gc.statepoint", Call, UserCall);
2080 } else if (isa<GCRelocateInst>(UserCall)) {
2081 Assert(UserCall->getArgOperand(0) == &Call,
2082 "gc.relocate connected to wrong gc.statepoint", Call, UserCall);
2083 }
2084 }
2085
2086 // Note: It is legal for a single derived pointer to be listed multiple
2087 // times. It's non-optimal, but it is legal. It can also happen after
2088 // insertion if we strip a bitcast away.
2089 // Note: It is really tempting to check that each base is relocated and
2090 // that a derived pointer is never reused as a base pointer. This turns
2091 // out to be problematic since optimizations run after safepoint insertion
2092 // can recognize equality properties that the insertion logic doesn't know
2093 // about. See example statepoint.ll in the verifier subdirectory
2094 }
2095
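// For illustration (function and value names are hypothetical):
//   define void @parent() {
//     %a = alloca i32
//     call void (...) @llvm.localescape(i32* %a)   ; escapes one object
//     ...
//   }
//   ; in an outlined helper, index 0 is the only legal index here:
//   %p = call i8* @llvm.localrecover(i8* bitcast (void ()* @parent to i8*),
//                                    i8* %fp, i32 0)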
2096 void Verifier::verifyFrameRecoverIndices() {
2097 for (auto &Counts : FrameEscapeInfo) {
2098 Function *F = Counts.first;
2099 unsigned EscapedObjectCount = Counts.second.first;
2100 unsigned MaxRecoveredIndex = Counts.second.second;
2101 Assert(MaxRecoveredIndex <= EscapedObjectCount,
2102 "all indices passed to llvm.localrecover must be less than the "
2103 "number of arguments passed to llvm.localescape in the parent "
2104 "function",
2105 F);
2106 }
2107 }
2108
2109 static Instruction *getSuccPad(Instruction *Terminator) {
2110 BasicBlock *UnwindDest;
2111 if (auto *II = dyn_cast<InvokeInst>(Terminator))
2112 UnwindDest = II->getUnwindDest();
2113 else if (auto *CSI = dyn_cast<CatchSwitchInst>(Terminator))
2114 UnwindDest = CSI->getUnwindDest();
2115 else
2116 UnwindDest = cast<CleanupReturnInst>(Terminator)->getUnwindDest();
2117 return UnwindDest->getFirstNonPHI();
2118 }
2119
2120 void Verifier::verifySiblingFuncletUnwinds() {
2121 SmallPtrSet<Instruction *, 8> Visited;
2122 SmallPtrSet<Instruction *, 8> Active;
2123 for (const auto &Pair : SiblingFuncletInfo) {
2124 Instruction *PredPad = Pair.first;
2125 if (Visited.count(PredPad))
2126 continue;
2127 Active.insert(PredPad);
2128 Instruction *Terminator = Pair.second;
2129 do {
2130 Instruction *SuccPad = getSuccPad(Terminator);
2131 if (Active.count(SuccPad)) {
2132 // Found a cycle; report error
2133 Instruction *CyclePad = SuccPad;
2134 SmallVector<Instruction *, 8> CycleNodes;
2135 do {
2136 CycleNodes.push_back(CyclePad);
2137 Instruction *CycleTerminator = SiblingFuncletInfo[CyclePad];
2138 if (CycleTerminator != CyclePad)
2139 CycleNodes.push_back(CycleTerminator);
2140 CyclePad = getSuccPad(CycleTerminator);
2141 } while (CyclePad != SuccPad);
2142 Assert(false, "EH pads can't handle each other's exceptions",
2143 ArrayRef<Instruction *>(CycleNodes));
2144 }
2145 // Don't re-walk a node we've already checked
2146 if (!Visited.insert(SuccPad).second)
2147 break;
2148 // Walk to this successor if it has a map entry.
2149 PredPad = SuccPad;
2150 auto TermI = SiblingFuncletInfo.find(PredPad);
2151 if (TermI == SiblingFuncletInfo.end())
2152 break;
2153 Terminator = TermI->second;
2154 Active.insert(PredPad);
2155 } while (true);
2156 // Each node only has one successor, so we've walked all the active
2157 // nodes' successors.
2158 Active.clear();
2159 }
2160 }
2161
2162 // visitFunction - Verify that a function is ok.
2163 //
2164 void Verifier::visitFunction(const Function &F) {
2165 visitGlobalValue(F);
2166
2167 // Check function arguments.
2168 FunctionType *FT = F.getFunctionType();
2169 unsigned NumArgs = F.arg_size();
2170
2171 Assert(&Context == &F.getContext(),
2172 "Function context does not match Module context!", &F);
2173
2174 Assert(!F.hasCommonLinkage(), "Functions may not have common linkage", &F);
2175 Assert(FT->getNumParams() == NumArgs,
2176 "# formal arguments must match # of arguments for function type!", &F,
2177 FT);
2178 Assert(F.getReturnType()->isFirstClassType() ||
2179 F.getReturnType()->isVoidTy() || F.getReturnType()->isStructTy(),
2180 "Functions cannot return aggregate values!", &F);
2181
2182 Assert(!F.hasStructRetAttr() || F.getReturnType()->isVoidTy(),
2183 "Invalid struct return type!", &F);
2184
2185 AttributeList Attrs = F.getAttributes();
2186
2187 Assert(verifyAttributeCount(Attrs, FT->getNumParams()),
2188 "Attribute after last parameter!", &F);
2189
2190 bool isLLVMdotName = F.getName().size() >= 5 &&
2191 F.getName().substr(0, 5) == "llvm.";
2192
2193 // Check function attributes.
2194 verifyFunctionAttrs(FT, Attrs, &F, isLLVMdotName);
2195
2196 // On function declarations/definitions, we do not support the builtin
2197 // attribute. We do not check this in VerifyFunctionAttrs since that is
2198 // checking for Attributes that can/can not ever be on functions.
2199 Assert(!Attrs.hasFnAttribute(Attribute::Builtin),
2200 "Attribute 'builtin' can only be applied to a callsite.", &F);
2201
2202 // Check that this function meets the restrictions on this calling convention.
2203 // Sometimes varargs is used for perfectly forwarding thunks, so some of these
2204 // restrictions can be lifted.
2205 switch (F.getCallingConv()) {
2206 default:
2207 case CallingConv::C:
2208 break;
2209 case CallingConv::AMDGPU_KERNEL:
2210 case CallingConv::SPIR_KERNEL:
2211 Assert(F.getReturnType()->isVoidTy(),
2212 "Calling convention requires void return type", &F);
2213 LLVM_FALLTHROUGH;
2214 case CallingConv::AMDGPU_VS:
2215 case CallingConv::AMDGPU_HS:
2216 case CallingConv::AMDGPU_GS:
2217 case CallingConv::AMDGPU_PS:
2218 case CallingConv::AMDGPU_CS:
2219 Assert(!F.hasStructRetAttr(),
2220 "Calling convention does not allow sret", &F);
2221 LLVM_FALLTHROUGH;
2222 case CallingConv::Fast:
2223 case CallingConv::Cold:
2224 case CallingConv::Intel_OCL_BI:
2225 case CallingConv::PTX_Kernel:
2226 case CallingConv::PTX_Device:
2227 Assert(!F.isVarArg(), "Calling convention does not support varargs or "
2228 "perfect forwarding!",
2229 &F);
2230 break;
2231 }
2232
2233 // Check that the argument values match the function type for this function...
2234 unsigned i = 0;
2235 for (const Argument &Arg : F.args()) {
2236 Assert(Arg.getType() == FT->getParamType(i),
2237 "Argument value does not match function argument type!", &Arg,
2238 FT->getParamType(i));
2239 Assert(Arg.getType()->isFirstClassType(),
2240 "Function arguments must have first-class types!", &Arg);
2241 if (!isLLVMdotName) {
2242 Assert(!Arg.getType()->isMetadataTy(),
2243 "Function takes metadata but isn't an intrinsic", &Arg, &F);
2244 Assert(!Arg.getType()->isTokenTy(),
2245 "Function takes token but isn't an intrinsic", &Arg, &F);
2246 }
2247
2248 // Check that swifterror argument is only used by loads and stores.
2249 if (Attrs.hasParamAttribute(i, Attribute::SwiftError)) {
2250 verifySwiftErrorValue(&Arg);
2251 }
2252 ++i;
2253 }
2254
2255 if (!isLLVMdotName)
2256 Assert(!F.getReturnType()->isTokenTy(),
2257 "Functions returns a token but isn't an intrinsic", &F);
2258
2259 // Get the function metadata attachments.
2260 SmallVector<std::pair<unsigned, MDNode *>, 4> MDs;
2261 F.getAllMetadata(MDs);
2262 assert(F.hasMetadata() != MDs.empty() && "Bit out-of-sync");
2263 verifyFunctionMetadata(MDs);
2264
2265 // Check validity of the personality function
2266 if (F.hasPersonalityFn()) {
2267 auto *Per = dyn_cast<Function>(F.getPersonalityFn()->stripPointerCasts());
2268 if (Per)
2269 Assert(Per->getParent() == F.getParent(),
2270 "Referencing personality function in another module!",
2271 &F, F.getParent(), Per, Per->getParent());
2272 }
2273
2274 if (F.isMaterializable()) {
2275 // Function has a body somewhere we can't see.
2276 Assert(MDs.empty(), "unmaterialized function cannot have metadata", &F,
2277 MDs.empty() ? nullptr : MDs.front().second);
2278 } else if (F.isDeclaration()) {
2279 for (const auto &I : MDs) {
2280 // This is used for call site debug information.
2281 AssertDI(I.first != LLVMContext::MD_dbg ||
2282 !cast<DISubprogram>(I.second)->isDistinct(),
2283 "function declaration may only have a unique !dbg attachment",
2284 &F);
2285 Assert(I.first != LLVMContext::MD_prof,
2286 "function declaration may not have a !prof attachment", &F);
2287
2288 // Verify the metadata itself.
2289 visitMDNode(*I.second);
2290 }
2291 Assert(!F.hasPersonalityFn(),
2292 "Function declaration shouldn't have a personality routine", &F);
2293 } else {
2294 // Verify that this function (which has a body) is not named "llvm.*". It
2295 // is not legal to define intrinsics.
2296 Assert(!isLLVMdotName, "llvm intrinsics cannot be defined!", &F);
2297
2298 // Check the entry node
2299 const BasicBlock *Entry = &F.getEntryBlock();
2300 Assert(pred_empty(Entry),
2301 "Entry block to function must not have predecessors!", Entry);
2302
2303 // The address of the entry block cannot be taken, unless it is dead.
2304 if (Entry->hasAddressTaken()) {
2305 Assert(!BlockAddress::lookup(Entry)->isConstantUsed(),
2306 "blockaddress may not be used with the entry block!", Entry);
2307 }
2308
2309 unsigned NumDebugAttachments = 0, NumProfAttachments = 0;
2310 // Visit metadata attachments.
2311 for (const auto &I : MDs) {
2312 // Verify that the attachment is legal.
2313 switch (I.first) {
2314 default:
2315 break;
2316 case LLVMContext::MD_dbg: {
2317 ++NumDebugAttachments;
2318 AssertDI(NumDebugAttachments == 1,
2319 "function must have a single !dbg attachment", &F, I.second);
2320 AssertDI(isa<DISubprogram>(I.second),
2321 "function !dbg attachment must be a subprogram", &F, I.second);
2322 auto *SP = cast<DISubprogram>(I.second);
2323 const Function *&AttachedTo = DISubprogramAttachments[SP];
2324 AssertDI(!AttachedTo || AttachedTo == &F,
2325 "DISubprogram attached to more than one function", SP, &F);
2326 AttachedTo = &F;
2327 break;
2328 }
2329 case LLVMContext::MD_prof:
2330 ++NumProfAttachments;
2331 Assert(NumProfAttachments == 1,
2332 "function must have a single !prof attachment", &F, I.second);
2333 break;
2334 }
2335
2336 // Verify the metadata itself.
2337 visitMDNode(*I.second);
2338 }
2339 }
2340
2341 // If this function is actually an intrinsic, verify that it is only used in
2342 // direct call/invokes, never having its "address taken".
2343 // Only do this if the module is materialized, otherwise we don't have all the
2344 // uses.
2345 if (F.getIntrinsicID() && F.getParent()->isMaterialized()) {
2346 const User *U;
2347 if (F.hasAddressTaken(&U))
2348 Assert(false, "Invalid user of intrinsic instruction!", U);
2349 }
2350
2351 auto *N = F.getSubprogram();
2352 HasDebugInfo = (N != nullptr);
2353 if (!HasDebugInfo)
2354 return;
2355
2356 // Check that all !dbg attachments lead back to N (or, at least, another
2357 // subprogram that describes the same function).
2358 //
2359 // FIXME: Check this incrementally while visiting !dbg attachments.
2360 // FIXME: Only check when N is the canonical subprogram for F.
2361 SmallPtrSet<const MDNode *, 32> Seen;
2362 auto VisitDebugLoc = [&](const Instruction &I, const MDNode *Node) {
2363 // Be careful about using DILocation here since we might be dealing with
2364 // broken code (this is the Verifier after all).
2365 const DILocation *DL = dyn_cast_or_null<DILocation>(Node);
2366 if (!DL)
2367 return;
2368 if (!Seen.insert(DL).second)
2369 return;
2370
2371 Metadata *Parent = DL->getRawScope();
2372 AssertDI(Parent && isa<DILocalScope>(Parent),
2373 "DILocation's scope must be a DILocalScope", N, &F, &I, DL,
2374 Parent);
2375 DILocalScope *Scope = DL->getInlinedAtScope();
2376 if (Scope && !Seen.insert(Scope).second)
2377 return;
2378
2379 DISubprogram *SP = Scope ? Scope->getSubprogram() : nullptr;
2380
2381 // Scope and SP could be the same MDNode and we don't want to skip
2382 // validation in that case
2383 if (SP && ((Scope != SP) && !Seen.insert(SP).second))
2384 return;
2385
2386 // FIXME: Once N is canonical, check "SP == &N".
2387 AssertDI(SP->describes(&F),
2388 "!dbg attachment points at wrong subprogram for function", N, &F,
2389 &I, DL, Scope, SP);
2390 };
2391 for (auto &BB : F)
2392 for (auto &I : BB) {
2393 VisitDebugLoc(I, I.getDebugLoc().getAsMDNode());
2394 // The llvm.loop annotations also contain two DILocations.
2395 if (auto MD = I.getMetadata(LLVMContext::MD_loop))
2396 for (unsigned i = 1; i < MD->getNumOperands(); ++i)
2397 VisitDebugLoc(I, dyn_cast_or_null<MDNode>(MD->getOperand(i)));
2398 if (BrokenDebugInfo)
2399 return;
2400 }
2401 }
2402
2403 // visitBasicBlock - Verify that a basic block is well formed...
2404 //
2405 void Verifier::visitBasicBlock(BasicBlock &BB) {
2406 InstsInThisBlock.clear();
2407
2408 // Ensure that basic blocks have terminators!
2409 Assert(BB.getTerminator(), "Basic Block does not have terminator!", &BB);
2410
2411 // Check constraints that this basic block imposes on all of the PHI nodes in
2412 // it.
2413 if (isa<PHINode>(BB.front())) {
2414 SmallVector<BasicBlock*, 8> Preds(pred_begin(&BB), pred_end(&BB));
2415 SmallVector<std::pair<BasicBlock*, Value*>, 8> Values;
2416 llvm::sort(Preds);
2417 for (const PHINode &PN : BB.phis()) {
2418 // Ensure that PHI nodes have at least one entry!
2419 Assert(PN.getNumIncomingValues() != 0,
2420 "PHI nodes must have at least one entry. If the block is dead, "
2421 "the PHI should be removed!",
2422 &PN);
2423 Assert(PN.getNumIncomingValues() == Preds.size(),
2424 "PHINode should have one entry for each predecessor of its "
2425 "parent basic block!",
2426 &PN);
2427
2428 // Get and sort all incoming values in the PHI node...
2429 Values.clear();
2430 Values.reserve(PN.getNumIncomingValues());
2431 for (unsigned i = 0, e = PN.getNumIncomingValues(); i != e; ++i)
2432 Values.push_back(
2433 std::make_pair(PN.getIncomingBlock(i), PN.getIncomingValue(i)));
2434 llvm::sort(Values);
2435
2436 for (unsigned i = 0, e = Values.size(); i != e; ++i) {
2437 // Check to make sure that if there is more than one entry for a
2438 // particular basic block in this PHI node, that the incoming values are
2439 // all identical.
2440 //
2441 Assert(i == 0 || Values[i].first != Values[i - 1].first ||
2442 Values[i].second == Values[i - 1].second,
2443 "PHI node has multiple entries for the same basic block with "
2444 "different incoming values!",
2445 &PN, Values[i].first, Values[i].second, Values[i - 1].second);
2446
2447 // Check to make sure that the predecessors and PHI node entries are
2448 // matched up.
2449 Assert(Values[i].first == Preds[i],
2450 "PHI node entries do not match predecessors!", &PN,
2451 Values[i].first, Preds[i]);
2452 }
2453 }
2454 }
2455
2456 // Check that all instructions have their parent pointers set up correctly.
2457 for (auto &I : BB)
2458 {
2459 Assert(I.getParent() == &BB, "Instruction has bogus parent pointer!");
2460 }
2461 }
2462
2463 void Verifier::visitTerminator(Instruction &I) {
2464 // Ensure that terminators only exist at the end of the basic block.
2465 Assert(&I == I.getParent()->getTerminator(),
2466 "Terminator found in the middle of a basic block!", I.getParent());
2467 visitInstruction(I);
2468 }
2469
2470 void Verifier::visitBranchInst(BranchInst &BI) {
2471 if (BI.isConditional()) {
2472 Assert(BI.getCondition()->getType()->isIntegerTy(1),
2473 "Branch condition is not 'i1' type!", &BI, BI.getCondition());
2474 }
2475 visitTerminator(BI);
2476 }
2477
2478 void Verifier::visitReturnInst(ReturnInst &RI) {
2479 Function *F = RI.getParent()->getParent();
2480 unsigned N = RI.getNumOperands();
2481 if (F->getReturnType()->isVoidTy())
2482 Assert(N == 0,
2483 "Found return instr that returns non-void in Function of void "
2484 "return type!",
2485 &RI, F->getReturnType());
2486 else
2487 Assert(N == 1 && F->getReturnType() == RI.getOperand(0)->getType(),
2488 "Function return type does not match operand "
2489 "type of return inst!",
2490 &RI, F->getReturnType());
2491
2492 // Check to make sure that the return value has necessary properties for
2493 // terminators...
2494 visitTerminator(RI);
2495 }
2496
2497 void Verifier::visitSwitchInst(SwitchInst &SI) {
2498 // Check to make sure that all of the constants in the switch instruction
2499 // have the same type as the switched-on value.
2500 Type *SwitchTy = SI.getCondition()->getType();
2501 SmallPtrSet<ConstantInt*, 32> Constants;
2502 for (auto &Case : SI.cases()) {
2503 Assert(Case.getCaseValue()->getType() == SwitchTy,
2504 "Switch constants must all be same type as switch value!", &SI);
2505 Assert(Constants.insert(Case.getCaseValue()).second,
2506 "Duplicate integer as switch case", &SI, Case.getCaseValue());
2507 }
2508
2509 visitTerminator(SI);
2510 }
2511
2512 void Verifier::visitIndirectBrInst(IndirectBrInst &BI) {
2513 Assert(BI.getAddress()->getType()->isPointerTy(),
2514 "Indirectbr operand must have pointer type!", &BI);
2515 for (unsigned i = 0, e = BI.getNumDestinations(); i != e; ++i)
2516 Assert(BI.getDestination(i)->getType()->isLabelTy(),
2517 "Indirectbr destinations must all have label type!", &BI);
2518
2519 visitTerminator(BI);
2520 }
2521
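// For illustration, an asm-goto callbr (constraints and labels are
// hypothetical) looks roughly like:
//   callbr void asm sideeffect "...", "r,X"(i32 %x,
//             i8* blockaddress(@f, %indirect))
//         to label %fallthrough [label %indirect]
// Every indirect destination must show up as a blockaddress argument, which
// is what the checks below enforce.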
2522 void Verifier::visitCallBrInst(CallBrInst &CBI) {
2523 Assert(CBI.isInlineAsm(), "Callbr is currently only used for asm-goto!",
2524 &CBI);
2525 Assert(CBI.getType()->isVoidTy(), "Callbr return value is not supported!",
2526 &CBI);
2527 for (unsigned i = 0, e = CBI.getNumSuccessors(); i != e; ++i)
2528 Assert(CBI.getSuccessor(i)->getType()->isLabelTy(),
2529 "Callbr successors must all have label type!", &CBI);
2530 for (unsigned i = 0, e = CBI.getNumOperands(); i != e; ++i) {
2531 Assert(i >= CBI.getNumArgOperands() || !isa<BasicBlock>(CBI.getOperand(i)),
2532 "Using an unescaped label as a callbr argument!", &CBI);
2533 if (isa<BasicBlock>(CBI.getOperand(i)))
2534 for (unsigned j = i + 1; j != e; ++j)
2535 Assert(CBI.getOperand(i) != CBI.getOperand(j),
2536 "Duplicate callbr destination!", &CBI);
2537 }
2538 {
2539 SmallPtrSet<BasicBlock *, 4> ArgBBs;
2540 for (Value *V : CBI.args())
2541 if (auto *BA = dyn_cast<BlockAddress>(V))
2542 ArgBBs.insert(BA->getBasicBlock());
2543 for (BasicBlock *BB : CBI.getIndirectDests())
2544 Assert(ArgBBs.find(BB) != ArgBBs.end(),
2545 "Indirect label missing from arglist.", &CBI);
2546 }
2547
2548 visitTerminator(CBI);
2549 }
2550
2551 void Verifier::visitSelectInst(SelectInst &SI) {
2552 Assert(!SelectInst::areInvalidOperands(SI.getOperand(0), SI.getOperand(1),
2553 SI.getOperand(2)),
2554 "Invalid operands for select instruction!", &SI);
2555
2556 Assert(SI.getTrueValue()->getType() == SI.getType(),
2557 "Select values must have same type as select instruction!", &SI);
2558 visitInstruction(SI);
2559 }
2560
2561 /// visitUserOp1 - User-defined operators shouldn't live beyond the lifetime of
2562 /// a pass; if any exist, it's an error.
2563 ///
2564 void Verifier::visitUserOp1(Instruction &I) {
2565 Assert(false, "User-defined operators should not live outside of a pass!", &I);
2566 }
2567
2568 void Verifier::visitTruncInst(TruncInst &I) {
2569 // Get the source and destination types
2570 Type *SrcTy = I.getOperand(0)->getType();
2571 Type *DestTy = I.getType();
2572
2573 // Get the size of the types in bits, we'll need this later
2574 unsigned SrcBitSize = SrcTy->getScalarSizeInBits();
2575 unsigned DestBitSize = DestTy->getScalarSizeInBits();
2576
2577 Assert(SrcTy->isIntOrIntVectorTy(), "Trunc only operates on integer", &I);
2578 Assert(DestTy->isIntOrIntVectorTy(), "Trunc only produces integer", &I);
2579 Assert(SrcTy->isVectorTy() == DestTy->isVectorTy(),
2580 "trunc source and destination must both be a vector or neither", &I);
2581 Assert(SrcBitSize > DestBitSize, "DestTy too big for Trunc", &I);
2582
2583 visitInstruction(I);
2584 }
2585
2586 void Verifier::visitZExtInst(ZExtInst &I) {
2587 // Get the source and destination types
2588 Type *SrcTy = I.getOperand(0)->getType();
2589 Type *DestTy = I.getType();
2590
2591 // Get the size of the types in bits, we'll need this later
2592 Assert(SrcTy->isIntOrIntVectorTy(), "ZExt only operates on integer", &I);
2593 Assert(DestTy->isIntOrIntVectorTy(), "ZExt only produces an integer", &I);
2594 Assert(SrcTy->isVectorTy() == DestTy->isVectorTy(),
2595 "zext source and destination must both be a vector or neither", &I);
2596 unsigned SrcBitSize = SrcTy->getScalarSizeInBits();
2597 unsigned DestBitSize = DestTy->getScalarSizeInBits();
2598
2599 Assert(SrcBitSize < DestBitSize, "Type too small for ZExt", &I);
2600
2601 visitInstruction(I);
2602 }
2603
2604 void Verifier::visitSExtInst(SExtInst &I) {
2605 // Get the source and destination types
2606 Type *SrcTy = I.getOperand(0)->getType();
2607 Type *DestTy = I.getType();
2608
2609 // Get the size of the types in bits, we'll need this later
2610 unsigned SrcBitSize = SrcTy->getScalarSizeInBits();
2611 unsigned DestBitSize = DestTy->getScalarSizeInBits();
2612
2613 Assert(SrcTy->isIntOrIntVectorTy(), "SExt only operates on integer", &I);
2614 Assert(DestTy->isIntOrIntVectorTy(), "SExt only produces an integer", &I);
2615 Assert(SrcTy->isVectorTy() == DestTy->isVectorTy(),
2616 "sext source and destination must both be a vector or neither", &I);
2617 Assert(SrcBitSize < DestBitSize, "Type too small for SExt", &I);
2618
2619 visitInstruction(I);
2620 }
2621
2622 void Verifier::visitFPTruncInst(FPTruncInst &I) {
2623 // Get the source and destination types
2624 Type *SrcTy = I.getOperand(0)->getType();
2625 Type *DestTy = I.getType();
2626 // Get the size of the types in bits, we'll need this later
2627 unsigned SrcBitSize = SrcTy->getScalarSizeInBits();
2628 unsigned DestBitSize = DestTy->getScalarSizeInBits();
2629
2630 Assert(SrcTy->isFPOrFPVectorTy(), "FPTrunc only operates on FP", &I);
2631 Assert(DestTy->isFPOrFPVectorTy(), "FPTrunc only produces an FP", &I);
2632 Assert(SrcTy->isVectorTy() == DestTy->isVectorTy(),
2633 "fptrunc source and destination must both be a vector or neither", &I);
2634 Assert(SrcBitSize > DestBitSize, "DestTy too big for FPTrunc", &I);
2635
2636 visitInstruction(I);
2637 }
2638
2639 void Verifier::visitFPExtInst(FPExtInst &I) {
2640 // Get the source and destination types
2641 Type *SrcTy = I.getOperand(0)->getType();
2642 Type *DestTy = I.getType();
2643
2644 // Get the size of the types in bits, we'll need this later
2645 unsigned SrcBitSize = SrcTy->getScalarSizeInBits();
2646 unsigned DestBitSize = DestTy->getScalarSizeInBits();
2647
2648 Assert(SrcTy->isFPOrFPVectorTy(), "FPExt only operates on FP", &I);
2649 Assert(DestTy->isFPOrFPVectorTy(), "FPExt only produces an FP", &I);
2650 Assert(SrcTy->isVectorTy() == DestTy->isVectorTy(),
2651 "fpext source and destination must both be a vector or neither", &I);
2652 Assert(SrcBitSize < DestBitSize, "DestTy too small for FPExt", &I);
2653
2654 visitInstruction(I);
2655 }
2656
2657 void Verifier::visitUIToFPInst(UIToFPInst &I) {
2658 // Get the source and destination types
2659 Type *SrcTy = I.getOperand(0)->getType();
2660 Type *DestTy = I.getType();
2661
2662 bool SrcVec = SrcTy->isVectorTy();
2663 bool DstVec = DestTy->isVectorTy();
2664
2665 Assert(SrcVec == DstVec,
2666 "UIToFP source and dest must both be vector or scalar", &I);
2667 Assert(SrcTy->isIntOrIntVectorTy(),
2668 "UIToFP source must be integer or integer vector", &I);
2669 Assert(DestTy->isFPOrFPVectorTy(), "UIToFP result must be FP or FP vector",
2670 &I);
2671
2672 if (SrcVec && DstVec)
2673 Assert(cast<VectorType>(SrcTy)->getNumElements() ==
2674 cast<VectorType>(DestTy)->getNumElements(),
2675 "UIToFP source and dest vector length mismatch", &I);
2676
2677 visitInstruction(I);
2678 }
2679
2680 void Verifier::visitSIToFPInst(SIToFPInst &I) {
2681 // Get the source and destination types
2682 Type *SrcTy = I.getOperand(0)->getType();
2683 Type *DestTy = I.getType();
2684
2685 bool SrcVec = SrcTy->isVectorTy();
2686 bool DstVec = DestTy->isVectorTy();
2687
2688 Assert(SrcVec == DstVec,
2689 "SIToFP source and dest must both be vector or scalar", &I);
2690 Assert(SrcTy->isIntOrIntVectorTy(),
2691 "SIToFP source must be integer or integer vector", &I);
2692 Assert(DestTy->isFPOrFPVectorTy(), "SIToFP result must be FP or FP vector",
2693 &I);
2694
2695 if (SrcVec && DstVec)
2696 Assert(cast<VectorType>(SrcTy)->getNumElements() ==
2697 cast<VectorType>(DestTy)->getNumElements(),
2698 "SIToFP source and dest vector length mismatch", &I);
2699
2700 visitInstruction(I);
2701 }
2702
2703 void Verifier::visitFPToUIInst(FPToUIInst &I) {
2704 // Get the source and destination types
2705 Type *SrcTy = I.getOperand(0)->getType();
2706 Type *DestTy = I.getType();
2707
2708 bool SrcVec = SrcTy->isVectorTy();
2709 bool DstVec = DestTy->isVectorTy();
2710
2711 Assert(SrcVec == DstVec,
2712 "FPToUI source and dest must both be vector or scalar", &I);
2713 Assert(SrcTy->isFPOrFPVectorTy(), "FPToUI source must be FP or FP vector",
2714 &I);
2715 Assert(DestTy->isIntOrIntVectorTy(),
2716 "FPToUI result must be integer or integer vector", &I);
2717
2718 if (SrcVec && DstVec)
2719 Assert(cast<VectorType>(SrcTy)->getNumElements() ==
2720 cast<VectorType>(DestTy)->getNumElements(),
2721 "FPToUI source and dest vector length mismatch", &I);
2722
2723 visitInstruction(I);
2724 }
2725
2726 void Verifier::visitFPToSIInst(FPToSIInst &I) {
2727 // Get the source and destination types
2728 Type *SrcTy = I.getOperand(0)->getType();
2729 Type *DestTy = I.getType();
2730
2731 bool SrcVec = SrcTy->isVectorTy();
2732 bool DstVec = DestTy->isVectorTy();
2733
2734 Assert(SrcVec == DstVec,
2735 "FPToSI source and dest must both be vector or scalar", &I);
2736 Assert(SrcTy->isFPOrFPVectorTy(), "FPToSI source must be FP or FP vector",
2737 &I);
2738 Assert(DestTy->isIntOrIntVectorTy(),
2739 "FPToSI result must be integer or integer vector", &I);
2740
2741 if (SrcVec && DstVec)
2742 Assert(cast<VectorType>(SrcTy)->getNumElements() ==
2743 cast<VectorType>(DestTy)->getNumElements(),
2744 "FPToSI source and dest vector length mismatch", &I);
2745
2746 visitInstruction(I);
2747 }
2748
2749 void Verifier::visitPtrToIntInst(PtrToIntInst &I) {
2750 // Get the source and destination types
2751 Type *SrcTy = I.getOperand(0)->getType();
2752 Type *DestTy = I.getType();
2753
2754 Assert(SrcTy->isPtrOrPtrVectorTy(), "PtrToInt source must be pointer", &I);
2755
2756 if (auto *PTy = dyn_cast<PointerType>(SrcTy->getScalarType()))
2757 Assert(!DL.isNonIntegralPointerType(PTy),
2758 "ptrtoint not supported for non-integral pointers");
2759
2760 Assert(DestTy->isIntOrIntVectorTy(), "PtrToInt result must be integral", &I);
2761 Assert(SrcTy->isVectorTy() == DestTy->isVectorTy(), "PtrToInt type mismatch",
2762 &I);
2763
2764 if (SrcTy->isVectorTy()) {
2765 VectorType *VSrc = cast<VectorType>(SrcTy);
2766 VectorType *VDest = cast<VectorType>(DestTy);
2767 Assert(VSrc->getNumElements() == VDest->getNumElements(),
2768 "PtrToInt Vector width mismatch", &I);
2769 }
2770
2771 visitInstruction(I);
2772 }
2773
2774 void Verifier::visitIntToPtrInst(IntToPtrInst &I) {
2775 // Get the source and destination types
2776 Type *SrcTy = I.getOperand(0)->getType();
2777 Type *DestTy = I.getType();
2778
2779 Assert(SrcTy->isIntOrIntVectorTy(),
2780 "IntToPtr source must be an integral", &I);
2781 Assert(DestTy->isPtrOrPtrVectorTy(), "IntToPtr result must be a pointer", &I);
2782
2783 if (auto *PTy = dyn_cast<PointerType>(DestTy->getScalarType()))
2784 Assert(!DL.isNonIntegralPointerType(PTy),
2785 "inttoptr not supported for non-integral pointers");
2786
2787 Assert(SrcTy->isVectorTy() == DestTy->isVectorTy(), "IntToPtr type mismatch",
2788 &I);
2789 if (SrcTy->isVectorTy()) {
2790 VectorType *VSrc = cast<VectorType>(SrcTy);
2791 VectorType *VDest = cast<VectorType>(DestTy);
2792 Assert(VSrc->getNumElements() == VDest->getNumElements(),
2793 "IntToPtr Vector width mismatch", &I);
2794 }
2795 visitInstruction(I);
2796 }
2797
2798 void Verifier::visitBitCastInst(BitCastInst &I) {
2799 Assert(
2800 CastInst::castIsValid(Instruction::BitCast, I.getOperand(0), I.getType()),
2801 "Invalid bitcast", &I);
2802 visitInstruction(I);
2803 }
2804
2805 void Verifier::visitAddrSpaceCastInst(AddrSpaceCastInst &I) {
2806 Type *SrcTy = I.getOperand(0)->getType();
2807 Type *DestTy = I.getType();
2808
2809 Assert(SrcTy->isPtrOrPtrVectorTy(), "AddrSpaceCast source must be a pointer",
2810 &I);
2811 Assert(DestTy->isPtrOrPtrVectorTy(), "AddrSpaceCast result must be a pointer",
2812 &I);
2813 Assert(SrcTy->getPointerAddressSpace() != DestTy->getPointerAddressSpace(),
2814 "AddrSpaceCast must be between different address spaces", &I);
2815 if (SrcTy->isVectorTy())
2816 Assert(SrcTy->getVectorNumElements() == DestTy->getVectorNumElements(),
2817 "AddrSpaceCast vector pointer number of elements mismatch", &I);
2818 visitInstruction(I);
2819 }
2820
2821 /// visitPHINode - Ensure that a PHI node is well formed.
2822 ///
2823 void Verifier::visitPHINode(PHINode &PN) {
2824 // Ensure that the PHI nodes are all grouped together at the top of the block.
2825 // This can be tested by checking whether the instruction before this is
2826 // either nonexistent (because this is begin()) or is a PHI node. If not,
2827 // then there is some other instruction before a PHI.
2828 Assert(&PN == &PN.getParent()->front() ||
2829 isa<PHINode>(--BasicBlock::iterator(&PN)),
2830 "PHI nodes not grouped at top of basic block!", &PN, PN.getParent());
2831
2832 // Check that a PHI doesn't yield a Token.
2833 Assert(!PN.getType()->isTokenTy(), "PHI nodes cannot have token type!");
2834
2835 // Check that all of the values of the PHI node have the same type as the
2836 // result, and that the incoming blocks are really basic blocks.
2837 for (Value *IncValue : PN.incoming_values()) {
2838 Assert(PN.getType() == IncValue->getType(),
2839 "PHI node operands are not the same type as the result!", &PN);
2840 }
2841
2842 // All other PHI node constraints are checked in the visitBasicBlock method.
2843
2844 visitInstruction(PN);
2845 }
2846
2847 void Verifier::visitCallBase(CallBase &Call) {
2848 Assert(Call.getCalledValue()->getType()->isPointerTy(),
2849 "Called function must be a pointer!", Call);
2850 PointerType *FPTy = cast<PointerType>(Call.getCalledValue()->getType());
2851
2852 Assert(FPTy->getElementType()->isFunctionTy(),
2853 "Called function is not pointer to function type!", Call);
2854
2855 Assert(FPTy->getElementType() == Call.getFunctionType(),
2856 "Called function is not the same type as the call!", Call);
2857
2858 FunctionType *FTy = Call.getFunctionType();
2859
2860 // Verify that the correct number of arguments are being passed
2861 if (FTy->isVarArg())
2862 Assert(Call.arg_size() >= FTy->getNumParams(),
2863 "Called function requires more parameters than were provided!",
2864 Call);
2865 else
2866 Assert(Call.arg_size() == FTy->getNumParams(),
2867 "Incorrect number of arguments passed to called function!", Call);
2868
2869 // Verify that all arguments to the call match the function type.
2870 for (unsigned i = 0, e = FTy->getNumParams(); i != e; ++i)
2871 Assert(Call.getArgOperand(i)->getType() == FTy->getParamType(i),
2872 "Call parameter type does not match function signature!",
2873 Call.getArgOperand(i), FTy->getParamType(i), Call);
2874
2875 AttributeList Attrs = Call.getAttributes();
2876
2877 Assert(verifyAttributeCount(Attrs, Call.arg_size()),
2878 "Attribute after last parameter!", Call);
2879
2880 bool IsIntrinsic = Call.getCalledFunction() &&
2881 Call.getCalledFunction()->getName().startswith("llvm.");
2882
2883 Function *Callee
2884 = dyn_cast<Function>(Call.getCalledValue()->stripPointerCasts());
2885
2886 if (Attrs.hasAttribute(AttributeList::FunctionIndex, Attribute::Speculatable)) {
2887 // Don't allow speculatable on call sites, unless the underlying function
2888 // declaration is also speculatable.
2889 Assert(Callee && Callee->isSpeculatable(),
2890 "speculatable attribute may not apply to call sites", Call);
2891 }
2892
2893 // Verify call attributes.
2894 verifyFunctionAttrs(FTy, Attrs, &Call, IsIntrinsic);
2895
2896 // Conservatively check the inalloca argument.
2897 // We have a bug if we can find that there is an underlying alloca without
2898 // inalloca.
2899 if (Call.hasInAllocaArgument()) {
2900 Value *InAllocaArg = Call.getArgOperand(FTy->getNumParams() - 1);
2901 if (auto AI = dyn_cast<AllocaInst>(InAllocaArg->stripInBoundsOffsets()))
2902 Assert(AI->isUsedWithInAlloca(),
2903 "inalloca argument for call has mismatched alloca", AI, Call);
2904 }
2905
2906 // For each argument of the callsite, if it is marked swifterror, make sure
2907 // the underlying alloca or parameter it comes from is marked swifterror as
2908 // well.
2909 for (unsigned i = 0, e = FTy->getNumParams(); i != e; ++i) {
2910 if (Call.paramHasAttr(i, Attribute::SwiftError)) {
2911 Value *SwiftErrorArg = Call.getArgOperand(i);
2912 if (auto AI = dyn_cast<AllocaInst>(SwiftErrorArg->stripInBoundsOffsets())) {
2913 Assert(AI->isSwiftError(),
2914 "swifterror argument for call has mismatched alloca", AI, Call);
2915 continue;
2916 }
2917 auto ArgI = dyn_cast<Argument>(SwiftErrorArg);
2918 Assert(ArgI,
2919 "swifterror argument should come from an alloca or parameter",
2920 SwiftErrorArg, Call);
2921 Assert(ArgI->hasSwiftErrorAttr(),
2922 "swifterror argument for call has mismatched parameter", ArgI,
2923 Call);
2924 }
2925
2926 if (Attrs.hasParamAttribute(i, Attribute::ImmArg)) {
2927 // Don't allow immarg on call sites, unless the underlying declaration
2928 // also has the matching immarg.
2929 Assert(Callee && Callee->hasParamAttribute(i, Attribute::ImmArg),
2930 "immarg may not apply only to call sites",
2931 Call.getArgOperand(i), Call);
2932 }
2933
2934 if (Call.paramHasAttr(i, Attribute::ImmArg)) {
2935 Value *ArgVal = Call.getArgOperand(i);
2936 Assert(isa<ConstantInt>(ArgVal) || isa<ConstantFP>(ArgVal),
2937 "immarg operand has non-immediate parameter", ArgVal, Call);
2938 }
2939 }
2940
2941 if (FTy->isVarArg()) {
2942 // FIXME? is 'nest' even legal here?
2943 bool SawNest = false;
2944 bool SawReturned = false;
2945
2946 for (unsigned Idx = 0; Idx < FTy->getNumParams(); ++Idx) {
2947 if (Attrs.hasParamAttribute(Idx, Attribute::Nest))
2948 SawNest = true;
2949 if (Attrs.hasParamAttribute(Idx, Attribute::Returned))
2950 SawReturned = true;
2951 }
2952
2953 // Check attributes on the varargs part.
2954 for (unsigned Idx = FTy->getNumParams(); Idx < Call.arg_size(); ++Idx) {
2955 Type *Ty = Call.getArgOperand(Idx)->getType();
2956 AttributeSet ArgAttrs = Attrs.getParamAttributes(Idx);
2957 verifyParameterAttrs(ArgAttrs, Ty, &Call);
2958
2959 if (ArgAttrs.hasAttribute(Attribute::Nest)) {
2960 Assert(!SawNest, "More than one parameter has attribute nest!", Call);
2961 SawNest = true;
2962 }
2963
2964 if (ArgAttrs.hasAttribute(Attribute::Returned)) {
2965 Assert(!SawReturned, "More than one parameter has attribute returned!",
2966 Call);
2967 Assert(Ty->canLosslesslyBitCastTo(FTy->getReturnType()),
2968 "Incompatible argument and return types for 'returned' "
2969 "attribute",
2970 Call);
2971 SawReturned = true;
2972 }
2973
2974 // Statepoint intrinsic is vararg but the wrapped function may not be.
2975 // Allow sret here and check the wrapped function in verifyStatepoint.
2976 if (!Call.getCalledFunction() ||
2977 Call.getCalledFunction()->getIntrinsicID() !=
2978 Intrinsic::experimental_gc_statepoint)
2979 Assert(!ArgAttrs.hasAttribute(Attribute::StructRet),
2980 "Attribute 'sret' cannot be used for vararg call arguments!",
2981 Call);
2982
2983 if (ArgAttrs.hasAttribute(Attribute::InAlloca))
2984 Assert(Idx == Call.arg_size() - 1,
2985 "inalloca isn't on the last argument!", Call);
2986 }
2987 }
2988
2989 // Verify that there's no metadata unless it's a direct call to an intrinsic.
2990 if (!IsIntrinsic) {
2991 for (Type *ParamTy : FTy->params()) {
2992 Assert(!ParamTy->isMetadataTy(),
2993 "Function has metadata parameter but isn't an intrinsic", Call);
2994 Assert(!ParamTy->isTokenTy(),
2995 "Function has token parameter but isn't an intrinsic", Call);
2996 }
2997 }
2998
2999 // Verify that indirect calls don't return tokens.
3000 if (!Call.getCalledFunction())
3001 Assert(!FTy->getReturnType()->isTokenTy(),
3002 "Return type cannot be token for indirect call!");
3003
3004 if (Function *F = Call.getCalledFunction())
3005 if (Intrinsic::ID ID = (Intrinsic::ID)F->getIntrinsicID())
3006 visitIntrinsicCall(ID, Call);
3007
3008 // Verify that a callsite has at most one "deopt", at most one "funclet", at
3009 // most one "gc-transition", and at most one "cfguardtarget" operand bundle.
3010 bool FoundDeoptBundle = false, FoundFuncletBundle = false,
3011 FoundGCTransitionBundle = false, FoundCFGuardTargetBundle = false;
3012 for (unsigned i = 0, e = Call.getNumOperandBundles(); i < e; ++i) {
3013 OperandBundleUse BU = Call.getOperandBundleAt(i);
3014 uint32_t Tag = BU.getTagID();
3015 if (Tag == LLVMContext::OB_deopt) {
3016 Assert(!FoundDeoptBundle, "Multiple deopt operand bundles", Call);
3017 FoundDeoptBundle = true;
3018 } else if (Tag == LLVMContext::OB_gc_transition) {
3019 Assert(!FoundGCTransitionBundle, "Multiple gc-transition operand bundles",
3020 Call);
3021 FoundGCTransitionBundle = true;
3022 } else if (Tag == LLVMContext::OB_funclet) {
3023 Assert(!FoundFuncletBundle, "Multiple funclet operand bundles", Call);
3024 FoundFuncletBundle = true;
3025 Assert(BU.Inputs.size() == 1,
3026 "Expected exactly one funclet bundle operand", Call);
3027 Assert(isa<FuncletPadInst>(BU.Inputs.front()),
3028 "Funclet bundle operands should correspond to a FuncletPadInst",
3029 Call);
3030 } else if (Tag == LLVMContext::OB_cfguardtarget) {
3031 Assert(!FoundCFGuardTargetBundle,
3032 "Multiple CFGuardTarget operand bundles", Call);
3033 FoundCFGuardTargetBundle = true;
3034 Assert(BU.Inputs.size() == 1,
3035 "Expected exactly one cfguardtarget bundle operand", Call);
3036 }
3037 }
3038
3039 // Verify that each inlinable callsite of a debug-info-bearing function in a
3040 // debug-info-bearing function has a debug location attached to it. Failure to
3041 // do so causes assertion failures when the inliner sets up inline scope info.
3042 if (Call.getFunction()->getSubprogram() && Call.getCalledFunction() &&
3043 Call.getCalledFunction()->getSubprogram())
3044 AssertDI(Call.getDebugLoc(),
3045 "inlinable function call in a function with "
3046 "debug info must have a !dbg location",
3047 Call);
3048
3049 visitInstruction(Call);
3050 }
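
// Illustrative sketch (editor's example, not part of the original source;
// @f and the value names are hypothetical): a call that passes the checks
// above -- argument types matching the callee prototype and at most one
// "deopt" operand bundle:
//   declare i32 @f(i32, i8*)
//   %r = call i32 @f(i32 %a, i8* %p) [ "deopt"(i32 %state) ]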
3051
3052 /// Two types are "congruent" if they are identical, or if they are both pointer
3053 /// types with different pointee types and the same address space.
3054 static bool isTypeCongruent(Type *L, Type *R) {
3055 if (L == R)
3056 return true;
3057 PointerType *PL = dyn_cast<PointerType>(L);
3058 PointerType *PR = dyn_cast<PointerType>(R);
3059 if (!PL || !PR)
3060 return false;
3061 return PL->getAddressSpace() == PR->getAddressSpace();
3062 }
3063
3064 static AttrBuilder getParameterABIAttributes(int I, AttributeList Attrs) {
3065 static const Attribute::AttrKind ABIAttrs[] = {
3066 Attribute::StructRet, Attribute::ByVal, Attribute::InAlloca,
3067 Attribute::InReg, Attribute::Returned, Attribute::SwiftSelf,
3068 Attribute::SwiftError};
3069 AttrBuilder Copy;
3070 for (auto AK : ABIAttrs) {
3071 if (Attrs.hasParamAttribute(I, AK))
3072 Copy.addAttribute(AK);
3073 }
3074 if (Attrs.hasParamAttribute(I, Attribute::Alignment))
3075 Copy.addAlignmentAttr(Attrs.getParamAlignment(I));
3076 return Copy;
3077 }
3078
3079 void Verifier::verifyMustTailCall(CallInst &CI) {
3080 Assert(!CI.isInlineAsm(), "cannot use musttail call with inline asm", &CI);
3081
3082 // - The caller and callee prototypes must match. Pointer types of
3083 // parameters or return types may differ in pointee type, but not
3084 // address space.
3085 Function *F = CI.getParent()->getParent();
3086 FunctionType *CallerTy = F->getFunctionType();
3087 FunctionType *CalleeTy = CI.getFunctionType();
3088 if (!CI.getCalledFunction() || !CI.getCalledFunction()->isIntrinsic()) {
3089 Assert(CallerTy->getNumParams() == CalleeTy->getNumParams(),
3090 "cannot guarantee tail call due to mismatched parameter counts",
3091 &CI);
3092 for (int I = 0, E = CallerTy->getNumParams(); I != E; ++I) {
3093 Assert(
3094 isTypeCongruent(CallerTy->getParamType(I), CalleeTy->getParamType(I)),
3095 "cannot guarantee tail call due to mismatched parameter types", &CI);
3096 }
3097 }
3098 Assert(CallerTy->isVarArg() == CalleeTy->isVarArg(),
3099 "cannot guarantee tail call due to mismatched varargs", &CI);
3100 Assert(isTypeCongruent(CallerTy->getReturnType(), CalleeTy->getReturnType()),
3101 "cannot guarantee tail call due to mismatched return types", &CI);
3102
3103 // - The calling conventions of the caller and callee must match.
3104 Assert(F->getCallingConv() == CI.getCallingConv(),
3105 "cannot guarantee tail call due to mismatched calling conv", &CI);
3106
3107 // - All ABI-impacting function attributes, such as sret, byval, inreg,
3108 // returned, and inalloca, must match.
3109 AttributeList CallerAttrs = F->getAttributes();
3110 AttributeList CalleeAttrs = CI.getAttributes();
3111 for (int I = 0, E = CallerTy->getNumParams(); I != E; ++I) {
3112 AttrBuilder CallerABIAttrs = getParameterABIAttributes(I, CallerAttrs);
3113 AttrBuilder CalleeABIAttrs = getParameterABIAttributes(I, CalleeAttrs);
3114 Assert(CallerABIAttrs == CalleeABIAttrs,
3115 "cannot guarantee tail call due to mismatched ABI impacting "
3116 "function attributes",
3117 &CI, CI.getOperand(I));
3118 }
3119
3120 // - The call must immediately precede a :ref:`ret <i_ret>` instruction,
3121 // or a pointer bitcast followed by a ret instruction.
3122 // - The ret instruction must return the (possibly bitcasted) value
3123 // produced by the call or void.
3124 Value *RetVal = &CI;
3125 Instruction *Next = CI.getNextNode();
3126
3127 // Handle the optional bitcast.
3128 if (BitCastInst *BI = dyn_cast_or_null<BitCastInst>(Next)) {
3129 Assert(BI->getOperand(0) == RetVal,
3130 "bitcast following musttail call must use the call", BI);
3131 RetVal = BI;
3132 Next = BI->getNextNode();
3133 }
3134
3135 // Check the return.
3136 ReturnInst *Ret = dyn_cast_or_null<ReturnInst>(Next);
3137 Assert(Ret, "musttail call must precede a ret with an optional bitcast",
3138 &CI);
3139 Assert(!Ret->getReturnValue() || Ret->getReturnValue() == RetVal,
3140 "musttail call result must be returned", Ret);
3141 }
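
// Illustrative sketch (editor's example, not part of the original source;
// @callee and the value names are hypothetical): a musttail call that
// satisfies the rules above -- congruent prototypes (pointee types may
// differ), matching calling conventions, and the call followed only by an
// optional pointer bitcast and a ret of its result:
//   declare i32* @callee(i32)
//   define i8* @caller(i32 %x) {
//     %r = musttail call i32* @callee(i32 %x)
//     %c = bitcast i32* %r to i8*
//     ret i8* %c
//   }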
3142
3143 void Verifier::visitCallInst(CallInst &CI) {
3144 visitCallBase(CI);
3145
3146 if (CI.isMustTailCall())
3147 verifyMustTailCall(CI);
3148 }
3149
3150 void Verifier::visitInvokeInst(InvokeInst &II) {
3151 visitCallBase(II);
3152
3153 // Verify that the first non-PHI instruction of the unwind destination is an
3154 // exception handling instruction.
3155 Assert(
3156 II.getUnwindDest()->isEHPad(),
3157 "The unwind destination does not have an exception handling instruction!",
3158 &II);
3159
3160 visitTerminator(II);
3161 }
3162
3163 /// visitUnaryOperator - Check the argument to the unary operator.
3164 ///
3165 void Verifier::visitUnaryOperator(UnaryOperator &U) {
3166 Assert(U.getType() == U.getOperand(0)->getType(),
3167 "Unary operators must have same type for"
3168 "operands and result!",
3169 &U);
3170
3171 switch (U.getOpcode()) {
3172 // Check that floating-point arithmetic operators are only used with
3173 // floating-point operands.
3174 case Instruction::FNeg:
3175 Assert(U.getType()->isFPOrFPVectorTy(),
3176 "FNeg operator only works with float types!", &U);
3177 break;
3178 default:
3179 llvm_unreachable("Unknown UnaryOperator opcode!");
3180 }
3181
3182 visitInstruction(U);
3183 }
3184
3185 /// visitBinaryOperator - Check that both arguments to the binary operator are
3186 /// of the same type!
3187 ///
3188 void Verifier::visitBinaryOperator(BinaryOperator &B) {
3189 Assert(B.getOperand(0)->getType() == B.getOperand(1)->getType(),
3190 "Both operands to a binary operator are not of the same type!", &B);
3191
3192 switch (B.getOpcode()) {
3193 // Check that integer arithmetic operators are only used with
3194 // integral operands.
3195 case Instruction::Add:
3196 case Instruction::Sub:
3197 case Instruction::Mul:
3198 case Instruction::SDiv:
3199 case Instruction::UDiv:
3200 case Instruction::SRem:
3201 case Instruction::URem:
3202 Assert(B.getType()->isIntOrIntVectorTy(),
3203 "Integer arithmetic operators only work with integral types!", &B);
3204 Assert(B.getType() == B.getOperand(0)->getType(),
3205 "Integer arithmetic operators must have same type "
3206 "for operands and result!",
3207 &B);
3208 break;
3209 // Check that floating-point arithmetic operators are only used with
3210 // floating-point operands.
3211 case Instruction::FAdd:
3212 case Instruction::FSub:
3213 case Instruction::FMul:
3214 case Instruction::FDiv:
3215 case Instruction::FRem:
3216 Assert(B.getType()->isFPOrFPVectorTy(),
3217 "Floating-point arithmetic operators only work with "
3218 "floating-point types!",
3219 &B);
3220 Assert(B.getType() == B.getOperand(0)->getType(),
3221 "Floating-point arithmetic operators must have same type "
3222 "for operands and result!",
3223 &B);
3224 break;
3225 // Check that logical operators are only used with integral operands.
3226 case Instruction::And:
3227 case Instruction::Or:
3228 case Instruction::Xor:
3229 Assert(B.getType()->isIntOrIntVectorTy(),
3230 "Logical operators only work with integral types!", &B);
3231 Assert(B.getType() == B.getOperand(0)->getType(),
3232 "Logical operators must have same type for operands and result!",
3233 &B);
3234 break;
3235 case Instruction::Shl:
3236 case Instruction::LShr:
3237 case Instruction::AShr:
3238 Assert(B.getType()->isIntOrIntVectorTy(),
3239 "Shifts only work with integral types!", &B);
3240 Assert(B.getType() == B.getOperand(0)->getType(),
3241 "Shift return type must be same as operands!", &B);
3242 break;
3243 default:
3244 llvm_unreachable("Unknown BinaryOperator opcode!");
3245 }
3246
3247 visitInstruction(B);
3248 }
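
// Illustrative sketch (editor's example, not part of the original source;
// value names are hypothetical): the typing rules above in practice --
// integer arithmetic, logic and shifts on integer (or integer vector) types,
// FP arithmetic on FP types, operands and result always of one type:
//   %s = add i32 %a, %b
//   %m = fmul double %x, %y
//   %h = shl <4 x i32> %v, %amt
// Something like 'add double %x, %y' or 'and float %x, %y' is rejected.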
3249
3250 void Verifier::visitICmpInst(ICmpInst &IC) {
3251 // Check that the operands are the same type
3252 Type *Op0Ty = IC.getOperand(0)->getType();
3253 Type *Op1Ty = IC.getOperand(1)->getType();
3254 Assert(Op0Ty == Op1Ty,
3255 "Both operands to ICmp instruction are not of the same type!", &IC);
3256 // Check that the operands are the right type
3257 Assert(Op0Ty->isIntOrIntVectorTy() || Op0Ty->isPtrOrPtrVectorTy(),
3258 "Invalid operand types for ICmp instruction", &IC);
3259 // Check that the predicate is valid.
3260 Assert(IC.isIntPredicate(),
3261 "Invalid predicate in ICmp instruction!", &IC);
3262
3263 visitInstruction(IC);
3264 }
3265
3266 void Verifier::visitFCmpInst(FCmpInst &FC) {
3267 // Check that the operands are the same type
3268 Type *Op0Ty = FC.getOperand(0)->getType();
3269 Type *Op1Ty = FC.getOperand(1)->getType();
3270 Assert(Op0Ty == Op1Ty,
3271 "Both operands to FCmp instruction are not of the same type!", &FC);
3272 // Check that the operands are the right type
3273 Assert(Op0Ty->isFPOrFPVectorTy(),
3274 "Invalid operand types for FCmp instruction", &FC);
3275 // Check that the predicate is valid.
3276 Assert(FC.isFPPredicate(),
3277 "Invalid predicate in FCmp instruction!", &FC);
3278
3279 visitInstruction(FC);
3280 }
3281
3282 void Verifier::visitExtractElementInst(ExtractElementInst &EI) {
3283 Assert(
3284 ExtractElementInst::isValidOperands(EI.getOperand(0), EI.getOperand(1)),
3285 "Invalid extractelement operands!", &EI);
3286 visitInstruction(EI);
3287 }
3288
3289 void Verifier::visitInsertElementInst(InsertElementInst &IE) {
3290 Assert(InsertElementInst::isValidOperands(IE.getOperand(0), IE.getOperand(1),
3291 IE.getOperand(2)),
3292 "Invalid insertelement operands!", &IE);
3293 visitInstruction(IE);
3294 }
3295
3296 void Verifier::visitShuffleVectorInst(ShuffleVectorInst &SV) {
3297 Assert(ShuffleVectorInst::isValidOperands(SV.getOperand(0), SV.getOperand(1),
3298 SV.getOperand(2)),
3299 "Invalid shufflevector operands!", &SV);
3300 visitInstruction(SV);
3301 }
3302
3303 void Verifier::visitGetElementPtrInst(GetElementPtrInst &GEP) {
3304 Type *TargetTy = GEP.getPointerOperandType()->getScalarType();
3305
3306 Assert(isa<PointerType>(TargetTy),
3307 "GEP base pointer is not a vector or a vector of pointers", &GEP);
3308 Assert(GEP.getSourceElementType()->isSized(), "GEP into unsized type!", &GEP);
3309
3310 SmallVector<Value*, 16> Idxs(GEP.idx_begin(), GEP.idx_end());
3311 Assert(all_of(
3312 Idxs, [](Value* V) { return V->getType()->isIntOrIntVectorTy(); }),
3313 "GEP indexes must be integers", &GEP);
3314 Type *ElTy =
3315 GetElementPtrInst::getIndexedType(GEP.getSourceElementType(), Idxs);
3316 Assert(ElTy, "Invalid indices for GEP pointer type!", &GEP);
3317
3318 Assert(GEP.getType()->isPtrOrPtrVectorTy() &&
3319 GEP.getResultElementType() == ElTy,
3320 "GEP is not of right type for indices!", &GEP, ElTy);
3321
3322 if (GEP.getType()->isVectorTy()) {
3323 // Additional checks for vector GEPs.
3324 unsigned GEPWidth = GEP.getType()->getVectorNumElements();
3325 if (GEP.getPointerOperandType()->isVectorTy())
3326 Assert(GEPWidth == GEP.getPointerOperandType()->getVectorNumElements(),
3327 "Vector GEP result width doesn't match operand's", &GEP);
3328 for (Value *Idx : Idxs) {
3329 Type *IndexTy = Idx->getType();
3330 if (IndexTy->isVectorTy()) {
3331 unsigned IndexWidth = IndexTy->getVectorNumElements();
3332 Assert(IndexWidth == GEPWidth, "Invalid GEP index vector width", &GEP);
3333 }
3334 Assert(IndexTy->isIntOrIntVectorTy(),
3335 "All GEP indices should be of integer type");
3336 }
3337 }
3338
3339 if (auto *PTy = dyn_cast<PointerType>(GEP.getType())) {
3340 Assert(GEP.getAddressSpace() == PTy->getAddressSpace(),
3341 "GEP address space doesn't match type", &GEP);
3342 }
3343
3344 visitInstruction(GEP);
3345 }
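
// Illustrative sketch (editor's example, not part of the original source;
// value names are hypothetical): a vector GEP whose result width matches both
// the pointer-operand width and every vector index width:
//   %g = getelementptr i32, <4 x i32*> %ptrs, <4 x i64> %offs   ; <4 x i32*>
// Pairing a <4 x i32*> base with a <2 x i64> index would fail the width
// checks above.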
3346
3347 static bool isContiguous(const ConstantRange &A, const ConstantRange &B) {
3348 return A.getUpper() == B.getLower() || A.getLower() == B.getUpper();
3349 }
3350
3351 void Verifier::visitRangeMetadata(Instruction &I, MDNode *Range, Type *Ty) {
3352 assert(Range && Range == I.getMetadata(LLVMContext::MD_range) &&
3353 "precondition violation");
3354
3355 unsigned NumOperands = Range->getNumOperands();
3356 Assert(NumOperands % 2 == 0, "Unfinished range!", Range);
3357 unsigned NumRanges = NumOperands / 2;
3358 Assert(NumRanges >= 1, "It should have at least one range!", Range);
3359
3360 ConstantRange LastRange(1, true); // Dummy initial value
3361 for (unsigned i = 0; i < NumRanges; ++i) {
3362 ConstantInt *Low =
3363 mdconst::dyn_extract<ConstantInt>(Range->getOperand(2 * i));
3364 Assert(Low, "The lower limit must be an integer!", Low);
3365 ConstantInt *High =
3366 mdconst::dyn_extract<ConstantInt>(Range->getOperand(2 * i + 1));
3367 Assert(High, "The upper limit must be an integer!", High);
3368 Assert(High->getType() == Low->getType() && High->getType() == Ty,
3369 "Range types must match instruction type!", &I);
3370
3371 APInt HighV = High->getValue();
3372 APInt LowV = Low->getValue();
3373 ConstantRange CurRange(LowV, HighV);
3374 Assert(!CurRange.isEmptySet() && !CurRange.isFullSet(),
3375 "Range must not be empty!", Range);
3376 if (i != 0) {
3377 Assert(CurRange.intersectWith(LastRange).isEmptySet(),
3378 "Intervals are overlapping", Range);
3379 Assert(LowV.sgt(LastRange.getLower()), "Intervals are not in order",
3380 Range);
3381 Assert(!isContiguous(CurRange, LastRange), "Intervals are contiguous",
3382 Range);
3383 }
3384 LastRange = ConstantRange(LowV, HighV);
3385 }
3386 if (NumRanges > 2) {
3387 APInt FirstLow =
3388 mdconst::dyn_extract<ConstantInt>(Range->getOperand(0))->getValue();
3389 APInt FirstHigh =
3390 mdconst::dyn_extract<ConstantInt>(Range->getOperand(1))->getValue();
3391 ConstantRange FirstRange(FirstLow, FirstHigh);
3392 Assert(FirstRange.intersectWith(LastRange).isEmptySet(),
3393 "Intervals are overlapping", Range);
3394 Assert(!isContiguous(FirstRange, LastRange), "Intervals are contiguous",
3395 Range);
3396 }
3397 }
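
// Illustrative sketch (editor's example, not part of the original source;
// names are hypothetical): well-formed !range metadata -- limit pairs of the
// loaded type that are non-empty, ordered, non-overlapping and non-contiguous:
//   %v = load i32, i32* %p, !range !0
//   !0 = !{i32 0, i32 10, i32 20, i32 30}
// By contrast !{i32 0, i32 10, i32 10, i32 30} is rejected as contiguous.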
3398
3399 void Verifier::checkAtomicMemAccessSize(Type *Ty, const Instruction *I) {
3400 unsigned Size = DL.getTypeSizeInBits(Ty);
3401 Assert(Size >= 8, "atomic memory access' size must be byte-sized", Ty, I);
3402 Assert(!(Size & (Size - 1)),
3403 "atomic memory access' operand must have a power-of-two size", Ty, I);
3404 }
3405
3406 void Verifier::visitLoadInst(LoadInst &LI) {
3407 PointerType *PTy = dyn_cast<PointerType>(LI.getOperand(0)->getType());
3408 Assert(PTy, "Load operand must be a pointer.", &LI);
3409 Type *ElTy = LI.getType();
3410 Assert(LI.getAlignment() <= Value::MaximumAlignment,
3411 "huge alignment values are unsupported", &LI);
3412 Assert(ElTy->isSized(), "loading unsized types is not allowed", &LI);
3413 if (LI.isAtomic()) {
3414 Assert(LI.getOrdering() != AtomicOrdering::Release &&
3415 LI.getOrdering() != AtomicOrdering::AcquireRelease,
3416 "Load cannot have Release ordering", &LI);
3417 Assert(LI.getAlignment() != 0,
3418 "Atomic load must specify explicit alignment", &LI);
3419 Assert(ElTy->isIntOrPtrTy() || ElTy->isFloatingPointTy(),
3420 "atomic load operand must have integer, pointer, or floating point "
3421 "type!",
3422 ElTy, &LI);
3423 checkAtomicMemAccessSize(ElTy, &LI);
3424 } else {
3425 Assert(LI.getSyncScopeID() == SyncScope::System,
3426 "Non-atomic load cannot have SynchronizationScope specified", &LI);
3427 }
3428
3429 visitInstruction(LI);
3430 }
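
// Illustrative sketch (editor's example, not part of the original source;
// names are hypothetical): an atomic load that satisfies the checks above --
// sized, power-of-two-byte element type, explicit alignment, and an ordering
// without release semantics:
//   %v = load atomic i32, i32* %p acquire, align 4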
3431
3432 void Verifier::visitStoreInst(StoreInst &SI) {
3433 PointerType *PTy = dyn_cast<PointerType>(SI.getOperand(1)->getType());
3434 Assert(PTy, "Store operand must be a pointer.", &SI);
3435 Type *ElTy = PTy->getElementType();
3436 Assert(ElTy == SI.getOperand(0)->getType(),
3437 "Stored value type does not match pointer operand type!", &SI, ElTy);
3438 Assert(SI.getAlignment() <= Value::MaximumAlignment,
3439 "huge alignment values are unsupported", &SI);
3440 Assert(ElTy->isSized(), "storing unsized types is not allowed", &SI);
3441 if (SI.isAtomic()) {
3442 Assert(SI.getOrdering() != AtomicOrdering::Acquire &&
3443 SI.getOrdering() != AtomicOrdering::AcquireRelease,
3444 "Store cannot have Acquire ordering", &SI);
3445 Assert(SI.getAlignment() != 0,
3446 "Atomic store must specify explicit alignment", &SI);
3447 Assert(ElTy->isIntOrPtrTy() || ElTy->isFloatingPointTy(),
3448 "atomic store operand must have integer, pointer, or floating point "
3449 "type!",
3450 ElTy, &SI);
3451 checkAtomicMemAccessSize(ElTy, &SI);
3452 } else {
3453 Assert(SI.getSyncScopeID() == SyncScope::System,
3454 "Non-atomic store cannot have SynchronizationScope specified", &SI);
3455 }
3456 visitInstruction(SI);
3457 }
3458
3459 /// Check that SwiftErrorVal is used as a swifterror argument in Call.
3460 void Verifier::verifySwiftErrorCall(CallBase &Call,
3461 const Value *SwiftErrorVal) {
3462 unsigned Idx = 0;
3463 for (auto I = Call.arg_begin(), E = Call.arg_end(); I != E; ++I, ++Idx) {
3464 if (*I == SwiftErrorVal) {
3465 Assert(Call.paramHasAttr(Idx, Attribute::SwiftError),
3466 "swifterror value when used in a callsite should be marked "
3467 "with swifterror attribute",
3468 SwiftErrorVal, Call);
3469 }
3470 }
3471 }
3472
3473 void Verifier::verifySwiftErrorValue(const Value *SwiftErrorVal) {
3474 // Check that swifterror value is only used by loads, stores, or as
3475 // a swifterror argument.
3476 for (const User *U : SwiftErrorVal->users()) {
3477 Assert(isa<LoadInst>(U) || isa<StoreInst>(U) || isa<CallInst>(U) ||
3478 isa<InvokeInst>(U),
3479 "swifterror value can only be loaded and stored from, or "
3480 "as a swifterror argument!",
3481 SwiftErrorVal, U);
3482 // If it is used by a store, check it is the second operand.
3483 if (auto StoreI = dyn_cast<StoreInst>(U))
3484 Assert(StoreI->getOperand(1) == SwiftErrorVal,
3485 "swifterror value should be the second operand when used "
3486 "by stores", SwiftErrorVal, U);
3487 if (auto *Call = dyn_cast<CallBase>(U))
3488 verifySwiftErrorCall(*const_cast<CallBase *>(Call), SwiftErrorVal);
3489 }
3490 }
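
// Illustrative sketch (editor's example, not part of the original source;
// @takes_error and the value names are hypothetical): a swifterror slot used
// only in the ways permitted above -- loads, stores through it, and calls
// that mark the matching argument swifterror:
//   declare void @takes_error(i8** swifterror)
//   %err = alloca swifterror i8*
//   store i8* null, i8** %err
//   call void @takes_error(i8** swifterror %err)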
3491
3492 void Verifier::visitAllocaInst(AllocaInst &AI) {
3493 SmallPtrSet<Type*, 4> Visited;
3494 PointerType *PTy = AI.getType();
3495 // TODO: Relax this restriction?
3496 Assert(PTy->getAddressSpace() == DL.getAllocaAddrSpace(),
3497 "Allocation instruction pointer not in the stack address space!",
3498 &AI);
3499 Assert(AI.getAllocatedType()->isSized(&Visited),
3500 "Cannot allocate unsized type", &AI);
3501 Assert(AI.getArraySize()->getType()->isIntegerTy(),
3502 "Alloca array size must have integer type", &AI);
3503 Assert(AI.getAlignment() <= Value::MaximumAlignment,
3504 "huge alignment values are unsupported", &AI);
3505
3506 if (AI.isSwiftError()) {
3507 verifySwiftErrorValue(&AI);
3508 }
3509
3510 visitInstruction(AI);
3511 }
3512
3513 void Verifier::visitAtomicCmpXchgInst(AtomicCmpXchgInst &CXI) {
3514
3515 // FIXME: more conditions???
3516 Assert(CXI.getSuccessOrdering() != AtomicOrdering::NotAtomic,
3517 "cmpxchg instructions must be atomic.", &CXI);
3518 Assert(CXI.getFailureOrdering() != AtomicOrdering::NotAtomic,
3519 "cmpxchg instructions must be atomic.", &CXI);
3520 Assert(CXI.getSuccessOrdering() != AtomicOrdering::Unordered,
3521 "cmpxchg instructions cannot be unordered.", &CXI);
3522 Assert(CXI.getFailureOrdering() != AtomicOrdering::Unordered,
3523 "cmpxchg instructions cannot be unordered.", &CXI);
3524 Assert(!isStrongerThan(CXI.getFailureOrdering(), CXI.getSuccessOrdering()),
3525 "cmpxchg instructions failure argument shall be no stronger than the "
3526 "success argument",
3527 &CXI);
3528 Assert(CXI.getFailureOrdering() != AtomicOrdering::Release &&
3529 CXI.getFailureOrdering() != AtomicOrdering::AcquireRelease,
3530 "cmpxchg failure ordering cannot include release semantics", &CXI);
3531
3532 PointerType *PTy = dyn_cast<PointerType>(CXI.getOperand(0)->getType());
3533 Assert(PTy, "First cmpxchg operand must be a pointer.", &CXI);
3534 Type *ElTy = PTy->getElementType();
3535 Assert(ElTy->isIntOrPtrTy(),
3536 "cmpxchg operand must have integer or pointer type", ElTy, &CXI);
3537 checkAtomicMemAccessSize(ElTy, &CXI);
3538 Assert(ElTy == CXI.getOperand(1)->getType(),
3539 "Expected value type does not match pointer operand type!", &CXI,
3540 ElTy);
3541 Assert(ElTy == CXI.getOperand(2)->getType(),
3542 "Stored value type does not match pointer operand type!", &CXI, ElTy);
3543 visitInstruction(CXI);
3544 }
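
// Illustrative sketch (editor's example, not part of the original source;
// value names are hypothetical): a cmpxchg that satisfies the checks above --
// integer element type, matching expected/new value types, and a failure
// ordering no stronger than success and without release semantics:
//   %pair = cmpxchg i32* %p, i32 %expected, i32 %new seq_cst acquire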
3545
3546 void Verifier::visitAtomicRMWInst(AtomicRMWInst &RMWI) {
3547 Assert(RMWI.getOrdering() != AtomicOrdering::NotAtomic,
3548 "atomicrmw instructions must be atomic.", &RMWI);
3549 Assert(RMWI.getOrdering() != AtomicOrdering::Unordered,
3550 "atomicrmw instructions cannot be unordered.", &RMWI);
3551 auto Op = RMWI.getOperation();
3552 PointerType *PTy = dyn_cast<PointerType>(RMWI.getOperand(0)->getType());
3553 Assert(PTy, "First atomicrmw operand must be a pointer.", &RMWI);
3554 Type *ElTy = PTy->getElementType();
3555 if (Op == AtomicRMWInst::Xchg) {
3556 Assert(ElTy->isIntegerTy() || ElTy->isFloatingPointTy(), "atomicrmw " +
3557 AtomicRMWInst::getOperationName(Op) +
3558 " operand must have integer or floating point type!",
3559 &RMWI, ElTy);
3560 } else if (AtomicRMWInst::isFPOperation(Op)) {
3561 Assert(ElTy->isFloatingPointTy(), "atomicrmw " +
3562 AtomicRMWInst::getOperationName(Op) +
3563 " operand must have floating point type!",
3564 &RMWI, ElTy);
3565 } else {
3566 Assert(ElTy->isIntegerTy(), "atomicrmw " +
3567 AtomicRMWInst::getOperationName(Op) +
3568 " operand must have integer type!",
3569 &RMWI, ElTy);
3570 }
3571 checkAtomicMemAccessSize(ElTy, &RMWI);
3572 Assert(ElTy == RMWI.getOperand(1)->getType(),
3573 "Argument value type does not match pointer operand type!", &RMWI,
3574 ElTy);
3575 Assert(AtomicRMWInst::FIRST_BINOP <= Op && Op <= AtomicRMWInst::LAST_BINOP,
3576 "Invalid binary operation!", &RMWI);
3577 visitInstruction(RMWI);
3578 }
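
// Illustrative sketch (editor's example, not part of the original source;
// value names are hypothetical): the per-operation typing rules above --
// integer operations take integers, fadd/fsub take floats, xchg takes either:
//   %old  = atomicrmw add  i32*   %p, i32   1   seq_cst
//   %oldf = atomicrmw fadd float* %q, float 1.0 monotonic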
3579
3580 void Verifier::visitFenceInst(FenceInst &FI) {
3581 const AtomicOrdering Ordering = FI.getOrdering();
3582 Assert(Ordering == AtomicOrdering::Acquire ||
3583 Ordering == AtomicOrdering::Release ||
3584 Ordering == AtomicOrdering::AcquireRelease ||
3585 Ordering == AtomicOrdering::SequentiallyConsistent,
3586 "fence instructions may only have acquire, release, acq_rel, or "
3587 "seq_cst ordering.",
3588 &FI);
3589 visitInstruction(FI);
3590 }
3591
3592 void Verifier::visitExtractValueInst(ExtractValueInst &EVI) {
3593 Assert(ExtractValueInst::getIndexedType(EVI.getAggregateOperand()->getType(),
3594 EVI.getIndices()) == EVI.getType(),
3595 "Invalid ExtractValueInst operands!", &EVI);
3596
3597 visitInstruction(EVI);
3598 }
3599
3600 void Verifier::visitInsertValueInst(InsertValueInst &IVI) {
3601 Assert(ExtractValueInst::getIndexedType(IVI.getAggregateOperand()->getType(),
3602 IVI.getIndices()) ==
3603 IVI.getOperand(1)->getType(),
3604 "Invalid InsertValueInst operands!", &IVI);
3605
3606 visitInstruction(IVI);
3607 }
3608
3609 static Value *getParentPad(Value *EHPad) {
3610 if (auto *FPI = dyn_cast<FuncletPadInst>(EHPad))
3611 return FPI->getParentPad();
3612
3613 return cast<CatchSwitchInst>(EHPad)->getParentPad();
3614 }
3615
3616 void Verifier::visitEHPadPredecessors(Instruction &I) {
3617 assert(I.isEHPad());
3618
3619 BasicBlock *BB = I.getParent();
3620 Function *F = BB->getParent();
3621
3622 Assert(BB != &F->getEntryBlock(), "EH pad cannot be in entry block.", &I);
3623
3624 if (auto *LPI = dyn_cast<LandingPadInst>(&I)) {
3625 // The landingpad instruction defines its parent as a landing pad block. The
3626 // landing pad block may be branched to only by the unwind edge of an
3627 // invoke.
3628 for (BasicBlock *PredBB : predecessors(BB)) {
3629 const auto *II = dyn_cast<InvokeInst>(PredBB->getTerminator());
3630 Assert(II && II->getUnwindDest() == BB && II->getNormalDest() != BB,
3631 "Block containing LandingPadInst must be jumped to "
3632 "only by the unwind edge of an invoke.",
3633 LPI);
3634 }
3635 return;
3636 }
3637 if (auto *CPI = dyn_cast<CatchPadInst>(&I)) {
3638 if (!pred_empty(BB))
3639 Assert(BB->getUniquePredecessor() == CPI->getCatchSwitch()->getParent(),
3640 "Block containg CatchPadInst must be jumped to "
3641 "only by its catchswitch.",
3642 CPI);
3643 Assert(BB != CPI->getCatchSwitch()->getUnwindDest(),
3644 "Catchswitch cannot unwind to one of its catchpads",
3645 CPI->getCatchSwitch(), CPI);
3646 return;
3647 }
3648
3649 // Verify that each pred has a legal terminator with a legal to/from EH
3650 // pad relationship.
3651 Instruction *ToPad = &I;
3652 Value *ToPadParent = getParentPad(ToPad);
3653 for (BasicBlock *PredBB : predecessors(BB)) {
3654 Instruction *TI = PredBB->getTerminator();
3655 Value *FromPad;
3656 if (auto *II = dyn_cast<InvokeInst>(TI)) {
3657 Assert(II->getUnwindDest() == BB && II->getNormalDest() != BB,
3658 "EH pad must be jumped to via an unwind edge", ToPad, II);
3659 if (auto Bundle = II->getOperandBundle(LLVMContext::OB_funclet))
3660 FromPad = Bundle->Inputs[0];
3661 else
3662 FromPad = ConstantTokenNone::get(II->getContext());
3663 } else if (auto *CRI = dyn_cast<CleanupReturnInst>(TI)) {
3664 FromPad = CRI->getOperand(0);
3665 Assert(FromPad != ToPadParent, "A cleanupret must exit its cleanup", CRI);
3666 } else if (auto *CSI = dyn_cast<CatchSwitchInst>(TI)) {
3667 FromPad = CSI;
3668 } else {
3669 Assert(false, "EH pad must be jumped to via an unwind edge", ToPad, TI);
3670 }
3671
3672 // The edge may exit from zero or more nested pads.
3673 SmallSet<Value *, 8> Seen;
3674 for (;; FromPad = getParentPad(FromPad)) {
3675 Assert(FromPad != ToPad,
3676 "EH pad cannot handle exceptions raised within it", FromPad, TI);
3677 if (FromPad == ToPadParent) {
3678 // This is a legal unwind edge.
3679 break;
3680 }
3681 Assert(!isa<ConstantTokenNone>(FromPad),
3682 "A single unwind edge may only enter one EH pad", TI);
3683 Assert(Seen.insert(FromPad).second,
3684 "EH pad jumps through a cycle of pads", FromPad);
3685 }
3686 }
3687 }
3688
3689 void Verifier::visitLandingPadInst(LandingPadInst &LPI) {
3690 // The landingpad instruction is ill-formed if it doesn't have any clauses and
3691 // isn't a cleanup.
3692 Assert(LPI.getNumClauses() > 0 || LPI.isCleanup(),
3693 "LandingPadInst needs at least one clause or to be a cleanup.", &LPI);
3694
3695 visitEHPadPredecessors(LPI);
3696
3697 if (!LandingPadResultTy)
3698 LandingPadResultTy = LPI.getType();
3699 else
3700 Assert(LandingPadResultTy == LPI.getType(),
3701 "The landingpad instruction should have a consistent result type "
3702 "inside a function.",
3703 &LPI);
3704
3705 Function *F = LPI.getParent()->getParent();
3706 Assert(F->hasPersonalityFn(),
3707 "LandingPadInst needs to be in a function with a personality.", &LPI);
3708
3709 // The landingpad instruction must be the first non-PHI instruction in the
3710 // block.
3711 Assert(LPI.getParent()->getLandingPadInst() == &LPI,
3712 "LandingPadInst not the first non-PHI instruction in the block.",
3713 &LPI);
3714
3715 for (unsigned i = 0, e = LPI.getNumClauses(); i < e; ++i) {
3716 Constant *Clause = LPI.getClause(i);
3717 if (LPI.isCatch(i)) {
3718 Assert(isa<PointerType>(Clause->getType()),
3719 "Catch operand does not have pointer type!", &LPI);
3720 } else {
3721 Assert(LPI.isFilter(i), "Clause is neither catch nor filter!", &LPI);
3722 Assert(isa<ConstantArray>(Clause) || isa<ConstantAggregateZero>(Clause),
3723 "Filter operand is not an array of constants!", &LPI);
3724 }
3725 }
3726
3727 visitInstruction(LPI);
3728 }
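
// Illustrative sketch (editor's example, not part of the original source;
// @may_throw, @__gxx_personality_v0 and the labels are hypothetical,
// declarations omitted): a landingpad placed as the rules above require --
// the first non-PHI of a block reached only via an invoke's unwind edge, in a
// function with a personality:
//   define void @f() personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) {
//   entry:
//     invoke void @may_throw() to label %cont unwind label %lpad
//   cont:
//     ret void
//   lpad:
//     %lp = landingpad { i8*, i32 } cleanup
//     resume { i8*, i32 } %lp
//   }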
3729
3730 void Verifier::visitResumeInst(ResumeInst &RI) {
3731 Assert(RI.getFunction()->hasPersonalityFn(),
3732 "ResumeInst needs to be in a function with a personality.", &RI);
3733
3734 if (!LandingPadResultTy)
3735 LandingPadResultTy = RI.getValue()->getType();
3736 else
3737 Assert(LandingPadResultTy == RI.getValue()->getType(),
3738 "The resume instruction should have a consistent result type "
3739 "inside a function.",
3740 &RI);
3741
3742 visitTerminator(RI);
3743 }
3744
3745 void Verifier::visitCatchPadInst(CatchPadInst &CPI) {
3746 BasicBlock *BB = CPI.getParent();
3747
3748 Function *F = BB->getParent();
3749 Assert(F->hasPersonalityFn(),
3750 "CatchPadInst needs to be in a function with a personality.", &CPI);
3751
3752 Assert(isa<CatchSwitchInst>(CPI.getParentPad()),
3753 "CatchPadInst needs to be directly nested in a CatchSwitchInst.",
3754 CPI.getParentPad());
3755
3756 // The catchpad instruction must be the first non-PHI instruction in the
3757 // block.
3758 Assert(BB->getFirstNonPHI() == &CPI,
3759 "CatchPadInst not the first non-PHI instruction in the block.", &CPI);
3760
3761 visitEHPadPredecessors(CPI);
3762 visitFuncletPadInst(CPI);
3763 }
3764
3765 void Verifier::visitCatchReturnInst(CatchReturnInst &CatchReturn) {
3766 Assert(isa<CatchPadInst>(CatchReturn.getOperand(0)),
3767 "CatchReturnInst needs to be provided a CatchPad", &CatchReturn,
3768 CatchReturn.getOperand(0));
3769
3770 visitTerminator(CatchReturn);
3771 }
3772
3773 void Verifier::visitCleanupPadInst(CleanupPadInst &CPI) {
3774 BasicBlock *BB = CPI.getParent();
3775
3776 Function *F = BB->getParent();
3777 Assert(F->hasPersonalityFn(),
3778 "CleanupPadInst needs to be in a function with a personality.", &CPI);
3779
3780 // The cleanuppad instruction must be the first non-PHI instruction in the
3781 // block.
3782 Assert(BB->getFirstNonPHI() == &CPI,
3783 "CleanupPadInst not the first non-PHI instruction in the block.",
3784 &CPI);
3785
3786 auto *ParentPad = CPI.getParentPad();
3787 Assert(isa<ConstantTokenNone>(ParentPad) || isa<FuncletPadInst>(ParentPad),
3788 "CleanupPadInst has an invalid parent.", &CPI);
3789
3790 visitEHPadPredecessors(CPI);
3791 visitFuncletPadInst(CPI);
3792 }
3793
3794 void Verifier::visitFuncletPadInst(FuncletPadInst &FPI) {
3795 User *FirstUser = nullptr;
3796 Value *FirstUnwindPad = nullptr;
3797 SmallVector<FuncletPadInst *, 8> Worklist({&FPI});
3798 SmallSet<FuncletPadInst *, 8> Seen;
3799
3800 while (!Worklist.empty()) {
3801 FuncletPadInst *CurrentPad = Worklist.pop_back_val();
3802 Assert(Seen.insert(CurrentPad).second,
3803 "FuncletPadInst must not be nested within itself", CurrentPad);
3804 Value *UnresolvedAncestorPad = nullptr;
3805 for (User *U : CurrentPad->users()) {
3806 BasicBlock *UnwindDest;
3807 if (auto *CRI = dyn_cast<CleanupReturnInst>(U)) {
3808 UnwindDest = CRI->getUnwindDest();
3809 } else if (auto *CSI = dyn_cast<CatchSwitchInst>(U)) {
3810 // We allow catchswitch unwind to caller to nest
3811 // within an outer pad that unwinds somewhere else,
3812 // because catchswitch doesn't have a nounwind variant.
3813 // See e.g. SimplifyCFGOpt::SimplifyUnreachable.
3814 if (CSI->unwindsToCaller())
3815 continue;
3816 UnwindDest = CSI->getUnwindDest();
3817 } else if (auto *II = dyn_cast<InvokeInst>(U)) {
3818 UnwindDest = II->getUnwindDest();
3819 } else if (isa<CallInst>(U)) {
3820 // Calls which don't unwind may be found inside funclet
3821 // pads that unwind somewhere else. We don't *require*
3822 // such calls to be annotated nounwind.
3823 continue;
3824 } else if (auto *CPI = dyn_cast<CleanupPadInst>(U)) {
3825 // The unwind dest for a cleanup can only be found by
3826 // recursive search. Add it to the worklist, and we'll
3827 // search for its first use that determines where it unwinds.
3828 Worklist.push_back(CPI);
3829 continue;
3830 } else {
3831 Assert(isa<CatchReturnInst>(U), "Bogus funclet pad use", U);
3832 continue;
3833 }
3834
3835 Value *UnwindPad;
3836 bool ExitsFPI;
3837 if (UnwindDest) {
3838 UnwindPad = UnwindDest->getFirstNonPHI();
3839 if (!cast<Instruction>(UnwindPad)->isEHPad())
3840 continue;
3841 Value *UnwindParent = getParentPad(UnwindPad);
3842 // Ignore unwind edges that don't exit CurrentPad.
3843 if (UnwindParent == CurrentPad)
3844 continue;
3845 // Determine whether the original funclet pad is exited,
3846 // and if we are scanning nested pads determine how many
3847 // of them are exited so we can stop searching their
3848 // children.
3849 Value *ExitedPad = CurrentPad;
3850 ExitsFPI = false;
3851 do {
3852 if (ExitedPad == &FPI) {
3853 ExitsFPI = true;
3854 // Now we can resolve any ancestors of CurrentPad up to
3855 // FPI, but not including FPI since we need to make sure
3856 // to check all direct users of FPI for consistency.
3857 UnresolvedAncestorPad = &FPI;
3858 break;
3859 }
3860 Value *ExitedParent = getParentPad(ExitedPad);
3861 if (ExitedParent == UnwindParent) {
3862 // ExitedPad is the ancestor-most pad which this unwind
3863 // edge exits, so we can resolve up to it, meaning that
3864 // ExitedParent is the first ancestor still unresolved.
3865 UnresolvedAncestorPad = ExitedParent;
3866 break;
3867 }
3868 ExitedPad = ExitedParent;
3869 } while (!isa<ConstantTokenNone>(ExitedPad));
3870 } else {
3871 // Unwinding to caller exits all pads.
3872 UnwindPad = ConstantTokenNone::get(FPI.getContext());
3873 ExitsFPI = true;
3874 UnresolvedAncestorPad = &FPI;
3875 }
3876
3877 if (ExitsFPI) {
3878 // This unwind edge exits FPI. Make sure it agrees with other
3879 // such edges.
3880 if (FirstUser) {
3881 Assert(UnwindPad == FirstUnwindPad, "Unwind edges out of a funclet "
3882 "pad must have the same unwind "
3883 "dest",
3884 &FPI, U, FirstUser);
3885 } else {
3886 FirstUser = U;
3887 FirstUnwindPad = UnwindPad;
3888 // Record cleanup sibling unwinds for verifySiblingFuncletUnwinds
3889 if (isa<CleanupPadInst>(&FPI) && !isa<ConstantTokenNone>(UnwindPad) &&
3890 getParentPad(UnwindPad) == getParentPad(&FPI))
3891 SiblingFuncletInfo[&FPI] = cast<Instruction>(U);
3892 }
3893 }
3894 // Make sure we visit all uses of FPI, but for nested pads stop as
3895 // soon as we know where they unwind to.
3896 if (CurrentPad != &FPI)
3897 break;
3898 }
3899 if (UnresolvedAncestorPad) {
3900 if (CurrentPad == UnresolvedAncestorPad) {
3901 // When CurrentPad is FPI itself, we don't mark it as resolved even if
3902 // we've found an unwind edge that exits it, because we need to verify
3903 // all direct uses of FPI.
3904 assert(CurrentPad == &FPI);
3905 continue;
3906 }
3907 // Pop off the worklist any nested pads that we've found an unwind
3908 // destination for. The pads on the worklist are the uncles,
3909 // great-uncles, etc. of CurrentPad. We've found an unwind destination
3910 // for all ancestors of CurrentPad up to but not including
3911 // UnresolvedAncestorPad.
3912 Value *ResolvedPad = CurrentPad;
3913 while (!Worklist.empty()) {
3914 Value *UnclePad = Worklist.back();
3915 Value *AncestorPad = getParentPad(UnclePad);
3916 // Walk ResolvedPad up the ancestor list until we either find the
3917 // uncle's parent or the last resolved ancestor.
3918 while (ResolvedPad != AncestorPad) {
3919 Value *ResolvedParent = getParentPad(ResolvedPad);
3920 if (ResolvedParent == UnresolvedAncestorPad) {
3921 break;
3922 }
3923 ResolvedPad = ResolvedParent;
3924 }
3925 // If the resolved ancestor search didn't find the uncle's parent,
3926 // then the uncle is not yet resolved.
3927 if (ResolvedPad != AncestorPad)
3928 break;
3929 // This uncle is resolved, so pop it from the worklist.
3930 Worklist.pop_back();
3931 }
3932 }
3933 }
3934
3935 if (FirstUnwindPad) {
3936 if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(FPI.getParentPad())) {
3937 BasicBlock *SwitchUnwindDest = CatchSwitch->getUnwindDest();
3938 Value *SwitchUnwindPad;
3939 if (SwitchUnwindDest)
3940 SwitchUnwindPad = SwitchUnwindDest->getFirstNonPHI();
3941 else
3942 SwitchUnwindPad = ConstantTokenNone::get(FPI.getContext());
3943 Assert(SwitchUnwindPad == FirstUnwindPad,
3944 "Unwind edges out of a catch must have the same unwind dest as "
3945 "the parent catchswitch",
3946 &FPI, FirstUser, CatchSwitch);
3947 }
3948 }
3949
3950 visitInstruction(FPI);
3951 }
3952
3953 void Verifier::visitCatchSwitchInst(CatchSwitchInst &CatchSwitch) {
3954 BasicBlock *BB = CatchSwitch.getParent();
3955
3956 Function *F = BB->getParent();
3957 Assert(F->hasPersonalityFn(),
3958 "CatchSwitchInst needs to be in a function with a personality.",
3959 &CatchSwitch);
3960
3961 // The catchswitch instruction must be the first non-PHI instruction in the
3962 // block.
3963 Assert(BB->getFirstNonPHI() == &CatchSwitch,
3964 "CatchSwitchInst not the first non-PHI instruction in the block.",
3965 &CatchSwitch);
3966
3967 auto *ParentPad = CatchSwitch.getParentPad();
3968 Assert(isa<ConstantTokenNone>(ParentPad) || isa<FuncletPadInst>(ParentPad),
3969 "CatchSwitchInst has an invalid parent.", ParentPad);
3970
3971 if (BasicBlock *UnwindDest = CatchSwitch.getUnwindDest()) {
3972 Instruction *I = UnwindDest->getFirstNonPHI();
3973 Assert(I->isEHPad() && !isa<LandingPadInst>(I),
3974 "CatchSwitchInst must unwind to an EH block which is not a "
3975 "landingpad.",
3976 &CatchSwitch);
3977
3978 // Record catchswitch sibling unwinds for verifySiblingFuncletUnwinds
3979 if (getParentPad(I) == ParentPad)
3980 SiblingFuncletInfo[&CatchSwitch] = &CatchSwitch;
3981 }
3982
3983 Assert(CatchSwitch.getNumHandlers() != 0,
3984 "CatchSwitchInst cannot have empty handler list", &CatchSwitch);
3985
3986 for (BasicBlock *Handler : CatchSwitch.handlers()) {
3987 Assert(isa<CatchPadInst>(Handler->getFirstNonPHI()),
3988 "CatchSwitchInst handlers must be catchpads", &CatchSwitch, Handler);
3989 }
3990
3991 visitEHPadPredecessors(CatchSwitch);
3992 visitTerminator(CatchSwitch);
3993 }
3994
3995 void Verifier::visitCleanupReturnInst(CleanupReturnInst &CRI) {
3996 Assert(isa<CleanupPadInst>(CRI.getOperand(0)),
3997 "CleanupReturnInst needs to be provided a CleanupPad", &CRI,
3998 CRI.getOperand(0));
3999
4000 if (BasicBlock *UnwindDest = CRI.getUnwindDest()) {
4001 Instruction *I = UnwindDest->getFirstNonPHI();
4002 Assert(I->isEHPad() && !isa<LandingPadInst>(I),
4003 "CleanupReturnInst must unwind to an EH block which is not a "
4004 "landingpad.",
4005 &CRI);
4006 }
4007
4008 visitTerminator(CRI);
4009 }
4010
4011 void Verifier::verifyDominatesUse(Instruction &I, unsigned i) {
4012 Instruction *Op = cast<Instruction>(I.getOperand(i));
4013 // If we have an invalid invoke, don't try to compute the dominance.
4014 // We already reject it in the invoke specific checks and the dominance
4015 // computation doesn't handle multiple edges.
4016 if (InvokeInst *II = dyn_cast<InvokeInst>(Op)) {
4017 if (II->getNormalDest() == II->getUnwindDest())
4018 return;
4019 }
4020
4021 // Quick check whether the def has already been encountered in the same block.
4022 // PHI nodes are not checked to prevent accepting preceding PHIs, because PHI
4023 // uses are defined to happen on the incoming edge, not at the instruction.
4024 //
4025 // FIXME: If this operand is a MetadataAsValue (wrapping a LocalAsMetadata)
4026 // wrapping an SSA value, assert that we've already encountered it. See
4027 // related FIXME in Mapper::mapLocalAsMetadata in ValueMapper.cpp.
4028 if (!isa<PHINode>(I) && InstsInThisBlock.count(Op))
4029 return;
4030
4031 const Use &U = I.getOperandUse(i);
4032 Assert(DT.dominates(Op, U),
4033 "Instruction does not dominate all uses!", Op, &I);
4034 }
4035
4036 void Verifier::visitDereferenceableMetadata(Instruction& I, MDNode* MD) {
4037 Assert(I.getType()->isPointerTy(), "dereferenceable, dereferenceable_or_null "
4038 "apply only to pointer types", &I);
4039 Assert((isa<LoadInst>(I) || isa<IntToPtrInst>(I)),
4040 "dereferenceable, dereferenceable_or_null apply only to load"
4041 " and inttoptr instructions, use attributes for calls or invokes", &I);
4042 Assert(MD->getNumOperands() == 1, "dereferenceable, dereferenceable_or_null "
4043 "take one operand!", &I);
4044 ConstantInt *CI = mdconst::dyn_extract<ConstantInt>(MD->getOperand(0));
4045 Assert(CI && CI->getType()->isIntegerTy(64), "dereferenceable, "
4046 "dereferenceable_or_null metadata value must be an i64!", &I);
4047 }
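
// Illustrative sketch (editor's example, not part of the original source;
// names are hypothetical): !dereferenceable on a pointer-typed load with the
// single i64 operand required above:
//   %q = load i8*, i8** %pp, !dereferenceable !0
//   !0 = !{i64 16}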
4048
4049 void Verifier::visitProfMetadata(Instruction &I, MDNode *MD) {
4050 Assert(MD->getNumOperands() >= 2,
4051 "!prof annotations should have no less than 2 operands", MD);
4052
4053 // Check first operand.
4054 Assert(MD->getOperand(0) != nullptr, "first operand should not be null", MD);
4055 Assert(isa<MDString>(MD->getOperand(0)),
4056 "expected string with name of the !prof annotation", MD);
4057 MDString *MDS = cast<MDString>(MD->getOperand(0));
4058 StringRef ProfName = MDS->getString();
4059
4060 // Check consistency of !prof branch_weights metadata.
4061 if (ProfName.equals("branch_weights")) {
4062 unsigned ExpectedNumOperands = 0;
4063 if (BranchInst *BI = dyn_cast<BranchInst>(&I))
4064 ExpectedNumOperands = BI->getNumSuccessors();
4065 else if (SwitchInst *SI = dyn_cast<SwitchInst>(&I))
4066 ExpectedNumOperands = SI->getNumSuccessors();
4067 else if (isa<CallInst>(&I) || isa<InvokeInst>(&I))
4068 ExpectedNumOperands = 1;
4069 else if (IndirectBrInst *IBI = dyn_cast<IndirectBrInst>(&I))
4070 ExpectedNumOperands = IBI->getNumDestinations();
4071 else if (isa<SelectInst>(&I))
4072 ExpectedNumOperands = 2;
4073 else
4074 CheckFailed("!prof branch_weights are not allowed for this instruction",
4075 MD);
4076
4077 Assert(MD->getNumOperands() == 1 + ExpectedNumOperands,
4078 "Wrong number of operands", MD);
4079 for (unsigned i = 1; i < MD->getNumOperands(); ++i) {
4080 auto &MDO = MD->getOperand(i);
4081 Assert(MDO, "second operand should not be null", MD);
4082 Assert(mdconst::dyn_extract<ConstantInt>(MDO),
4083 "!prof brunch_weights operand is not a const int");
4084 }
4085 }
4086 }
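
// Illustrative sketch (editor's example, not part of the original source;
// names are hypothetical): branch_weights !prof metadata carrying one weight
// per successor, as checked above:
//   br i1 %cond, label %then, label %else, !prof !0
//   !0 = !{!"branch_weights", i32 2000, i32 1}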
4087
4088 /// visitInstruction - Verify that an instruction is well formed.
4089 ///
4090 void Verifier::visitInstruction(Instruction &I) {
4091 BasicBlock *BB = I.getParent();
4092 Assert(BB, "Instruction not embedded in basic block!", &I);
4093
4094 if (!isa<PHINode>(I)) { // Check that non-phi nodes are not self referential
4095 for (User *U : I.users()) {
4096 Assert(U != (User *)&I || !DT.isReachableFromEntry(BB),
4097 "Only PHI nodes may reference their own value!", &I);
4098 }
4099 }
4100
4101 // Check that void typed values don't have names
4102 Assert(!I.getType()->isVoidTy() || !I.hasName(),
4103 "Instruction has a name, but provides a void value!", &I);
4104
4105 // Check that the return value of the instruction is either void or a legal
4106 // value type.
4107 Assert(I.getType()->isVoidTy() || I.getType()->isFirstClassType(),
4108 "Instruction returns a non-scalar type!", &I);
4109
4110 // Check that the instruction doesn't produce metadata. Calls are already
4111 // checked against the callee type.
4112 Assert(!I.getType()->isMetadataTy() || isa<CallInst>(I) || isa<InvokeInst>(I),
4113 "Invalid use of metadata!", &I);
4114
4115 // Check that all uses of the instruction, if they are instructions
4116 // themselves, actually have parent basic blocks. If the use is not an
4117 // instruction, it is an error!
4118 for (Use &U : I.uses()) {
4119 if (Instruction *Used = dyn_cast<Instruction>(U.getUser()))
4120 Assert(Used->getParent() != nullptr,
4121 "Instruction referencing"
4122 " instruction not embedded in a basic block!",
4123 &I, Used);
4124 else {
4125 CheckFailed("Use of instruction is not an instruction!", U);
4126 return;
4127 }
4128 }
4129
4130 // Get a pointer to the call base of the instruction if it is some form of
4131 // call.
4132 const CallBase *CBI = dyn_cast<CallBase>(&I);
4133
4134 for (unsigned i = 0, e = I.getNumOperands(); i != e; ++i) {
4135 Assert(I.getOperand(i) != nullptr, "Instruction has null operand!", &I);
4136
4137 // Check to make sure that only first-class-values are operands to
4138 // instructions.
4139 if (!I.getOperand(i)->getType()->isFirstClassType()) {
4140 Assert(false, "Instruction operands must be first-class values!", &I);
4141 }
4142
4143 if (Function *F = dyn_cast<Function>(I.getOperand(i))) {
4144 // Check to make sure that the "address of" an intrinsic function is never
4145 // taken.
4146 Assert(!F->isIntrinsic() ||
4147 (CBI && &CBI->getCalledOperandUse() == &I.getOperandUse(i)),
4148 "Cannot take the address of an intrinsic!", &I);
4149 Assert(
4150 !F->isIntrinsic() || isa<CallInst>(I) ||
4151 F->getIntrinsicID() == Intrinsic::donothing ||
4152 F->getIntrinsicID() == Intrinsic::coro_resume ||
4153 F->getIntrinsicID() == Intrinsic::coro_destroy ||
4154 F->getIntrinsicID() == Intrinsic::experimental_patchpoint_void ||
4155 F->getIntrinsicID() == Intrinsic::experimental_patchpoint_i64 ||
4156 F->getIntrinsicID() == Intrinsic::experimental_gc_statepoint ||
4157 F->getIntrinsicID() == Intrinsic::wasm_rethrow_in_catch,
4158 "Cannot invoke an intrinsic other than donothing, patchpoint, "
4159 "statepoint, coro_resume or coro_destroy",
4160 &I);
4161 Assert(F->getParent() == &M, "Referencing function in another module!",
4162 &I, &M, F, F->getParent());
4163 } else if (BasicBlock *OpBB = dyn_cast<BasicBlock>(I.getOperand(i))) {
4164 Assert(OpBB->getParent() == BB->getParent(),
4165 "Referring to a basic block in another function!", &I);
4166 } else if (Argument *OpArg = dyn_cast<Argument>(I.getOperand(i))) {
4167 Assert(OpArg->getParent() == BB->getParent(),
4168 "Referring to an argument in another function!", &I);
4169 } else if (GlobalValue *GV = dyn_cast<GlobalValue>(I.getOperand(i))) {
4170 Assert(GV->getParent() == &M, "Referencing global in another module!", &I,
4171 &M, GV, GV->getParent());
4172 } else if (isa<Instruction>(I.getOperand(i))) {
4173 verifyDominatesUse(I, i);
4174 } else if (isa<InlineAsm>(I.getOperand(i))) {
4175 Assert(CBI && &CBI->getCalledOperandUse() == &I.getOperandUse(i),
4176 "Cannot take the address of an inline asm!", &I);
4177 } else if (ConstantExpr *CE = dyn_cast<ConstantExpr>(I.getOperand(i))) {
4178 if (CE->getType()->isPtrOrPtrVectorTy() ||
4179 !DL.getNonIntegralAddressSpaces().empty()) {
4180 // If we have a ConstantExpr pointer, we need to see if it came from an
4181 // illegal bitcast. If the datalayout string specifies non-integral
4182 // address spaces then we also need to check for illegal ptrtoint and
4183 // inttoptr expressions.
4184 visitConstantExprsRecursively(CE);
4185 }
4186 }
4187 }
4188
4189 if (MDNode *MD = I.getMetadata(LLVMContext::MD_fpmath)) {
4190 Assert(I.getType()->isFPOrFPVectorTy(),
4191 "fpmath requires a floating point result!", &I);
4192 Assert(MD->getNumOperands() == 1, "fpmath takes one operand!", &I);
4193 if (ConstantFP *CFP0 =
4194 mdconst::dyn_extract_or_null<ConstantFP>(MD->getOperand(0))) {
4195 const APFloat &Accuracy = CFP0->getValueAPF();
4196 Assert(&Accuracy.getSemantics() == &APFloat::IEEEsingle(),
4197 "fpmath accuracy must have float type", &I);
4198 Assert(Accuracy.isFiniteNonZero() && !Accuracy.isNegative(),
4199 "fpmath accuracy not a positive number!", &I);
4200 } else {
4201 Assert(false, "invalid fpmath accuracy!", &I);
4202 }
4203 }
4204
4205 if (MDNode *Range = I.getMetadata(LLVMContext::MD_range)) {
4206 Assert(isa<LoadInst>(I) || isa<CallInst>(I) || isa<InvokeInst>(I),
4207 "Ranges are only for loads, calls and invokes!", &I);
4208 visitRangeMetadata(I, Range, I.getType());
4209 }
4210
4211 if (I.getMetadata(LLVMContext::MD_nonnull)) {
4212 Assert(I.getType()->isPointerTy(), "nonnull applies only to pointer types",
4213 &I);
4214 Assert(isa<LoadInst>(I),
4215 "nonnull applies only to load instructions, use attributes"
4216 " for calls or invokes",
4217 &I);
4218 }
4219
4220 if (MDNode *MD = I.getMetadata(LLVMContext::MD_dereferenceable))
4221 visitDereferenceableMetadata(I, MD);
4222
4223 if (MDNode *MD = I.getMetadata(LLVMContext::MD_dereferenceable_or_null))
4224 visitDereferenceableMetadata(I, MD);
4225
4226 if (MDNode *TBAA = I.getMetadata(LLVMContext::MD_tbaa))
4227 TBAAVerifyHelper.visitTBAAMetadata(I, TBAA);
4228
4229 if (MDNode *AlignMD = I.getMetadata(LLVMContext::MD_align)) {
4230 Assert(I.getType()->isPointerTy(), "align applies only to pointer types",
4231 &I);
4232 Assert(isa<LoadInst>(I), "align applies only to load instructions, "
4233 "use attributes for calls or invokes", &I);
4234 Assert(AlignMD->getNumOperands() == 1, "align takes one operand!", &I);
4235 ConstantInt *CI = mdconst::dyn_extract<ConstantInt>(AlignMD->getOperand(0));
4236 Assert(CI && CI->getType()->isIntegerTy(64),
4237 "align metadata value must be an i64!", &I);
4238 uint64_t Align = CI->getZExtValue();
4239 Assert(isPowerOf2_64(Align),
4240 "align metadata value must be a power of 2!", &I);
4241 Assert(Align <= Value::MaximumAlignment,
4242 "alignment is larger that implementation defined limit", &I);
4243 }
4244
4245 if (MDNode *MD = I.getMetadata(LLVMContext::MD_prof))
4246 visitProfMetadata(I, MD);
4247
4248 if (MDNode *N = I.getDebugLoc().getAsMDNode()) {
4249 AssertDI(isa<DILocation>(N), "invalid !dbg metadata attachment", &I, N);
4250 visitMDNode(*N);
4251 }
4252
4253 if (auto *DII = dyn_cast<DbgVariableIntrinsic>(&I)) {
4254 verifyFragmentExpression(*DII);
4255 verifyNotEntryValue(*DII);
4256 }
4257
4258 InstsInThisBlock.insert(&I);
4259 }
4260
4261 /// Allow intrinsics to be verified in different ways.
4262 void Verifier::visitIntrinsicCall(Intrinsic::ID ID, CallBase &Call) {
4263 Function *IF = Call.getCalledFunction();
4264 Assert(IF->isDeclaration(), "Intrinsic functions should never be defined!",
4265 IF);
4266
4267 // Verify that the intrinsic prototype lines up with what the .td files
4268 // describe.
4269 FunctionType *IFTy = IF->getFunctionType();
4270 bool IsVarArg = IFTy->isVarArg();
4271
4272 SmallVector<Intrinsic::IITDescriptor, 8> Table;
4273 getIntrinsicInfoTableEntries(ID, Table);
4274 ArrayRef<Intrinsic::IITDescriptor> TableRef = Table;
4275
4276 // Walk the descriptors to extract overloaded types.
4277 SmallVector<Type *, 4> ArgTys;
4278 Intrinsic::MatchIntrinsicTypesResult Res =
4279 Intrinsic::matchIntrinsicSignature(IFTy, TableRef, ArgTys);
4280 Assert(Res != Intrinsic::MatchIntrinsicTypes_NoMatchRet,
4281 "Intrinsic has incorrect return type!", IF);
4282 Assert(Res != Intrinsic::MatchIntrinsicTypes_NoMatchArg,
4283 "Intrinsic has incorrect argument type!", IF);
4284
4285 // Verify if the intrinsic call matches the vararg property.
4286 if (IsVarArg)
4287 Assert(!Intrinsic::matchIntrinsicVarArg(IsVarArg, TableRef),
4288 "Intrinsic was not defined with variable arguments!", IF);
4289 else
4290 Assert(!Intrinsic::matchIntrinsicVarArg(IsVarArg, TableRef),
4291 "Callsite was not defined with variable arguments!", IF);
4292
4293 // All descriptors should be absorbed by now.
4294 Assert(TableRef.empty(), "Intrinsic has too few arguments!", IF);
4295
4296 // Now that we have the intrinsic ID and the actual argument types (and we
4297 // know they are legal for the intrinsic!) get the intrinsic name through the
4298 // usual means. This allows us to verify the mangling of argument types into
4299 // the name.
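// For example, the memcpy intrinsic overloaded on (i8*, i8*, i64) is expected
// to be named llvm.memcpy.p0i8.p0i8.i64; any other spelling of the type
// suffix is rejected here.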
4300 const std::string ExpectedName = Intrinsic::getName(ID, ArgTys);
4301 Assert(ExpectedName == IF->getName(),
4302 "Intrinsic name not mangled correctly for type arguments! "
4303 "Should be: " +
4304 ExpectedName,
4305 IF);
4306
4307 // If the intrinsic takes MDNode arguments, verify that they are either global
4308 // or are local to *this* function.
4309 for (Value *V : Call.args())
4310 if (auto *MD = dyn_cast<MetadataAsValue>(V))
4311 visitMetadataAsValue(*MD, Call.getCaller());
4312
4313 switch (ID) {
4314 default:
4315 break;
4316 case Intrinsic::coro_id: {
4317 auto *InfoArg = Call.getArgOperand(3)->stripPointerCasts();
4318 if (isa<ConstantPointerNull>(InfoArg))
4319 break;
4320 auto *GV = dyn_cast<GlobalVariable>(InfoArg);
4321 Assert(GV && GV->isConstant() && GV->hasDefinitiveInitializer(),
4322 "info argument of llvm.coro.begin must refer to an initialized "
4323 "constant");
4324 Constant *Init = GV->getInitializer();
4325 Assert(isa<ConstantStruct>(Init) || isa<ConstantArray>(Init),
4326 "info argument of llvm.coro.begin must refer to either a struct or "
4327 "an array");
4328 break;
4329 }
4330 #define INSTRUCTION(NAME, NARGS, ROUND_MODE, INTRINSIC, DAGN) \
4331 case Intrinsic::INTRINSIC:
4332 #include "llvm/IR/ConstrainedOps.def"
4333 visitConstrainedFPIntrinsic(cast<ConstrainedFPIntrinsic>(Call));
4334 break;
4335 case Intrinsic::dbg_declare: // llvm.dbg.declare
4336 Assert(isa<MetadataAsValue>(Call.getArgOperand(0)),
4337 "invalid llvm.dbg.declare intrinsic call 1", Call);
4338 visitDbgIntrinsic("declare", cast<DbgVariableIntrinsic>(Call));
4339 break;
4340 case Intrinsic::dbg_addr: // llvm.dbg.addr
4341 visitDbgIntrinsic("addr", cast<DbgVariableIntrinsic>(Call));
4342 break;
4343 case Intrinsic::dbg_value: // llvm.dbg.value
4344 visitDbgIntrinsic("value", cast<DbgVariableIntrinsic>(Call));
4345 break;
4346 case Intrinsic::dbg_label: // llvm.dbg.label
4347 visitDbgLabelIntrinsic("label", cast<DbgLabelInst>(Call));
4348 break;
4349 case Intrinsic::memcpy:
4350 case Intrinsic::memmove:
4351 case Intrinsic::memset: {
4352 const auto *MI = cast<MemIntrinsic>(&Call);
4353 auto IsValidAlignment = [&](unsigned Alignment) -> bool {
4354 return Alignment == 0 || isPowerOf2_32(Alignment);
4355 };
4356 Assert(IsValidAlignment(MI->getDestAlignment()),
4357 "alignment of arg 0 of memory intrinsic must be 0 or a power of 2",
4358 Call);
4359 if (const auto *MTI = dyn_cast<MemTransferInst>(MI)) {
4360 Assert(IsValidAlignment(MTI->getSourceAlignment()),
4361 "alignment of arg 1 of memory intrinsic must be 0 or a power of 2",
4362 Call);
4363 }
4364
4365 break;
4366 }
4367 case Intrinsic::memcpy_element_unordered_atomic:
4368 case Intrinsic::memmove_element_unordered_atomic:
4369 case Intrinsic::memset_element_unordered_atomic: {
4370 const auto *AMI = cast<AtomicMemIntrinsic>(&Call);
4371
4372 ConstantInt *ElementSizeCI =
4373 cast<ConstantInt>(AMI->getRawElementSizeInBytes());
4374 const APInt &ElementSizeVal = ElementSizeCI->getValue();
4375 Assert(ElementSizeVal.isPowerOf2(),
4376 "element size of the element-wise atomic memory intrinsic "
4377 "must be a power of 2",
4378 Call);
4379
4380 if (auto *LengthCI = dyn_cast<ConstantInt>(AMI->getLength())) {
4381 uint64_t Length = LengthCI->getZExtValue();
4382 uint64_t ElementSize = AMI->getElementSizeInBytes();
4383 Assert((Length % ElementSize) == 0,
4384 "constant length must be a multiple of the element size in the "
4385 "element-wise atomic memory intrinsic",
4386 Call);
4387 }
4388
4389 auto IsValidAlignment = [&](uint64_t Alignment) {
4390 return isPowerOf2_64(Alignment) && ElementSizeVal.ule(Alignment);
4391 };
4392 uint64_t DstAlignment = AMI->getDestAlignment();
4393 Assert(IsValidAlignment(DstAlignment),
4394 "incorrect alignment of the destination argument", Call);
4395 if (const auto *AMT = dyn_cast<AtomicMemTransferInst>(AMI)) {
4396 uint64_t SrcAlignment = AMT->getSourceAlignment();
4397 Assert(IsValidAlignment(SrcAlignment),
4398 "incorrect alignment of the source argument", Call);
4399 }
4400 break;
4401 }
4402 case Intrinsic::gcroot:
4403 case Intrinsic::gcwrite:
4404 case Intrinsic::gcread:
4405 if (ID == Intrinsic::gcroot) {
4406 AllocaInst *AI =
4407 dyn_cast<AllocaInst>(Call.getArgOperand(0)->stripPointerCasts());
4408 Assert(AI, "llvm.gcroot parameter #1 must be an alloca.", Call);
4409 Assert(isa<Constant>(Call.getArgOperand(1)),
4410 "llvm.gcroot parameter #2 must be a constant.", Call);
4411 if (!AI->getAllocatedType()->isPointerTy()) {
4412 Assert(!isa<ConstantPointerNull>(Call.getArgOperand(1)),
4413 "llvm.gcroot parameter #1 must either be a pointer alloca, "
4414 "or argument #2 must be a non-null constant.",
4415 Call);
4416 }
4417 }
4418
4419 Assert(Call.getParent()->getParent()->hasGC(),
4420 "Enclosing function does not use GC.", Call);
4421 break;
4422 case Intrinsic::init_trampoline:
4423 Assert(isa<Function>(Call.getArgOperand(1)->stripPointerCasts()),
4424 "llvm.init_trampoline parameter #2 must resolve to a function.",
4425 Call);
4426 break;
4427 case Intrinsic::prefetch:
4428 Assert(cast<ConstantInt>(Call.getArgOperand(1))->getZExtValue() < 2 &&
4429 cast<ConstantInt>(Call.getArgOperand(2))->getZExtValue() < 4,
4430 "invalid arguments to llvm.prefetch", Call);
4431 break;
4432 case Intrinsic::stackprotector:
4433 Assert(isa<AllocaInst>(Call.getArgOperand(1)->stripPointerCasts()),
4434 "llvm.stackprotector parameter #2 must resolve to an alloca.", Call);
4435 break;
4436 case Intrinsic::localescape: {
4437 BasicBlock *BB = Call.getParent();
4438 Assert(BB == &BB->getParent()->front(),
4439 "llvm.localescape used outside of entry block", Call);
4440 Assert(!SawFrameEscape,
4441 "multiple calls to llvm.localescape in one function", Call);
4442 for (Value *Arg : Call.args()) {
4443 if (isa<ConstantPointerNull>(Arg))
4444 continue; // Null values are allowed as placeholders.
4445 auto *AI = dyn_cast<AllocaInst>(Arg->stripPointerCasts());
4446 Assert(AI && AI->isStaticAlloca(),
4447 "llvm.localescape only accepts static allocas", Call);
4448 }
4449 FrameEscapeInfo[BB->getParent()].first = Call.getNumArgOperands();
4450 SawFrameEscape = true;
4451 break;
4452 }
4453 case Intrinsic::localrecover: {
4454 Value *FnArg = Call.getArgOperand(0)->stripPointerCasts();
4455 Function *Fn = dyn_cast<Function>(FnArg);
4456 Assert(Fn && !Fn->isDeclaration(),
4457 "llvm.localrecover first "
4458 "argument must be function defined in this module",
4459 Call);
4460 auto *IdxArg = cast<ConstantInt>(Call.getArgOperand(2));
4461 auto &Entry = FrameEscapeInfo[Fn];
4462 Entry.second = unsigned(
4463 std::max(uint64_t(Entry.second), IdxArg->getLimitedValue(~0U) + 1));
4464 break;
4465 }
4466
4467 case Intrinsic::experimental_gc_statepoint:
4468 if (auto *CI = dyn_cast<CallInst>(&Call))
4469 Assert(!CI->isInlineAsm(),
4470 "gc.statepoint support for inline assembly unimplemented", CI);
4471 Assert(Call.getParent()->getParent()->hasGC(),
4472 "Enclosing function does not use GC.", Call);
4473
4474 verifyStatepoint(Call);
4475 break;
4476 case Intrinsic::experimental_gc_result: {
4477 Assert(Call.getParent()->getParent()->hasGC(),
4478 "Enclosing function does not use GC.", Call);
4479 // Are we tied to a statepoint properly?
4480 const auto *StatepointCall = dyn_cast<CallBase>(Call.getArgOperand(0));
4481 const Function *StatepointFn =
4482 StatepointCall ? StatepointCall->getCalledFunction() : nullptr;
4483 Assert(StatepointFn && StatepointFn->isDeclaration() &&
4484 StatepointFn->getIntrinsicID() ==
4485 Intrinsic::experimental_gc_statepoint,
4486 "gc.result operand #1 must be from a statepoint", Call,
4487 Call.getArgOperand(0));
4488
4489 // Assert that result type matches wrapped callee.
4490 const Value *Target = StatepointCall->getArgOperand(2);
4491 auto *PT = cast<PointerType>(Target->getType());
4492 auto *TargetFuncType = cast<FunctionType>(PT->getElementType());
4493 Assert(Call.getType() == TargetFuncType->getReturnType(),
4494 "gc.result result type does not match wrapped callee", Call);
4495 break;
4496 }
4497 case Intrinsic::experimental_gc_relocate: {
4498 Assert(Call.getNumArgOperands() == 3, "wrong number of arguments", Call);
4499
4500 Assert(isa<PointerType>(Call.getType()->getScalarType()),
4501 "gc.relocate must return a pointer or a vector of pointers", Call);
4502
4503 // Check that this relocate is correctly tied to the statepoint
4504
4505 // This is the case for a relocate on the unwinding path of an invoke statepoint.
4506 if (LandingPadInst *LandingPad =
4507 dyn_cast<LandingPadInst>(Call.getArgOperand(0))) {
4508
4509 const BasicBlock *InvokeBB =
4510 LandingPad->getParent()->getUniquePredecessor();
4511
4512 // The landingpad block should have exactly one predecessor, and that
4513 // predecessor must be terminated by the invoke statepoint.
4514 Assert(InvokeBB, "safepoints should have unique landingpads",
4515 LandingPad->getParent());
4516 Assert(InvokeBB->getTerminator(), "safepoint block should be well formed",
4517 InvokeBB);
4518 Assert(isStatepoint(InvokeBB->getTerminator()),
4519 "gc relocate should be linked to a statepoint", InvokeBB);
4520 } else {
4521 // In all other cases the relocate should be tied to the statepoint directly.
4522 // This covers relocates on the normal return path of an invoke statepoint
4523 // and relocates of a call statepoint.
4524 auto Token = Call.getArgOperand(0);
4525 Assert(isa<Instruction>(Token) && isStatepoint(cast<Instruction>(Token)),
4526 "gc relocate is incorrectly tied to the statepoint", Call, Token);
4527 }
4528
4529 // Verify rest of the relocate arguments.
4530 const CallBase &StatepointCall =
4531 *cast<CallBase>(cast<GCRelocateInst>(Call).getStatepoint());
4532
4533 // Both the base and derived must be piped through the safepoint.
4534 Value *Base = Call.getArgOperand(1);
4535 Assert(isa<ConstantInt>(Base),
4536 "gc.relocate operand #2 must be integer offset", Call);
4537
4538 Value *Derived = Call.getArgOperand(2);
4539 Assert(isa<ConstantInt>(Derived),
4540 "gc.relocate operand #3 must be integer offset", Call);
4541
4542 const int BaseIndex = cast<ConstantInt>(Base)->getZExtValue();
4543 const int DerivedIndex = cast<ConstantInt>(Derived)->getZExtValue();
4544 // Check the bounds
4545 Assert(0 <= BaseIndex && BaseIndex < (int)StatepointCall.arg_size(),
4546 "gc.relocate: statepoint base index out of bounds", Call);
4547 Assert(0 <= DerivedIndex && DerivedIndex < (int)StatepointCall.arg_size(),
4548 "gc.relocate: statepoint derived index out of bounds", Call);
4549
4550 // Check that BaseIndex and DerivedIndex fall within the 'gc parameters'
4551 // section of the statepoint's argument.
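// For reference, the index arithmetic below assumes the statepoint argument
// layout (positions only; see the gc.statepoint documentation):
//   [0] id, [1] num patch bytes, [2] call target, [3] number of call args,
//   [4] flags, [5 ...] call args, then the transition-arg count and args,
//   then the deopt-arg count and args, and finally the gc parameters.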
4552 Assert(StatepointCall.arg_size() > 0,
4553 "gc.statepoint: insufficient arguments");
4554 Assert(isa<ConstantInt>(StatepointCall.getArgOperand(3)),
4555 "gc.statement: number of call arguments must be constant integer");
4556 const unsigned NumCallArgs =
4557 cast<ConstantInt>(StatepointCall.getArgOperand(3))->getZExtValue();
4558 Assert(StatepointCall.arg_size() > NumCallArgs + 5,
4559 "gc.statepoint: mismatch in number of call arguments");
4560 Assert(isa<ConstantInt>(StatepointCall.getArgOperand(NumCallArgs + 5)),
4561 "gc.statepoint: number of transition arguments must be "
4562 "a constant integer");
4563 const int NumTransitionArgs =
4564 cast<ConstantInt>(StatepointCall.getArgOperand(NumCallArgs + 5))
4565 ->getZExtValue();
4566 const int DeoptArgsStart = 4 + NumCallArgs + 1 + NumTransitionArgs + 1;
4567 Assert(isa<ConstantInt>(StatepointCall.getArgOperand(DeoptArgsStart)),
4568 "gc.statepoint: number of deoptimization arguments must be "
4569 "a constant integer");
4570 const int NumDeoptArgs =
4571 cast<ConstantInt>(StatepointCall.getArgOperand(DeoptArgsStart))
4572 ->getZExtValue();
4573 const int GCParamArgsStart = DeoptArgsStart + 1 + NumDeoptArgs;
4574 const int GCParamArgsEnd = StatepointCall.arg_size();
4575 Assert(GCParamArgsStart <= BaseIndex && BaseIndex < GCParamArgsEnd,
4576 "gc.relocate: statepoint base index doesn't fall within the "
4577 "'gc parameters' section of the statepoint call",
4578 Call);
4579 Assert(GCParamArgsStart <= DerivedIndex && DerivedIndex < GCParamArgsEnd,
4580 "gc.relocate: statepoint derived index doesn't fall within the "
4581 "'gc parameters' section of the statepoint call",
4582 Call);
4583
4584 // Relocated value must be either a pointer type or vector-of-pointer type,
4585 // but gc_relocate does not need to return the same pointer type as the
4586 // relocated pointer. It can be cast to the correct type later if it's
4587 // desired. However, they must have the same address space and 'vectorness'
4588 GCRelocateInst &Relocate = cast<GCRelocateInst>(Call);
4589 Assert(Relocate.getDerivedPtr()->getType()->isPtrOrPtrVectorTy(),
4590 "gc.relocate: relocated value must be a gc pointer", Call);
4591
4592 auto ResultType = Call.getType();
4593 auto DerivedType = Relocate.getDerivedPtr()->getType();
4594 Assert(ResultType->isVectorTy() == DerivedType->isVectorTy(),
4595 "gc.relocate: vector relocates to vector and pointer to pointer",
4596 Call);
4597 Assert(
4598 ResultType->getPointerAddressSpace() ==
4599 DerivedType->getPointerAddressSpace(),
4600 "gc.relocate: relocating a pointer shouldn't change its address space",
4601 Call);
4602 break;
4603 }
4604 case Intrinsic::eh_exceptioncode:
4605 case Intrinsic::eh_exceptionpointer: {
4606 Assert(isa<CatchPadInst>(Call.getArgOperand(0)),
4607 "eh.exceptionpointer argument must be a catchpad", Call);
4608 break;
4609 }
4610 case Intrinsic::masked_load: {
4611 Assert(Call.getType()->isVectorTy(), "masked_load: must return a vector",
4612 Call);
4613
4614 Value *Ptr = Call.getArgOperand(0);
4615 ConstantInt *Alignment = cast<ConstantInt>(Call.getArgOperand(1));
4616 Value *Mask = Call.getArgOperand(2);
4617 Value *PassThru = Call.getArgOperand(3);
4618 Assert(Mask->getType()->isVectorTy(), "masked_load: mask must be vector",
4619 Call);
4620 Assert(Alignment->getValue().isPowerOf2(),
4621 "masked_load: alignment must be a power of 2", Call);
4622
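// A well-formed call looks like (illustrative names and types):
//   %v = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(
//            <4 x i32>* %p, i32 4, <4 x i1> %m, <4 x i32> %passthru)
// i.e. the loaded type, the pass-through value, and the pointee type must all
// agree, and the mask must have the same element count.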
4623 // DataTy is the overloaded type
4624 Type *DataTy = cast<PointerType>(Ptr->getType())->getElementType();
4625 Assert(DataTy == Call.getType(),
4626 "masked_load: return must match pointer type", Call);
4627 Assert(PassThru->getType() == DataTy,
4628 "masked_load: pass through and data type must match", Call);
4629 Assert(Mask->getType()->getVectorNumElements() ==
4630 DataTy->getVectorNumElements(),
4631 "masked_load: vector mask must be same length as data", Call);
4632 break;
4633 }
4634 case Intrinsic::masked_store: {
4635 Value *Val = Call.getArgOperand(0);
4636 Value *Ptr = Call.getArgOperand(1);
4637 ConstantInt *Alignment = cast<ConstantInt>(Call.getArgOperand(2));
4638 Value *Mask = Call.getArgOperand(3);
4639 Assert(Mask->getType()->isVectorTy(), "masked_store: mask must be vector",
4640 Call);
4641 Assert(Alignment->getValue().isPowerOf2(),
4642 "masked_store: alignment must be a power of 2", Call);
4643
4644 // DataTy is the overloaded type
4645 Type *DataTy = cast<PointerType>(Ptr->getType())->getElementType();
4646 Assert(DataTy == Val->getType(),
4647 "masked_store: storee must match pointer type", Call);
4648 Assert(Mask->getType()->getVectorNumElements() ==
4649 DataTy->getVectorNumElements(),
4650 "masked_store: vector mask must be same length as data", Call);
4651 break;
4652 }
4653
4654 case Intrinsic::experimental_guard: {
4655 Assert(isa<CallInst>(Call), "experimental_guard cannot be invoked", Call);
4656 Assert(Call.countOperandBundlesOfType(LLVMContext::OB_deopt) == 1,
4657 "experimental_guard must have exactly one "
4658 "\"deopt\" operand bundle");
4659 break;
4660 }
4661
4662 case Intrinsic::experimental_deoptimize: {
4663 Assert(isa<CallInst>(Call), "experimental_deoptimize cannot be invoked",
4664 Call);
4665 Assert(Call.countOperandBundlesOfType(LLVMContext::OB_deopt) == 1,
4666 "experimental_deoptimize must have exactly one "
4667 "\"deopt\" operand bundle");
4668 Assert(Call.getType() == Call.getFunction()->getReturnType(),
4669 "experimental_deoptimize return type must match caller return type");
4670
4671 if (isa<CallInst>(Call)) {
4672 auto *RI = dyn_cast<ReturnInst>(Call.getNextNode());
4673 Assert(RI,
4674 "calls to experimental_deoptimize must be followed by a return");
4675
4676 if (!Call.getType()->isVoidTy() && RI)
4677 Assert(RI->getReturnValue() == &Call,
4678 "calls to experimental_deoptimize must be followed by a return "
4679 "of the value computed by experimental_deoptimize");
4680 }
4681
4682 break;
4683 }
4684 case Intrinsic::sadd_sat:
4685 case Intrinsic::uadd_sat:
4686 case Intrinsic::ssub_sat:
4687 case Intrinsic::usub_sat: {
4688 Value *Op1 = Call.getArgOperand(0);
4689 Value *Op2 = Call.getArgOperand(1);
4690 Assert(Op1->getType()->isIntOrIntVectorTy(),
4691 "first operand of [us][add|sub]_sat must be an int type or vector "
4692 "of ints");
4693 Assert(Op2->getType()->isIntOrIntVectorTy(),
4694 "second operand of [us][add|sub]_sat must be an int type or vector "
4695 "of ints");
4696 break;
4697 }
4698 case Intrinsic::smul_fix:
4699 case Intrinsic::smul_fix_sat:
4700 case Intrinsic::umul_fix:
4701 case Intrinsic::umul_fix_sat:
4702 case Intrinsic::sdiv_fix:
4703 case Intrinsic::udiv_fix: {
4704 Value *Op1 = Call.getArgOperand(0);
4705 Value *Op2 = Call.getArgOperand(1);
4706 Assert(Op1->getType()->isIntOrIntVectorTy(),
4707 "first operand of [us][mul|div]_fix[_sat] must be an int type or "
4708 "vector of ints");
4709 Assert(Op2->getType()->isIntOrIntVectorTy(),
4710 "second operand of [us][mul|div]_fix[_sat] must be an int type or "
4711 "vector of ints");
4712
4713 auto *Op3 = cast<ConstantInt>(Call.getArgOperand(2));
4714 Assert(Op3->getType()->getBitWidth() <= 32,
4715 "third argument of [us][mul|div]_fix[_sat] must fit within 32 bits");
4716
4717 if (ID == Intrinsic::smul_fix || ID == Intrinsic::smul_fix_sat ||
4718 ID == Intrinsic::sdiv_fix) {
4719 Assert(
4720 Op3->getZExtValue() < Op1->getType()->getScalarSizeInBits(),
4721 "the scale of s[mul|div]_fix[_sat] must be less than the width of "
4722 "the operands");
4723 } else {
4724 Assert(Op3->getZExtValue() <= Op1->getType()->getScalarSizeInBits(),
4725 "the scale of u[mul|div]_fix[_sat] must be less than or equal "
4726 "to the width of the operands");
4727 }
4728 break;
4729 }
4730 case Intrinsic::lround:
4731 case Intrinsic::llround:
4732 case Intrinsic::lrint:
4733 case Intrinsic::llrint: {
4734 Type *ValTy = Call.getArgOperand(0)->getType();
4735 Type *ResultTy = Call.getType();
4736 Assert(!ValTy->isVectorTy() && !ResultTy->isVectorTy(),
4737 "Intrinsic does not support vectors", &Call);
4738 break;
4739 }
4740 };
4741 }
4742
4743 /// Carefully grab the subprogram from a local scope.
4744 ///
4745 /// This carefully grabs the subprogram from a local scope, avoiding the
4746 /// built-in assertions that would typically fire.
4747 static DISubprogram *getSubprogram(Metadata *LocalScope) {
4748 if (!LocalScope)
4749 return nullptr;
4750
4751 if (auto *SP = dyn_cast<DISubprogram>(LocalScope))
4752 return SP;
4753
4754 if (auto *LB = dyn_cast<DILexicalBlockBase>(LocalScope))
4755 return getSubprogram(LB->getRawScope());
4756
4757 // Just return null; broken scope chains are checked elsewhere.
4758 assert(!isa<DILocalScope>(LocalScope) && "Unknown type of local scope");
4759 return nullptr;
4760 }
4761
4762 void Verifier::visitConstrainedFPIntrinsic(ConstrainedFPIntrinsic &FPI) {
4763 unsigned NumOperands;
4764 bool HasRoundingMD;
4765 switch (FPI.getIntrinsicID()) {
4766 #define INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC, DAGN) \
4767 case Intrinsic::INTRINSIC: \
4768 NumOperands = NARG; \
4769 HasRoundingMD = ROUND_MODE; \
4770 break;
4771 #include "llvm/IR/ConstrainedOps.def"
4772 default:
4773 llvm_unreachable("Invalid constrained FP intrinsic!");
4774 }
4775 NumOperands += (1 + HasRoundingMD);
4776 // Compare intrinsics carry an extra predicate metadata operand.
4777 if (isa<ConstrainedFPCmpIntrinsic>(FPI))
4778 NumOperands += 1;
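// For example, a constrained fadd has 2 FP operands plus rounding-mode and
// exception-behavior metadata operands, for a total of 4; a constrained fcmp
// has 2 FP operands, a predicate metadata operand, and an exception-behavior
// operand, also 4.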
4779 Assert((FPI.getNumArgOperands() == NumOperands),
4780 "invalid arguments for constrained FP intrinsic", &FPI);
4781
4782 switch (FPI.getIntrinsicID()) {
4783 case Intrinsic::experimental_constrained_lrint:
4784 case Intrinsic::experimental_constrained_llrint: {
4785 Type *ValTy = FPI.getArgOperand(0)->getType();
4786 Type *ResultTy = FPI.getType();
4787 Assert(!ValTy->isVectorTy() && !ResultTy->isVectorTy(),
4788 "Intrinsic does not support vectors", &FPI);
4789 }
4790 break;
4791
4792 case Intrinsic::experimental_constrained_lround:
4793 case Intrinsic::experimental_constrained_llround: {
4794 Type *ValTy = FPI.getArgOperand(0)->getType();
4795 Type *ResultTy = FPI.getType();
4796 Assert(!ValTy->isVectorTy() && !ResultTy->isVectorTy(),
4797 "Intrinsic does not support vectors", &FPI);
4798 break;
4799 }
4800
4801 case Intrinsic::experimental_constrained_fcmp:
4802 case Intrinsic::experimental_constrained_fcmps: {
4803 auto Pred = cast<ConstrainedFPCmpIntrinsic>(&FPI)->getPredicate();
4804 Assert(CmpInst::isFPPredicate(Pred),
4805 "invalid predicate for constrained FP comparison intrinsic", &FPI);
4806 break;
4807 }
4808
4809 case Intrinsic::experimental_constrained_fptosi:
4810 case Intrinsic::experimental_constrained_fptoui: {
4811 Value *Operand = FPI.getArgOperand(0);
4812 uint64_t NumSrcElem = 0;
4813 Assert(Operand->getType()->isFPOrFPVectorTy(),
4814 "Intrinsic first argument must be floating point", &FPI);
4815 if (auto *OperandT = dyn_cast<VectorType>(Operand->getType())) {
4816 NumSrcElem = OperandT->getNumElements();
4817 }
4818
4819 Operand = &FPI;
4820 Assert((NumSrcElem > 0) == Operand->getType()->isVectorTy(),
4821 "Intrinsic first argument and result disagree on vector use", &FPI);
4822 Assert(Operand->getType()->isIntOrIntVectorTy(),
4823 "Intrinsic result must be an integer", &FPI);
4824 if (auto *OperandT = dyn_cast<VectorType>(Operand->getType())) {
4825 Assert(NumSrcElem == OperandT->getNumElements(),
4826 "Intrinsic first argument and result vector lengths must be equal",
4827 &FPI);
4828 }
4829 }
4830 break;
4831
4832 case Intrinsic::experimental_constrained_sitofp:
4833 case Intrinsic::experimental_constrained_uitofp: {
4834 Value *Operand = FPI.getArgOperand(0);
4835 uint64_t NumSrcElem = 0;
4836 Assert(Operand->getType()->isIntOrIntVectorTy(),
4837 "Intrinsic first argument must be integer", &FPI);
4838 if (auto *OperandT = dyn_cast<VectorType>(Operand->getType())) {
4839 NumSrcElem = OperandT->getNumElements();
4840 }
4841
4842 Operand = &FPI;
4843 Assert((NumSrcElem > 0) == Operand->getType()->isVectorTy(),
4844 "Intrinsic first argument and result disagree on vector use", &FPI);
4845 Assert(Operand->getType()->isFPOrFPVectorTy(),
4846 "Intrinsic result must be a floating point", &FPI);
4847 if (auto *OperandT = dyn_cast<VectorType>(Operand->getType())) {
4848 Assert(NumSrcElem == OperandT->getNumElements(),
4849 "Intrinsic first argument and result vector lengths must be equal",
4850 &FPI);
4851 }
4852 } break;
4853
4854 case Intrinsic::experimental_constrained_fptrunc:
4855 case Intrinsic::experimental_constrained_fpext: {
4856 Value *Operand = FPI.getArgOperand(0);
4857 Type *OperandTy = Operand->getType();
4858 Value *Result = &FPI;
4859 Type *ResultTy = Result->getType();
4860 Assert(OperandTy->isFPOrFPVectorTy(),
4861 "Intrinsic first argument must be FP or FP vector", &FPI);
4862 Assert(ResultTy->isFPOrFPVectorTy(),
4863 "Intrinsic result must be FP or FP vector", &FPI);
4864 Assert(OperandTy->isVectorTy() == ResultTy->isVectorTy(),
4865 "Intrinsic first argument and result disagree on vector use", &FPI);
4866 if (OperandTy->isVectorTy()) {
4867 auto *OperandVecTy = cast<VectorType>(OperandTy);
4868 auto *ResultVecTy = cast<VectorType>(ResultTy);
4869 Assert(OperandVecTy->getNumElements() == ResultVecTy->getNumElements(),
4870 "Intrinsic first argument and result vector lengths must be equal",
4871 &FPI);
4872 }
4873 if (FPI.getIntrinsicID() == Intrinsic::experimental_constrained_fptrunc) {
4874 Assert(OperandTy->getScalarSizeInBits() > ResultTy->getScalarSizeInBits(),
4875 "Intrinsic first argument's type must be larger than result type",
4876 &FPI);
4877 } else {
4878 Assert(OperandTy->getScalarSizeInBits() < ResultTy->getScalarSizeInBits(),
4879 "Intrinsic first argument's type must be smaller than result type",
4880 &FPI);
4881 }
4882 }
4883 break;
4884
4885 default:
4886 break;
4887 }
4888
4889 // If a non-metadata argument is passed in a metadata slot then the
4890 // error will be caught earlier when the incorrect argument doesn't
4891 // match the specification in the intrinsic call table. Thus, no
4892 // argument type check is needed here.
4893
4894 Assert(FPI.getExceptionBehavior().hasValue(),
4895 "invalid exception behavior argument", &FPI);
4896 if (HasRoundingMD) {
4897 Assert(FPI.getRoundingMode().hasValue(),
4898 "invalid rounding mode argument", &FPI);
4899 }
4900 }
4901
4902 void Verifier::visitDbgIntrinsic(StringRef Kind, DbgVariableIntrinsic &DII) {
4903 auto *MD = cast<MetadataAsValue>(DII.getArgOperand(0))->getMetadata();
4904 AssertDI(isa<ValueAsMetadata>(MD) ||
4905 (isa<MDNode>(MD) && !cast<MDNode>(MD)->getNumOperands()),
4906 "invalid llvm.dbg." + Kind + " intrinsic address/value", &DII, MD);
4907 AssertDI(isa<DILocalVariable>(DII.getRawVariable()),
4908 "invalid llvm.dbg." + Kind + " intrinsic variable", &DII,
4909 DII.getRawVariable());
4910 AssertDI(isa<DIExpression>(DII.getRawExpression()),
4911 "invalid llvm.dbg." + Kind + " intrinsic expression", &DII,
4912 DII.getRawExpression());
4913
4914 // Ignore broken !dbg attachments; they're checked elsewhere.
4915 if (MDNode *N = DII.getDebugLoc().getAsMDNode())
4916 if (!isa<DILocation>(N))
4917 return;
4918
4919 BasicBlock *BB = DII.getParent();
4920 Function *F = BB ? BB->getParent() : nullptr;
4921
4922 // The scopes for variables and !dbg attachments must agree.
4923 DILocalVariable *Var = DII.getVariable();
4924 DILocation *Loc = DII.getDebugLoc();
4925 AssertDI(Loc, "llvm.dbg." + Kind + " intrinsic requires a !dbg attachment",
4926 &DII, BB, F);
4927
4928 DISubprogram *VarSP = getSubprogram(Var->getRawScope());
4929 DISubprogram *LocSP = getSubprogram(Loc->getRawScope());
4930 if (!VarSP || !LocSP)
4931 return; // Broken scope chains are checked elsewhere.
4932
4933 AssertDI(VarSP == LocSP, "mismatched subprogram between llvm.dbg." + Kind +
4934 " variable and !dbg attachment",
4935 &DII, BB, F, Var, Var->getScope()->getSubprogram(), Loc,
4936 Loc->getScope()->getSubprogram());
4937
4938 // This check is redundant with one in visitLocalVariable().
4939 AssertDI(isType(Var->getRawType()), "invalid type ref", Var,
4940 Var->getRawType());
4941 verifyFnArgs(DII);
4942 }
4943
4944 void Verifier::visitDbgLabelIntrinsic(StringRef Kind, DbgLabelInst &DLI) {
4945 AssertDI(isa<DILabel>(DLI.getRawLabel()),
4946 "invalid llvm.dbg." + Kind + " intrinsic variable", &DLI,
4947 DLI.getRawLabel());
4948
4949 // Ignore broken !dbg attachments; they're checked elsewhere.
4950 if (MDNode *N = DLI.getDebugLoc().getAsMDNode())
4951 if (!isa<DILocation>(N))
4952 return;
4953
4954 BasicBlock *BB = DLI.getParent();
4955 Function *F = BB ? BB->getParent() : nullptr;
4956
4957 // The scopes for variables and !dbg attachments must agree.
4958 DILabel *Label = DLI.getLabel();
4959 DILocation *Loc = DLI.getDebugLoc();
4960 Assert(Loc, "llvm.dbg." + Kind + " intrinsic requires a !dbg attachment",
4961 &DLI, BB, F);
4962
4963 DISubprogram *LabelSP = getSubprogram(Label->getRawScope());
4964 DISubprogram *LocSP = getSubprogram(Loc->getRawScope());
4965 if (!LabelSP || !LocSP)
4966 return;
4967
4968 AssertDI(LabelSP == LocSP, "mismatched subprogram between llvm.dbg." + Kind +
4969 " label and !dbg attachment",
4970 &DLI, BB, F, Label, Label->getScope()->getSubprogram(), Loc,
4971 Loc->getScope()->getSubprogram());
4972 }
4973
4974 void Verifier::verifyFragmentExpression(const DbgVariableIntrinsic &I) {
4975 DILocalVariable *V = dyn_cast_or_null<DILocalVariable>(I.getRawVariable());
4976 DIExpression *E = dyn_cast_or_null<DIExpression>(I.getRawExpression());
4977
4978 // We don't know whether this intrinsic verified correctly.
4979 if (!V || !E || !E->isValid())
4980 return;
4981
4982 // Nothing to do if this isn't a DW_OP_LLVM_fragment expression.
4983 auto Fragment = E->getFragmentInfo();
4984 if (!Fragment)
4985 return;
4986
4987 // The frontend helps out GDB by emitting the members of local anonymous
4988 // unions as artificial local variables with shared storage. When SROA splits
4989 // the storage for artificial local variables that are smaller than the entire
4990 // union, the overhang piece will be outside of the allotted space for the
4991 // variable and this check fails.
4992 // FIXME: Remove this check as soon as clang stops doing this; it hides bugs.
4993 if (V->isArtificial())
4994 return;
4995
4996 verifyFragmentExpression(*V, *Fragment, &I);
4997 }
4998
4999 template <typename ValueOrMetadata>
5000 void Verifier::verifyFragmentExpression(const DIVariable &V,
5001 DIExpression::FragmentInfo Fragment,
5002 ValueOrMetadata *Desc) {
5003 // If there's no size, the type is broken, but that should be checked
5004 // elsewhere.
5005 auto VarSize = V.getSizeInBits();
5006 if (!VarSize)
5007 return;
5008
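// For example (hypothetical sizes): with a 64-bit variable, a fragment
// described by DIExpression(DW_OP_LLVM_fragment, 32, 32), i.e. offset 32 and
// size 32, is accepted; offset 48 / size 32 overhangs the variable, and
// offset 0 / size 64 covers the entire variable, so both are rejected below.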
5009 unsigned FragSize = Fragment.SizeInBits;
5010 unsigned FragOffset = Fragment.OffsetInBits;
5011 AssertDI(FragSize + FragOffset <= *VarSize,
5012 "fragment is larger than or outside of variable", Desc, &V);
5013 AssertDI(FragSize != *VarSize, "fragment covers entire variable", Desc, &V);
5014 }
5015
5016 void Verifier::verifyFnArgs(const DbgVariableIntrinsic &I) {
5017 // This function does not take the scope of noninlined function arguments into
5018 // account. Don't run it if the current function is nodebug, because it may
5019 // contain inlined debug intrinsics.
5020 if (!HasDebugInfo)
5021 return;
5022
5023 // For performance reasons only check non-inlined ones.
5024 if (I.getDebugLoc()->getInlinedAt())
5025 return;
5026
5027 DILocalVariable *Var = I.getVariable();
5028 AssertDI(Var, "dbg intrinsic without variable");
5029
5030 unsigned ArgNo = Var->getArg();
5031 if (!ArgNo)
5032 return;
5033
5034 // Verify there are no duplicate function argument debug info entries.
5035 // These will cause hard-to-debug assertions in the DWARF backend.
5036 if (DebugFnArgs.size() < ArgNo)
5037 DebugFnArgs.resize(ArgNo, nullptr);
5038
5039 auto *Prev = DebugFnArgs[ArgNo - 1];
5040 DebugFnArgs[ArgNo - 1] = Var;
5041 AssertDI(!Prev || (Prev == Var), "conflicting debug info for argument", &I,
5042 Prev, Var);
5043 }
5044
5045 void Verifier::verifyNotEntryValue(const DbgVariableIntrinsic &I) {
5046 DIExpression *E = dyn_cast_or_null<DIExpression>(I.getRawExpression());
5047
5048 // We don't know whether this intrinsic verified correctly.
5049 if (!E || !E->isValid())
5050 return;
5051
5052 AssertDI(!E->isEntryValue(), "Entry values are only allowed in MIR", &I);
5053 }
5054
5055 void Verifier::verifyCompileUnits() {
5056 // When more than one Module is imported into the same context, such as during
5057 // an LTO build before linking the modules, ODR type uniquing may cause types
5058 // to point to a different CU. This check does not make sense in this case.
5059 if (M.getContext().isODRUniquingDebugTypes())
5060 return;
5061 auto *CUs = M.getNamedMetadata("llvm.dbg.cu");
5062 SmallPtrSet<const Metadata *, 2> Listed;
5063 if (CUs)
5064 Listed.insert(CUs->op_begin(), CUs->op_end());
5065 for (auto *CU : CUVisited)
5066 AssertDI(Listed.count(CU), "DICompileUnit not listed in llvm.dbg.cu", CU);
5067 CUVisited.clear();
5068 }
5069
5070 void Verifier::verifyDeoptimizeCallingConvs() {
5071 if (DeoptimizeDeclarations.empty())
5072 return;
5073
5074 const Function *First = DeoptimizeDeclarations[0];
5075 for (auto *F : makeArrayRef(DeoptimizeDeclarations).slice(1)) {
5076 Assert(First->getCallingConv() == F->getCallingConv(),
5077 "All llvm.experimental.deoptimize declarations must have the same "
5078 "calling convention",
5079 First, F);
5080 }
5081 }
5082
5083 void Verifier::verifySourceDebugInfo(const DICompileUnit &U, const DIFile &F) {
5084 bool HasSource = F.getSource().hasValue();
5085 if (!HasSourceDebugInfo.count(&U))
5086 HasSourceDebugInfo[&U] = HasSource;
5087 AssertDI(HasSource == HasSourceDebugInfo[&U],
5088 "inconsistent use of embedded source");
5089 }
5090
5091 //===----------------------------------------------------------------------===//
5092 // Implement the public interfaces to this file...
5093 //===----------------------------------------------------------------------===//
5094
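// Typical usage (illustrative, caller-side): verify freshly constructed or
// transformed IR and abort on failure:
//   if (verifyFunction(*F, &errs()))
//     report_fatal_error("invalid IR produced");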
5095 bool llvm::verifyFunction(const Function &f, raw_ostream *OS) {
5096 Function &F = const_cast<Function &>(f);
5097
5098 // Don't use a raw_null_ostream. Printing IR is expensive.
5099 Verifier V(OS, /*ShouldTreatBrokenDebugInfoAsError=*/true, *f.getParent());
5100
5101 // Note that this function's return value is inverted from what you would
5102 // expect of a function called "verify".
5103 return !V.verify(F);
5104 }
5105
5106 bool llvm::verifyModule(const Module &M, raw_ostream *OS,
5107 bool *BrokenDebugInfo) {
5108 // Don't use a raw_null_ostream. Printing IR is expensive.
5109 Verifier V(OS, /*ShouldTreatBrokenDebugInfoAsError=*/!BrokenDebugInfo, M);
5110
5111 bool Broken = false;
5112 for (const Function &F : M)
5113 Broken |= !V.verify(F);
5114
5115 Broken |= !V.verify();
5116 if (BrokenDebugInfo)
5117 *BrokenDebugInfo = V.hasBrokenDebugInfo();
5118 // Note that this function's return value is inverted from what you would
5119 // expect of a function called "verify".
5120 return Broken;
5121 }
5122
5123 namespace {
5124
5125 struct VerifierLegacyPass : public FunctionPass {
5126 static char ID;
5127
5128 std::unique_ptr<Verifier> V;
5129 bool FatalErrors = true;
5130
5131 VerifierLegacyPass() : FunctionPass(ID) {
5132 initializeVerifierLegacyPassPass(*PassRegistry::getPassRegistry());
5133 }
5134 explicit VerifierLegacyPass(bool FatalErrors)
5135 : FunctionPass(ID),
5136 FatalErrors(FatalErrors) {
5137 initializeVerifierLegacyPassPass(*PassRegistry::getPassRegistry());
5138 }
5139
5140 bool doInitialization(Module &M) override {
5141 V = std::make_unique<Verifier>(
5142 &dbgs(), /*ShouldTreatBrokenDebugInfoAsError=*/false, M);
5143 return false;
5144 }
5145
5146 bool runOnFunction(Function &F) override {
5147 if (!V->verify(F) && FatalErrors) {
5148 errs() << "in function " << F.getName() << '\n';
5149 report_fatal_error("Broken function found, compilation aborted!");
5150 }
5151 return false;
5152 }
5153
5154 bool doFinalization(Module &M) override {
5155 bool HasErrors = false;
5156 for (Function &F : M)
5157 if (F.isDeclaration())
5158 HasErrors |= !V->verify(F);
5159
5160 HasErrors |= !V->verify();
5161 if (FatalErrors && (HasErrors || V->hasBrokenDebugInfo()))
5162 report_fatal_error("Broken module found, compilation aborted!");
5163 return false;
5164 }
5165
5166 void getAnalysisUsage(AnalysisUsage &AU) const override {
5167 AU.setPreservesAll();
5168 }
5169 };
5170
5171 } // end anonymous namespace
5172
5173 /// Helper to issue failure from the TBAA verification
5174 template <typename... Tys> void TBAAVerifier::CheckFailed(Tys &&... Args) {
5175 if (Diagnostic)
5176 return Diagnostic->CheckFailed(Args...);
5177 }
5178
5179 #define AssertTBAA(C, ...) \
5180 do { \
5181 if (!(C)) { \
5182 CheckFailed(__VA_ARGS__); \
5183 return false; \
5184 } \
5185 } while (false)
5186
5187 /// Verify that \p BaseNode can be used as the "base type" in the struct-path
5188 /// TBAA scheme. This means \p BaseNode is either a scalar node, or a
5189 /// struct-type node describing an aggregate data structure (like a struct).
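/// Illustrative old-format nodes (metadata numbering is hypothetical):
///   !0 = !{!"Simple C/C++ TBAA"}          ; root (fewer than two operands)
///   !1 = !{!"int", !0, i64 0}             ; scalar node
///   !2 = !{!"S", !1, i64 0, !1, i64 4}    ; struct-type node with two fields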
5190 TBAAVerifier::TBAABaseNodeSummary
5191 TBAAVerifier::verifyTBAABaseNode(Instruction &I, const MDNode *BaseNode,
5192 bool IsNewFormat) {
5193 if (BaseNode->getNumOperands() < 2) {
5194 CheckFailed("Base nodes must have at least two operands", &I, BaseNode);
5195 return {true, ~0u};
5196 }
5197
5198 auto Itr = TBAABaseNodes.find(BaseNode);
5199 if (Itr != TBAABaseNodes.end())
5200 return Itr->second;
5201
5202 auto Result = verifyTBAABaseNodeImpl(I, BaseNode, IsNewFormat);
5203 auto InsertResult = TBAABaseNodes.insert({BaseNode, Result});
5204 (void)InsertResult;
5205 assert(InsertResult.second && "We just checked!");
5206 return Result;
5207 }
5208
5209 TBAAVerifier::TBAABaseNodeSummary
5210 TBAAVerifier::verifyTBAABaseNodeImpl(Instruction &I, const MDNode *BaseNode,
5211 bool IsNewFormat) {
5212 const TBAAVerifier::TBAABaseNodeSummary InvalidNode = {true, ~0u};
5213
5214 if (BaseNode->getNumOperands() == 2) {
5215 // Scalar nodes can only be accessed at offset 0.
5216 return isValidScalarTBAANode(BaseNode)
5217 ? TBAAVerifier::TBAABaseNodeSummary({false, 0})
5218 : InvalidNode;
5219 }
5220
5221 if (IsNewFormat) {
5222 if (BaseNode->getNumOperands() % 3 != 0) {
5223 CheckFailed("Access tag nodes must have the number of operands that is a "
5224 "multiple of 3!", BaseNode);
5225 return InvalidNode;
5226 }
5227 } else {
5228 if (BaseNode->getNumOperands() % 2 != 1) {
5229 CheckFailed("Struct tag nodes must have an odd number of operands!",
5230 BaseNode);
5231 return InvalidNode;
5232 }
5233 }
5234
5235 // Check the type size field.
5236 if (IsNewFormat) {
5237 auto *TypeSizeNode = mdconst::dyn_extract_or_null<ConstantInt>(
5238 BaseNode->getOperand(1));
5239 if (!TypeSizeNode) {
5240 CheckFailed("Type size nodes must be constants!", &I, BaseNode);
5241 return InvalidNode;
5242 }
5243 }
5244
5245 // Check the type name field. In the new format it can be anything.
5246 if (!IsNewFormat && !isa<MDString>(BaseNode->getOperand(0))) {
5247 CheckFailed("Struct tag nodes have a string as their first operand",
5248 BaseNode);
5249 return InvalidNode;
5250 }
5251
5252 bool Failed = false;
5253
5254 Optional<APInt> PrevOffset;
5255 unsigned BitWidth = ~0u;
5256
5257 // We've already checked that BaseNode is not a degenerate root node with one
5258 // operand in \c verifyTBAABaseNode, so this loop should run at least once.
5259 unsigned FirstFieldOpNo = IsNewFormat ? 3 : 1;
5260 unsigned NumOpsPerField = IsNewFormat ? 3 : 2;
5261 for (unsigned Idx = FirstFieldOpNo; Idx < BaseNode->getNumOperands();
5262 Idx += NumOpsPerField) {
5263 const MDOperand &FieldTy = BaseNode->getOperand(Idx);
5264 const MDOperand &FieldOffset = BaseNode->getOperand(Idx + 1);
5265 if (!isa<MDNode>(FieldTy)) {
5266 CheckFailed("Incorrect field entry in struct type node!", &I, BaseNode);
5267 Failed = true;
5268 continue;
5269 }
5270
5271 auto *OffsetEntryCI =
5272 mdconst::dyn_extract_or_null<ConstantInt>(FieldOffset);
5273 if (!OffsetEntryCI) {
5274 CheckFailed("Offset entries must be constants!", &I, BaseNode);
5275 Failed = true;
5276 continue;
5277 }
5278
5279 if (BitWidth == ~0u)
5280 BitWidth = OffsetEntryCI->getBitWidth();
5281
5282 if (OffsetEntryCI->getBitWidth() != BitWidth) {
5283 CheckFailed(
5284 "Bitwidth between the offsets and struct type entries must match", &I,
5285 BaseNode);
5286 Failed = true;
5287 continue;
5288 }
5289
5290 // NB! As far as I can tell, we generate a non-strictly increasing offset
5291 // sequence only from structs that have zero size bit fields. When
5292 // recursing into a contained struct in \c getFieldNodeFromTBAABaseNode we
5293 // pick the field lexically the latest in struct type metadata node. This
5294 // mirrors the actual behavior of the alias analysis implementation.
5295 bool IsAscending =
5296 !PrevOffset || PrevOffset->ule(OffsetEntryCI->getValue());
5297
5298 if (!IsAscending) {
5299 CheckFailed("Offsets must be increasing!", &I, BaseNode);
5300 Failed = true;
5301 }
5302
5303 PrevOffset = OffsetEntryCI->getValue();
5304
5305 if (IsNewFormat) {
5306 auto *MemberSizeNode = mdconst::dyn_extract_or_null<ConstantInt>(
5307 BaseNode->getOperand(Idx + 2));
5308 if (!MemberSizeNode) {
5309 CheckFailed("Member size entries must be constants!", &I, BaseNode);
5310 Failed = true;
5311 continue;
5312 }
5313 }
5314 }
5315
5316 return Failed ? InvalidNode
5317 : TBAAVerifier::TBAABaseNodeSummary(false, BitWidth);
5318 }
5319
5320 static bool IsRootTBAANode(const MDNode *MD) {
5321 return MD->getNumOperands() < 2;
5322 }
5323
5324 static bool IsScalarTBAANodeImpl(const MDNode *MD,
5325 SmallPtrSetImpl<const MDNode *> &Visited) {
5326 if (MD->getNumOperands() != 2 && MD->getNumOperands() != 3)
5327 return false;
5328
5329 if (!isa<MDString>(MD->getOperand(0)))
5330 return false;
5331
5332 if (MD->getNumOperands() == 3) {
5333 auto *Offset = mdconst::dyn_extract<ConstantInt>(MD->getOperand(2));
5334 if (!(Offset && Offset->isZero() && isa<MDString>(MD->getOperand(0))))
5335 return false;
5336 }
5337
5338 auto *Parent = dyn_cast_or_null<MDNode>(MD->getOperand(1));
5339 return Parent && Visited.insert(Parent).second &&
5340 (IsRootTBAANode(Parent) || IsScalarTBAANodeImpl(Parent, Visited));
5341 }
5342
5343 bool TBAAVerifier::isValidScalarTBAANode(const MDNode *MD) {
5344 auto ResultIt = TBAAScalarNodes.find(MD);
5345 if (ResultIt != TBAAScalarNodes.end())
5346 return ResultIt->second;
5347
5348 SmallPtrSet<const MDNode *, 4> Visited;
5349 bool Result = IsScalarTBAANodeImpl(MD, Visited);
5350 auto InsertResult = TBAAScalarNodes.insert({MD, Result});
5351 (void)InsertResult;
5352 assert(InsertResult.second && "Just checked!");
5353
5354 return Result;
5355 }
5356
5357 /// Returns the field node at the offset \p Offset in \p BaseNode. Update \p
5358 /// Offset in place to be the offset within the field node returned.
5359 ///
5360 /// We assume we've okayed \p BaseNode via \c verifyTBAABaseNode.
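/// For example (hypothetical node): given a struct-type node with fields at
/// offsets 0 and 8 and an incoming Offset of 12, the field at offset 8 is
/// returned and Offset is updated to 4.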
5361 MDNode *TBAAVerifier::getFieldNodeFromTBAABaseNode(Instruction &I,
5362 const MDNode *BaseNode,
5363 APInt &Offset,
5364 bool IsNewFormat) {
5365 assert(BaseNode->getNumOperands() >= 2 && "Invalid base node!");
5366
5367 // Scalar nodes have only one possible "field" -- their parent in the access
5368 // hierarchy. Offset must be zero at this point, but our caller is supposed
5369 // to Assert that.
5370 if (BaseNode->getNumOperands() == 2)
5371 return cast<MDNode>(BaseNode->getOperand(1));
5372
5373 unsigned FirstFieldOpNo = IsNewFormat ? 3 : 1;
5374 unsigned NumOpsPerField = IsNewFormat ? 3 : 2;
5375 for (unsigned Idx = FirstFieldOpNo; Idx < BaseNode->getNumOperands();
5376 Idx += NumOpsPerField) {
5377 auto *OffsetEntryCI =
5378 mdconst::extract<ConstantInt>(BaseNode->getOperand(Idx + 1));
5379 if (OffsetEntryCI->getValue().ugt(Offset)) {
5380 if (Idx == FirstFieldOpNo) {
5381 CheckFailed("Could not find TBAA parent in struct type node", &I,
5382 BaseNode, &Offset);
5383 return nullptr;
5384 }
5385
5386 unsigned PrevIdx = Idx - NumOpsPerField;
5387 auto *PrevOffsetEntryCI =
5388 mdconst::extract<ConstantInt>(BaseNode->getOperand(PrevIdx + 1));
5389 Offset -= PrevOffsetEntryCI->getValue();
5390 return cast<MDNode>(BaseNode->getOperand(PrevIdx));
5391 }
5392 }
5393
5394 unsigned LastIdx = BaseNode->getNumOperands() - NumOpsPerField;
5395 auto *LastOffsetEntryCI = mdconst::extract<ConstantInt>(
5396 BaseNode->getOperand(LastIdx + 1));
5397 Offset -= LastOffsetEntryCI->getValue();
5398 return cast<MDNode>(BaseNode->getOperand(LastIdx));
5399 }
5400
5401 static bool isNewFormatTBAATypeNode(llvm::MDNode *Type) {
5402 if (!Type || Type->getNumOperands() < 3)
5403 return false;
5404
5405 // In the new format type nodes shall have a reference to the parent type as
5406 // its first operand.
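// Illustrative new-format type nodes (metadata numbering is hypothetical):
//   !1 = !{!0, i64 4, !"int"}                      ; scalar-like type
//   !2 = !{!0, i64 8, !"S", !1, i64 0, i64 4, !1, i64 4, i64 4}
// i.e. parent, size, name, then (field type, offset, size) triples.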
5407 MDNode *Parent = dyn_cast_or_null<MDNode>(Type->getOperand(0));
5408 if (!Parent)
5409 return false;
5410
5411 return true;
5412 }
5413
5414 bool TBAAVerifier::visitTBAAMetadata(Instruction &I, const MDNode *MD) {
5415 AssertTBAA(isa<LoadInst>(I) || isa<StoreInst>(I) || isa<CallInst>(I) ||
5416 isa<VAArgInst>(I) || isa<AtomicRMWInst>(I) ||
5417 isa<AtomicCmpXchgInst>(I),
5418 "This instruction shall not have a TBAA access tag!", &I);
5419
5420 bool IsStructPathTBAA =
5421 isa<MDNode>(MD->getOperand(0)) && MD->getNumOperands() >= 3;
5422
5423 AssertTBAA(
5424 IsStructPathTBAA,
5425 "Old-style TBAA is no longer allowed, use struct-path TBAA instead", &I);
5426
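// Access tags have the shape (illustrative):
//   old format: !{!<base ty>, !<access ty>, i64 <offset> [, i64 <immutable>]}
//   new format: !{!<base ty>, !<access ty>, i64 <offset>, i64 <size>
//               [, i64 <immutable>]}
// The checks below enforce the operand counts and the constant operands.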
5427 MDNode *BaseNode = dyn_cast_or_null<MDNode>(MD->getOperand(0));
5428 MDNode *AccessType = dyn_cast_or_null<MDNode>(MD->getOperand(1));
5429
5430 bool IsNewFormat = isNewFormatTBAATypeNode(AccessType);
5431
5432 if (IsNewFormat) {
5433 AssertTBAA(MD->getNumOperands() == 4 || MD->getNumOperands() == 5,
5434 "Access tag metadata must have either 4 or 5 operands", &I, MD);
5435 } else {
5436 AssertTBAA(MD->getNumOperands() < 5,
5437 "Struct tag metadata must have either 3 or 4 operands", &I, MD);
5438 }
5439
5440 // Check the access size field.
5441 if (IsNewFormat) {
5442 auto *AccessSizeNode = mdconst::dyn_extract_or_null<ConstantInt>(
5443 MD->getOperand(3));
5444 AssertTBAA(AccessSizeNode, "Access size field must be a constant", &I, MD);
5445 }
5446
5447 // Check the immutability flag.
5448 unsigned ImmutabilityFlagOpNo = IsNewFormat ? 4 : 3;
5449 if (MD->getNumOperands() == ImmutabilityFlagOpNo + 1) {
5450 auto *IsImmutableCI = mdconst::dyn_extract_or_null<ConstantInt>(
5451 MD->getOperand(ImmutabilityFlagOpNo));
5452 AssertTBAA(IsImmutableCI,
5453 "Immutability tag on struct tag metadata must be a constant",
5454 &I, MD);
5455 AssertTBAA(
5456 IsImmutableCI->isZero() || IsImmutableCI->isOne(),
5457 "Immutability part of the struct tag metadata must be either 0 or 1",
5458 &I, MD);
5459 }
5460
5461 AssertTBAA(BaseNode && AccessType,
5462 "Malformed struct tag metadata: base and access-type "
5463 "should be non-null and point to Metadata nodes",
5464 &I, MD, BaseNode, AccessType);
5465
5466 if (!IsNewFormat) {
5467 AssertTBAA(isValidScalarTBAANode(AccessType),
5468 "Access type node must be a valid scalar type", &I, MD,
5469 AccessType);
5470 }
5471
5472 auto *OffsetCI = mdconst::dyn_extract_or_null<ConstantInt>(MD->getOperand(2));
5473 AssertTBAA(OffsetCI, "Offset must be constant integer", &I, MD);
5474
5475 APInt Offset = OffsetCI->getValue();
5476 bool SeenAccessTypeInPath = false;
5477
5478 SmallPtrSet<MDNode *, 4> StructPath;
5479
5480 for (/* empty */; BaseNode && !IsRootTBAANode(BaseNode);
5481 BaseNode = getFieldNodeFromTBAABaseNode(I, BaseNode, Offset,
5482 IsNewFormat)) {
5483 if (!StructPath.insert(BaseNode).second) {
5484 CheckFailed("Cycle detected in struct path", &I, MD);
5485 return false;
5486 }
5487
5488 bool Invalid;
5489 unsigned BaseNodeBitWidth;
5490 std::tie(Invalid, BaseNodeBitWidth) = verifyTBAABaseNode(I, BaseNode,
5491 IsNewFormat);
5492
5493 // If the base node is invalid in itself, then we've already printed all the
5494 // errors we wanted to print.
5495 if (Invalid)
5496 return false;
5497
5498 SeenAccessTypeInPath |= BaseNode == AccessType;
5499
5500 if (isValidScalarTBAANode(BaseNode) || BaseNode == AccessType)
5501 AssertTBAA(Offset == 0, "Offset not zero at the point of scalar access",
5502 &I, MD, &Offset);
5503
5504 AssertTBAA(BaseNodeBitWidth == Offset.getBitWidth() ||
5505 (BaseNodeBitWidth == 0 && Offset == 0) ||
5506 (IsNewFormat && BaseNodeBitWidth == ~0u),
5507 "Access bit-width not the same as description bit-width", &I, MD,
5508 BaseNodeBitWidth, Offset.getBitWidth());
5509
5510 if (IsNewFormat && SeenAccessTypeInPath)
5511 break;
5512 }
5513
5514 AssertTBAA(SeenAccessTypeInPath, "Did not see access type in access path!",
5515 &I, MD);
5516 return true;
5517 }
5518
5519 char VerifierLegacyPass::ID = 0;
5520 INITIALIZE_PASS(VerifierLegacyPass, "verify", "Module Verifier", false, false)
5521
5522 FunctionPass *llvm::createVerifierPass(bool FatalErrors) {
5523 return new VerifierLegacyPass(FatalErrors);
5524 }
5525
5526 AnalysisKey VerifierAnalysis::Key;
5527 VerifierAnalysis::Result VerifierAnalysis::run(Module &M,
5528 ModuleAnalysisManager &) {
5529 Result Res;
5530 Res.IRBroken = llvm::verifyModule(M, &dbgs(), &Res.DebugInfoBroken);
5531 return Res;
5532 }
5533
5534 VerifierAnalysis::Result VerifierAnalysis::run(Function &F,
5535 FunctionAnalysisManager &) {
5536 return { llvm::verifyFunction(F, &dbgs()), false };
5537 }
5538
5539 PreservedAnalyses VerifierPass::run(Module &M, ModuleAnalysisManager &AM) {
5540 auto Res = AM.getResult<VerifierAnalysis>(M);
5541 if (FatalErrors && (Res.IRBroken || Res.DebugInfoBroken))
5542 report_fatal_error("Broken module found, compilation aborted!");
5543
5544 return PreservedAnalyses::all();
5545 }
5546
5547 PreservedAnalyses VerifierPass::run(Function &F, FunctionAnalysisManager &AM) {
5548 auto res = AM.getResult<VerifierAnalysis>(F);
5549 if (res.IRBroken && FatalErrors)
5550 report_fatal_error("Broken function found, compilation aborted!");
5551
5552 return PreservedAnalyses::all();
5553 }
5554