Verifier.cpp revision 327952
//===-- Verifier.cpp - Implement the Module Verifier ---------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the function verifier interface, which can be used for some
// sanity checking of input to the system.
//
// Note that this does not provide full `Java style' security and verifications;
// instead, it just tries to ensure that code is well-formed.
//
//  * Both of a binary operator's parameters are of the same type
//  * Verify that the indices of mem access instructions match other operands
//  * Verify that arithmetic and other things are only performed on first-class
//    types, e.g. that shifts & logicals only happen on integrals.
//  * All of the constants in a switch statement are of the correct type
//  * The code is in valid SSA form
//  * It should be illegal to put a label into any other type (like a structure)
//    or to return one. [except constant arrays!]
//  * Only phi nodes can be self referential: 'add i32 %0, %0 ; <int>:0' is bad
//  * PHI nodes must have an entry for each predecessor, with no extras.
//  * PHI nodes must be the first thing in a basic block, all grouped together
//  * PHI nodes must have at least one entry
//  * All basic blocks should only end with terminator insts, not contain them
//  * The entry node to a function must not have predecessors
//  * All Instructions must be embedded into a basic block
//  * Functions cannot take a void-typed parameter
//  * Verify that a function's argument list agrees with its declared type.
//  * It is illegal to specify a name for a void value.
//  * It is illegal to have an internal global value with no initializer
//  * It is illegal to have a ret instruction that returns a value that does not
//    agree with the function return value type.
//  * Function call argument types match the function prototype
//  * A landing pad is defined by a landingpad instruction, and can be jumped to
//    only by the unwind edge of an invoke instruction.
//  * A landingpad instruction must be the first non-PHI instruction in the
//    block.
//  * Landingpad instructions must be in a function with a personality function.
//  * All other things that are tested by asserts spread about the code...
//
//===----------------------------------------------------------------------===//
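
// Illustrative usage sketch (assumptions: only the entry points declared in
// llvm/IR/Verifier.h are used; `checkIR` is a hypothetical helper shown for
// exposition). Both verifyModule() and verifyFunction() return true when the
// IR is broken and print diagnostics to the stream, if one is given:
//
//   #include "llvm/IR/Verifier.h"
//   #include "llvm/Support/raw_ostream.h"
//
//   static bool checkIR(llvm::Module &M, llvm::Function &F) {
//     bool ModuleBroken = llvm::verifyModule(M, &llvm::errs());
//     bool FunctionBroken = llvm::verifyFunction(F, &llvm::errs());
//     return !ModuleBroken && !FunctionBroken;
//   }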

#include "llvm/IR/Verifier.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringMap.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Twine.h"
#include "llvm/ADT/ilist.h"
#include "llvm/BinaryFormat/Dwarf.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/Comdat.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/ConstantRange.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfo.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/InstVisitor.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/ModuleSlotTracker.h"
#include "llvm/IR/PassManager.h"
#include "llvm/IR/Statepoint.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Use.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/Pass.h"
#include "llvm/Support/AtomicOrdering.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <memory>
#include <string>
#include <utility>

using namespace llvm;

namespace llvm {

struct VerifierSupport {
  raw_ostream *OS;
  const Module &M;
  ModuleSlotTracker MST;
  const DataLayout &DL;
  LLVMContext &Context;

  /// Track the brokenness of the module while recursively visiting.
  bool Broken = false;
  /// Broken debug info can be "recovered" from by stripping the debug info.
  bool BrokenDebugInfo = false;
  /// Whether to treat broken debug info as an error.
  bool TreatBrokenDebugInfoAsError = true;

  explicit VerifierSupport(raw_ostream *OS, const Module &M)
      : OS(OS), M(M), MST(&M), DL(M.getDataLayout()), Context(M.getContext()) {}

private:
  void Write(const Module *M) {
    *OS << "; ModuleID = '" << M->getModuleIdentifier() << "'\n";
  }

  void Write(const Value *V) {
    if (!V)
      return;
    if (isa<Instruction>(V)) {
      V->print(*OS, MST);
      *OS << '\n';
    } else {
      V->printAsOperand(*OS, true, MST);
      *OS << '\n';
    }
  }

  void Write(ImmutableCallSite CS) {
    Write(CS.getInstruction());
  }

  void Write(const Metadata *MD) {
    if (!MD)
      return;
    MD->print(*OS, MST, &M);
    *OS << '\n';
  }

  template <class T> void Write(const MDTupleTypedArrayWrapper<T> &MD) {
    Write(MD.get());
  }

  void Write(const NamedMDNode *NMD) {
    if (!NMD)
      return;
    NMD->print(*OS, MST);
    *OS << '\n';
  }

  void Write(Type *T) {
    if (!T)
      return;
    *OS << ' ' << *T;
  }

  void Write(const Comdat *C) {
    if (!C)
      return;
    *OS << *C;
  }

  void Write(const APInt *AI) {
    if (!AI)
      return;
    *OS << *AI << '\n';
  }

  void Write(const unsigned i) { *OS << i << '\n'; }

  template <typename T> void Write(ArrayRef<T> Vs) {
    for (const T &V : Vs)
      Write(V);
  }

  template <typename T1, typename... Ts>
  void WriteTs(const T1 &V1, const Ts &... Vs) {
    Write(V1);
    WriteTs(Vs...);
  }

  template <typename... Ts> void WriteTs() {}

public:
  /// \brief A check failed, so print out the condition and the message.
  ///
  /// This provides a nice place to put a breakpoint if you want to see why
  /// something is not correct.
  void CheckFailed(const Twine &Message) {
    if (OS)
      *OS << Message << '\n';
    Broken = true;
  }

  /// \brief A check failed (with values to print).
  ///
  /// This calls the Message-only version so that the above is easier to set a
  /// breakpoint on.
  template <typename T1, typename... Ts>
  void CheckFailed(const Twine &Message, const T1 &V1, const Ts &... Vs) {
    CheckFailed(Message);
    if (OS)
      WriteTs(V1, Vs...);
  }

  /// A debug info check failed.
  void DebugInfoCheckFailed(const Twine &Message) {
    if (OS)
      *OS << Message << '\n';
    Broken |= TreatBrokenDebugInfoAsError;
    BrokenDebugInfo = true;
  }

  /// A debug info check failed (with values to print).
  template <typename T1, typename... Ts>
  void DebugInfoCheckFailed(const Twine &Message, const T1 &V1,
                            const Ts &... Vs) {
    DebugInfoCheckFailed(Message);
    if (OS)
      WriteTs(V1, Vs...);
  }
};

} // namespace llvm

namespace {

class Verifier : public InstVisitor<Verifier>, VerifierSupport {
  friend class InstVisitor<Verifier>;

  DominatorTree DT;

  /// \brief When verifying a basic block, keep track of all of the
  /// instructions we have seen so far.
  ///
  /// This allows us to do efficient dominance checks for the case when an
  /// instruction has an operand that is an instruction in the same block.
  SmallPtrSet<Instruction *, 16> InstsInThisBlock;

  /// \brief Keep track of the metadata nodes that have been checked already.
  SmallPtrSet<const Metadata *, 32> MDNodes;

  /// Keep track which DISubprogram is attached to which function.
  DenseMap<const DISubprogram *, const Function *> DISubprogramAttachments;

  /// Track all DICompileUnits visited.
  SmallPtrSet<const Metadata *, 2> CUVisited;

  /// \brief The result type for a landingpad.
  Type *LandingPadResultTy;

  /// \brief Whether we've seen a call to @llvm.localescape in this function
  /// already.
  bool SawFrameEscape;

  /// Whether the current function has a DISubprogram attached to it.
  bool HasDebugInfo = false;

  /// Stores the count of how many objects were passed to llvm.localescape for a
  /// given function and the largest index passed to llvm.localrecover.
  DenseMap<Function *, std::pair<unsigned, unsigned>> FrameEscapeInfo;

  // Maps catchswitches and cleanuppads that unwind to siblings to the
  // terminators that indicate the unwind, used to detect cycles therein.
  MapVector<Instruction *, TerminatorInst *> SiblingFuncletInfo;

  /// Cache of constants visited in search of ConstantExprs.
  SmallPtrSet<const Constant *, 32> ConstantExprVisited;

  /// Cache of declarations of the llvm.experimental.deoptimize.<ty> intrinsic.
  SmallVector<const Function *, 4> DeoptimizeDeclarations;

  // Verify that this GlobalValue is only used in this module.
  // This map is used to avoid visiting uses twice. We can arrive at a user
  // twice if it has multiple operands. In particular, for very large constant
  // expressions, we can arrive at a particular user many times.
  SmallPtrSet<const Value *, 32> GlobalValueVisited;

  // Keeps track of duplicate function argument debug info.
  SmallVector<const DILocalVariable *, 16> DebugFnArgs;

  TBAAVerifier TBAAVerifyHelper;

  void checkAtomicMemAccessSize(Type *Ty, const Instruction *I);

public:
  explicit Verifier(raw_ostream *OS, bool ShouldTreatBrokenDebugInfoAsError,
                    const Module &M)
      : VerifierSupport(OS, M), LandingPadResultTy(nullptr),
        SawFrameEscape(false), TBAAVerifyHelper(this) {
    TreatBrokenDebugInfoAsError = ShouldTreatBrokenDebugInfoAsError;
  }

  bool hasBrokenDebugInfo() const { return BrokenDebugInfo; }

  bool verify(const Function &F) {
    assert(F.getParent() == &M &&
           "An instance of this class only works with a specific module!");

    // First ensure the function is well-enough formed to compute dominance
    // information, and directly compute a dominance tree. We don't rely on the
    // pass manager to provide this as it isolates us from a potentially
    // out-of-date dominator tree and makes it significantly more complex to run
    // this code outside of a pass manager.
    // FIXME: It's really gross that we have to cast away constness here.
    if (!F.empty())
      DT.recalculate(const_cast<Function &>(F));

    for (const BasicBlock &BB : F) {
      if (!BB.empty() && BB.back().isTerminator())
        continue;

      if (OS) {
        *OS << "Basic Block in function '" << F.getName()
            << "' does not have terminator!\n";
        BB.printAsOperand(*OS, true, MST);
        *OS << "\n";
      }
      return false;
    }

    Broken = false;
    // FIXME: We strip const here because the inst visitor strips const.
    visit(const_cast<Function &>(F));
    verifySiblingFuncletUnwinds();
    InstsInThisBlock.clear();
    DebugFnArgs.clear();
    LandingPadResultTy = nullptr;
    SawFrameEscape = false;
    SiblingFuncletInfo.clear();

    return !Broken;
  }

  /// Verify the module that this instance of \c Verifier was initialized with.
  bool verify() {
    Broken = false;

    // Collect all declarations of the llvm.experimental.deoptimize intrinsic.
    for (const Function &F : M)
      if (F.getIntrinsicID() == Intrinsic::experimental_deoptimize)
        DeoptimizeDeclarations.push_back(&F);

    // Now that we've visited every function, verify that we never asked to
    // recover a frame index that wasn't escaped.
    verifyFrameRecoverIndices();
    for (const GlobalVariable &GV : M.globals())
      visitGlobalVariable(GV);

    for (const GlobalAlias &GA : M.aliases())
      visitGlobalAlias(GA);

    for (const NamedMDNode &NMD : M.named_metadata())
      visitNamedMDNode(NMD);

    for (const StringMapEntry<Comdat> &SMEC : M.getComdatSymbolTable())
      visitComdat(SMEC.getValue());

    visitModuleFlags(M);
    visitModuleIdents(M);

    verifyCompileUnits();

    verifyDeoptimizeCallingConvs();
    DISubprogramAttachments.clear();
    return !Broken;
  }

private:
  // Verification methods...
  void visitGlobalValue(const GlobalValue &GV);
  void visitGlobalVariable(const GlobalVariable &GV);
  void visitGlobalAlias(const GlobalAlias &GA);
  void visitAliaseeSubExpr(const GlobalAlias &A, const Constant &C);
  void visitAliaseeSubExpr(SmallPtrSetImpl<const GlobalAlias *> &Visited,
                           const GlobalAlias &A, const Constant &C);
  void visitNamedMDNode(const NamedMDNode &NMD);
  void visitMDNode(const MDNode &MD);
  void visitMetadataAsValue(const MetadataAsValue &MD, Function *F);
  void visitValueAsMetadata(const ValueAsMetadata &MD, Function *F);
  void visitComdat(const Comdat &C);
  void visitModuleIdents(const Module &M);
  void visitModuleFlags(const Module &M);
  void visitModuleFlag(const MDNode *Op,
                       DenseMap<const MDString *, const MDNode *> &SeenIDs,
                       SmallVectorImpl<const MDNode *> &Requirements);
  void visitFunction(const Function &F);
  void visitBasicBlock(BasicBlock &BB);
  void visitRangeMetadata(Instruction &I, MDNode *Range, Type *Ty);
  void visitDereferenceableMetadata(Instruction &I, MDNode *MD);

  template <class Ty> bool isValidMetadataArray(const MDTuple &N);
#define HANDLE_SPECIALIZED_MDNODE_LEAF(CLASS) void visit##CLASS(const CLASS &N);
#include "llvm/IR/Metadata.def"
  void visitDIScope(const DIScope &N);
  void visitDIVariable(const DIVariable &N);
  void visitDILexicalBlockBase(const DILexicalBlockBase &N);
  void visitDITemplateParameter(const DITemplateParameter &N);

  void visitTemplateParams(const MDNode &N, const Metadata &RawParams);

  // InstVisitor overrides...
  using InstVisitor<Verifier>::visit;
  void visit(Instruction &I);

  void visitTruncInst(TruncInst &I);
  void visitZExtInst(ZExtInst &I);
  void visitSExtInst(SExtInst &I);
  void visitFPTruncInst(FPTruncInst &I);
  void visitFPExtInst(FPExtInst &I);
  void visitFPToUIInst(FPToUIInst &I);
  void visitFPToSIInst(FPToSIInst &I);
  void visitUIToFPInst(UIToFPInst &I);
  void visitSIToFPInst(SIToFPInst &I);
  void visitIntToPtrInst(IntToPtrInst &I);
  void visitPtrToIntInst(PtrToIntInst &I);
  void visitBitCastInst(BitCastInst &I);
  void visitAddrSpaceCastInst(AddrSpaceCastInst &I);
  void visitPHINode(PHINode &PN);
  void visitBinaryOperator(BinaryOperator &B);
  void visitICmpInst(ICmpInst &IC);
  void visitFCmpInst(FCmpInst &FC);
  void visitExtractElementInst(ExtractElementInst &EI);
  void visitInsertElementInst(InsertElementInst &EI);
  void visitShuffleVectorInst(ShuffleVectorInst &EI);
  void visitVAArgInst(VAArgInst &VAA) { visitInstruction(VAA); }
  void visitCallInst(CallInst &CI);
  void visitInvokeInst(InvokeInst &II);
  void visitGetElementPtrInst(GetElementPtrInst &GEP);
  void visitLoadInst(LoadInst &LI);
  void visitStoreInst(StoreInst &SI);
  void verifyDominatesUse(Instruction &I, unsigned i);
  void visitInstruction(Instruction &I);
  void visitTerminatorInst(TerminatorInst &I);
  void visitBranchInst(BranchInst &BI);
  void visitReturnInst(ReturnInst &RI);
  void visitSwitchInst(SwitchInst &SI);
  void visitIndirectBrInst(IndirectBrInst &BI);
  void visitSelectInst(SelectInst &SI);
  void visitUserOp1(Instruction &I);
  void visitUserOp2(Instruction &I) { visitUserOp1(I); }
  void visitIntrinsicCallSite(Intrinsic::ID ID, CallSite CS);
  void visitConstrainedFPIntrinsic(ConstrainedFPIntrinsic &FPI);
  void visitDbgIntrinsic(StringRef Kind, DbgInfoIntrinsic &DII);
  void visitAtomicCmpXchgInst(AtomicCmpXchgInst &CXI);
  void visitAtomicRMWInst(AtomicRMWInst &RMWI);
  void visitFenceInst(FenceInst &FI);
  void visitAllocaInst(AllocaInst &AI);
  void visitExtractValueInst(ExtractValueInst &EVI);
  void visitInsertValueInst(InsertValueInst &IVI);
  void visitEHPadPredecessors(Instruction &I);
  void visitLandingPadInst(LandingPadInst &LPI);
  void visitResumeInst(ResumeInst &RI);
  void visitCatchPadInst(CatchPadInst &CPI);
  void visitCatchReturnInst(CatchReturnInst &CatchReturn);
  void visitCleanupPadInst(CleanupPadInst &CPI);
  void visitFuncletPadInst(FuncletPadInst &FPI);
  void visitCatchSwitchInst(CatchSwitchInst &CatchSwitch);
  void visitCleanupReturnInst(CleanupReturnInst &CRI);

  void verifyCallSite(CallSite CS);
  void verifySwiftErrorCallSite(CallSite CS, const Value *SwiftErrorVal);
  void verifySwiftErrorValue(const Value *SwiftErrorVal);
  void verifyMustTailCall(CallInst &CI);
  bool performTypeCheck(Intrinsic::ID ID, Function *F, Type *Ty, int VT,
                        unsigned ArgNo, std::string &Suffix);
  bool verifyAttributeCount(AttributeList Attrs, unsigned Params);
  void verifyAttributeTypes(AttributeSet Attrs, bool IsFunction,
                            const Value *V);
  void verifyParameterAttrs(AttributeSet Attrs, Type *Ty, const Value *V);
  void verifyFunctionAttrs(FunctionType *FT, AttributeList Attrs,
                           const Value *V);
  void verifyFunctionMetadata(ArrayRef<std::pair<unsigned, MDNode *>> MDs);

  void visitConstantExprsRecursively(const Constant *EntryC);
  void visitConstantExpr(const ConstantExpr *CE);
  void verifyStatepoint(ImmutableCallSite CS);
  void verifyFrameRecoverIndices();
  void verifySiblingFuncletUnwinds();

  void verifyFragmentExpression(const DbgInfoIntrinsic &I);
  template <typename ValueOrMetadata>
  void verifyFragmentExpression(const DIVariable &V,
                                DIExpression::FragmentInfo Fragment,
                                ValueOrMetadata *Desc);
  void verifyFnArgs(const DbgInfoIntrinsic &I);

  /// Module-level debug info verification...
  void verifyCompileUnits();

  /// Module-level verification that all @llvm.experimental.deoptimize
  /// declarations share the same calling convention.
  void verifyDeoptimizeCallingConvs();
};

} // end anonymous namespace

/// We know that cond should be true; if not, print an error message.
#define Assert(C, ...) \
  do { if (!(C)) { CheckFailed(__VA_ARGS__); return; } } while (false)

/// We know that a debug info condition should be true; if not, print
/// an error message.
#define AssertDI(C, ...) \
  do { if (!(C)) { DebugInfoCheckFailed(__VA_ARGS__); return; } } while (false)
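
// Illustrative usage sketch (`visitFooInst` and `FooInst` are hypothetical and
// shown only for exposition): a failed Assert reports the message through
// CheckFailed and then returns from the enclosing method, so later checks that
// depend on the failed condition are skipped.
//
//   void Verifier::visitFooInst(FooInst &I) {
//     Assert(I.getType()->isIntegerTy(), "foo must produce an integer", &I);
//     // Not reached if the check above failed.
//   }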

void Verifier::visit(Instruction &I) {
  for (unsigned i = 0, e = I.getNumOperands(); i != e; ++i)
    Assert(I.getOperand(i) != nullptr, "Operand is null", &I);
  InstVisitor<Verifier>::visit(I);
}

// Helper to recursively iterate over indirect users. By
// returning false, the callback can ask to stop recursing
// further.
static void forEachUser(const Value *User,
                        SmallPtrSet<const Value *, 32> &Visited,
                        llvm::function_ref<bool(const Value *)> Callback) {
  if (!Visited.insert(User).second)
    return;
  for (const Value *TheNextUser : User->materialized_users())
    if (Callback(TheNextUser))
      forEachUser(TheNextUser, Visited, Callback);
}

void Verifier::visitGlobalValue(const GlobalValue &GV) {
  Assert(!GV.isDeclaration() || GV.hasValidDeclarationLinkage(),
         "Global is external, but doesn't have external or weak linkage!", &GV);

  Assert(GV.getAlignment() <= Value::MaximumAlignment,
         "huge alignment values are unsupported", &GV);
  Assert(!GV.hasAppendingLinkage() || isa<GlobalVariable>(GV),
         "Only global variables can have appending linkage!", &GV);

  if (GV.hasAppendingLinkage()) {
    const GlobalVariable *GVar = dyn_cast<GlobalVariable>(&GV);
    Assert(GVar && GVar->getValueType()->isArrayTy(),
           "Only global arrays can have appending linkage!", GVar);
  }

  if (GV.isDeclarationForLinker())
    Assert(!GV.hasComdat(), "Declaration may not be in a Comdat!", &GV);

  if (GV.hasDLLImportStorageClass())
    Assert(!GV.isDSOLocal(),
           "GlobalValue with DLLImport Storage is dso_local!", &GV);

  forEachUser(&GV, GlobalValueVisited, [&](const Value *V) -> bool {
    if (const Instruction *I = dyn_cast<Instruction>(V)) {
      if (!I->getParent() || !I->getParent()->getParent())
        CheckFailed("Global is referenced by parentless instruction!", &GV, &M,
                    I);
      else if (I->getParent()->getParent()->getParent() != &M)
        CheckFailed("Global is referenced in a different module!", &GV, &M, I,
                    I->getParent()->getParent(),
                    I->getParent()->getParent()->getParent());
      return false;
    } else if (const Function *F = dyn_cast<Function>(V)) {
      if (F->getParent() != &M)
        CheckFailed("Global is used by function in a different module", &GV, &M,
                    F, F->getParent());
      return false;
    }
    return true;
  });
}

void Verifier::visitGlobalVariable(const GlobalVariable &GV) {
  if (GV.hasInitializer()) {
    Assert(GV.getInitializer()->getType() == GV.getValueType(),
           "Global variable initializer type does not match global "
           "variable type!",
           &GV);
    // If the global has common linkage, it must have a zero initializer and
    // cannot be constant.
    if (GV.hasCommonLinkage()) {
      Assert(GV.getInitializer()->isNullValue(),
             "'common' global must have a zero initializer!", &GV);
      Assert(!GV.isConstant(), "'common' global may not be marked constant!",
             &GV);
      Assert(!GV.hasComdat(), "'common' global may not be in a Comdat!", &GV);
    }
  }

  if (GV.hasName() && (GV.getName() == "llvm.global_ctors" ||
                       GV.getName() == "llvm.global_dtors")) {
    Assert(!GV.hasInitializer() || GV.hasAppendingLinkage(),
           "invalid linkage for intrinsic global variable", &GV);
    // Don't worry about emitting an error for it not being an array;
    // visitGlobalValue will complain about appending to a non-array.
    if (ArrayType *ATy = dyn_cast<ArrayType>(GV.getValueType())) {
      StructType *STy = dyn_cast<StructType>(ATy->getElementType());
      PointerType *FuncPtrTy =
          FunctionType::get(Type::getVoidTy(Context), false)->getPointerTo();
      // FIXME: Reject the 2-field form in LLVM 4.0.
      Assert(STy &&
                 (STy->getNumElements() == 2 || STy->getNumElements() == 3) &&
                 STy->getTypeAtIndex(0u)->isIntegerTy(32) &&
                 STy->getTypeAtIndex(1) == FuncPtrTy,
             "wrong type for intrinsic global variable", &GV);
      if (STy->getNumElements() == 3) {
        Type *ETy = STy->getTypeAtIndex(2);
        Assert(ETy->isPointerTy() &&
                   cast<PointerType>(ETy)->getElementType()->isIntegerTy(8),
               "wrong type for intrinsic global variable", &GV);
      }
    }
  }
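
  // For reference, an llvm.global_ctors entry that satisfies the checks above
  // looks roughly like this in textual IR (illustrative only; @ctor is a
  // hypothetical constructor function):
  //
  //   @llvm.global_ctors = appending global [1 x { i32, void ()*, i8* }]
  //       [{ i32, void ()*, i8* } { i32 65535, void ()* @ctor, i8* null }]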

  if (GV.hasName() && (GV.getName() == "llvm.used" ||
                       GV.getName() == "llvm.compiler.used")) {
    Assert(!GV.hasInitializer() || GV.hasAppendingLinkage(),
           "invalid linkage for intrinsic global variable", &GV);
    Type *GVType = GV.getValueType();
    if (ArrayType *ATy = dyn_cast<ArrayType>(GVType)) {
      PointerType *PTy = dyn_cast<PointerType>(ATy->getElementType());
      Assert(PTy, "wrong type for intrinsic global variable", &GV);
      if (GV.hasInitializer()) {
        const Constant *Init = GV.getInitializer();
        const ConstantArray *InitArray = dyn_cast<ConstantArray>(Init);
        Assert(InitArray, "wrong initializer for intrinsic global variable",
               Init);
        for (Value *Op : InitArray->operands()) {
          Value *V = Op->stripPointerCastsNoFollowAliases();
          Assert(isa<GlobalVariable>(V) || isa<Function>(V) ||
                     isa<GlobalAlias>(V),
                 "invalid llvm.used member", V);
          Assert(V->hasName(), "members of llvm.used must be named", V);
        }
      }
    }
  }

  Assert(!GV.hasDLLImportStorageClass() ||
             (GV.isDeclaration() && GV.hasExternalLinkage()) ||
             GV.hasAvailableExternallyLinkage(),
         "Global is marked as dllimport, but not external", &GV);

  // Visit any debug info attachments.
  SmallVector<MDNode *, 1> MDs;
  GV.getMetadata(LLVMContext::MD_dbg, MDs);
  for (auto *MD : MDs) {
    if (auto *GVE = dyn_cast<DIGlobalVariableExpression>(MD))
      visitDIGlobalVariableExpression(*GVE);
    else
      AssertDI(false, "!dbg attachment of global variable must be a "
                      "DIGlobalVariableExpression");
  }

  if (!GV.hasInitializer()) {
    visitGlobalValue(GV);
    return;
  }

  // Walk any aggregate initializers looking for bitcasts between address spaces
  visitConstantExprsRecursively(GV.getInitializer());

  visitGlobalValue(GV);
}

void Verifier::visitAliaseeSubExpr(const GlobalAlias &GA, const Constant &C) {
  SmallPtrSet<const GlobalAlias*, 4> Visited;
  Visited.insert(&GA);
  visitAliaseeSubExpr(Visited, GA, C);
}

void Verifier::visitAliaseeSubExpr(SmallPtrSetImpl<const GlobalAlias*> &Visited,
                                   const GlobalAlias &GA, const Constant &C) {
  if (const auto *GV = dyn_cast<GlobalValue>(&C)) {
    Assert(!GV->isDeclarationForLinker(), "Alias must point to a definition",
           &GA);

    if (const auto *GA2 = dyn_cast<GlobalAlias>(GV)) {
      Assert(Visited.insert(GA2).second, "Aliases cannot form a cycle", &GA);

      Assert(!GA2->isInterposable(), "Alias cannot point to an interposable alias",
             &GA);
    } else {
      // Only continue verifying subexpressions of GlobalAliases.
      // Do not recurse into global initializers.
      return;
    }
  }

  if (const auto *CE = dyn_cast<ConstantExpr>(&C))
    visitConstantExprsRecursively(CE);

  for (const Use &U : C.operands()) {
    Value *V = &*U;
    if (const auto *GA2 = dyn_cast<GlobalAlias>(V))
      visitAliaseeSubExpr(Visited, GA, *GA2->getAliasee());
    else if (const auto *C2 = dyn_cast<Constant>(V))
      visitAliaseeSubExpr(Visited, GA, *C2);
  }
}

void Verifier::visitGlobalAlias(const GlobalAlias &GA) {
  Assert(GlobalAlias::isValidLinkage(GA.getLinkage()),
         "Alias should have private, internal, linkonce, weak, linkonce_odr, "
         "weak_odr, or external linkage!",
         &GA);
  const Constant *Aliasee = GA.getAliasee();
  Assert(Aliasee, "Aliasee cannot be NULL!", &GA);
  Assert(GA.getType() == Aliasee->getType(),
         "Alias and aliasee types should match!", &GA);

  Assert(isa<GlobalValue>(Aliasee) || isa<ConstantExpr>(Aliasee),
         "Aliasee should be either GlobalValue or ConstantExpr", &GA);

  visitAliaseeSubExpr(GA, *Aliasee);

  visitGlobalValue(GA);
}

void Verifier::visitNamedMDNode(const NamedMDNode &NMD) {
  // There used to be various other llvm.dbg.* nodes, but we don't support
  // upgrading them and we want to reserve the namespace for future uses.
  if (NMD.getName().startswith("llvm.dbg."))
    AssertDI(NMD.getName() == "llvm.dbg.cu",
             "unrecognized named metadata node in the llvm.dbg namespace",
             &NMD);
  for (const MDNode *MD : NMD.operands()) {
    if (NMD.getName() == "llvm.dbg.cu")
      AssertDI(MD && isa<DICompileUnit>(MD), "invalid compile unit", &NMD, MD);

    if (!MD)
      continue;

    visitMDNode(*MD);
  }
}

void Verifier::visitMDNode(const MDNode &MD) {
  // Only visit each node once.  Metadata can be mutually recursive, so this
  // avoids infinite recursion here, as well as being an optimization.
  if (!MDNodes.insert(&MD).second)
    return;

  switch (MD.getMetadataID()) {
  default:
    llvm_unreachable("Invalid MDNode subclass");
  case Metadata::MDTupleKind:
    break;
#define HANDLE_SPECIALIZED_MDNODE_LEAF(CLASS)                                  \
  case Metadata::CLASS##Kind:                                                  \
    visit##CLASS(cast<CLASS>(MD));                                             \
    break;
#include "llvm/IR/Metadata.def"
  }

  for (const Metadata *Op : MD.operands()) {
    if (!Op)
      continue;
    Assert(!isa<LocalAsMetadata>(Op), "Invalid operand for global metadata!",
           &MD, Op);
    if (auto *N = dyn_cast<MDNode>(Op)) {
      visitMDNode(*N);
      continue;
    }
    if (auto *V = dyn_cast<ValueAsMetadata>(Op)) {
      visitValueAsMetadata(*V, nullptr);
      continue;
    }
  }

  // Check these last, so we diagnose problems in operands first.
  Assert(!MD.isTemporary(), "Expected no forward declarations!", &MD);
  Assert(MD.isResolved(), "All nodes should be resolved!", &MD);
}

void Verifier::visitValueAsMetadata(const ValueAsMetadata &MD, Function *F) {
  Assert(MD.getValue(), "Expected valid value", &MD);
  Assert(!MD.getValue()->getType()->isMetadataTy(),
         "Unexpected metadata round-trip through values", &MD, MD.getValue());

  auto *L = dyn_cast<LocalAsMetadata>(&MD);
  if (!L)
    return;

  Assert(F, "function-local metadata used outside a function", L);

  // If this was an instruction, bb, or argument, verify that it is in the
  // function that we expect.
  Function *ActualF = nullptr;
  if (Instruction *I = dyn_cast<Instruction>(L->getValue())) {
    Assert(I->getParent(), "function-local metadata not in basic block", L, I);
    ActualF = I->getParent()->getParent();
  } else if (BasicBlock *BB = dyn_cast<BasicBlock>(L->getValue()))
    ActualF = BB->getParent();
  else if (Argument *A = dyn_cast<Argument>(L->getValue()))
    ActualF = A->getParent();
  assert(ActualF && "Unimplemented function local metadata case!");

  Assert(ActualF == F, "function-local metadata used in wrong function", L);
}

void Verifier::visitMetadataAsValue(const MetadataAsValue &MDV, Function *F) {
  Metadata *MD = MDV.getMetadata();
  if (auto *N = dyn_cast<MDNode>(MD)) {
    visitMDNode(*N);
    return;
  }

  // Only visit each node once.  Metadata can be mutually recursive, so this
  // avoids infinite recursion here, as well as being an optimization.
  if (!MDNodes.insert(MD).second)
    return;

  if (auto *V = dyn_cast<ValueAsMetadata>(MD))
    visitValueAsMetadata(*V, F);
}

static bool isType(const Metadata *MD) { return !MD || isa<DIType>(MD); }
static bool isScope(const Metadata *MD) { return !MD || isa<DIScope>(MD); }
static bool isDINode(const Metadata *MD) { return !MD || isa<DINode>(MD); }

void Verifier::visitDILocation(const DILocation &N) {
  AssertDI(N.getRawScope() && isa<DILocalScope>(N.getRawScope()),
           "location requires a valid scope", &N, N.getRawScope());
  if (auto *IA = N.getRawInlinedAt())
    AssertDI(isa<DILocation>(IA), "inlined-at should be a location", &N, IA);
  if (auto *SP = dyn_cast<DISubprogram>(N.getRawScope()))
    AssertDI(SP->isDefinition(), "scope points into the type hierarchy", &N);
}

void Verifier::visitGenericDINode(const GenericDINode &N) {
  AssertDI(N.getTag(), "invalid tag", &N);
}

void Verifier::visitDIScope(const DIScope &N) {
  if (auto *F = N.getRawFile())
    AssertDI(isa<DIFile>(F), "invalid file", &N, F);
}

void Verifier::visitDISubrange(const DISubrange &N) {
  AssertDI(N.getTag() == dwarf::DW_TAG_subrange_type, "invalid tag", &N);
  AssertDI(N.getCount() >= -1, "invalid subrange count", &N);
}

void Verifier::visitDIEnumerator(const DIEnumerator &N) {
  AssertDI(N.getTag() == dwarf::DW_TAG_enumerator, "invalid tag", &N);
}

void Verifier::visitDIBasicType(const DIBasicType &N) {
  AssertDI(N.getTag() == dwarf::DW_TAG_base_type ||
               N.getTag() == dwarf::DW_TAG_unspecified_type,
           "invalid tag", &N);
}

void Verifier::visitDIDerivedType(const DIDerivedType &N) {
  // Common scope checks.
  visitDIScope(N);

  AssertDI(N.getTag() == dwarf::DW_TAG_typedef ||
               N.getTag() == dwarf::DW_TAG_pointer_type ||
               N.getTag() == dwarf::DW_TAG_ptr_to_member_type ||
               N.getTag() == dwarf::DW_TAG_reference_type ||
               N.getTag() == dwarf::DW_TAG_rvalue_reference_type ||
               N.getTag() == dwarf::DW_TAG_const_type ||
               N.getTag() == dwarf::DW_TAG_volatile_type ||
               N.getTag() == dwarf::DW_TAG_restrict_type ||
               N.getTag() == dwarf::DW_TAG_atomic_type ||
               N.getTag() == dwarf::DW_TAG_member ||
               N.getTag() == dwarf::DW_TAG_inheritance ||
               N.getTag() == dwarf::DW_TAG_friend,
           "invalid tag", &N);
  if (N.getTag() == dwarf::DW_TAG_ptr_to_member_type) {
    AssertDI(isType(N.getRawExtraData()), "invalid pointer to member type", &N,
             N.getRawExtraData());
  }

  AssertDI(isScope(N.getRawScope()), "invalid scope", &N, N.getRawScope());
  AssertDI(isType(N.getRawBaseType()), "invalid base type", &N,
           N.getRawBaseType());

  if (N.getDWARFAddressSpace()) {
    AssertDI(N.getTag() == dwarf::DW_TAG_pointer_type ||
                 N.getTag() == dwarf::DW_TAG_reference_type,
             "DWARF address space only applies to pointer or reference types",
             &N);
  }
}

static bool hasConflictingReferenceFlags(unsigned Flags) {
  return (Flags & DINode::FlagLValueReference) &&
         (Flags & DINode::FlagRValueReference);
}

void Verifier::visitTemplateParams(const MDNode &N, const Metadata &RawParams) {
  auto *Params = dyn_cast<MDTuple>(&RawParams);
  AssertDI(Params, "invalid template params", &N, &RawParams);
  for (Metadata *Op : Params->operands()) {
    AssertDI(Op && isa<DITemplateParameter>(Op), "invalid template parameter",
             &N, Params, Op);
  }
}

void Verifier::visitDICompositeType(const DICompositeType &N) {
  // Common scope checks.
  visitDIScope(N);

  AssertDI(N.getTag() == dwarf::DW_TAG_array_type ||
               N.getTag() == dwarf::DW_TAG_structure_type ||
               N.getTag() == dwarf::DW_TAG_union_type ||
               N.getTag() == dwarf::DW_TAG_enumeration_type ||
               N.getTag() == dwarf::DW_TAG_class_type,
           "invalid tag", &N);

  AssertDI(isScope(N.getRawScope()), "invalid scope", &N, N.getRawScope());
  AssertDI(isType(N.getRawBaseType()), "invalid base type", &N,
           N.getRawBaseType());

  AssertDI(!N.getRawElements() || isa<MDTuple>(N.getRawElements()),
           "invalid composite elements", &N, N.getRawElements());
  AssertDI(isType(N.getRawVTableHolder()), "invalid vtable holder", &N,
           N.getRawVTableHolder());
  AssertDI(!hasConflictingReferenceFlags(N.getFlags()),
           "invalid reference flags", &N);
  if (auto *Params = N.getRawTemplateParams())
    visitTemplateParams(N, *Params);

  if (N.getTag() == dwarf::DW_TAG_class_type ||
      N.getTag() == dwarf::DW_TAG_union_type) {
    AssertDI(N.getFile() && !N.getFile()->getFilename().empty(),
             "class/union requires a filename", &N, N.getFile());
  }
}

void Verifier::visitDISubroutineType(const DISubroutineType &N) {
  AssertDI(N.getTag() == dwarf::DW_TAG_subroutine_type, "invalid tag", &N);
  if (auto *Types = N.getRawTypeArray()) {
    AssertDI(isa<MDTuple>(Types), "invalid composite elements", &N, Types);
    for (Metadata *Ty : N.getTypeArray()->operands()) {
      AssertDI(isType(Ty), "invalid subroutine type ref", &N, Types, Ty);
    }
  }
  AssertDI(!hasConflictingReferenceFlags(N.getFlags()),
           "invalid reference flags", &N);
}

void Verifier::visitDIFile(const DIFile &N) {
  AssertDI(N.getTag() == dwarf::DW_TAG_file_type, "invalid tag", &N);
  AssertDI((N.getChecksumKind() != DIFile::CSK_None ||
            N.getChecksum().empty()), "invalid checksum kind", &N);
}

void Verifier::visitDICompileUnit(const DICompileUnit &N) {
  AssertDI(N.isDistinct(), "compile units must be distinct", &N);
  AssertDI(N.getTag() == dwarf::DW_TAG_compile_unit, "invalid tag", &N);

  // Don't bother verifying the compilation directory or producer string
  // as those could be empty.
  AssertDI(N.getRawFile() && isa<DIFile>(N.getRawFile()), "invalid file", &N,
           N.getRawFile());
  AssertDI(!N.getFile()->getFilename().empty(), "invalid filename", &N,
           N.getFile());

  AssertDI((N.getEmissionKind() <= DICompileUnit::LastEmissionKind),
           "invalid emission kind", &N);

  if (auto *Array = N.getRawEnumTypes()) {
    AssertDI(isa<MDTuple>(Array), "invalid enum list", &N, Array);
    for (Metadata *Op : N.getEnumTypes()->operands()) {
      auto *Enum = dyn_cast_or_null<DICompositeType>(Op);
      AssertDI(Enum && Enum->getTag() == dwarf::DW_TAG_enumeration_type,
               "invalid enum type", &N, N.getEnumTypes(), Op);
    }
  }
  if (auto *Array = N.getRawRetainedTypes()) {
    AssertDI(isa<MDTuple>(Array), "invalid retained type list", &N, Array);
    for (Metadata *Op : N.getRetainedTypes()->operands()) {
      AssertDI(Op && (isa<DIType>(Op) ||
                      (isa<DISubprogram>(Op) &&
                       !cast<DISubprogram>(Op)->isDefinition())),
               "invalid retained type", &N, Op);
    }
  }
  if (auto *Array = N.getRawGlobalVariables()) {
    AssertDI(isa<MDTuple>(Array), "invalid global variable list", &N, Array);
    for (Metadata *Op : N.getGlobalVariables()->operands()) {
      AssertDI(Op && (isa<DIGlobalVariableExpression>(Op)),
               "invalid global variable ref", &N, Op);
    }
  }
  if (auto *Array = N.getRawImportedEntities()) {
    AssertDI(isa<MDTuple>(Array), "invalid imported entity list", &N, Array);
    for (Metadata *Op : N.getImportedEntities()->operands()) {
      AssertDI(Op && isa<DIImportedEntity>(Op), "invalid imported entity ref",
               &N, Op);
    }
  }
  if (auto *Array = N.getRawMacros()) {
    AssertDI(isa<MDTuple>(Array), "invalid macro list", &N, Array);
    for (Metadata *Op : N.getMacros()->operands()) {
      AssertDI(Op && isa<DIMacroNode>(Op), "invalid macro ref", &N, Op);
    }
  }
  CUVisited.insert(&N);
}

void Verifier::visitDISubprogram(const DISubprogram &N) {
  AssertDI(N.getTag() == dwarf::DW_TAG_subprogram, "invalid tag", &N);
  AssertDI(isScope(N.getRawScope()), "invalid scope", &N, N.getRawScope());
  if (auto *F = N.getRawFile())
    AssertDI(isa<DIFile>(F), "invalid file", &N, F);
  else
    AssertDI(N.getLine() == 0, "line specified with no file", &N, N.getLine());
  if (auto *T = N.getRawType())
    AssertDI(isa<DISubroutineType>(T), "invalid subroutine type", &N, T);
  AssertDI(isType(N.getRawContainingType()), "invalid containing type", &N,
           N.getRawContainingType());
  if (auto *Params = N.getRawTemplateParams())
    visitTemplateParams(N, *Params);
  if (auto *S = N.getRawDeclaration())
    AssertDI(isa<DISubprogram>(S) && !cast<DISubprogram>(S)->isDefinition(),
             "invalid subprogram declaration", &N, S);
  if (auto *RawVars = N.getRawVariables()) {
    auto *Vars = dyn_cast<MDTuple>(RawVars);
    AssertDI(Vars, "invalid variable list", &N, RawVars);
    for (Metadata *Op : Vars->operands()) {
      AssertDI(Op && isa<DILocalVariable>(Op), "invalid local variable", &N,
               Vars, Op);
    }
  }
  AssertDI(!hasConflictingReferenceFlags(N.getFlags()),
           "invalid reference flags", &N);

  auto *Unit = N.getRawUnit();
  if (N.isDefinition()) {
    // Subprogram definitions (not part of the type hierarchy).
    AssertDI(N.isDistinct(), "subprogram definitions must be distinct", &N);
    AssertDI(Unit, "subprogram definitions must have a compile unit", &N);
    AssertDI(isa<DICompileUnit>(Unit), "invalid unit type", &N, Unit);
  } else {
    // Subprogram declarations (part of the type hierarchy).
    AssertDI(!Unit, "subprogram declarations must not have a compile unit", &N);
  }

  if (auto *RawThrownTypes = N.getRawThrownTypes()) {
    auto *ThrownTypes = dyn_cast<MDTuple>(RawThrownTypes);
    AssertDI(ThrownTypes, "invalid thrown types list", &N, RawThrownTypes);
    for (Metadata *Op : ThrownTypes->operands())
      AssertDI(Op && isa<DIType>(Op), "invalid thrown type", &N, ThrownTypes,
               Op);
  }
}

void Verifier::visitDILexicalBlockBase(const DILexicalBlockBase &N) {
  AssertDI(N.getTag() == dwarf::DW_TAG_lexical_block, "invalid tag", &N);
  AssertDI(N.getRawScope() && isa<DILocalScope>(N.getRawScope()),
           "invalid local scope", &N, N.getRawScope());
  if (auto *SP = dyn_cast<DISubprogram>(N.getRawScope()))
    AssertDI(SP->isDefinition(), "scope points into the type hierarchy", &N);
}

void Verifier::visitDILexicalBlock(const DILexicalBlock &N) {
  visitDILexicalBlockBase(N);

  AssertDI(N.getLine() || !N.getColumn(),
           "cannot have column info without line info", &N);
}

void Verifier::visitDILexicalBlockFile(const DILexicalBlockFile &N) {
  visitDILexicalBlockBase(N);
}

void Verifier::visitDINamespace(const DINamespace &N) {
  AssertDI(N.getTag() == dwarf::DW_TAG_namespace, "invalid tag", &N);
  if (auto *S = N.getRawScope())
    AssertDI(isa<DIScope>(S), "invalid scope ref", &N, S);
}

void Verifier::visitDIMacro(const DIMacro &N) {
  AssertDI(N.getMacinfoType() == dwarf::DW_MACINFO_define ||
               N.getMacinfoType() == dwarf::DW_MACINFO_undef,
           "invalid macinfo type", &N);
  AssertDI(!N.getName().empty(), "anonymous macro", &N);
  if (!N.getValue().empty()) {
    assert(N.getValue().data()[0] != ' ' && "Macro value has a space prefix");
  }
}

void Verifier::visitDIMacroFile(const DIMacroFile &N) {
  AssertDI(N.getMacinfoType() == dwarf::DW_MACINFO_start_file,
           "invalid macinfo type", &N);
  if (auto *F = N.getRawFile())
    AssertDI(isa<DIFile>(F), "invalid file", &N, F);

  if (auto *Array = N.getRawElements()) {
    AssertDI(isa<MDTuple>(Array), "invalid macro list", &N, Array);
    for (Metadata *Op : N.getElements()->operands()) {
      AssertDI(Op && isa<DIMacroNode>(Op), "invalid macro ref", &N, Op);
    }
  }
}

void Verifier::visitDIModule(const DIModule &N) {
  AssertDI(N.getTag() == dwarf::DW_TAG_module, "invalid tag", &N);
  AssertDI(!N.getName().empty(), "anonymous module", &N);
}

void Verifier::visitDITemplateParameter(const DITemplateParameter &N) {
  AssertDI(isType(N.getRawType()), "invalid type ref", &N, N.getRawType());
}

void Verifier::visitDITemplateTypeParameter(const DITemplateTypeParameter &N) {
  visitDITemplateParameter(N);

  AssertDI(N.getTag() == dwarf::DW_TAG_template_type_parameter, "invalid tag",
           &N);
}

void Verifier::visitDITemplateValueParameter(
    const DITemplateValueParameter &N) {
  visitDITemplateParameter(N);

  AssertDI(N.getTag() == dwarf::DW_TAG_template_value_parameter ||
               N.getTag() == dwarf::DW_TAG_GNU_template_template_param ||
               N.getTag() == dwarf::DW_TAG_GNU_template_parameter_pack,
           "invalid tag", &N);
}

void Verifier::visitDIVariable(const DIVariable &N) {
  if (auto *S = N.getRawScope())
    AssertDI(isa<DIScope>(S), "invalid scope", &N, S);
  if (auto *F = N.getRawFile())
    AssertDI(isa<DIFile>(F), "invalid file", &N, F);
}

void Verifier::visitDIGlobalVariable(const DIGlobalVariable &N) {
  // Checks common to all variables.
  visitDIVariable(N);

  AssertDI(N.getTag() == dwarf::DW_TAG_variable, "invalid tag", &N);
  AssertDI(!N.getName().empty(), "missing global variable name", &N);
  AssertDI(isType(N.getRawType()), "invalid type ref", &N, N.getRawType());
  AssertDI(N.getType(), "missing global variable type", &N);
  if (auto *Member = N.getRawStaticDataMemberDeclaration()) {
    AssertDI(isa<DIDerivedType>(Member),
             "invalid static data member declaration", &N, Member);
  }
}

void Verifier::visitDILocalVariable(const DILocalVariable &N) {
  // Checks common to all variables.
  visitDIVariable(N);

  AssertDI(isType(N.getRawType()), "invalid type ref", &N, N.getRawType());
  AssertDI(N.getTag() == dwarf::DW_TAG_variable, "invalid tag", &N);
  AssertDI(N.getRawScope() && isa<DILocalScope>(N.getRawScope()),
           "local variable requires a valid scope", &N, N.getRawScope());
}

void Verifier::visitDIExpression(const DIExpression &N) {
  AssertDI(N.isValid(), "invalid expression", &N);
}

void Verifier::visitDIGlobalVariableExpression(
    const DIGlobalVariableExpression &GVE) {
  AssertDI(GVE.getVariable(), "missing variable");
  if (auto *Var = GVE.getVariable())
    visitDIGlobalVariable(*Var);
  if (auto *Expr = GVE.getExpression()) {
    visitDIExpression(*Expr);
    if (auto Fragment = Expr->getFragmentInfo())
      verifyFragmentExpression(*GVE.getVariable(), *Fragment, &GVE);
  }
}

void Verifier::visitDIObjCProperty(const DIObjCProperty &N) {
  AssertDI(N.getTag() == dwarf::DW_TAG_APPLE_property, "invalid tag", &N);
  if (auto *T = N.getRawType())
    AssertDI(isType(T), "invalid type ref", &N, T);
  if (auto *F = N.getRawFile())
    AssertDI(isa<DIFile>(F), "invalid file", &N, F);
}

void Verifier::visitDIImportedEntity(const DIImportedEntity &N) {
  AssertDI(N.getTag() == dwarf::DW_TAG_imported_module ||
               N.getTag() == dwarf::DW_TAG_imported_declaration,
           "invalid tag", &N);
  if (auto *S = N.getRawScope())
    AssertDI(isa<DIScope>(S), "invalid scope for imported entity", &N, S);
  AssertDI(isDINode(N.getRawEntity()), "invalid imported entity", &N,
           N.getRawEntity());
}

void Verifier::visitComdat(const Comdat &C) {
  // The Module is invalid if the GlobalValue has private linkage.  Entities
  // with private linkage don't have entries in the symbol table.
  if (const GlobalValue *GV = M.getNamedValue(C.getName()))
    Assert(!GV->hasPrivateLinkage(), "comdat global value has private linkage",
           GV);
}

void Verifier::visitModuleIdents(const Module &M) {
  const NamedMDNode *Idents = M.getNamedMetadata("llvm.ident");
  if (!Idents)
    return;

  // llvm.ident takes a list of metadata entries. Each entry has only one
  // string. Scan each llvm.ident entry and make sure that this requirement is
  // met.
  for (const MDNode *N : Idents->operands()) {
    Assert(N->getNumOperands() == 1,
           "incorrect number of operands in llvm.ident metadata", N);
    Assert(dyn_cast_or_null<MDString>(N->getOperand(0)),
           ("invalid value for llvm.ident metadata entry operand "
            "(the operand should be a string)"),
           N->getOperand(0));
  }
}
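
// For reference, llvm.ident metadata that satisfies the checks above looks
// like this in textual IR (illustrative example; the version string is
// arbitrary):
//
//   !llvm.ident = !{!0}
//   !0 = !{!"clang version 6.0.0"}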

void Verifier::visitModuleFlags(const Module &M) {
  const NamedMDNode *Flags = M.getModuleFlagsMetadata();
  if (!Flags) return;

  // Scan each flag, and track the flags and requirements.
  DenseMap<const MDString*, const MDNode*> SeenIDs;
  SmallVector<const MDNode*, 16> Requirements;
  for (const MDNode *MDN : Flags->operands())
    visitModuleFlag(MDN, SeenIDs, Requirements);

  // Validate that the requirements in the module are valid.
  for (const MDNode *Requirement : Requirements) {
    const MDString *Flag = cast<MDString>(Requirement->getOperand(0));
    const Metadata *ReqValue = Requirement->getOperand(1);

    const MDNode *Op = SeenIDs.lookup(Flag);
    if (!Op) {
      CheckFailed("invalid requirement on flag, flag is not present in module",
                  Flag);
      continue;
    }

    if (Op->getOperand(2) != ReqValue) {
      CheckFailed(("invalid requirement on flag, "
                   "flag does not have the required value"),
                  Flag);
      continue;
    }
  }
}

void
Verifier::visitModuleFlag(const MDNode *Op,
                          DenseMap<const MDString *, const MDNode *> &SeenIDs,
                          SmallVectorImpl<const MDNode *> &Requirements) {
  // Each module flag should have three arguments: the merge behavior (a
  // constant int), the flag ID (an MDString), and the value.
  Assert(Op->getNumOperands() == 3,
         "incorrect number of operands in module flag", Op);
  Module::ModFlagBehavior MFB;
  if (!Module::isValidModFlagBehavior(Op->getOperand(0), MFB)) {
    Assert(
        mdconst::dyn_extract_or_null<ConstantInt>(Op->getOperand(0)),
        "invalid behavior operand in module flag (expected constant integer)",
        Op->getOperand(0));
    Assert(false,
           "invalid behavior operand in module flag (unexpected constant)",
           Op->getOperand(0));
  }
  MDString *ID = dyn_cast_or_null<MDString>(Op->getOperand(1));
  Assert(ID, "invalid ID operand in module flag (expected metadata string)",
         Op->getOperand(1));

  // Sanity check the values for behaviors with additional requirements.
  switch (MFB) {
  case Module::Error:
  case Module::Warning:
  case Module::Override:
    // These behavior types accept any value.
    break;

  case Module::Max: {
    Assert(mdconst::dyn_extract_or_null<ConstantInt>(Op->getOperand(2)),
           "invalid value for 'max' module flag (expected constant integer)",
           Op->getOperand(2));
    break;
  }

  case Module::Require: {
    // The value should itself be an MDNode with two operands, a flag ID (an
    // MDString), and a value.
    MDNode *Value = dyn_cast<MDNode>(Op->getOperand(2));
    Assert(Value && Value->getNumOperands() == 2,
           "invalid value for 'require' module flag (expected metadata pair)",
           Op->getOperand(2));
    Assert(isa<MDString>(Value->getOperand(0)),
           ("invalid value for 'require' module flag "
            "(first value operand should be a string)"),
           Value->getOperand(0));

    // Append it to the list of requirements, to check once all module flags are
    // scanned.
    Requirements.push_back(Value);
    break;
  }

  case Module::Append:
  case Module::AppendUnique: {
    // These behavior types require the operand be an MDNode.
    Assert(isa<MDNode>(Op->getOperand(2)),
           "invalid value for 'append'-type module flag "
           "(expected a metadata node)",
           Op->getOperand(2));
    break;
  }
  }

  // Unless this is a "requires" flag, check the ID is unique.
  if (MFB != Module::Require) {
    bool Inserted = SeenIDs.insert(std::make_pair(ID, Op)).second;
    Assert(Inserted,
           "module flag identifiers must be unique (or of 'require' type)", ID);
  }

  if (ID->getString() == "wchar_size") {
    ConstantInt *Value
      = mdconst::dyn_extract_or_null<ConstantInt>(Op->getOperand(2));
    Assert(Value, "wchar_size metadata requires constant integer argument");
  }

  if (ID->getString() == "Linker Options") {
    // If the llvm.linker.options named metadata exists, we assume that the
    // bitcode reader has upgraded the module flag. Otherwise the flag might
    // have been created by a client directly.
    Assert(M.getNamedMetadata("llvm.linker.options"),
           "'Linker Options' named metadata no longer supported");
  }
}
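
// For reference, module flags metadata that exercises the checks above looks
// like this in textual IR (illustrative example; the flag name "bar" in !1 is
// arbitrary):
//
//   !llvm.module.flags = !{!0, !1}
//   !0 = !{i32 1, !"wchar_size", i32 4}              ; behavior 1 = Error
//   !1 = !{i32 3, !"bar", !{!"wchar_size", i32 4}}   ; behavior 3 = Require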
1355
1356/// Return true if this attribute kind only applies to functions.
1357static bool isFuncOnlyAttr(Attribute::AttrKind Kind) {
1358  switch (Kind) {
1359  case Attribute::NoReturn:
1360  case Attribute::NoUnwind:
1361  case Attribute::NoInline:
1362  case Attribute::AlwaysInline:
1363  case Attribute::OptimizeForSize:
1364  case Attribute::StackProtect:
1365  case Attribute::StackProtectReq:
1366  case Attribute::StackProtectStrong:
1367  case Attribute::SafeStack:
1368  case Attribute::NoRedZone:
1369  case Attribute::NoImplicitFloat:
1370  case Attribute::Naked:
1371  case Attribute::InlineHint:
1372  case Attribute::StackAlignment:
1373  case Attribute::UWTable:
1374  case Attribute::NonLazyBind:
1375  case Attribute::ReturnsTwice:
1376  case Attribute::SanitizeAddress:
1377  case Attribute::SanitizeHWAddress:
1378  case Attribute::SanitizeThread:
1379  case Attribute::SanitizeMemory:
1380  case Attribute::MinSize:
1381  case Attribute::NoDuplicate:
1382  case Attribute::Builtin:
1383  case Attribute::NoBuiltin:
1384  case Attribute::Cold:
1385  case Attribute::OptimizeNone:
1386  case Attribute::JumpTable:
1387  case Attribute::Convergent:
1388  case Attribute::ArgMemOnly:
1389  case Attribute::NoRecurse:
1390  case Attribute::InaccessibleMemOnly:
1391  case Attribute::InaccessibleMemOrArgMemOnly:
1392  case Attribute::AllocSize:
1393  case Attribute::Speculatable:
1394  case Attribute::StrictFP:
1395    return true;
1396  default:
1397    break;
1398  }
1399  return false;
1400}
1401
1402/// Return true if this is a function attribute that can also appear on
1403/// arguments.
1404static bool isFuncOrArgAttr(Attribute::AttrKind Kind) {
1405  return Kind == Attribute::ReadOnly || Kind == Attribute::WriteOnly ||
1406         Kind == Attribute::ReadNone;
1407}
1408
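/// Check that every enum attribute in \p Attrs is legal for its position:
/// function-only attributes may not appear on parameters or return values,
/// and parameter/return attributes (other than the few valid in both
/// positions) may not appear on functions. String attributes are not
/// checked here.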
1409void Verifier::verifyAttributeTypes(AttributeSet Attrs, bool IsFunction,
1410                                    const Value *V) {
1411  for (Attribute A : Attrs) {
1412    if (A.isStringAttribute())
1413      continue;
1414
1415    if (isFuncOnlyAttr(A.getKindAsEnum())) {
1416      if (!IsFunction) {
1417        CheckFailed("Attribute '" + A.getAsString() +
1418                        "' only applies to functions!",
1419                    V);
1420        return;
1421      }
1422    } else if (IsFunction && !isFuncOrArgAttr(A.getKindAsEnum())) {
1423      CheckFailed("Attribute '" + A.getAsString() +
1424                      "' does not apply to functions!",
1425                  V);
1426      return;
1427    }
1428  }
1429}
1430
1431// VerifyParameterAttrs - Check the given attributes for an argument or return
1432// value of the specified type.  The value V is printed in error messages.
1433void Verifier::verifyParameterAttrs(AttributeSet Attrs, Type *Ty,
1434                                    const Value *V) {
1435  if (!Attrs.hasAttributes())
1436    return;
1437
1438  verifyAttributeTypes(Attrs, /*IsFunction=*/false, V);
1439
1440  // Check for mutually incompatible attributes.  Only inreg is compatible with
1441  // sret.
1442  unsigned AttrCount = 0;
1443  AttrCount += Attrs.hasAttribute(Attribute::ByVal);
1444  AttrCount += Attrs.hasAttribute(Attribute::InAlloca);
1445  AttrCount += Attrs.hasAttribute(Attribute::StructRet) ||
1446               Attrs.hasAttribute(Attribute::InReg);
1447  AttrCount += Attrs.hasAttribute(Attribute::Nest);
1448  Assert(AttrCount <= 1, "Attributes 'byval', 'inalloca', 'inreg', 'nest', "
1449                         "and 'sret' are incompatible!",
1450         V);
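  // Illustrative (hand-written) example: a parameter such as
  //   i8* byval nest %p
  // carries two of these ABI-defining attributes and is rejected by the
  // check above.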
1451
1452  Assert(!(Attrs.hasAttribute(Attribute::InAlloca) &&
1453           Attrs.hasAttribute(Attribute::ReadOnly)),
1454         "Attributes "
1455         "'inalloca and readonly' are incompatible!",
1456         V);
1457
1458  Assert(!(Attrs.hasAttribute(Attribute::StructRet) &&
1459           Attrs.hasAttribute(Attribute::Returned)),
1460         "Attributes "
1461         "'sret and returned' are incompatible!",
1462         V);
1463
1464  Assert(!(Attrs.hasAttribute(Attribute::ZExt) &&
1465           Attrs.hasAttribute(Attribute::SExt)),
1466         "Attributes "
1467         "'zeroext and signext' are incompatible!",
1468         V);
1469
1470  Assert(!(Attrs.hasAttribute(Attribute::ReadNone) &&
1471           Attrs.hasAttribute(Attribute::ReadOnly)),
1472         "Attributes "
1473         "'readnone and readonly' are incompatible!",
1474         V);
1475
1476  Assert(!(Attrs.hasAttribute(Attribute::ReadNone) &&
1477           Attrs.hasAttribute(Attribute::WriteOnly)),
1478         "Attributes "
1479         "'readnone and writeonly' are incompatible!",
1480         V);
1481
1482  Assert(!(Attrs.hasAttribute(Attribute::ReadOnly) &&
1483           Attrs.hasAttribute(Attribute::WriteOnly)),
1484         "Attributes "
1485         "'readonly and writeonly' are incompatible!",
1486         V);
1487
1488  Assert(!(Attrs.hasAttribute(Attribute::NoInline) &&
1489           Attrs.hasAttribute(Attribute::AlwaysInline)),
1490         "Attributes "
1491         "'noinline and alwaysinline' are incompatible!",
1492         V);
1493
1494  AttrBuilder IncompatibleAttrs = AttributeFuncs::typeIncompatible(Ty);
1495  Assert(!AttrBuilder(Attrs).overlaps(IncompatibleAttrs),
1496         "Wrong types for attribute: " +
1497             AttributeSet::get(Context, IncompatibleAttrs).getAsString(),
1498         V);
1499
1500  if (PointerType *PTy = dyn_cast<PointerType>(Ty)) {
1501    SmallPtrSet<Type*, 4> Visited;
1502    if (!PTy->getElementType()->isSized(&Visited)) {
1503      Assert(!Attrs.hasAttribute(Attribute::ByVal) &&
1504                 !Attrs.hasAttribute(Attribute::InAlloca),
1505             "Attributes 'byval' and 'inalloca' do not support unsized types!",
1506             V);
1507    }
1508    if (!isa<PointerType>(PTy->getElementType()))
1509      Assert(!Attrs.hasAttribute(Attribute::SwiftError),
1510             "Attribute 'swifterror' only applies to parameters "
1511             "with pointer to pointer type!",
1512             V);
1513  } else {
1514    Assert(!Attrs.hasAttribute(Attribute::ByVal),
1515           "Attribute 'byval' only applies to parameters with pointer type!",
1516           V);
1517    Assert(!Attrs.hasAttribute(Attribute::SwiftError),
1518           "Attribute 'swifterror' only applies to parameters "
1519           "with pointer type!",
1520           V);
1521  }
1522}
1523
1524// Check parameter attributes against a function type.
1525// The value V is printed in error messages.
1526void Verifier::verifyFunctionAttrs(FunctionType *FT, AttributeList Attrs,
1527                                   const Value *V) {
1528  if (Attrs.isEmpty())
1529    return;
1530
1531  bool SawNest = false;
1532  bool SawReturned = false;
1533  bool SawSRet = false;
1534  bool SawSwiftSelf = false;
1535  bool SawSwiftError = false;
1536
1537  // Verify return value attributes.
1538  AttributeSet RetAttrs = Attrs.getRetAttributes();
1539  Assert((!RetAttrs.hasAttribute(Attribute::ByVal) &&
1540          !RetAttrs.hasAttribute(Attribute::Nest) &&
1541          !RetAttrs.hasAttribute(Attribute::StructRet) &&
1542          !RetAttrs.hasAttribute(Attribute::NoCapture) &&
1543          !RetAttrs.hasAttribute(Attribute::Returned) &&
1544          !RetAttrs.hasAttribute(Attribute::InAlloca) &&
1545          !RetAttrs.hasAttribute(Attribute::SwiftSelf) &&
1546          !RetAttrs.hasAttribute(Attribute::SwiftError)),
1547         "Attributes 'byval', 'inalloca', 'nest', 'sret', 'nocapture', "
1548         "'returned', 'swiftself', and 'swifterror' do not apply to return "
1549         "values!",
1550         V);
1551  Assert((!RetAttrs.hasAttribute(Attribute::ReadOnly) &&
1552          !RetAttrs.hasAttribute(Attribute::WriteOnly) &&
1553          !RetAttrs.hasAttribute(Attribute::ReadNone)),
1554         "Attribute '" + RetAttrs.getAsString() +
1555             "' does not apply to function returns",
1556         V);
1557  verifyParameterAttrs(RetAttrs, FT->getReturnType(), V);
1558
1559  // Verify parameter attributes.
1560  for (unsigned i = 0, e = FT->getNumParams(); i != e; ++i) {
1561    Type *Ty = FT->getParamType(i);
1562    AttributeSet ArgAttrs = Attrs.getParamAttributes(i);
1563
1564    verifyParameterAttrs(ArgAttrs, Ty, V);
1565
1566    if (ArgAttrs.hasAttribute(Attribute::Nest)) {
1567      Assert(!SawNest, "More than one parameter has attribute nest!", V);
1568      SawNest = true;
1569    }
1570
1571    if (ArgAttrs.hasAttribute(Attribute::Returned)) {
1572      Assert(!SawReturned, "More than one parameter has attribute returned!",
1573             V);
1574      Assert(Ty->canLosslesslyBitCastTo(FT->getReturnType()),
1575             "Incompatible argument and return types for 'returned' attribute",
1576             V);
1577      SawReturned = true;
1578    }
1579
1580    if (ArgAttrs.hasAttribute(Attribute::StructRet)) {
1581      Assert(!SawSRet, "Cannot have multiple 'sret' parameters!", V);
1582      Assert(i == 0 || i == 1,
1583             "Attribute 'sret' is not on first or second parameter!", V);
1584      SawSRet = true;
1585    }
1586
1587    if (ArgAttrs.hasAttribute(Attribute::SwiftSelf)) {
1588      Assert(!SawSwiftSelf, "Cannot have multiple 'swiftself' parameters!", V);
1589      SawSwiftSelf = true;
1590    }
1591
1592    if (ArgAttrs.hasAttribute(Attribute::SwiftError)) {
1593      Assert(!SawSwiftError, "Cannot have multiple 'swifterror' parameters!",
1594             V);
1595      SawSwiftError = true;
1596    }
1597
1598    if (ArgAttrs.hasAttribute(Attribute::InAlloca)) {
1599      Assert(i == FT->getNumParams() - 1,
1600             "inalloca isn't on the last parameter!", V);
1601    }
1602  }
1603
1604  if (!Attrs.hasAttributes(AttributeList::FunctionIndex))
1605    return;
1606
1607  verifyAttributeTypes(Attrs.getFnAttributes(), /*IsFunction=*/true, V);
1608
1609  Assert(!(Attrs.hasFnAttribute(Attribute::ReadNone) &&
1610           Attrs.hasFnAttribute(Attribute::ReadOnly)),
1611         "Attributes 'readnone and readonly' are incompatible!", V);
1612
1613  Assert(!(Attrs.hasFnAttribute(Attribute::ReadNone) &&
1614           Attrs.hasFnAttribute(Attribute::WriteOnly)),
1615         "Attributes 'readnone and writeonly' are incompatible!", V);
1616
1617  Assert(!(Attrs.hasFnAttribute(Attribute::ReadOnly) &&
1618           Attrs.hasFnAttribute(Attribute::WriteOnly)),
1619         "Attributes 'readonly and writeonly' are incompatible!", V);
1620
1621  Assert(!(Attrs.hasFnAttribute(Attribute::ReadNone) &&
1622           Attrs.hasFnAttribute(Attribute::InaccessibleMemOrArgMemOnly)),
1623         "Attributes 'readnone and inaccessiblemem_or_argmemonly' are "
1624         "incompatible!",
1625         V);
1626
1627  Assert(!(Attrs.hasFnAttribute(Attribute::ReadNone) &&
1628           Attrs.hasFnAttribute(Attribute::InaccessibleMemOnly)),
1629         "Attributes 'readnone and inaccessiblememonly' are incompatible!", V);
1630
1631  Assert(!(Attrs.hasFnAttribute(Attribute::NoInline) &&
1632           Attrs.hasFnAttribute(Attribute::AlwaysInline)),
1633         "Attributes 'noinline and alwaysinline' are incompatible!", V);
1634
1635  if (Attrs.hasFnAttribute(Attribute::OptimizeNone)) {
1636    Assert(Attrs.hasFnAttribute(Attribute::NoInline),
1637           "Attribute 'optnone' requires 'noinline'!", V);
1638
1639    Assert(!Attrs.hasFnAttribute(Attribute::OptimizeForSize),
1640           "Attributes 'optsize and optnone' are incompatible!", V);
1641
1642    Assert(!Attrs.hasFnAttribute(Attribute::MinSize),
1643           "Attributes 'minsize and optnone' are incompatible!", V);
1644  }
1645
1646  if (Attrs.hasFnAttribute(Attribute::JumpTable)) {
1647    const GlobalValue *GV = cast<GlobalValue>(V);
1648    Assert(GV->hasGlobalUnnamedAddr(),
1649           "Attribute 'jumptable' requires 'unnamed_addr'", V);
1650  }
1651
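  // 'allocsize' names one or two parameter indices (the element size and,
  // optionally, the element count). Each index must be in range and must
  // refer to an integer parameter of the function.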
1652  if (Attrs.hasFnAttribute(Attribute::AllocSize)) {
1653    std::pair<unsigned, Optional<unsigned>> Args =
1654        Attrs.getAllocSizeArgs(AttributeList::FunctionIndex);
1655
1656    auto CheckParam = [&](StringRef Name, unsigned ParamNo) {
1657      if (ParamNo >= FT->getNumParams()) {
1658        CheckFailed("'allocsize' " + Name + " argument is out of bounds", V);
1659        return false;
1660      }
1661
1662      if (!FT->getParamType(ParamNo)->isIntegerTy()) {
1663        CheckFailed("'allocsize' " + Name +
1664                        " argument must refer to an integer parameter",
1665                    V);
1666        return false;
1667      }
1668
1669      return true;
1670    };
1671
1672    if (!CheckParam("element size", Args.first))
1673      return;
1674
1675    if (Args.second && !CheckParam("number of elements", *Args.second))
1676      return;
1677  }
1678}
1679
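/// Check function-level metadata attachments. Currently only !prof
/// attachments are validated here: they must have at least two operands,
/// the first being the string "function_entry_count" and the second an
/// integer constant.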
1680void Verifier::verifyFunctionMetadata(
1681    ArrayRef<std::pair<unsigned, MDNode *>> MDs) {
1682  for (const auto &Pair : MDs) {
1683    if (Pair.first == LLVMContext::MD_prof) {
1684      MDNode *MD = Pair.second;
1685      Assert(MD->getNumOperands() >= 2,
1686             "!prof annotations should have no less than 2 operands", MD);
1687
1688      // Check first operand.
1689      Assert(MD->getOperand(0) != nullptr, "first operand should not be null",
1690             MD);
1691      Assert(isa<MDString>(MD->getOperand(0)),
1692             "expected string with name of the !prof annotation", MD);
1693      MDString *MDS = cast<MDString>(MD->getOperand(0));
1694      StringRef ProfName = MDS->getString();
1695      Assert(ProfName.equals("function_entry_count"),
1696             "first operand should be 'function_entry_count'", MD);
1697
1698      // Check second operand.
1699      Assert(MD->getOperand(1) != nullptr, "second operand should not be null",
1700             MD);
1701      Assert(isa<ConstantAsMetadata>(MD->getOperand(1)),
1702             "expected integer argument to function_entry_count", MD);
1703    }
1704  }
1705}
1706
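/// Walk the operand graph of \p EntryC with an explicit worklist, visiting
/// each constant only once. Every ConstantExpr found is checked by
/// visitConstantExpr, and any GlobalValue reached must belong to the module
/// being verified (globals themselves are verified separately).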
1707void Verifier::visitConstantExprsRecursively(const Constant *EntryC) {
1708  if (!ConstantExprVisited.insert(EntryC).second)
1709    return;
1710
1711  SmallVector<const Constant *, 16> Stack;
1712  Stack.push_back(EntryC);
1713
1714  while (!Stack.empty()) {
1715    const Constant *C = Stack.pop_back_val();
1716
1717    // Check this constant expression.
1718    if (const auto *CE = dyn_cast<ConstantExpr>(C))
1719      visitConstantExpr(CE);
1720
1721    if (const auto *GV = dyn_cast<GlobalValue>(C)) {
1722      // Global Values get visited separately, but we do need to make sure
1723      // that the global value is in the correct module
1724      Assert(GV->getParent() == &M, "Referencing global in another module!",
1725             EntryC, &M, GV, GV->getParent());
1726      continue;
1727    }
1728
1729    // Visit all sub-expressions.
1730    for (const Use &U : C->operands()) {
1731      const auto *OpC = dyn_cast<Constant>(U);
1732      if (!OpC)
1733        continue;
1734      if (!ConstantExprVisited.insert(OpC).second)
1735        continue;
1736      Stack.push_back(OpC);
1737    }
1738  }
1739}
1740
1741void Verifier::visitConstantExpr(const ConstantExpr *CE) {
1742  if (CE->getOpcode() == Instruction::BitCast)
1743    Assert(CastInst::castIsValid(Instruction::BitCast, CE->getOperand(0),
1744                                 CE->getType()),
1745           "Invalid bitcast", CE);
1746
1747  if (CE->getOpcode() == Instruction::IntToPtr ||
1748      CE->getOpcode() == Instruction::PtrToInt) {
1749    auto *PtrTy = CE->getOpcode() == Instruction::IntToPtr
1750                      ? CE->getType()
1751                      : CE->getOperand(0)->getType();
1752    StringRef Msg = CE->getOpcode() == Instruction::IntToPtr
1753                        ? "inttoptr not supported for non-integral pointers"
1754                        : "ptrtoint not supported for non-integral pointers";
1755    Assert(
1756        !DL.isNonIntegralPointerType(cast<PointerType>(PtrTy->getScalarType())),
1757        Msg);
1758  }
1759}
1760
1761bool Verifier::verifyAttributeCount(AttributeList Attrs, unsigned Params) {
1762  // There shouldn't be more attribute sets than there are parameters plus the
1763  // function and return value.
1764  return Attrs.getNumAttrSets() <= Params + 2;
1765}
1766
1767/// Verify that statepoint intrinsic is well formed.
1768void Verifier::verifyStatepoint(ImmutableCallSite CS) {
1769  assert(CS.getCalledFunction() &&
1770         CS.getCalledFunction()->getIntrinsicID() ==
1771           Intrinsic::experimental_gc_statepoint);
1772
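  // The checks below enforce the operand layout of a gc.statepoint call:
  //   0: ID, 1: number of patch bytes, 2: callee (function pointer),
  //   3: number of call args, 4: flags, then the call args, then a
  //   count-prefixed group of transition args, then a count-prefixed group
  //   of deopt args. Any remaining operands (the gc values to be relocated,
  //   per the statepoint design) are not covered by these length fields.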
1773  const Instruction &CI = *CS.getInstruction();
1774
1775  Assert(!CS.doesNotAccessMemory() && !CS.onlyReadsMemory() &&
1776         !CS.onlyAccessesArgMemory(),
1777         "gc.statepoint must read and write all memory to preserve "
1778         "reordering restrictions required by safepoint semantics",
1779         &CI);
1780
1781  const Value *IDV = CS.getArgument(0);
1782  Assert(isa<ConstantInt>(IDV), "gc.statepoint ID must be a constant integer",
1783         &CI);
1784
1785  const Value *NumPatchBytesV = CS.getArgument(1);
1786  Assert(isa<ConstantInt>(NumPatchBytesV),
1787         "gc.statepoint number of patchable bytes must be a constant integer",
1788         &CI);
1789  const int64_t NumPatchBytes =
1790      cast<ConstantInt>(NumPatchBytesV)->getSExtValue();
1791  assert(isInt<32>(NumPatchBytes) && "NumPatchBytesV is an i32!");
1792  Assert(NumPatchBytes >= 0, "gc.statepoint number of patchable bytes must be "
1793                             "non-negative",
1794         &CI);
1795
1796  const Value *Target = CS.getArgument(2);
1797  auto *PT = dyn_cast<PointerType>(Target->getType());
1798  Assert(PT && PT->getElementType()->isFunctionTy(),
1799         "gc.statepoint callee must be of function pointer type", &CI, Target);
1800  FunctionType *TargetFuncType = cast<FunctionType>(PT->getElementType());
1801
1802  const Value *NumCallArgsV = CS.getArgument(3);
1803  Assert(isa<ConstantInt>(NumCallArgsV),
1804         "gc.statepoint number of arguments to underlying call "
1805         "must be constant integer",
1806         &CI);
1807  const int NumCallArgs = cast<ConstantInt>(NumCallArgsV)->getZExtValue();
1808  Assert(NumCallArgs >= 0,
1809         "gc.statepoint number of arguments to underlying call "
1810         "must be non-negative",
1811         &CI);
1812  const int NumParams = (int)TargetFuncType->getNumParams();
1813  if (TargetFuncType->isVarArg()) {
1814    Assert(NumCallArgs >= NumParams,
1815           "gc.statepoint mismatch in number of vararg call args", &CI);
1816
1817    // TODO: Remove this limitation
1818    Assert(TargetFuncType->getReturnType()->isVoidTy(),
1819           "gc.statepoint doesn't support wrapping non-void "
1820           "vararg functions yet",
1821           &CI);
1822  } else
1823    Assert(NumCallArgs == NumParams,
1824           "gc.statepoint mismatch in number of call args", &CI);
1825
1826  const Value *FlagsV = CS.getArgument(4);
1827  Assert(isa<ConstantInt>(FlagsV),
1828         "gc.statepoint flags must be constant integer", &CI);
1829  const uint64_t Flags = cast<ConstantInt>(FlagsV)->getZExtValue();
1830  Assert((Flags & ~(uint64_t)StatepointFlags::MaskAll) == 0,
1831         "unknown flag used in gc.statepoint flags argument", &CI);
1832
1833  // Verify that the types of the call parameter arguments match
1834  // the type of the wrapped callee.
1835  for (int i = 0; i < NumParams; i++) {
1836    Type *ParamType = TargetFuncType->getParamType(i);
1837    Type *ArgType = CS.getArgument(5 + i)->getType();
1838    Assert(ArgType == ParamType,
1839           "gc.statepoint call argument does not match wrapped "
1840           "function type",
1841           &CI);
1842  }
1843
1844  const int EndCallArgsInx = 4 + NumCallArgs;
1845
1846  const Value *NumTransitionArgsV = CS.getArgument(EndCallArgsInx+1);
1847  Assert(isa<ConstantInt>(NumTransitionArgsV),
1848         "gc.statepoint number of transition arguments "
1849         "must be constant integer",
1850         &CI);
1851  const int NumTransitionArgs =
1852      cast<ConstantInt>(NumTransitionArgsV)->getZExtValue();
1853  Assert(NumTransitionArgs >= 0,
1854         "gc.statepoint number of transition arguments must be non-negative", &CI);
1855  const int EndTransitionArgsInx = EndCallArgsInx + 1 + NumTransitionArgs;
1856
1857  const Value *NumDeoptArgsV = CS.getArgument(EndTransitionArgsInx+1);
1858  Assert(isa<ConstantInt>(NumDeoptArgsV),
1859         "gc.statepoint number of deoptimization arguments "
1860         "must be constant integer",
1861         &CI);
1862  const int NumDeoptArgs = cast<ConstantInt>(NumDeoptArgsV)->getZExtValue();
1863  Assert(NumDeoptArgs >= 0, "gc.statepoint number of deoptimization arguments "
1864                            "must be non-negative",
1865         &CI);
1866
1867  const int ExpectedNumArgs =
1868      7 + NumCallArgs + NumTransitionArgs + NumDeoptArgs;
1869  Assert(ExpectedNumArgs <= (int)CS.arg_size(),
1870         "gc.statepoint too few arguments according to length fields", &CI);
1871
1872  // Check that the only uses of this gc.statepoint are gc.result or
1873  // gc.relocate calls which are tied to this statepoint and thus part
1874  // of the same statepoint sequence
1875  for (const User *U : CI.users()) {
1876    const CallInst *Call = dyn_cast<const CallInst>(U);
1877    Assert(Call, "illegal use of statepoint token", &CI, U);
1878    if (!Call) continue;
1879    Assert(isa<GCRelocateInst>(Call) || isa<GCResultInst>(Call),
1880           "gc.result or gc.relocate are the only value uses "
1881           "of a gc.statepoint",
1882           &CI, U);
1883    if (isa<GCResultInst>(Call)) {
1884      Assert(Call->getArgOperand(0) == &CI,
1885             "gc.result connected to wrong gc.statepoint", &CI, Call);
1886    } else if (isa<GCRelocateInst>(Call)) {
1887      Assert(Call->getArgOperand(0) == &CI,
1888             "gc.relocate connected to wrong gc.statepoint", &CI, Call);
1889    }
1890  }
1891
1892  // Note: It is legal for a single derived pointer to be listed multiple
1893  // times.  It's non-optimal, but it is legal.  It can also happen after
1894  // insertion if we strip a bitcast away.
1895  // Note: It is really tempting to check that each base is relocated and
1896  // that a derived pointer is never reused as a base pointer.  This turns
1897  // out to be problematic since optimizations run after safepoint insertion
1898  // can recognize equality properties that the insertion logic doesn't know
1899  // about.  See example statepoint.ll in the verifier subdirectory
1900}
1901
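/// Check, per function, that every index passed to llvm.localrecover is
/// less than the number of objects its parent function passed to
/// llvm.localescape. The counts are gathered while visiting those
/// intrinsics and checked here once the whole module has been seen.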
1902void Verifier::verifyFrameRecoverIndices() {
1903  for (auto &Counts : FrameEscapeInfo) {
1904    Function *F = Counts.first;
1905    unsigned EscapedObjectCount = Counts.second.first;
1906    unsigned MaxRecoveredIndex = Counts.second.second;
1907    Assert(MaxRecoveredIndex <= EscapedObjectCount,
1908           "all indices passed to llvm.localrecover must be less than the "
1909           "number of arguments passed to llvm.localescape in the parent "
1910           "function",
1911           F);
1912  }
1913}
1914
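// Return the first non-PHI instruction (the EH pad) of the unwind
// destination of an invoke, catchswitch, or cleanupret terminator.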
1915static Instruction *getSuccPad(TerminatorInst *Terminator) {
1916  BasicBlock *UnwindDest;
1917  if (auto *II = dyn_cast<InvokeInst>(Terminator))
1918    UnwindDest = II->getUnwindDest();
1919  else if (auto *CSI = dyn_cast<CatchSwitchInst>(Terminator))
1920    UnwindDest = CSI->getUnwindDest();
1921  else
1922    UnwindDest = cast<CleanupReturnInst>(Terminator)->getUnwindDest();
1923  return UnwindDest->getFirstNonPHI();
1924}
1925
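/// Walk the recorded unwind edges between sibling funclets. Each pad has at
/// most one recorded successor, so following the chain from every pad either
/// terminates or revisits an active node; the latter means a group of EH
/// pads unwind to one another, which is reported as a cycle error.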
1926void Verifier::verifySiblingFuncletUnwinds() {
1927  SmallPtrSet<Instruction *, 8> Visited;
1928  SmallPtrSet<Instruction *, 8> Active;
1929  for (const auto &Pair : SiblingFuncletInfo) {
1930    Instruction *PredPad = Pair.first;
1931    if (Visited.count(PredPad))
1932      continue;
1933    Active.insert(PredPad);
1934    TerminatorInst *Terminator = Pair.second;
1935    do {
1936      Instruction *SuccPad = getSuccPad(Terminator);
1937      if (Active.count(SuccPad)) {
1938        // Found a cycle; report error
1939        Instruction *CyclePad = SuccPad;
1940        SmallVector<Instruction *, 8> CycleNodes;
1941        do {
1942          CycleNodes.push_back(CyclePad);
1943          TerminatorInst *CycleTerminator = SiblingFuncletInfo[CyclePad];
1944          if (CycleTerminator != CyclePad)
1945            CycleNodes.push_back(CycleTerminator);
1946          CyclePad = getSuccPad(CycleTerminator);
1947        } while (CyclePad != SuccPad);
1948        Assert(false, "EH pads can't handle each other's exceptions",
1949               ArrayRef<Instruction *>(CycleNodes));
1950      }
1951      // Don't re-walk a node we've already checked
1952      if (!Visited.insert(SuccPad).second)
1953        break;
1954      // Walk to this successor if it has a map entry.
1955      PredPad = SuccPad;
1956      auto TermI = SiblingFuncletInfo.find(PredPad);
1957      if (TermI == SiblingFuncletInfo.end())
1958        break;
1959      Terminator = TermI->second;
1960      Active.insert(PredPad);
1961    } while (true);
1962    // Each node only has one successor, so we've walked all the active
1963    // nodes' successors.
1964    Active.clear();
1965  }
1966}
1967
1968// visitFunction - Verify that a function is ok.
1969//
1970void Verifier::visitFunction(const Function &F) {
1971  visitGlobalValue(F);
1972
1973  // Check function arguments.
1974  FunctionType *FT = F.getFunctionType();
1975  unsigned NumArgs = F.arg_size();
1976
1977  Assert(&Context == &F.getContext(),
1978         "Function context does not match Module context!", &F);
1979
1980  Assert(!F.hasCommonLinkage(), "Functions may not have common linkage", &F);
1981  Assert(FT->getNumParams() == NumArgs,
1982         "# formal arguments must match # of arguments for function type!", &F,
1983         FT);
1984  Assert(F.getReturnType()->isFirstClassType() ||
1985             F.getReturnType()->isVoidTy() || F.getReturnType()->isStructTy(),
1986         "Functions cannot return aggregate values!", &F);
1987
1988  Assert(!F.hasStructRetAttr() || F.getReturnType()->isVoidTy(),
1989         "Invalid struct return type!", &F);
1990
1991  AttributeList Attrs = F.getAttributes();
1992
1993  Assert(verifyAttributeCount(Attrs, FT->getNumParams()),
1994         "Attribute after last parameter!", &F);
1995
1996  // Check function attributes.
1997  verifyFunctionAttrs(FT, Attrs, &F);
1998
1999  // On function declarations/definitions, we do not support the builtin
2000  // attribute. We do not check this in verifyFunctionAttrs since that is
2001  // checking for attributes that can or cannot ever be on functions.
2002  Assert(!Attrs.hasFnAttribute(Attribute::Builtin),
2003         "Attribute 'builtin' can only be applied to a callsite.", &F);
2004
2005  // Check that this function meets the restrictions on this calling convention.
2006  // Sometimes varargs is used for perfect forwarding thunks, so some of these
2007  // restrictions can be lifted.
2008  switch (F.getCallingConv()) {
2009  default:
2010  case CallingConv::C:
2011    break;
2012  case CallingConv::AMDGPU_KERNEL:
2013  case CallingConv::SPIR_KERNEL:
2014    Assert(F.getReturnType()->isVoidTy(),
2015           "Calling convention requires void return type", &F);
2016    LLVM_FALLTHROUGH;
2017  case CallingConv::AMDGPU_VS:
2018  case CallingConv::AMDGPU_HS:
2019  case CallingConv::AMDGPU_GS:
2020  case CallingConv::AMDGPU_PS:
2021  case CallingConv::AMDGPU_CS:
2022    Assert(!F.hasStructRetAttr(),
2023           "Calling convention does not allow sret", &F);
2024    LLVM_FALLTHROUGH;
2025  case CallingConv::Fast:
2026  case CallingConv::Cold:
2027  case CallingConv::Intel_OCL_BI:
2028  case CallingConv::PTX_Kernel:
2029  case CallingConv::PTX_Device:
2030    Assert(!F.isVarArg(), "Calling convention does not support varargs or "
2031                          "perfect forwarding!",
2032           &F);
2033    break;
2034  }
2035
2036  bool isLLVMdotName = F.getName().size() >= 5 &&
2037                       F.getName().substr(0, 5) == "llvm.";
2038
2039  // Check that the argument values match the function type for this function...
2040  unsigned i = 0;
2041  for (const Argument &Arg : F.args()) {
2042    Assert(Arg.getType() == FT->getParamType(i),
2043           "Argument value does not match function argument type!", &Arg,
2044           FT->getParamType(i));
2045    Assert(Arg.getType()->isFirstClassType(),
2046           "Function arguments must have first-class types!", &Arg);
2047    if (!isLLVMdotName) {
2048      Assert(!Arg.getType()->isMetadataTy(),
2049             "Function takes metadata but isn't an intrinsic", &Arg, &F);
2050      Assert(!Arg.getType()->isTokenTy(),
2051             "Function takes token but isn't an intrinsic", &Arg, &F);
2052    }
2053
2054    // Check that swifterror argument is only used by loads and stores.
2055    if (Attrs.hasParamAttribute(i, Attribute::SwiftError)) {
2056      verifySwiftErrorValue(&Arg);
2057    }
2058    ++i;
2059  }
2060
2061  if (!isLLVMdotName)
2062    Assert(!F.getReturnType()->isTokenTy(),
2063           "Functions returns a token but isn't an intrinsic", &F);
2064
2065  // Get the function metadata attachments.
2066  SmallVector<std::pair<unsigned, MDNode *>, 4> MDs;
2067  F.getAllMetadata(MDs);
2068  assert(F.hasMetadata() != MDs.empty() && "Bit out-of-sync");
2069  verifyFunctionMetadata(MDs);
2070
2071  // Check validity of the personality function
2072  if (F.hasPersonalityFn()) {
2073    auto *Per = dyn_cast<Function>(F.getPersonalityFn()->stripPointerCasts());
2074    if (Per)
2075      Assert(Per->getParent() == F.getParent(),
2076             "Referencing personality function in another module!",
2077             &F, F.getParent(), Per, Per->getParent());
2078  }
2079
2080  if (F.isMaterializable()) {
2081    // Function has a body somewhere we can't see.
2082    Assert(MDs.empty(), "unmaterialized function cannot have metadata", &F,
2083           MDs.empty() ? nullptr : MDs.front().second);
2084  } else if (F.isDeclaration()) {
2085    for (const auto &I : MDs) {
2086      AssertDI(I.first != LLVMContext::MD_dbg,
2087               "function declaration may not have a !dbg attachment", &F);
2088      Assert(I.first != LLVMContext::MD_prof,
2089             "function declaration may not have a !prof attachment", &F);
2090
2091      // Verify the metadata itself.
2092      visitMDNode(*I.second);
2093    }
2094    Assert(!F.hasPersonalityFn(),
2095           "Function declaration shouldn't have a personality routine", &F);
2096  } else {
2097    // Verify that this function (which has a body) is not named "llvm.*".  It
2098    // is not legal to define intrinsics.
2099    Assert(!isLLVMdotName, "llvm intrinsics cannot be defined!", &F);
2100
2101    // Check the entry node
2102    const BasicBlock *Entry = &F.getEntryBlock();
2103    Assert(pred_empty(Entry),
2104           "Entry block to function must not have predecessors!", Entry);
2105
2106    // The address of the entry block cannot be taken, unless it is dead.
2107    if (Entry->hasAddressTaken()) {
2108      Assert(!BlockAddress::lookup(Entry)->isConstantUsed(),
2109             "blockaddress may not be used with the entry block!", Entry);
2110    }
2111
2112    unsigned NumDebugAttachments = 0, NumProfAttachments = 0;
2113    // Visit metadata attachments.
2114    for (const auto &I : MDs) {
2115      // Verify that the attachment is legal.
2116      switch (I.first) {
2117      default:
2118        break;
2119      case LLVMContext::MD_dbg: {
2120        ++NumDebugAttachments;
2121        AssertDI(NumDebugAttachments == 1,
2122                 "function must have a single !dbg attachment", &F, I.second);
2123        AssertDI(isa<DISubprogram>(I.second),
2124                 "function !dbg attachment must be a subprogram", &F, I.second);
2125        auto *SP = cast<DISubprogram>(I.second);
2126        const Function *&AttachedTo = DISubprogramAttachments[SP];
2127        AssertDI(!AttachedTo || AttachedTo == &F,
2128                 "DISubprogram attached to more than one function", SP, &F);
2129        AttachedTo = &F;
2130        break;
2131      }
2132      case LLVMContext::MD_prof:
2133        ++NumProfAttachments;
2134        Assert(NumProfAttachments == 1,
2135               "function must have a single !prof attachment", &F, I.second);
2136        break;
2137      }
2138
2139      // Verify the metadata itself.
2140      visitMDNode(*I.second);
2141    }
2142  }
2143
2144  // If this function is actually an intrinsic, verify that it is only used in
2145  // direct call/invokes, never having its "address taken".
2146  // Only do this if the module is materialized, otherwise we don't have all the
2147  // uses.
2148  if (F.getIntrinsicID() && F.getParent()->isMaterialized()) {
2149    const User *U;
2150    if (F.hasAddressTaken(&U))
2151      Assert(false, "Invalid user of intrinsic instruction!", U);
2152  }
2153
2154  Assert(!F.hasDLLImportStorageClass() ||
2155             (F.isDeclaration() && F.hasExternalLinkage()) ||
2156             F.hasAvailableExternallyLinkage(),
2157         "Function is marked as dllimport, but not external.", &F);
2158
2159  auto *N = F.getSubprogram();
2160  HasDebugInfo = (N != nullptr);
2161  if (!HasDebugInfo)
2162    return;
2163
2164  // Check that all !dbg attachments lead back to N (or, at least, another
2165  // subprogram that describes the same function).
2166  //
2167  // FIXME: Check this incrementally while visiting !dbg attachments.
2168  // FIXME: Only check when N is the canonical subprogram for F.
2169  SmallPtrSet<const MDNode *, 32> Seen;
2170  for (auto &BB : F)
2171    for (auto &I : BB) {
2172      // Be careful about using DILocation here since we might be dealing with
2173      // broken code (this is the Verifier after all).
2174      DILocation *DL =
2175          dyn_cast_or_null<DILocation>(I.getDebugLoc().getAsMDNode());
2176      if (!DL)
2177        continue;
2178      if (!Seen.insert(DL).second)
2179        continue;
2180
2181      DILocalScope *Scope = DL->getInlinedAtScope();
2182      if (Scope && !Seen.insert(Scope).second)
2183        continue;
2184
2185      DISubprogram *SP = Scope ? Scope->getSubprogram() : nullptr;
2186
2187      // Scope and SP could be the same MDNode and we don't want to skip
2188      // validation in that case
2189      if (SP && ((Scope != SP) && !Seen.insert(SP).second))
2190        continue;
2191
2192      // FIXME: Once N is canonical, check "SP == &N".
2193      AssertDI(SP->describes(&F),
2194               "!dbg attachment points at wrong subprogram for function", N, &F,
2195               &I, DL, Scope, SP);
2196    }
2197}
2198
2199// verifyBasicBlock - Verify that a basic block is well formed...
2200//
2201void Verifier::visitBasicBlock(BasicBlock &BB) {
2202  InstsInThisBlock.clear();
2203
2204  // Ensure that basic blocks have terminators!
2205  Assert(BB.getTerminator(), "Basic Block does not have terminator!", &BB);
2206
2207  // Check constraints that this basic block imposes on all of the PHI nodes in
2208  // it.
2209  if (isa<PHINode>(BB.front())) {
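    // Both the predecessor list and the (incoming block, incoming value)
    // pairs are sorted so that a single linear pass can pair each PHI entry
    // with a predecessor and detect conflicting duplicate entries.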
2210    SmallVector<BasicBlock*, 8> Preds(pred_begin(&BB), pred_end(&BB));
2211    SmallVector<std::pair<BasicBlock*, Value*>, 8> Values;
2212    std::sort(Preds.begin(), Preds.end());
2213    for (const PHINode &PN : BB.phis()) {
2214      // Ensure that PHI nodes have at least one entry!
2215      Assert(PN.getNumIncomingValues() != 0,
2216             "PHI nodes must have at least one entry.  If the block is dead, "
2217             "the PHI should be removed!",
2218             &PN);
2219      Assert(PN.getNumIncomingValues() == Preds.size(),
2220             "PHINode should have one entry for each predecessor of its "
2221             "parent basic block!",
2222             &PN);
2223
2224      // Get and sort all incoming values in the PHI node...
2225      Values.clear();
2226      Values.reserve(PN.getNumIncomingValues());
2227      for (unsigned i = 0, e = PN.getNumIncomingValues(); i != e; ++i)
2228        Values.push_back(
2229            std::make_pair(PN.getIncomingBlock(i), PN.getIncomingValue(i)));
2230      std::sort(Values.begin(), Values.end());
2231
2232      for (unsigned i = 0, e = Values.size(); i != e; ++i) {
2233        // Check to make sure that if there is more than one entry for a
2234        // particular basic block in this PHI node, that the incoming values are
2235        // all identical.
2236        //
2237        Assert(i == 0 || Values[i].first != Values[i - 1].first ||
2238                   Values[i].second == Values[i - 1].second,
2239               "PHI node has multiple entries for the same basic block with "
2240               "different incoming values!",
2241               &PN, Values[i].first, Values[i].second, Values[i - 1].second);
2242
2243        // Check to make sure that the predecessors and PHI node entries are
2244        // matched up.
2245        Assert(Values[i].first == Preds[i],
2246               "PHI node entries do not match predecessors!", &PN,
2247               Values[i].first, Preds[i]);
2248      }
2249    }
2250  }
2251
2252  // Check that all instructions have their parent pointers set up correctly.
2253  for (auto &I : BB)
2254  {
2255    Assert(I.getParent() == &BB, "Instruction has bogus parent pointer!");
2256  }
2257}
2258
2259void Verifier::visitTerminatorInst(TerminatorInst &I) {
2260  // Ensure that terminators only exist at the end of the basic block.
2261  Assert(&I == I.getParent()->getTerminator(),
2262         "Terminator found in the middle of a basic block!", I.getParent());
2263  visitInstruction(I);
2264}
2265
2266void Verifier::visitBranchInst(BranchInst &BI) {
2267  if (BI.isConditional()) {
2268    Assert(BI.getCondition()->getType()->isIntegerTy(1),
2269           "Branch condition is not 'i1' type!", &BI, BI.getCondition());
2270  }
2271  visitTerminatorInst(BI);
2272}
2273
2274void Verifier::visitReturnInst(ReturnInst &RI) {
2275  Function *F = RI.getParent()->getParent();
2276  unsigned N = RI.getNumOperands();
2277  if (F->getReturnType()->isVoidTy())
2278    Assert(N == 0,
2279           "Found return instr that returns non-void in Function of void "
2280           "return type!",
2281           &RI, F->getReturnType());
2282  else
2283    Assert(N == 1 && F->getReturnType() == RI.getOperand(0)->getType(),
2284           "Function return type does not match operand "
2285           "type of return inst!",
2286           &RI, F->getReturnType());
2287
2288  // Check to make sure that the return value has necessary properties for
2289  // terminators...
2290  visitTerminatorInst(RI);
2291}
2292
2293void Verifier::visitSwitchInst(SwitchInst &SI) {
2294  // Check to make sure that all of the constants in the switch instruction
2295  // have the same type as the switched-on value.
2296  Type *SwitchTy = SI.getCondition()->getType();
2297  SmallPtrSet<ConstantInt*, 32> Constants;
2298  for (auto &Case : SI.cases()) {
2299    Assert(Case.getCaseValue()->getType() == SwitchTy,
2300           "Switch constants must all be same type as switch value!", &SI);
2301    Assert(Constants.insert(Case.getCaseValue()).second,
2302           "Duplicate integer as switch case", &SI, Case.getCaseValue());
2303  }
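  // Illustrative (hand-written) IR rejected by the duplicate-case check:
  //   switch i32 %x, label %d [ i32 1, label %a
  //                             i32 1, label %b ]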
2304
2305  visitTerminatorInst(SI);
2306}
2307
2308void Verifier::visitIndirectBrInst(IndirectBrInst &BI) {
2309  Assert(BI.getAddress()->getType()->isPointerTy(),
2310         "Indirectbr operand must have pointer type!", &BI);
2311  for (unsigned i = 0, e = BI.getNumDestinations(); i != e; ++i)
2312    Assert(BI.getDestination(i)->getType()->isLabelTy(),
2313           "Indirectbr destinations must all have pointer type!", &BI);
2314
2315  visitTerminatorInst(BI);
2316}
2317
2318void Verifier::visitSelectInst(SelectInst &SI) {
2319  Assert(!SelectInst::areInvalidOperands(SI.getOperand(0), SI.getOperand(1),
2320                                         SI.getOperand(2)),
2321         "Invalid operands for select instruction!", &SI);
2322
2323  Assert(SI.getTrueValue()->getType() == SI.getType(),
2324         "Select values must have same type as select instruction!", &SI);
2325  visitInstruction(SI);
2326}
2327
2328/// visitUserOp1 - User-defined operators shouldn't live beyond the lifetime of
2329/// a pass; if any exist, it's an error.
2330///
2331void Verifier::visitUserOp1(Instruction &I) {
2332  Assert(false, "User-defined operators should not live outside of a pass!", &I);
2333}
2334
2335void Verifier::visitTruncInst(TruncInst &I) {
2336  // Get the source and destination types
2337  Type *SrcTy = I.getOperand(0)->getType();
2338  Type *DestTy = I.getType();
2339
2340  // Get the size of the types in bits, we'll need this later
2341  unsigned SrcBitSize = SrcTy->getScalarSizeInBits();
2342  unsigned DestBitSize = DestTy->getScalarSizeInBits();
2343
2344  Assert(SrcTy->isIntOrIntVectorTy(), "Trunc only operates on integer", &I);
2345  Assert(DestTy->isIntOrIntVectorTy(), "Trunc only produces integer", &I);
2346  Assert(SrcTy->isVectorTy() == DestTy->isVectorTy(),
2347         "trunc source and destination must both be a vector or neither", &I);
2348  Assert(SrcBitSize > DestBitSize, "DestTy too big for Trunc", &I);
2349
2350  visitInstruction(I);
2351}
2352
2353void Verifier::visitZExtInst(ZExtInst &I) {
2354  // Get the source and destination types
2355  Type *SrcTy = I.getOperand(0)->getType();
2356  Type *DestTy = I.getType();
2357
2358  // Get the size of the types in bits, we'll need this later
2359  Assert(SrcTy->isIntOrIntVectorTy(), "ZExt only operates on integer", &I);
2360  Assert(DestTy->isIntOrIntVectorTy(), "ZExt only produces an integer", &I);
2361  Assert(SrcTy->isVectorTy() == DestTy->isVectorTy(),
2362         "zext source and destination must both be a vector or neither", &I);
2363  unsigned SrcBitSize = SrcTy->getScalarSizeInBits();
2364  unsigned DestBitSize = DestTy->getScalarSizeInBits();
2365
2366  Assert(SrcBitSize < DestBitSize, "Type too small for ZExt", &I);
2367
2368  visitInstruction(I);
2369}
2370
2371void Verifier::visitSExtInst(SExtInst &I) {
2372  // Get the source and destination types
2373  Type *SrcTy = I.getOperand(0)->getType();
2374  Type *DestTy = I.getType();
2375
2376  // Get the size of the types in bits, we'll need this later
2377  unsigned SrcBitSize = SrcTy->getScalarSizeInBits();
2378  unsigned DestBitSize = DestTy->getScalarSizeInBits();
2379
2380  Assert(SrcTy->isIntOrIntVectorTy(), "SExt only operates on integer", &I);
2381  Assert(DestTy->isIntOrIntVectorTy(), "SExt only produces an integer", &I);
2382  Assert(SrcTy->isVectorTy() == DestTy->isVectorTy(),
2383         "sext source and destination must both be a vector or neither", &I);
2384  Assert(SrcBitSize < DestBitSize, "Type too small for SExt", &I);
2385
2386  visitInstruction(I);
2387}
2388
2389void Verifier::visitFPTruncInst(FPTruncInst &I) {
2390  // Get the source and destination types
2391  Type *SrcTy = I.getOperand(0)->getType();
2392  Type *DestTy = I.getType();
2393  // Get the size of the types in bits, we'll need this later
2394  unsigned SrcBitSize = SrcTy->getScalarSizeInBits();
2395  unsigned DestBitSize = DestTy->getScalarSizeInBits();
2396
2397  Assert(SrcTy->isFPOrFPVectorTy(), "FPTrunc only operates on FP", &I);
2398  Assert(DestTy->isFPOrFPVectorTy(), "FPTrunc only produces an FP", &I);
2399  Assert(SrcTy->isVectorTy() == DestTy->isVectorTy(),
2400         "fptrunc source and destination must both be a vector or neither", &I);
2401  Assert(SrcBitSize > DestBitSize, "DestTy too big for FPTrunc", &I);
2402
2403  visitInstruction(I);
2404}
2405
2406void Verifier::visitFPExtInst(FPExtInst &I) {
2407  // Get the source and destination types
2408  Type *SrcTy = I.getOperand(0)->getType();
2409  Type *DestTy = I.getType();
2410
2411  // Get the size of the types in bits, we'll need this later
2412  unsigned SrcBitSize = SrcTy->getScalarSizeInBits();
2413  unsigned DestBitSize = DestTy->getScalarSizeInBits();
2414
2415  Assert(SrcTy->isFPOrFPVectorTy(), "FPExt only operates on FP", &I);
2416  Assert(DestTy->isFPOrFPVectorTy(), "FPExt only produces an FP", &I);
2417  Assert(SrcTy->isVectorTy() == DestTy->isVectorTy(),
2418         "fpext source and destination must both be a vector or neither", &I);
2419  Assert(SrcBitSize < DestBitSize, "DestTy too small for FPExt", &I);
2420
2421  visitInstruction(I);
2422}
2423
2424void Verifier::visitUIToFPInst(UIToFPInst &I) {
2425  // Get the source and destination types
2426  Type *SrcTy = I.getOperand(0)->getType();
2427  Type *DestTy = I.getType();
2428
2429  bool SrcVec = SrcTy->isVectorTy();
2430  bool DstVec = DestTy->isVectorTy();
2431
2432  Assert(SrcVec == DstVec,
2433         "UIToFP source and dest must both be vector or scalar", &I);
2434  Assert(SrcTy->isIntOrIntVectorTy(),
2435         "UIToFP source must be integer or integer vector", &I);
2436  Assert(DestTy->isFPOrFPVectorTy(), "UIToFP result must be FP or FP vector",
2437         &I);
2438
2439  if (SrcVec && DstVec)
2440    Assert(cast<VectorType>(SrcTy)->getNumElements() ==
2441               cast<VectorType>(DestTy)->getNumElements(),
2442           "UIToFP source and dest vector length mismatch", &I);
2443
2444  visitInstruction(I);
2445}
2446
2447void Verifier::visitSIToFPInst(SIToFPInst &I) {
2448  // Get the source and destination types
2449  Type *SrcTy = I.getOperand(0)->getType();
2450  Type *DestTy = I.getType();
2451
2452  bool SrcVec = SrcTy->isVectorTy();
2453  bool DstVec = DestTy->isVectorTy();
2454
2455  Assert(SrcVec == DstVec,
2456         "SIToFP source and dest must both be vector or scalar", &I);
2457  Assert(SrcTy->isIntOrIntVectorTy(),
2458         "SIToFP source must be integer or integer vector", &I);
2459  Assert(DestTy->isFPOrFPVectorTy(), "SIToFP result must be FP or FP vector",
2460         &I);
2461
2462  if (SrcVec && DstVec)
2463    Assert(cast<VectorType>(SrcTy)->getNumElements() ==
2464               cast<VectorType>(DestTy)->getNumElements(),
2465           "SIToFP source and dest vector length mismatch", &I);
2466
2467  visitInstruction(I);
2468}
2469
2470void Verifier::visitFPToUIInst(FPToUIInst &I) {
2471  // Get the source and destination types
2472  Type *SrcTy = I.getOperand(0)->getType();
2473  Type *DestTy = I.getType();
2474
2475  bool SrcVec = SrcTy->isVectorTy();
2476  bool DstVec = DestTy->isVectorTy();
2477
2478  Assert(SrcVec == DstVec,
2479         "FPToUI source and dest must both be vector or scalar", &I);
2480  Assert(SrcTy->isFPOrFPVectorTy(), "FPToUI source must be FP or FP vector",
2481         &I);
2482  Assert(DestTy->isIntOrIntVectorTy(),
2483         "FPToUI result must be integer or integer vector", &I);
2484
2485  if (SrcVec && DstVec)
2486    Assert(cast<VectorType>(SrcTy)->getNumElements() ==
2487               cast<VectorType>(DestTy)->getNumElements(),
2488           "FPToUI source and dest vector length mismatch", &I);
2489
2490  visitInstruction(I);
2491}
2492
2493void Verifier::visitFPToSIInst(FPToSIInst &I) {
2494  // Get the source and destination types
2495  Type *SrcTy = I.getOperand(0)->getType();
2496  Type *DestTy = I.getType();
2497
2498  bool SrcVec = SrcTy->isVectorTy();
2499  bool DstVec = DestTy->isVectorTy();
2500
2501  Assert(SrcVec == DstVec,
2502         "FPToSI source and dest must both be vector or scalar", &I);
2503  Assert(SrcTy->isFPOrFPVectorTy(), "FPToSI source must be FP or FP vector",
2504         &I);
2505  Assert(DestTy->isIntOrIntVectorTy(),
2506         "FPToSI result must be integer or integer vector", &I);
2507
2508  if (SrcVec && DstVec)
2509    Assert(cast<VectorType>(SrcTy)->getNumElements() ==
2510               cast<VectorType>(DestTy)->getNumElements(),
2511           "FPToSI source and dest vector length mismatch", &I);
2512
2513  visitInstruction(I);
2514}
2515
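// Pointers in address spaces that the data layout declares non-integral have
// no stable integer representation, so ptrtoint and inttoptr involving them
// are rejected below (and in the matching constant-expression checks above).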
2516void Verifier::visitPtrToIntInst(PtrToIntInst &I) {
2517  // Get the source and destination types
2518  Type *SrcTy = I.getOperand(0)->getType();
2519  Type *DestTy = I.getType();
2520
2521  Assert(SrcTy->isPtrOrPtrVectorTy(), "PtrToInt source must be pointer", &I);
2522
2523  if (auto *PTy = dyn_cast<PointerType>(SrcTy->getScalarType()))
2524    Assert(!DL.isNonIntegralPointerType(PTy),
2525           "ptrtoint not supported for non-integral pointers");
2526
2527  Assert(DestTy->isIntOrIntVectorTy(), "PtrToInt result must be integral", &I);
2528  Assert(SrcTy->isVectorTy() == DestTy->isVectorTy(), "PtrToInt type mismatch",
2529         &I);
2530
2531  if (SrcTy->isVectorTy()) {
2532    VectorType *VSrc = dyn_cast<VectorType>(SrcTy);
2533    VectorType *VDest = dyn_cast<VectorType>(DestTy);
2534    Assert(VSrc->getNumElements() == VDest->getNumElements(),
2535           "PtrToInt Vector width mismatch", &I);
2536  }
2537
2538  visitInstruction(I);
2539}
2540
2541void Verifier::visitIntToPtrInst(IntToPtrInst &I) {
2542  // Get the source and destination types
2543  Type *SrcTy = I.getOperand(0)->getType();
2544  Type *DestTy = I.getType();
2545
2546  Assert(SrcTy->isIntOrIntVectorTy(),
2547         "IntToPtr source must be an integral", &I);
2548  Assert(DestTy->isPtrOrPtrVectorTy(), "IntToPtr result must be a pointer", &I);
2549
2550  if (auto *PTy = dyn_cast<PointerType>(DestTy->getScalarType()))
2551    Assert(!DL.isNonIntegralPointerType(PTy),
2552           "inttoptr not supported for non-integral pointers");
2553
2554  Assert(SrcTy->isVectorTy() == DestTy->isVectorTy(), "IntToPtr type mismatch",
2555         &I);
2556  if (SrcTy->isVectorTy()) {
2557    VectorType *VSrc = dyn_cast<VectorType>(SrcTy);
2558    VectorType *VDest = dyn_cast<VectorType>(DestTy);
2559    Assert(VSrc->getNumElements() == VDest->getNumElements(),
2560           "IntToPtr Vector width mismatch", &I);
2561  }
2562  visitInstruction(I);
2563}
2564
2565void Verifier::visitBitCastInst(BitCastInst &I) {
2566  Assert(
2567      CastInst::castIsValid(Instruction::BitCast, I.getOperand(0), I.getType()),
2568      "Invalid bitcast", &I);
2569  visitInstruction(I);
2570}
2571
2572void Verifier::visitAddrSpaceCastInst(AddrSpaceCastInst &I) {
2573  Type *SrcTy = I.getOperand(0)->getType();
2574  Type *DestTy = I.getType();
2575
2576  Assert(SrcTy->isPtrOrPtrVectorTy(), "AddrSpaceCast source must be a pointer",
2577         &I);
2578  Assert(DestTy->isPtrOrPtrVectorTy(), "AddrSpaceCast result must be a pointer",
2579         &I);
2580  Assert(SrcTy->getPointerAddressSpace() != DestTy->getPointerAddressSpace(),
2581         "AddrSpaceCast must be between different address spaces", &I);
2582  if (SrcTy->isVectorTy())
2583    Assert(SrcTy->getVectorNumElements() == DestTy->getVectorNumElements(),
2584           "AddrSpaceCast vector pointer number of elements mismatch", &I);
2585  visitInstruction(I);
2586}
2587
2588/// visitPHINode - Ensure that a PHI node is well formed.
2589///
2590void Verifier::visitPHINode(PHINode &PN) {
2591  // Ensure that the PHI nodes are all grouped together at the top of the block.
2592  // This can be tested by checking whether the instruction before this is
2593  // either nonexistent (because this is begin()) or is a PHI node.  If not,
2594  // then there is some other instruction before a PHI.
2595  Assert(&PN == &PN.getParent()->front() ||
2596             isa<PHINode>(--BasicBlock::iterator(&PN)),
2597         "PHI nodes not grouped at top of basic block!", &PN, PN.getParent());
2598
2599  // Check that a PHI doesn't yield a Token.
2600  Assert(!PN.getType()->isTokenTy(), "PHI nodes cannot have token type!");
2601
2602  // Check that all of the values of the PHI node have the same type as the
2603  // result, and that the incoming blocks are really basic blocks.
2604  for (Value *IncValue : PN.incoming_values()) {
2605    Assert(PN.getType() == IncValue->getType(),
2606           "PHI node operands are not the same type as the result!", &PN);
2607  }
2608
2609  // All other PHI node constraints are checked in the visitBasicBlock method.
2610
2611  visitInstruction(PN);
2612}
2613
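// Shared checks for call and invoke sites: among other things, the callee
// must be a pointer to a function type that matches the call, argument
// counts and types must match that type, attributes (including those on the
// vararg portion) are verified, inalloca and swifterror arguments are
// checked against the underlying alloca or parameter, operand bundles are
// checked, and calls between debug-info-bearing functions must carry a !dbg
// location.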
2614void Verifier::verifyCallSite(CallSite CS) {
2615  Instruction *I = CS.getInstruction();
2616
2617  Assert(CS.getCalledValue()->getType()->isPointerTy(),
2618         "Called function must be a pointer!", I);
2619  PointerType *FPTy = cast<PointerType>(CS.getCalledValue()->getType());
2620
2621  Assert(FPTy->getElementType()->isFunctionTy(),
2622         "Called function is not pointer to function type!", I);
2623
2624  Assert(FPTy->getElementType() == CS.getFunctionType(),
2625         "Called function is not the same type as the call!", I);
2626
2627  FunctionType *FTy = CS.getFunctionType();
2628
2629  // Verify that the correct number of arguments are being passed
2630  if (FTy->isVarArg())
2631    Assert(CS.arg_size() >= FTy->getNumParams(),
2632           "Called function requires more parameters than were provided!", I);
2633  else
2634    Assert(CS.arg_size() == FTy->getNumParams(),
2635           "Incorrect number of arguments passed to called function!", I);
2636
2637  // Verify that all arguments to the call match the function type.
2638  for (unsigned i = 0, e = FTy->getNumParams(); i != e; ++i)
2639    Assert(CS.getArgument(i)->getType() == FTy->getParamType(i),
2640           "Call parameter type does not match function signature!",
2641           CS.getArgument(i), FTy->getParamType(i), I);
2642
2643  AttributeList Attrs = CS.getAttributes();
2644
2645  Assert(verifyAttributeCount(Attrs, CS.arg_size()),
2646         "Attribute after last parameter!", I);
2647
2648  if (Attrs.hasAttribute(AttributeList::FunctionIndex, Attribute::Speculatable)) {
2649    // Don't allow speculatable on call sites, unless the underlying function
2650    // declaration is also speculatable.
2651    Function *Callee
2652      = dyn_cast<Function>(CS.getCalledValue()->stripPointerCasts());
2653    Assert(Callee && Callee->isSpeculatable(),
2654           "speculatable attribute may not apply to call sites", I);
2655  }
2656
2657  // Verify call attributes.
2658  verifyFunctionAttrs(FTy, Attrs, I);
2659
2660  // Conservatively check the inalloca argument.
2661  // If we can see through to an underlying alloca, it must itself be marked
2662  // with the inalloca attribute; anything else indicates broken IR.
2663  if (CS.hasInAllocaArgument()) {
2664    Value *InAllocaArg = CS.getArgument(FTy->getNumParams() - 1);
2665    if (auto AI = dyn_cast<AllocaInst>(InAllocaArg->stripInBoundsOffsets()))
2666      Assert(AI->isUsedWithInAlloca(),
2667             "inalloca argument for call has mismatched alloca", AI, I);
2668  }
2669
2670  // For each argument of the callsite, if it has the swifterror attribute,
2671  // make sure the underlying alloca/parameter it comes from has a swifterror as
2672  // well.
2673  for (unsigned i = 0, e = FTy->getNumParams(); i != e; ++i)
2674    if (CS.paramHasAttr(i, Attribute::SwiftError)) {
2675      Value *SwiftErrorArg = CS.getArgument(i);
2676      if (auto AI = dyn_cast<AllocaInst>(SwiftErrorArg->stripInBoundsOffsets())) {
2677        Assert(AI->isSwiftError(),
2678               "swifterror argument for call has mismatched alloca", AI, I);
2679        continue;
2680      }
2681      auto ArgI = dyn_cast<Argument>(SwiftErrorArg);
2682      Assert(ArgI, "swifterror argument should come from an alloca or parameter", SwiftErrorArg, I);
2683      Assert(ArgI->hasSwiftErrorAttr(),
2684             "swifterror argument for call has mismatched parameter", ArgI, I);
2685    }
2686
2687  if (FTy->isVarArg()) {
2688    // FIXME? is 'nest' even legal here?
2689    bool SawNest = false;
2690    bool SawReturned = false;
2691
2692    for (unsigned Idx = 0; Idx < FTy->getNumParams(); ++Idx) {
2693      if (Attrs.hasParamAttribute(Idx, Attribute::Nest))
2694        SawNest = true;
2695      if (Attrs.hasParamAttribute(Idx, Attribute::Returned))
2696        SawReturned = true;
2697    }
2698
2699    // Check attributes on the varargs part.
2700    for (unsigned Idx = FTy->getNumParams(); Idx < CS.arg_size(); ++Idx) {
2701      Type *Ty = CS.getArgument(Idx)->getType();
2702      AttributeSet ArgAttrs = Attrs.getParamAttributes(Idx);
2703      verifyParameterAttrs(ArgAttrs, Ty, I);
2704
2705      if (ArgAttrs.hasAttribute(Attribute::Nest)) {
2706        Assert(!SawNest, "More than one parameter has attribute nest!", I);
2707        SawNest = true;
2708      }
2709
2710      if (ArgAttrs.hasAttribute(Attribute::Returned)) {
2711        Assert(!SawReturned, "More than one parameter has attribute returned!",
2712               I);
2713        Assert(Ty->canLosslesslyBitCastTo(FTy->getReturnType()),
2714               "Incompatible argument and return types for 'returned' "
2715               "attribute",
2716               I);
2717        SawReturned = true;
2718      }
2719
2720      Assert(!ArgAttrs.hasAttribute(Attribute::StructRet),
2721             "Attribute 'sret' cannot be used for vararg call arguments!", I);
2722
2723      if (ArgAttrs.hasAttribute(Attribute::InAlloca))
2724        Assert(Idx == CS.arg_size() - 1, "inalloca isn't on the last argument!",
2725               I);
2726    }
2727  }
2728
2729  // Verify that there's no metadata unless it's a direct call to an intrinsic.
2730  if (CS.getCalledFunction() == nullptr ||
2731      !CS.getCalledFunction()->getName().startswith("llvm.")) {
2732    for (Type *ParamTy : FTy->params()) {
2733      Assert(!ParamTy->isMetadataTy(),
2734             "Function has metadata parameter but isn't an intrinsic", I);
2735      Assert(!ParamTy->isTokenTy(),
2736             "Function has token parameter but isn't an intrinsic", I);
2737    }
2738  }
2739
2740  // Verify that indirect calls don't return tokens.
2741  if (CS.getCalledFunction() == nullptr)
2742    Assert(!FTy->getReturnType()->isTokenTy(),
2743           "Return type cannot be token for indirect call!");
2744
2745  if (Function *F = CS.getCalledFunction())
2746    if (Intrinsic::ID ID = (Intrinsic::ID)F->getIntrinsicID())
2747      visitIntrinsicCallSite(ID, CS);
2748
2749  // Verify that a callsite has at most one "deopt", at most one "funclet" and
2750  // at most one "gc-transition" operand bundle.
2751  bool FoundDeoptBundle = false, FoundFuncletBundle = false,
2752       FoundGCTransitionBundle = false;
2753  for (unsigned i = 0, e = CS.getNumOperandBundles(); i < e; ++i) {
2754    OperandBundleUse BU = CS.getOperandBundleAt(i);
2755    uint32_t Tag = BU.getTagID();
2756    if (Tag == LLVMContext::OB_deopt) {
2757      Assert(!FoundDeoptBundle, "Multiple deopt operand bundles", I);
2758      FoundDeoptBundle = true;
2759    } else if (Tag == LLVMContext::OB_gc_transition) {
2760      Assert(!FoundGCTransitionBundle, "Multiple gc-transition operand bundles",
2761             I);
2762      FoundGCTransitionBundle = true;
2763    } else if (Tag == LLVMContext::OB_funclet) {
2764      Assert(!FoundFuncletBundle, "Multiple funclet operand bundles", I);
2765      FoundFuncletBundle = true;
2766      Assert(BU.Inputs.size() == 1,
2767             "Expected exactly one funclet bundle operand", I);
2768      Assert(isa<FuncletPadInst>(BU.Inputs.front()),
2769             "Funclet bundle operands should correspond to a FuncletPadInst",
2770             I);
2771    }
2772  }
2773
2774  // Verify that each inlinable callsite of a debug-info-bearing function in a
2775  // debug-info-bearing function has a debug location attached to it. Failure to
2776  // do so causes assertion failures when the inliner sets up inline scope info.
2777  if (I->getFunction()->getSubprogram() && CS.getCalledFunction() &&
2778      CS.getCalledFunction()->getSubprogram())
2779    AssertDI(I->getDebugLoc(), "inlinable function call in a function with "
2780                               "debug info must have a !dbg location",
2781             I);
2782
2783  visitInstruction(*I);
2784}
2785
2786/// Two types are "congruent" if they are identical, or if they are both pointer
2787/// types with different pointee types and the same address space.
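/// For example, i8* and i32* are congruent (both are in address space 0),
/// while i8* and i8 addrspace(1)* are not.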
2788static bool isTypeCongruent(Type *L, Type *R) {
2789  if (L == R)
2790    return true;
2791  PointerType *PL = dyn_cast<PointerType>(L);
2792  PointerType *PR = dyn_cast<PointerType>(R);
2793  if (!PL || !PR)
2794    return false;
2795  return PL->getAddressSpace() == PR->getAddressSpace();
2796}
2797
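/// Collect the ABI-impacting attributes (sret, byval, inalloca, inreg,
/// returned, swiftself, swifterror) plus any explicit alignment of the
/// parameter at index I, so that caller and callee parameters can be compared
/// when verifying musttail calls.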
2798static AttrBuilder getParameterABIAttributes(int I, AttributeList Attrs) {
2799  static const Attribute::AttrKind ABIAttrs[] = {
2800      Attribute::StructRet, Attribute::ByVal, Attribute::InAlloca,
2801      Attribute::InReg, Attribute::Returned, Attribute::SwiftSelf,
2802      Attribute::SwiftError};
2803  AttrBuilder Copy;
2804  for (auto AK : ABIAttrs) {
2805    if (Attrs.hasParamAttribute(I, AK))
2806      Copy.addAttribute(AK);
2807  }
2808  if (Attrs.hasParamAttribute(I, Attribute::Alignment))
2809    Copy.addAlignmentAttr(Attrs.getParamAlignment(I));
2810  return Copy;
2811}
2812
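// A minimal well-formed musttail sequence looks like (illustrative IR,
// assuming the caller's prototype and calling convention match the callee's):
//   %v = musttail call i32 @callee(i32 %x)
//   ret i32 %v
// with at most a pointer bitcast of the result between the call and the ret.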
2813void Verifier::verifyMustTailCall(CallInst &CI) {
2814  Assert(!CI.isInlineAsm(), "cannot use musttail call with inline asm", &CI);
2815
2816  // - The caller and callee prototypes must match.  Pointer types of
2817  //   parameters or return types may differ in pointee type, but not
2818  //   address space.
2819  Function *F = CI.getParent()->getParent();
2820  FunctionType *CallerTy = F->getFunctionType();
2821  FunctionType *CalleeTy = CI.getFunctionType();
2822  Assert(CallerTy->getNumParams() == CalleeTy->getNumParams(),
2823         "cannot guarantee tail call due to mismatched parameter counts", &CI);
2824  Assert(CallerTy->isVarArg() == CalleeTy->isVarArg(),
2825         "cannot guarantee tail call due to mismatched varargs", &CI);
2826  Assert(isTypeCongruent(CallerTy->getReturnType(), CalleeTy->getReturnType()),
2827         "cannot guarantee tail call due to mismatched return types", &CI);
2828  for (int I = 0, E = CallerTy->getNumParams(); I != E; ++I) {
2829    Assert(
2830        isTypeCongruent(CallerTy->getParamType(I), CalleeTy->getParamType(I)),
2831        "cannot guarantee tail call due to mismatched parameter types", &CI);
2832  }
2833
2834  // - The calling conventions of the caller and callee must match.
2835  Assert(F->getCallingConv() == CI.getCallingConv(),
2836         "cannot guarantee tail call due to mismatched calling conv", &CI);
2837
2838  // - All ABI-impacting function attributes, such as sret, byval, inreg,
2839  //   returned, and inalloca, must match.
2840  AttributeList CallerAttrs = F->getAttributes();
2841  AttributeList CalleeAttrs = CI.getAttributes();
2842  for (int I = 0, E = CallerTy->getNumParams(); I != E; ++I) {
2843    AttrBuilder CallerABIAttrs = getParameterABIAttributes(I, CallerAttrs);
2844    AttrBuilder CalleeABIAttrs = getParameterABIAttributes(I, CalleeAttrs);
2845    Assert(CallerABIAttrs == CalleeABIAttrs,
2846           "cannot guarantee tail call due to mismatched ABI impacting "
2847           "function attributes",
2848           &CI, CI.getOperand(I));
2849  }
2850
2851  // - The call must immediately precede a ret instruction, or a pointer
2852  //   bitcast followed by a ret instruction.
2853  // - The ret instruction must return the (possibly bitcasted) value
2854  //   produced by the call or void.
2855  Value *RetVal = &CI;
2856  Instruction *Next = CI.getNextNode();
2857
2858  // Handle the optional bitcast.
2859  if (BitCastInst *BI = dyn_cast_or_null<BitCastInst>(Next)) {
2860    Assert(BI->getOperand(0) == RetVal,
2861           "bitcast following musttail call must use the call", BI);
2862    RetVal = BI;
2863    Next = BI->getNextNode();
2864  }
2865
2866  // Check the return.
2867  ReturnInst *Ret = dyn_cast_or_null<ReturnInst>(Next);
2868  Assert(Ret, "musttail call must precede a ret with an optional bitcast",
2869         &CI);
2870  Assert(!Ret->getReturnValue() || Ret->getReturnValue() == RetVal,
2871         "musttail call result must be returned", Ret);
2872}
2873
2874void Verifier::visitCallInst(CallInst &CI) {
2875  verifyCallSite(&CI);
2876
2877  if (CI.isMustTailCall())
2878    verifyMustTailCall(CI);
2879}
2880
2881void Verifier::visitInvokeInst(InvokeInst &II) {
2882  verifyCallSite(&II);
2883
2884  // Verify that the first non-PHI instruction of the unwind destination is an
2885  // exception handling instruction.
2886  Assert(
2887      II.getUnwindDest()->isEHPad(),
2888      "The unwind destination does not have an exception handling instruction!",
2889      &II);
2890
2891  visitTerminatorInst(II);
2892}
2893
2894/// visitBinaryOperator - Check that both arguments to the binary operator are
2895/// of the same type!
2896///
2897void Verifier::visitBinaryOperator(BinaryOperator &B) {
2898  Assert(B.getOperand(0)->getType() == B.getOperand(1)->getType(),
2899         "Both operands to a binary operator are not of the same type!", &B);
2900
2901  switch (B.getOpcode()) {
2902  // Check that integer arithmetic operators are only used with
2903  // integral operands.
2904  case Instruction::Add:
2905  case Instruction::Sub:
2906  case Instruction::Mul:
2907  case Instruction::SDiv:
2908  case Instruction::UDiv:
2909  case Instruction::SRem:
2910  case Instruction::URem:
2911    Assert(B.getType()->isIntOrIntVectorTy(),
2912           "Integer arithmetic operators only work with integral types!", &B);
2913    Assert(B.getType() == B.getOperand(0)->getType(),
2914           "Integer arithmetic operators must have same type "
2915           "for operands and result!",
2916           &B);
2917    break;
2918  // Check that floating-point arithmetic operators are only used with
2919  // floating-point operands.
2920  case Instruction::FAdd:
2921  case Instruction::FSub:
2922  case Instruction::FMul:
2923  case Instruction::FDiv:
2924  case Instruction::FRem:
2925    Assert(B.getType()->isFPOrFPVectorTy(),
2926           "Floating-point arithmetic operators only work with "
2927           "floating-point types!",
2928           &B);
2929    Assert(B.getType() == B.getOperand(0)->getType(),
2930           "Floating-point arithmetic operators must have same type "
2931           "for operands and result!",
2932           &B);
2933    break;
2934  // Check that logical operators are only used with integral operands.
2935  case Instruction::And:
2936  case Instruction::Or:
2937  case Instruction::Xor:
2938    Assert(B.getType()->isIntOrIntVectorTy(),
2939           "Logical operators only work with integral types!", &B);
2940    Assert(B.getType() == B.getOperand(0)->getType(),
2941           "Logical operators must have same type for operands and result!",
2942           &B);
2943    break;
2944  case Instruction::Shl:
2945  case Instruction::LShr:
2946  case Instruction::AShr:
2947    Assert(B.getType()->isIntOrIntVectorTy(),
2948           "Shifts only work with integral types!", &B);
2949    Assert(B.getType() == B.getOperand(0)->getType(),
2950           "Shift return type must be same as operands!", &B);
2951    break;
2952  default:
2953    llvm_unreachable("Unknown BinaryOperator opcode!");
2954  }
2955
2956  visitInstruction(B);
2957}
2958
2959void Verifier::visitICmpInst(ICmpInst &IC) {
2960  // Check that the operands are the same type
2961  Type *Op0Ty = IC.getOperand(0)->getType();
2962  Type *Op1Ty = IC.getOperand(1)->getType();
2963  Assert(Op0Ty == Op1Ty,
2964         "Both operands to ICmp instruction are not of the same type!", &IC);
2965  // Check that the operands are the right type
2966  Assert(Op0Ty->isIntOrIntVectorTy() || Op0Ty->isPtrOrPtrVectorTy(),
2967         "Invalid operand types for ICmp instruction", &IC);
2968  // Check that the predicate is valid.
2969  Assert(IC.isIntPredicate(),
2970         "Invalid predicate in ICmp instruction!", &IC);
2971
2972  visitInstruction(IC);
2973}
2974
2975void Verifier::visitFCmpInst(FCmpInst &FC) {
2976  // Check that the operands are the same type
2977  Type *Op0Ty = FC.getOperand(0)->getType();
2978  Type *Op1Ty = FC.getOperand(1)->getType();
2979  Assert(Op0Ty == Op1Ty,
2980         "Both operands to FCmp instruction are not of the same type!", &FC);
2981  // Check that the operands are the right type
2982  Assert(Op0Ty->isFPOrFPVectorTy(),
2983         "Invalid operand types for FCmp instruction", &FC);
2984  // Check that the predicate is valid.
2985  Assert(FC.isFPPredicate(),
2986         "Invalid predicate in FCmp instruction!", &FC);
2987
2988  visitInstruction(FC);
2989}
2990
2991void Verifier::visitExtractElementInst(ExtractElementInst &EI) {
2992  Assert(
2993      ExtractElementInst::isValidOperands(EI.getOperand(0), EI.getOperand(1)),
2994      "Invalid extractelement operands!", &EI);
2995  visitInstruction(EI);
2996}
2997
2998void Verifier::visitInsertElementInst(InsertElementInst &IE) {
2999  Assert(InsertElementInst::isValidOperands(IE.getOperand(0), IE.getOperand(1),
3000                                            IE.getOperand(2)),
3001         "Invalid insertelement operands!", &IE);
3002  visitInstruction(IE);
3003}
3004
3005void Verifier::visitShuffleVectorInst(ShuffleVectorInst &SV) {
3006  Assert(ShuffleVectorInst::isValidOperands(SV.getOperand(0), SV.getOperand(1),
3007                                            SV.getOperand(2)),
3008         "Invalid shufflevector operands!", &SV);
3009  visitInstruction(SV);
3010}
3011
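// Vector GEPs are also checked here; e.g. (illustrative IR)
//   %p = getelementptr i32, <4 x i32*> %ptrs, <4 x i64> %offs
// must produce a <4 x i32*> result, and any vector index must have the same
// number of elements as that result.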
3012void Verifier::visitGetElementPtrInst(GetElementPtrInst &GEP) {
3013  Type *TargetTy = GEP.getPointerOperandType()->getScalarType();
3014
3015  Assert(isa<PointerType>(TargetTy),
3016         "GEP base pointer is not a pointer or a vector of pointers", &GEP);
3017  Assert(GEP.getSourceElementType()->isSized(), "GEP into unsized type!", &GEP);
3018
3019  SmallVector<Value*, 16> Idxs(GEP.idx_begin(), GEP.idx_end());
3020  Assert(all_of(
3021      Idxs, [](Value* V) { return V->getType()->isIntOrIntVectorTy(); }),
3022      "GEP indexes must be integers", &GEP);
3023  Type *ElTy =
3024      GetElementPtrInst::getIndexedType(GEP.getSourceElementType(), Idxs);
3025  Assert(ElTy, "Invalid indices for GEP pointer type!", &GEP);
3026
3027  Assert(GEP.getType()->isPtrOrPtrVectorTy() &&
3028             GEP.getResultElementType() == ElTy,
3029         "GEP is not of right type for indices!", &GEP, ElTy);
3030
3031  if (GEP.getType()->isVectorTy()) {
3032    // Additional checks for vector GEPs.
3033    unsigned GEPWidth = GEP.getType()->getVectorNumElements();
3034    if (GEP.getPointerOperandType()->isVectorTy())
3035      Assert(GEPWidth == GEP.getPointerOperandType()->getVectorNumElements(),
3036             "Vector GEP result width doesn't match operand's", &GEP);
3037    for (Value *Idx : Idxs) {
3038      Type *IndexTy = Idx->getType();
3039      if (IndexTy->isVectorTy()) {
3040        unsigned IndexWidth = IndexTy->getVectorNumElements();
3041        Assert(IndexWidth == GEPWidth, "Invalid GEP index vector width", &GEP);
3042      }
3043      Assert(IndexTy->isIntOrIntVectorTy(),
3044             "All GEP indices should be of integer type");
3045    }
3046  }
3047  visitInstruction(GEP);
3048}
3049
3050static bool isContiguous(const ConstantRange &A, const ConstantRange &B) {
3051  return A.getUpper() == B.getLower() || A.getLower() == B.getUpper();
3052}
3053
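// A !range attachment is a flat list of [Low, High) pairs, e.g. (illustrative)
//   !{i32 0, i32 10, i32 20, i32 30}   ; value lies in [0, 10) or [20, 30)
// Each pair must be non-empty, and the pairs must be in order and neither
// overlapping nor contiguous with one another.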
3054void Verifier::visitRangeMetadata(Instruction &I, MDNode *Range, Type *Ty) {
3055  assert(Range && Range == I.getMetadata(LLVMContext::MD_range) &&
3056         "precondition violation");
3057
3058  unsigned NumOperands = Range->getNumOperands();
3059  Assert(NumOperands % 2 == 0, "Unfinished range!", Range);
3060  unsigned NumRanges = NumOperands / 2;
3061  Assert(NumRanges >= 1, "It should have at least one range!", Range);
3062
3063  ConstantRange LastRange(1); // Dummy initial value
3064  for (unsigned i = 0; i < NumRanges; ++i) {
3065    ConstantInt *Low =
3066        mdconst::dyn_extract<ConstantInt>(Range->getOperand(2 * i));
3067    Assert(Low, "The lower limit must be an integer!", Low);
3068    ConstantInt *High =
3069        mdconst::dyn_extract<ConstantInt>(Range->getOperand(2 * i + 1));
3070    Assert(High, "The upper limit must be an integer!", High);
3071    Assert(High->getType() == Low->getType() && High->getType() == Ty,
3072           "Range types must match instruction type!", &I);
3073
3074    APInt HighV = High->getValue();
3075    APInt LowV = Low->getValue();
3076    ConstantRange CurRange(LowV, HighV);
3077    Assert(!CurRange.isEmptySet() && !CurRange.isFullSet(),
3078           "Range must not be empty or cover the full set!", Range);
3079    if (i != 0) {
3080      Assert(CurRange.intersectWith(LastRange).isEmptySet(),
3081             "Intervals are overlapping", Range);
3082      Assert(LowV.sgt(LastRange.getLower()), "Intervals are not in order",
3083             Range);
3084      Assert(!isContiguous(CurRange, LastRange), "Intervals are contiguous",
3085             Range);
3086    }
3087    LastRange = ConstantRange(LowV, HighV);
3088  }
3089  if (NumRanges > 2) {
3090    APInt FirstLow =
3091        mdconst::dyn_extract<ConstantInt>(Range->getOperand(0))->getValue();
3092    APInt FirstHigh =
3093        mdconst::dyn_extract<ConstantInt>(Range->getOperand(1))->getValue();
3094    ConstantRange FirstRange(FirstLow, FirstHigh);
3095    Assert(FirstRange.intersectWith(LastRange).isEmptySet(),
3096           "Intervals are overlapping", Range);
3097    Assert(!isContiguous(FirstRange, LastRange), "Intervals are contiguous",
3098           Range);
3099  }
3100}
3101
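// Atomic loads, stores, cmpxchg, and atomicrmw may only operate on types that
// are at least one byte wide and have a power-of-two size; e.g. i8, i32, and
// i64 are accepted, while i1 and i48 are rejected.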
3102void Verifier::checkAtomicMemAccessSize(Type *Ty, const Instruction *I) {
3103  unsigned Size = DL.getTypeSizeInBits(Ty);
3104  Assert(Size >= 8, "atomic memory access' size must be byte-sized", Ty, I);
3105  Assert(!(Size & (Size - 1)),
3106         "atomic memory access' operand must have a power-of-two size", Ty, I);
3107}
3108
3109void Verifier::visitLoadInst(LoadInst &LI) {
3110  PointerType *PTy = dyn_cast<PointerType>(LI.getOperand(0)->getType());
3111  Assert(PTy, "Load operand must be a pointer.", &LI);
3112  Type *ElTy = LI.getType();
3113  Assert(LI.getAlignment() <= Value::MaximumAlignment,
3114         "huge alignment values are unsupported", &LI);
3115  Assert(ElTy->isSized(), "loading unsized types is not allowed", &LI);
3116  if (LI.isAtomic()) {
3117    Assert(LI.getOrdering() != AtomicOrdering::Release &&
3118               LI.getOrdering() != AtomicOrdering::AcquireRelease,
3119           "Load cannot have Release ordering", &LI);
3120    Assert(LI.getAlignment() != 0,
3121           "Atomic load must specify explicit alignment", &LI);
3122    Assert(ElTy->isIntegerTy() || ElTy->isPointerTy() ||
3123               ElTy->isFloatingPointTy(),
3124           "atomic load operand must have integer, pointer, or floating point "
3125           "type!",
3126           ElTy, &LI);
3127    checkAtomicMemAccessSize(ElTy, &LI);
3128  } else {
3129    Assert(LI.getSyncScopeID() == SyncScope::System,
3130           "Non-atomic load cannot have SynchronizationScope specified", &LI);
3131  }
3132
3133  visitInstruction(LI);
3134}
3135
3136void Verifier::visitStoreInst(StoreInst &SI) {
3137  PointerType *PTy = dyn_cast<PointerType>(SI.getOperand(1)->getType());
3138  Assert(PTy, "Store operand must be a pointer.", &SI);
3139  Type *ElTy = PTy->getElementType();
3140  Assert(ElTy == SI.getOperand(0)->getType(),
3141         "Stored value type does not match pointer operand type!", &SI, ElTy);
3142  Assert(SI.getAlignment() <= Value::MaximumAlignment,
3143         "huge alignment values are unsupported", &SI);
3144  Assert(ElTy->isSized(), "storing unsized types is not allowed", &SI);
3145  if (SI.isAtomic()) {
3146    Assert(SI.getOrdering() != AtomicOrdering::Acquire &&
3147               SI.getOrdering() != AtomicOrdering::AcquireRelease,
3148           "Store cannot have Acquire ordering", &SI);
3149    Assert(SI.getAlignment() != 0,
3150           "Atomic store must specify explicit alignment", &SI);
3151    Assert(ElTy->isIntegerTy() || ElTy->isPointerTy() ||
3152               ElTy->isFloatingPointTy(),
3153           "atomic store operand must have integer, pointer, or floating point "
3154           "type!",
3155           ElTy, &SI);
3156    checkAtomicMemAccessSize(ElTy, &SI);
3157  } else {
3158    Assert(SI.getSyncScopeID() == SyncScope::System,
3159           "Non-atomic store cannot have SynchronizationScope specified", &SI);
3160  }
3161  visitInstruction(SI);
3162}
3163
3164/// Check that SwiftErrorVal is used as a swifterror argument in CS.
3165void Verifier::verifySwiftErrorCallSite(CallSite CS,
3166                                        const Value *SwiftErrorVal) {
3167  unsigned Idx = 0;
3168  for (CallSite::arg_iterator I = CS.arg_begin(), E = CS.arg_end();
3169       I != E; ++I, ++Idx) {
3170    if (*I == SwiftErrorVal) {
3171      Assert(CS.paramHasAttr(Idx, Attribute::SwiftError),
3172             "swifterror value when used in a callsite should be marked "
3173             "with swifterror attribute",
3174              SwiftErrorVal, CS);
3175    }
3176  }
3177}
3178
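// For example (illustrative IR), a swifterror slot such as
//   %err = alloca swifterror %swift.error*
// may only be loaded from, stored to (as the pointer operand), or passed as a
// call argument that carries the swifterror attribute.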
3179void Verifier::verifySwiftErrorValue(const Value *SwiftErrorVal) {
3180  // Check that swifterror value is only used by loads, stores, or as
3181  // a swifterror argument.
3182  for (const User *U : SwiftErrorVal->users()) {
3183    Assert(isa<LoadInst>(U) || isa<StoreInst>(U) || isa<CallInst>(U) ||
3184           isa<InvokeInst>(U),
3185           "swifterror value can only be loaded from, stored to, or used "
3186           "as a swifterror argument!",
3187           SwiftErrorVal, U);
3188    // If it is used by a store, check it is the second operand.
3189    if (auto StoreI = dyn_cast<StoreInst>(U))
3190      Assert(StoreI->getOperand(1) == SwiftErrorVal,
3191             "swifterror value should be the second operand when used "
3192             "by stores", SwiftErrorVal, U);
3193    if (auto CallI = dyn_cast<CallInst>(U))
3194      verifySwiftErrorCallSite(const_cast<CallInst*>(CallI), SwiftErrorVal);
3195    if (auto II = dyn_cast<InvokeInst>(U))
3196      verifySwiftErrorCallSite(const_cast<InvokeInst*>(II), SwiftErrorVal);
3197  }
3198}
3199
3200void Verifier::visitAllocaInst(AllocaInst &AI) {
3201  SmallPtrSet<Type*, 4> Visited;
3202  PointerType *PTy = AI.getType();
3203  // TODO: Relax this restriction?
3204  Assert(PTy->getAddressSpace() == DL.getAllocaAddrSpace(),
3205         "Allocation instruction pointer not in the stack address space!",
3206         &AI);
3207  Assert(AI.getAllocatedType()->isSized(&Visited),
3208         "Cannot allocate unsized type", &AI);
3209  Assert(AI.getArraySize()->getType()->isIntegerTy(),
3210         "Alloca array size must have integer type", &AI);
3211  Assert(AI.getAlignment() <= Value::MaximumAlignment,
3212         "huge alignment values are unsupported", &AI);
3213
3214  if (AI.isSwiftError()) {
3215    verifySwiftErrorValue(&AI);
3216  }
3217
3218  visitInstruction(AI);
3219}
3220
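// For example (illustrative IR):
//   %pair = cmpxchg i32* %p, i32 %old, i32 %new acq_rel acquire
// is well-formed, while a failure ordering that is stronger than the success
// ordering or that includes release semantics is rejected.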
3221void Verifier::visitAtomicCmpXchgInst(AtomicCmpXchgInst &CXI) {
3222
3223  // FIXME: more conditions???
3224  Assert(CXI.getSuccessOrdering() != AtomicOrdering::NotAtomic,
3225         "cmpxchg instructions must be atomic.", &CXI);
3226  Assert(CXI.getFailureOrdering() != AtomicOrdering::NotAtomic,
3227         "cmpxchg instructions must be atomic.", &CXI);
3228  Assert(CXI.getSuccessOrdering() != AtomicOrdering::Unordered,
3229         "cmpxchg instructions cannot be unordered.", &CXI);
3230  Assert(CXI.getFailureOrdering() != AtomicOrdering::Unordered,
3231         "cmpxchg instructions cannot be unordered.", &CXI);
3232  Assert(!isStrongerThan(CXI.getFailureOrdering(), CXI.getSuccessOrdering()),
3233         "cmpxchg instructions failure argument shall be no stronger than the "
3234         "success argument",
3235         &CXI);
3236  Assert(CXI.getFailureOrdering() != AtomicOrdering::Release &&
3237             CXI.getFailureOrdering() != AtomicOrdering::AcquireRelease,
3238         "cmpxchg failure ordering cannot include release semantics", &CXI);
3239
3240  PointerType *PTy = dyn_cast<PointerType>(CXI.getOperand(0)->getType());
3241  Assert(PTy, "First cmpxchg operand must be a pointer.", &CXI);
3242  Type *ElTy = PTy->getElementType();
3243  Assert(ElTy->isIntegerTy() || ElTy->isPointerTy(),
3244         "cmpxchg operand must have integer or pointer type",
3245         ElTy, &CXI);
3246  checkAtomicMemAccessSize(ElTy, &CXI);
3247  Assert(ElTy == CXI.getOperand(1)->getType(),
3248         "Expected value type does not match pointer operand type!", &CXI,
3249         ElTy);
3250  Assert(ElTy == CXI.getOperand(2)->getType(),
3251         "Stored value type does not match pointer operand type!", &CXI, ElTy);
3252  visitInstruction(CXI);
3253}
3254
3255void Verifier::visitAtomicRMWInst(AtomicRMWInst &RMWI) {
3256  Assert(RMWI.getOrdering() != AtomicOrdering::NotAtomic,
3257         "atomicrmw instructions must be atomic.", &RMWI);
3258  Assert(RMWI.getOrdering() != AtomicOrdering::Unordered,
3259         "atomicrmw instructions cannot be unordered.", &RMWI);
3260  PointerType *PTy = dyn_cast<PointerType>(RMWI.getOperand(0)->getType());
3261  Assert(PTy, "First atomicrmw operand must be a pointer.", &RMWI);
3262  Type *ElTy = PTy->getElementType();
3263  Assert(ElTy->isIntegerTy(), "atomicrmw operand must have integer type!",
3264         &RMWI, ElTy);
3265  checkAtomicMemAccessSize(ElTy, &RMWI);
3266  Assert(ElTy == RMWI.getOperand(1)->getType(),
3267         "Argument value type does not match pointer operand type!", &RMWI,
3268         ElTy);
3269  Assert(AtomicRMWInst::FIRST_BINOP <= RMWI.getOperation() &&
3270             RMWI.getOperation() <= AtomicRMWInst::LAST_BINOP,
3271         "Invalid binary operation!", &RMWI);
3272  visitInstruction(RMWI);
3273}
3274
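// For example (illustrative IR), "fence acquire" and "fence seq_cst" are
// accepted; monotonic and unordered orderings are rejected on fences.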
3275void Verifier::visitFenceInst(FenceInst &FI) {
3276  const AtomicOrdering Ordering = FI.getOrdering();
3277  Assert(Ordering == AtomicOrdering::Acquire ||
3278             Ordering == AtomicOrdering::Release ||
3279             Ordering == AtomicOrdering::AcquireRelease ||
3280             Ordering == AtomicOrdering::SequentiallyConsistent,
3281         "fence instructions may only have acquire, release, acq_rel, or "
3282         "seq_cst ordering.",
3283         &FI);
3284  visitInstruction(FI);
3285}
3286
3287void Verifier::visitExtractValueInst(ExtractValueInst &EVI) {
3288  Assert(ExtractValueInst::getIndexedType(EVI.getAggregateOperand()->getType(),
3289                                          EVI.getIndices()) == EVI.getType(),
3290         "Invalid ExtractValueInst operands!", &EVI);
3291
3292  visitInstruction(EVI);
3293}
3294
3295void Verifier::visitInsertValueInst(InsertValueInst &IVI) {
3296  Assert(ExtractValueInst::getIndexedType(IVI.getAggregateOperand()->getType(),
3297                                          IVI.getIndices()) ==
3298             IVI.getOperand(1)->getType(),
3299         "Invalid InsertValueInst operands!", &IVI);
3300
3301  visitInstruction(IVI);
3302}
3303
3304static Value *getParentPad(Value *EHPad) {
3305  if (auto *FPI = dyn_cast<FuncletPadInst>(EHPad))
3306    return FPI->getParentPad();
3307
3308  return cast<CatchSwitchInst>(EHPad)->getParentPad();
3309}
3310
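// For example (illustrative IR), the only legal way to enter a landingpad
// block is via the unwind edge of an invoke:
//   invoke void @f() to label %cont unwind label %lpad
// lpad:
//   %lp = landingpad { i8*, i32 } cleanup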
3311void Verifier::visitEHPadPredecessors(Instruction &I) {
3312  assert(I.isEHPad());
3313
3314  BasicBlock *BB = I.getParent();
3315  Function *F = BB->getParent();
3316
3317  Assert(BB != &F->getEntryBlock(), "EH pad cannot be in entry block.", &I);
3318
3319  if (auto *LPI = dyn_cast<LandingPadInst>(&I)) {
3320    // The landingpad instruction defines its parent as a landing pad block. The
3321    // landing pad block may be branched to only by the unwind edge of an
3322    // invoke.
3323    for (BasicBlock *PredBB : predecessors(BB)) {
3324      const auto *II = dyn_cast<InvokeInst>(PredBB->getTerminator());
3325      Assert(II && II->getUnwindDest() == BB && II->getNormalDest() != BB,
3326             "Block containing LandingPadInst must be jumped to "
3327             "only by the unwind edge of an invoke.",
3328             LPI);
3329    }
3330    return;
3331  }
3332  if (auto *CPI = dyn_cast<CatchPadInst>(&I)) {
3333    if (!pred_empty(BB))
3334      Assert(BB->getUniquePredecessor() == CPI->getCatchSwitch()->getParent(),
3335             "Block containing CatchPadInst must be jumped to "
3336             "only by its catchswitch.",
3337             CPI);
3338    Assert(BB != CPI->getCatchSwitch()->getUnwindDest(),
3339           "Catchswitch cannot unwind to one of its catchpads",
3340           CPI->getCatchSwitch(), CPI);
3341    return;
3342  }
3343
3344  // Verify that each pred has a legal terminator with a legal to/from EH
3345  // pad relationship.
3346  Instruction *ToPad = &I;
3347  Value *ToPadParent = getParentPad(ToPad);
3348  for (BasicBlock *PredBB : predecessors(BB)) {
3349    TerminatorInst *TI = PredBB->getTerminator();
3350    Value *FromPad;
3351    if (auto *II = dyn_cast<InvokeInst>(TI)) {
3352      Assert(II->getUnwindDest() == BB && II->getNormalDest() != BB,
3353             "EH pad must be jumped to via an unwind edge", ToPad, II);
3354      if (auto Bundle = II->getOperandBundle(LLVMContext::OB_funclet))
3355        FromPad = Bundle->Inputs[0];
3356      else
3357        FromPad = ConstantTokenNone::get(II->getContext());
3358    } else if (auto *CRI = dyn_cast<CleanupReturnInst>(TI)) {
3359      FromPad = CRI->getOperand(0);
3360      Assert(FromPad != ToPadParent, "A cleanupret must exit its cleanup", CRI);
3361    } else if (auto *CSI = dyn_cast<CatchSwitchInst>(TI)) {
3362      FromPad = CSI;
3363    } else {
3364      Assert(false, "EH pad must be jumped to via an unwind edge", ToPad, TI);
3365    }
3366
3367    // The edge may exit from zero or more nested pads.
3368    SmallSet<Value *, 8> Seen;
3369    for (;; FromPad = getParentPad(FromPad)) {
3370      Assert(FromPad != ToPad,
3371             "EH pad cannot handle exceptions raised within it", FromPad, TI);
3372      if (FromPad == ToPadParent) {
3373        // This is a legal unwind edge.
3374        break;
3375      }
3376      Assert(!isa<ConstantTokenNone>(FromPad),
3377             "A single unwind edge may only enter one EH pad", TI);
3378      Assert(Seen.insert(FromPad).second,
3379             "EH pad jumps through a cycle of pads", FromPad);
3380    }
3381  }
3382}
3383
3384void Verifier::visitLandingPadInst(LandingPadInst &LPI) {
3385  // The landingpad instruction is ill-formed if it doesn't have any clauses and
3386  // isn't a cleanup.
3387  Assert(LPI.getNumClauses() > 0 || LPI.isCleanup(),
3388         "LandingPadInst needs at least one clause or to be a cleanup.", &LPI);
3389
3390  visitEHPadPredecessors(LPI);
3391
3392  if (!LandingPadResultTy)
3393    LandingPadResultTy = LPI.getType();
3394  else
3395    Assert(LandingPadResultTy == LPI.getType(),
3396           "The landingpad instruction should have a consistent result type "
3397           "inside a function.",
3398           &LPI);
3399
3400  Function *F = LPI.getParent()->getParent();
3401  Assert(F->hasPersonalityFn(),
3402         "LandingPadInst needs to be in a function with a personality.", &LPI);
3403
3404  // The landingpad instruction must be the first non-PHI instruction in the
3405  // block.
3406  Assert(LPI.getParent()->getLandingPadInst() == &LPI,
3407         "LandingPadInst not the first non-PHI instruction in the block.",
3408         &LPI);
3409
3410  for (unsigned i = 0, e = LPI.getNumClauses(); i < e; ++i) {
3411    Constant *Clause = LPI.getClause(i);
3412    if (LPI.isCatch(i)) {
3413      Assert(isa<PointerType>(Clause->getType()),
3414             "Catch operand does not have pointer type!", &LPI);
3415    } else {
3416      Assert(LPI.isFilter(i), "Clause is neither catch nor filter!", &LPI);
3417      Assert(isa<ConstantArray>(Clause) || isa<ConstantAggregateZero>(Clause),
3418             "Filter operand is not an array of constants!", &LPI);
3419    }
3420  }
3421
3422  visitInstruction(LPI);
3423}
3424
3425void Verifier::visitResumeInst(ResumeInst &RI) {
3426  Assert(RI.getFunction()->hasPersonalityFn(),
3427         "ResumeInst needs to be in a function with a personality.", &RI);
3428
3429  if (!LandingPadResultTy)
3430    LandingPadResultTy = RI.getValue()->getType();
3431  else
3432    Assert(LandingPadResultTy == RI.getValue()->getType(),
3433           "The resume instruction should have a consistent result type "
3434           "inside a function.",
3435           &RI);
3436
3437  visitTerminatorInst(RI);
3438}
3439
3440void Verifier::visitCatchPadInst(CatchPadInst &CPI) {
3441  BasicBlock *BB = CPI.getParent();
3442
3443  Function *F = BB->getParent();
3444  Assert(F->hasPersonalityFn(),
3445         "CatchPadInst needs to be in a function with a personality.", &CPI);
3446
3447  Assert(isa<CatchSwitchInst>(CPI.getParentPad()),
3448         "CatchPadInst needs to be directly nested in a CatchSwitchInst.",
3449         CPI.getParentPad());
3450
3451  // The catchpad instruction must be the first non-PHI instruction in the
3452  // block.
3453  Assert(BB->getFirstNonPHI() == &CPI,
3454         "CatchPadInst not the first non-PHI instruction in the block.", &CPI);
3455
3456  visitEHPadPredecessors(CPI);
3457  visitFuncletPadInst(CPI);
3458}
3459
3460void Verifier::visitCatchReturnInst(CatchReturnInst &CatchReturn) {
3461  Assert(isa<CatchPadInst>(CatchReturn.getOperand(0)),
3462         "CatchReturnInst needs to be provided a CatchPad", &CatchReturn,
3463         CatchReturn.getOperand(0));
3464
3465  visitTerminatorInst(CatchReturn);
3466}
3467
3468void Verifier::visitCleanupPadInst(CleanupPadInst &CPI) {
3469  BasicBlock *BB = CPI.getParent();
3470
3471  Function *F = BB->getParent();
3472  Assert(F->hasPersonalityFn(),
3473         "CleanupPadInst needs to be in a function with a personality.", &CPI);
3474
3475  // The cleanuppad instruction must be the first non-PHI instruction in the
3476  // block.
3477  Assert(BB->getFirstNonPHI() == &CPI,
3478         "CleanupPadInst not the first non-PHI instruction in the block.",
3479         &CPI);
3480
3481  auto *ParentPad = CPI.getParentPad();
3482  Assert(isa<ConstantTokenNone>(ParentPad) || isa<FuncletPadInst>(ParentPad),
3483         "CleanupPadInst has an invalid parent.", &CPI);
3484
3485  visitEHPadPredecessors(CPI);
3486  visitFuncletPadInst(CPI);
3487}
3488
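// Checks that all unwind edges that leave this funclet pad (including edges
// that leave it through nested cleanup pads) agree on a single unwind
// destination, and that for a catchpad this destination matches the unwind
// destination of the parent catchswitch.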
3489void Verifier::visitFuncletPadInst(FuncletPadInst &FPI) {
3490  User *FirstUser = nullptr;
3491  Value *FirstUnwindPad = nullptr;
3492  SmallVector<FuncletPadInst *, 8> Worklist({&FPI});
3493  SmallSet<FuncletPadInst *, 8> Seen;
3494
3495  while (!Worklist.empty()) {
3496    FuncletPadInst *CurrentPad = Worklist.pop_back_val();
3497    Assert(Seen.insert(CurrentPad).second,
3498           "FuncletPadInst must not be nested within itself", CurrentPad);
3499    Value *UnresolvedAncestorPad = nullptr;
3500    for (User *U : CurrentPad->users()) {
3501      BasicBlock *UnwindDest;
3502      if (auto *CRI = dyn_cast<CleanupReturnInst>(U)) {
3503        UnwindDest = CRI->getUnwindDest();
3504      } else if (auto *CSI = dyn_cast<CatchSwitchInst>(U)) {
3505        // We allow catchswitch unwind to caller to nest
3506        // within an outer pad that unwinds somewhere else,
3507        // because catchswitch doesn't have a nounwind variant.
3508        // See e.g. SimplifyCFGOpt::SimplifyUnreachable.
3509        if (CSI->unwindsToCaller())
3510          continue;
3511        UnwindDest = CSI->getUnwindDest();
3512      } else if (auto *II = dyn_cast<InvokeInst>(U)) {
3513        UnwindDest = II->getUnwindDest();
3514      } else if (isa<CallInst>(U)) {
3515        // Calls which don't unwind may be found inside funclet
3516        // pads that unwind somewhere else.  We don't *require*
3517        // such calls to be annotated nounwind.
3518        continue;
3519      } else if (auto *CPI = dyn_cast<CleanupPadInst>(U)) {
3520        // The unwind dest for a cleanup can only be found by
3521        // recursive search.  Add it to the worklist, and we'll
3522        // search for its first use that determines where it unwinds.
3523        Worklist.push_back(CPI);
3524        continue;
3525      } else {
3526        Assert(isa<CatchReturnInst>(U), "Bogus funclet pad use", U);
3527        continue;
3528      }
3529
3530      Value *UnwindPad;
3531      bool ExitsFPI;
3532      if (UnwindDest) {
3533        UnwindPad = UnwindDest->getFirstNonPHI();
3534        if (!cast<Instruction>(UnwindPad)->isEHPad())
3535          continue;
3536        Value *UnwindParent = getParentPad(UnwindPad);
3537        // Ignore unwind edges that don't exit CurrentPad.
3538        if (UnwindParent == CurrentPad)
3539          continue;
3540        // Determine whether the original funclet pad is exited,
3541        // and if we are scanning nested pads determine how many
3542        // of them are exited so we can stop searching their
3543        // children.
3544        Value *ExitedPad = CurrentPad;
3545        ExitsFPI = false;
3546        do {
3547          if (ExitedPad == &FPI) {
3548            ExitsFPI = true;
3549            // Now we can resolve any ancestors of CurrentPad up to
3550            // FPI, but not including FPI since we need to make sure
3551            // to check all direct users of FPI for consistency.
3552            UnresolvedAncestorPad = &FPI;
3553            break;
3554          }
3555          Value *ExitedParent = getParentPad(ExitedPad);
3556          if (ExitedParent == UnwindParent) {
3557            // ExitedPad is the ancestor-most pad which this unwind
3558            // edge exits, so we can resolve up to it, meaning that
3559            // ExitedParent is the first ancestor still unresolved.
3560            UnresolvedAncestorPad = ExitedParent;
3561            break;
3562          }
3563          ExitedPad = ExitedParent;
3564        } while (!isa<ConstantTokenNone>(ExitedPad));
3565      } else {
3566        // Unwinding to caller exits all pads.
3567        UnwindPad = ConstantTokenNone::get(FPI.getContext());
3568        ExitsFPI = true;
3569        UnresolvedAncestorPad = &FPI;
3570      }
3571
3572      if (ExitsFPI) {
3573        // This unwind edge exits FPI.  Make sure it agrees with other
3574        // such edges.
3575        if (FirstUser) {
3576          Assert(UnwindPad == FirstUnwindPad, "Unwind edges out of a funclet "
3577                                              "pad must have the same unwind "
3578                                              "dest",
3579                 &FPI, U, FirstUser);
3580        } else {
3581          FirstUser = U;
3582          FirstUnwindPad = UnwindPad;
3583          // Record cleanup sibling unwinds for verifySiblingFuncletUnwinds
3584          if (isa<CleanupPadInst>(&FPI) && !isa<ConstantTokenNone>(UnwindPad) &&
3585              getParentPad(UnwindPad) == getParentPad(&FPI))
3586            SiblingFuncletInfo[&FPI] = cast<TerminatorInst>(U);
3587        }
3588      }
3589      // Make sure we visit all uses of FPI, but for nested pads stop as
3590      // soon as we know where they unwind to.
3591      if (CurrentPad != &FPI)
3592        break;
3593    }
3594    if (UnresolvedAncestorPad) {
3595      if (CurrentPad == UnresolvedAncestorPad) {
3596        // When CurrentPad is FPI itself, we don't mark it as resolved even if
3597        // we've found an unwind edge that exits it, because we need to verify
3598        // all direct uses of FPI.
3599        assert(CurrentPad == &FPI);
3600        continue;
3601      }
3602      // Pop off the worklist any nested pads that we've found an unwind
3603      // destination for.  The pads on the worklist are the uncles,
3604      // great-uncles, etc. of CurrentPad.  We've found an unwind destination
3605      // for all ancestors of CurrentPad up to but not including
3606      // UnresolvedAncestorPad.
3607      Value *ResolvedPad = CurrentPad;
3608      while (!Worklist.empty()) {
3609        Value *UnclePad = Worklist.back();
3610        Value *AncestorPad = getParentPad(UnclePad);
3611        // Walk ResolvedPad up the ancestor list until we either find the
3612        // uncle's parent or the last resolved ancestor.
3613        while (ResolvedPad != AncestorPad) {
3614          Value *ResolvedParent = getParentPad(ResolvedPad);
3615          if (ResolvedParent == UnresolvedAncestorPad) {
3616            break;
3617          }
3618          ResolvedPad = ResolvedParent;
3619        }
3620        // If the resolved ancestor search didn't find the uncle's parent,
3621        // then the uncle is not yet resolved.
3622        if (ResolvedPad != AncestorPad)
3623          break;
3624        // This uncle is resolved, so pop it from the worklist.
3625        Worklist.pop_back();
3626      }
3627    }
3628  }
3629
3630  if (FirstUnwindPad) {
3631    if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(FPI.getParentPad())) {
3632      BasicBlock *SwitchUnwindDest = CatchSwitch->getUnwindDest();
3633      Value *SwitchUnwindPad;
3634      if (SwitchUnwindDest)
3635        SwitchUnwindPad = SwitchUnwindDest->getFirstNonPHI();
3636      else
3637        SwitchUnwindPad = ConstantTokenNone::get(FPI.getContext());
3638      Assert(SwitchUnwindPad == FirstUnwindPad,
3639             "Unwind edges out of a catch must have the same unwind dest as "
3640             "the parent catchswitch",
3641             &FPI, FirstUser, CatchSwitch);
3642    }
3643  }
3644
3645  visitInstruction(FPI);
3646}
3647
3648void Verifier::visitCatchSwitchInst(CatchSwitchInst &CatchSwitch) {
3649  BasicBlock *BB = CatchSwitch.getParent();
3650
3651  Function *F = BB->getParent();
3652  Assert(F->hasPersonalityFn(),
3653         "CatchSwitchInst needs to be in a function with a personality.",
3654         &CatchSwitch);
3655
3656  // The catchswitch instruction must be the first non-PHI instruction in the
3657  // block.
3658  Assert(BB->getFirstNonPHI() == &CatchSwitch,
3659         "CatchSwitchInst not the first non-PHI instruction in the block.",
3660         &CatchSwitch);
3661
3662  auto *ParentPad = CatchSwitch.getParentPad();
3663  Assert(isa<ConstantTokenNone>(ParentPad) || isa<FuncletPadInst>(ParentPad),
3664         "CatchSwitchInst has an invalid parent.", ParentPad);
3665
3666  if (BasicBlock *UnwindDest = CatchSwitch.getUnwindDest()) {
3667    Instruction *I = UnwindDest->getFirstNonPHI();
3668    Assert(I->isEHPad() && !isa<LandingPadInst>(I),
3669           "CatchSwitchInst must unwind to an EH block which is not a "
3670           "landingpad.",
3671           &CatchSwitch);
3672
3673    // Record catchswitch sibling unwinds for verifySiblingFuncletUnwinds
3674    if (getParentPad(I) == ParentPad)
3675      SiblingFuncletInfo[&CatchSwitch] = &CatchSwitch;
3676  }
3677
3678  Assert(CatchSwitch.getNumHandlers() != 0,
3679         "CatchSwitchInst cannot have empty handler list", &CatchSwitch);
3680
3681  for (BasicBlock *Handler : CatchSwitch.handlers()) {
3682    Assert(isa<CatchPadInst>(Handler->getFirstNonPHI()),
3683           "CatchSwitchInst handlers must be catchpads", &CatchSwitch, Handler);
3684  }
3685
3686  visitEHPadPredecessors(CatchSwitch);
3687  visitTerminatorInst(CatchSwitch);
3688}
3689
3690void Verifier::visitCleanupReturnInst(CleanupReturnInst &CRI) {
3691  Assert(isa<CleanupPadInst>(CRI.getOperand(0)),
3692         "CleanupReturnInst needs to be provided a CleanupPad", &CRI,
3693         CRI.getOperand(0));
3694
3695  if (BasicBlock *UnwindDest = CRI.getUnwindDest()) {
3696    Instruction *I = UnwindDest->getFirstNonPHI();
3697    Assert(I->isEHPad() && !isa<LandingPadInst>(I),
3698           "CleanupReturnInst must unwind to an EH block which is not a "
3699           "landingpad.",
3700           &CRI);
3701  }
3702
3703  visitTerminatorInst(CRI);
3704}
3705
3706void Verifier::verifyDominatesUse(Instruction &I, unsigned i) {
3707  Instruction *Op = cast<Instruction>(I.getOperand(i));
3708  // If we have an invalid invoke, don't try to compute the dominance.
3709  // We already reject it in the invoke specific checks and the dominance
3710  // computation doesn't handle multiple edges.
3711  if (InvokeInst *II = dyn_cast<InvokeInst>(Op)) {
3712    if (II->getNormalDest() == II->getUnwindDest())
3713      return;
3714  }
3715
3716  // Quick check whether the def has already been encountered in the same block.
3717  // PHI nodes are not checked to prevent accepting preceding PHIs, because PHI
3718  // uses are defined to happen on the incoming edge, not at the instruction.
3719  //
3720  // FIXME: If this operand is a MetadataAsValue (wrapping a LocalAsMetadata)
3721  // wrapping an SSA value, assert that we've already encountered it.  See
3722  // related FIXME in Mapper::mapLocalAsMetadata in ValueMapper.cpp.
3723  if (!isa<PHINode>(I) && InstsInThisBlock.count(Op))
3724    return;
3725
3726  const Use &U = I.getOperandUse(i);
3727  Assert(DT.dominates(Op, U),
3728         "Instruction does not dominate all uses!", Op, &I);
3729}
3730
3731void Verifier::visitDereferenceableMetadata(Instruction& I, MDNode* MD) {
3732  Assert(I.getType()->isPointerTy(), "dereferenceable, dereferenceable_or_null "
3733         "apply only to pointer types", &I);
3734  Assert(isa<LoadInst>(I),
3735         "dereferenceable, dereferenceable_or_null apply only to load"
3736         " instructions, use attributes for calls or invokes", &I);
3737  Assert(MD->getNumOperands() == 1, "dereferenceable, dereferenceable_or_null "
3738         "take one operand!", &I);
3739  ConstantInt *CI = mdconst::dyn_extract<ConstantInt>(MD->getOperand(0));
3740  Assert(CI && CI->getType()->isIntegerTy(64), "dereferenceable, "
3741         "dereferenceable_or_null metadata value must be an i64!", &I);
3742}
3743
3744/// visitInstruction - Verify that an instruction is well formed.
3745///
3746void Verifier::visitInstruction(Instruction &I) {
3747  BasicBlock *BB = I.getParent();
3748  Assert(BB, "Instruction not embedded in basic block!", &I);
3749
3750  if (!isa<PHINode>(I)) {   // Check that non-phi nodes are not self referential
3751    for (User *U : I.users()) {
3752      Assert(U != (User *)&I || !DT.isReachableFromEntry(BB),
3753             "Only PHI nodes may reference their own value!", &I);
3754    }
3755  }
3756
3757  // Check that void typed values don't have names
3758  Assert(!I.getType()->isVoidTy() || !I.hasName(),
3759         "Instruction has a name, but provides a void value!", &I);
3760
3761  // Check that the return value of the instruction is either void or a legal
3762  // value type.
3763  Assert(I.getType()->isVoidTy() || I.getType()->isFirstClassType(),
3764         "Instruction returns a value of non-first-class type!", &I);
3765
3766  // Check that the instruction doesn't produce metadata. Calls are already
3767  // checked against the callee type.
3768  Assert(!I.getType()->isMetadataTy() || isa<CallInst>(I) || isa<InvokeInst>(I),
3769         "Invalid use of metadata!", &I);
3770
3771  // Check that all uses of the instruction, if they are instructions
3772  // themselves, actually have parent basic blocks.  If the use is not an
3773  // instruction, it is an error!
3774  for (Use &U : I.uses()) {
3775    if (Instruction *Used = dyn_cast<Instruction>(U.getUser()))
3776      Assert(Used->getParent() != nullptr,
3777             "Instruction referencing"
3778             " instruction not embedded in a basic block!",
3779             &I, Used);
3780    else {
3781      CheckFailed("Use of instruction is not an instruction!", U);
3782      return;
3783    }
3784  }
3785
3786  for (unsigned i = 0, e = I.getNumOperands(); i != e; ++i) {
3787    Assert(I.getOperand(i) != nullptr, "Instruction has null operand!", &I);
3788
3789    // Check to make sure that only first-class-values are operands to
3790    // instructions.
3791    if (!I.getOperand(i)->getType()->isFirstClassType()) {
3792      Assert(false, "Instruction operands must be first-class values!", &I);
3793    }
3794
3795    if (Function *F = dyn_cast<Function>(I.getOperand(i))) {
3796      // Check to make sure that the "address of" an intrinsic function is never
3797      // taken.
3798      Assert(
3799          !F->isIntrinsic() ||
3800              i == (isa<CallInst>(I) ? e - 1 : isa<InvokeInst>(I) ? e - 3 : 0),
3801          "Cannot take the address of an intrinsic!", &I);
3802      Assert(
3803          !F->isIntrinsic() || isa<CallInst>(I) ||
3804              F->getIntrinsicID() == Intrinsic::donothing ||
3805              F->getIntrinsicID() == Intrinsic::coro_resume ||
3806              F->getIntrinsicID() == Intrinsic::coro_destroy ||
3807              F->getIntrinsicID() == Intrinsic::experimental_patchpoint_void ||
3808              F->getIntrinsicID() == Intrinsic::experimental_patchpoint_i64 ||
3809              F->getIntrinsicID() == Intrinsic::experimental_gc_statepoint,
3810          "Cannot invoke an intrinsic other than donothing, patchpoint, "
3811          "statepoint, coro_resume or coro_destroy",
3812          &I);
3813      Assert(F->getParent() == &M, "Referencing function in another module!",
3814             &I, &M, F, F->getParent());
3815    } else if (BasicBlock *OpBB = dyn_cast<BasicBlock>(I.getOperand(i))) {
3816      Assert(OpBB->getParent() == BB->getParent(),
3817             "Referring to a basic block in another function!", &I);
3818    } else if (Argument *OpArg = dyn_cast<Argument>(I.getOperand(i))) {
3819      Assert(OpArg->getParent() == BB->getParent(),
3820             "Referring to an argument in another function!", &I);
3821    } else if (GlobalValue *GV = dyn_cast<GlobalValue>(I.getOperand(i))) {
3822      Assert(GV->getParent() == &M, "Referencing global in another module!", &I,
3823             &M, GV, GV->getParent());
3824    } else if (isa<Instruction>(I.getOperand(i))) {
3825      verifyDominatesUse(I, i);
3826    } else if (isa<InlineAsm>(I.getOperand(i))) {
3827      Assert((i + 1 == e && isa<CallInst>(I)) ||
3828                 (i + 3 == e && isa<InvokeInst>(I)),
3829             "Cannot take the address of an inline asm!", &I);
3830    } else if (ConstantExpr *CE = dyn_cast<ConstantExpr>(I.getOperand(i))) {
3831      if (CE->getType()->isPtrOrPtrVectorTy() ||
3832          !DL.getNonIntegralAddressSpaces().empty()) {
3833        // If we have a ConstantExpr pointer, we need to see if it came from an
3834        // illegal bitcast.  If the datalayout string specifies non-integral
3835        // address spaces then we also need to check for illegal ptrtoint and
3836        // inttoptr expressions.
3837        visitConstantExprsRecursively(CE);
3838      }
3839    }
3840  }
3841
3842  if (MDNode *MD = I.getMetadata(LLVMContext::MD_fpmath)) {
3843    Assert(I.getType()->isFPOrFPVectorTy(),
3844           "fpmath requires a floating point result!", &I);
3845    Assert(MD->getNumOperands() == 1, "fpmath takes one operand!", &I);
3846    if (ConstantFP *CFP0 =
3847            mdconst::dyn_extract_or_null<ConstantFP>(MD->getOperand(0))) {
3848      const APFloat &Accuracy = CFP0->getValueAPF();
3849      Assert(&Accuracy.getSemantics() == &APFloat::IEEEsingle(),
3850             "fpmath accuracy must have float type", &I);
3851      Assert(Accuracy.isFiniteNonZero() && !Accuracy.isNegative(),
3852             "fpmath accuracy not a positive number!", &I);
3853    } else {
3854      Assert(false, "invalid fpmath accuracy!", &I);
3855    }
3856  }
3857
3858  if (MDNode *Range = I.getMetadata(LLVMContext::MD_range)) {
3859    Assert(isa<LoadInst>(I) || isa<CallInst>(I) || isa<InvokeInst>(I),
3860           "Ranges are only for loads, calls and invokes!", &I);
3861    visitRangeMetadata(I, Range, I.getType());
3862  }
3863
3864  if (I.getMetadata(LLVMContext::MD_nonnull)) {
3865    Assert(I.getType()->isPointerTy(), "nonnull applies only to pointer types",
3866           &I);
3867    Assert(isa<LoadInst>(I),
3868           "nonnull applies only to load instructions, use attributes"
3869           " for calls or invokes",
3870           &I);
3871  }
3872
3873  if (MDNode *MD = I.getMetadata(LLVMContext::MD_dereferenceable))
3874    visitDereferenceableMetadata(I, MD);
3875
3876  if (MDNode *MD = I.getMetadata(LLVMContext::MD_dereferenceable_or_null))
3877    visitDereferenceableMetadata(I, MD);
3878
3879  if (MDNode *TBAA = I.getMetadata(LLVMContext::MD_tbaa))
3880    TBAAVerifyHelper.visitTBAAMetadata(I, TBAA);
3881
3882  if (MDNode *AlignMD = I.getMetadata(LLVMContext::MD_align)) {
3883    Assert(I.getType()->isPointerTy(), "align applies only to pointer types",
3884           &I);
3885    Assert(isa<LoadInst>(I), "align applies only to load instructions, "
3886           "use attributes for calls or invokes", &I);
3887    Assert(AlignMD->getNumOperands() == 1, "align takes one operand!", &I);
3888    ConstantInt *CI = mdconst::dyn_extract<ConstantInt>(AlignMD->getOperand(0));
3889    Assert(CI && CI->getType()->isIntegerTy(64),
3890           "align metadata value must be an i64!", &I);
3891    uint64_t Align = CI->getZExtValue();
3892    Assert(isPowerOf2_64(Align),
3893           "align metadata value must be a power of 2!", &I);
3894    Assert(Align <= Value::MaximumAlignment,
3895           "alignment is larger than the implementation-defined limit", &I);
3896  }
3897
3898  if (MDNode *N = I.getDebugLoc().getAsMDNode()) {
3899    AssertDI(isa<DILocation>(N), "invalid !dbg metadata attachment", &I, N);
3900    visitMDNode(*N);
3901  }
3902
3903  if (auto *DII = dyn_cast<DbgInfoIntrinsic>(&I))
3904    verifyFragmentExpression(*DII);
3905
3906  InstsInThisBlock.insert(&I);
3907}
3908
3909/// Allow intrinsics to be verified in different ways.
3910void Verifier::visitIntrinsicCallSite(Intrinsic::ID ID, CallSite CS) {
3911  Function *IF = CS.getCalledFunction();
3912  Assert(IF->isDeclaration(), "Intrinsic functions should never be defined!",
3913         IF);
3914
3915  // Verify that the intrinsic prototype lines up with what the .td files
3916  // describe.
3917  FunctionType *IFTy = IF->getFunctionType();
3918  bool IsVarArg = IFTy->isVarArg();
3919
3920  SmallVector<Intrinsic::IITDescriptor, 8> Table;
3921  getIntrinsicInfoTableEntries(ID, Table);
3922  ArrayRef<Intrinsic::IITDescriptor> TableRef = Table;
3923
3924  SmallVector<Type *, 4> ArgTys;
3925  Assert(!Intrinsic::matchIntrinsicType(IFTy->getReturnType(),
3926                                        TableRef, ArgTys),
3927         "Intrinsic has incorrect return type!", IF);
3928  for (unsigned i = 0, e = IFTy->getNumParams(); i != e; ++i)
3929    Assert(!Intrinsic::matchIntrinsicType(IFTy->getParamType(i),
3930                                          TableRef, ArgTys),
3931           "Intrinsic has incorrect argument type!", IF);
3932
3933  // Verify that the intrinsic call matches the vararg property.
3934  if (IsVarArg)
3935    Assert(!Intrinsic::matchIntrinsicVarArg(IsVarArg, TableRef),
3936           "Intrinsic was not defined with variable arguments!", IF);
3937  else
3938    Assert(!Intrinsic::matchIntrinsicVarArg(IsVarArg, TableRef),
3939           "Callsite was not defined with variable arguments!", IF);
3940
3941  // All descriptors should be absorbed by now.
3942  Assert(TableRef.empty(), "Intrinsic has too few arguments!", IF);
3943
3944  // Now that we have the intrinsic ID and the actual argument types (and we
3945  // know they are legal for the intrinsic!) get the intrinsic name through the
3946  // usual means.  This allows us to verify the mangling of argument types into
3947  // the name.
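  // For example (illustrative), an llvm.memcpy overload over (i8*, i8*, i64)
  // is expected to be named llvm.memcpy.p0i8.p0i8.i64.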
3948  const std::string ExpectedName = Intrinsic::getName(ID, ArgTys);
3949  Assert(ExpectedName == IF->getName(),
3950         "Intrinsic name not mangled correctly for type arguments! "
3951         "Should be: " +
3952             ExpectedName,
3953         IF);
3954
3955  // If the intrinsic takes MDNode arguments, verify that they are either global
3956  // or local to *this* function.
3957  for (Value *V : CS.args())
3958    if (auto *MD = dyn_cast<MetadataAsValue>(V))
3959      visitMetadataAsValue(*MD, CS.getCaller());
3960
3961  switch (ID) {
3962  default:
3963    break;
3964  case Intrinsic::coro_id: {
3965    auto *InfoArg = CS.getArgOperand(3)->stripPointerCasts();
3966    if (isa<ConstantPointerNull>(InfoArg))
3967      break;
3968    auto *GV = dyn_cast<GlobalVariable>(InfoArg);
3969    Assert(GV && GV->isConstant() && GV->hasDefinitiveInitializer(),
3970      "info argument of llvm.coro.id must refer to an initialized "
3971      "constant");
3972    Constant *Init = GV->getInitializer();
3973    Assert(isa<ConstantStruct>(Init) || isa<ConstantArray>(Init),
3974      "info argument of llvm.coro.id must refer to either a struct or "
3975      "an array");
3976    break;
3977  }
3978  case Intrinsic::ctlz:  // llvm.ctlz
3979  case Intrinsic::cttz:  // llvm.cttz
3980    Assert(isa<ConstantInt>(CS.getArgOperand(1)),
3981           "is_zero_undef argument of bit counting intrinsics must be a "
3982           "constant int",
3983           CS);
3984    break;
3985  case Intrinsic::experimental_constrained_fadd:
3986  case Intrinsic::experimental_constrained_fsub:
3987  case Intrinsic::experimental_constrained_fmul:
3988  case Intrinsic::experimental_constrained_fdiv:
3989  case Intrinsic::experimental_constrained_frem:
3990  case Intrinsic::experimental_constrained_fma:
3991  case Intrinsic::experimental_constrained_sqrt:
3992  case Intrinsic::experimental_constrained_pow:
3993  case Intrinsic::experimental_constrained_powi:
3994  case Intrinsic::experimental_constrained_sin:
3995  case Intrinsic::experimental_constrained_cos:
3996  case Intrinsic::experimental_constrained_exp:
3997  case Intrinsic::experimental_constrained_exp2:
3998  case Intrinsic::experimental_constrained_log:
3999  case Intrinsic::experimental_constrained_log10:
4000  case Intrinsic::experimental_constrained_log2:
4001  case Intrinsic::experimental_constrained_rint:
4002  case Intrinsic::experimental_constrained_nearbyint:
4003    visitConstrainedFPIntrinsic(
4004        cast<ConstrainedFPIntrinsic>(*CS.getInstruction()));
4005    break;
4006  case Intrinsic::dbg_declare: // llvm.dbg.declare
4007    Assert(isa<MetadataAsValue>(CS.getArgOperand(0)),
4008           "invalid llvm.dbg.declare intrinsic call 1", CS);
4009    visitDbgIntrinsic("declare", cast<DbgInfoIntrinsic>(*CS.getInstruction()));
4010    break;
4011  case Intrinsic::dbg_addr: // llvm.dbg.addr
4012    visitDbgIntrinsic("addr", cast<DbgInfoIntrinsic>(*CS.getInstruction()));
4013    break;
4014  case Intrinsic::dbg_value: // llvm.dbg.value
4015    visitDbgIntrinsic("value", cast<DbgInfoIntrinsic>(*CS.getInstruction()));
4016    break;
4017  case Intrinsic::memcpy:
4018  case Intrinsic::memmove:
4019  case Intrinsic::memset: {
4020    ConstantInt *AlignCI = dyn_cast<ConstantInt>(CS.getArgOperand(3));
4021    Assert(AlignCI,
4022           "alignment argument of memory intrinsics must be a constant int",
4023           CS);
4024    const APInt &AlignVal = AlignCI->getValue();
4025    Assert(AlignCI->isZero() || AlignVal.isPowerOf2(),
4026           "alignment argument of memory intrinsics must be a power of 2", CS);
4027    Assert(isa<ConstantInt>(CS.getArgOperand(4)),
4028           "isvolatile argument of memory intrinsics must be a constant int",
4029           CS);
4030    break;
4031  }
4032  case Intrinsic::memcpy_element_unordered_atomic: {
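    // The element size must be a constant power of two, any constant length
    // must be a multiple of it, and the source and destination alignments
    // must be powers of two no smaller than the element size.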
4033    const AtomicMemCpyInst *MI = cast<AtomicMemCpyInst>(CS.getInstruction());
4034
4035    ConstantInt *ElementSizeCI =
4036        dyn_cast<ConstantInt>(MI->getRawElementSizeInBytes());
4037    Assert(ElementSizeCI,
4038           "element size of the element-wise unordered atomic memory "
4039           "intrinsic must be a constant int",
4040           CS);
4041    const APInt &ElementSizeVal = ElementSizeCI->getValue();
4042    Assert(ElementSizeVal.isPowerOf2(),
4043           "element size of the element-wise atomic memory intrinsic "
4044           "must be a power of 2",
4045           CS);
4046
4047    if (auto *LengthCI = dyn_cast<ConstantInt>(MI->getLength())) {
4048      uint64_t Length = LengthCI->getZExtValue();
4049      uint64_t ElementSize = MI->getElementSizeInBytes();
4050      Assert((Length % ElementSize) == 0,
4051             "constant length must be a multiple of the element size in the "
4052             "element-wise atomic memory intrinsic",
4053             CS);
4054    }
4055
4056    auto IsValidAlignment = [&](uint64_t Alignment) {
4057      return isPowerOf2_64(Alignment) && ElementSizeVal.ule(Alignment);
4058    };
4059    uint64_t DstAlignment = CS.getParamAlignment(0),
4060             SrcAlignment = CS.getParamAlignment(1);
4061    Assert(IsValidAlignment(DstAlignment),
4062           "incorrect alignment of the destination argument", CS);
4063    Assert(IsValidAlignment(SrcAlignment),
4064           "incorrect alignment of the source argument", CS);
4065    break;
4066  }
4067  case Intrinsic::memmove_element_unordered_atomic: {
4068    auto *MI = cast<AtomicMemMoveInst>(CS.getInstruction());
4069
4070    ConstantInt *ElementSizeCI =
4071        dyn_cast<ConstantInt>(MI->getRawElementSizeInBytes());
4072    Assert(ElementSizeCI,
4073           "element size of the element-wise unordered atomic memory "
4074           "intrinsic must be a constant int",
4075           CS);
4076    const APInt &ElementSizeVal = ElementSizeCI->getValue();
4077    Assert(ElementSizeVal.isPowerOf2(),
4078           "element size of the element-wise atomic memory intrinsic "
4079           "must be a power of 2",
4080           CS);
4081
4082    if (auto *LengthCI = dyn_cast<ConstantInt>(MI->getLength())) {
4083      uint64_t Length = LengthCI->getZExtValue();
4084      uint64_t ElementSize = MI->getElementSizeInBytes();
4085      Assert((Length % ElementSize) == 0,
4086             "constant length must be a multiple of the element size in the "
4087             "element-wise atomic memory intrinsic",
4088             CS);
4089    }
4090
4091    auto IsValidAlignment = [&](uint64_t Alignment) {
4092      return isPowerOf2_64(Alignment) && ElementSizeVal.ule(Alignment);
4093    };
4094    uint64_t DstAlignment = CS.getParamAlignment(0),
4095             SrcAlignment = CS.getParamAlignment(1);
4096    Assert(IsValidAlignment(DstAlignment),
4097           "incorrect alignment of the destination argument", CS);
4098    Assert(IsValidAlignment(SrcAlignment),
4099           "incorrect alignment of the source argument", CS);
4100    break;
4101  }
4102  case Intrinsic::memset_element_unordered_atomic: {
4103    auto *MI = cast<AtomicMemSetInst>(CS.getInstruction());
4104
4105    ConstantInt *ElementSizeCI =
4106        dyn_cast<ConstantInt>(MI->getRawElementSizeInBytes());
4107    Assert(ElementSizeCI,
4108           "element size of the element-wise unordered atomic memory "
4109           "intrinsic must be a constant int",
4110           CS);
4111    const APInt &ElementSizeVal = ElementSizeCI->getValue();
4112    Assert(ElementSizeVal.isPowerOf2(),
4113           "element size of the element-wise atomic memory intrinsic "
4114           "must be a power of 2",
4115           CS);
4116
4117    if (auto *LengthCI = dyn_cast<ConstantInt>(MI->getLength())) {
4118      uint64_t Length = LengthCI->getZExtValue();
4119      uint64_t ElementSize = MI->getElementSizeInBytes();
4120      Assert((Length % ElementSize) == 0,
4121             "constant length must be a multiple of the element size in the "
4122             "element-wise atomic memory intrinsic",
4123             CS);
4124    }
4125
4126    auto IsValidAlignment = [&](uint64_t Alignment) {
4127      return isPowerOf2_64(Alignment) && ElementSizeVal.ule(Alignment);
4128    };
4129    uint64_t DstAlignment = CS.getParamAlignment(0);
4130    Assert(IsValidAlignment(DstAlignment),
4131           "incorrect alignment of the destination argument", CS);
4132    break;
4133  }
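  // The GC intrinsics may only appear in functions that declare a GC strategy.
  // llvm.gcroot additionally requires an alloca as its first argument and a
  // constant as its second; the constant must be non-null unless the alloca
  // allocates a pointer.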
4134  case Intrinsic::gcroot:
4135  case Intrinsic::gcwrite:
4136  case Intrinsic::gcread:
4137    if (ID == Intrinsic::gcroot) {
4138      AllocaInst *AI =
4139        dyn_cast<AllocaInst>(CS.getArgOperand(0)->stripPointerCasts());
4140      Assert(AI, "llvm.gcroot parameter #1 must be an alloca.", CS);
4141      Assert(isa<Constant>(CS.getArgOperand(1)),
4142             "llvm.gcroot parameter #2 must be a constant.", CS);
4143      if (!AI->getAllocatedType()->isPointerTy()) {
4144        Assert(!isa<ConstantPointerNull>(CS.getArgOperand(1)),
4145               "llvm.gcroot parameter #1 must either be a pointer alloca, "
4146               "or argument #2 must be a non-null constant.",
4147               CS);
4148      }
4149    }
4150
4151    Assert(CS.getParent()->getParent()->hasGC(),
4152           "Enclosing function does not use GC.", CS);
4153    break;
4154  case Intrinsic::init_trampoline:
4155    Assert(isa<Function>(CS.getArgOperand(1)->stripPointerCasts()),
4156           "llvm.init_trampoline parameter #2 must resolve to a function.",
4157           CS);
4158    break;
4159  case Intrinsic::prefetch:
4160    Assert(isa<ConstantInt>(CS.getArgOperand(1)) &&
4161               isa<ConstantInt>(CS.getArgOperand(2)) &&
4162               cast<ConstantInt>(CS.getArgOperand(1))->getZExtValue() < 2 &&
4163               cast<ConstantInt>(CS.getArgOperand(2))->getZExtValue() < 4,
4164           "invalid arguments to llvm.prefetch", CS);
4165    break;
4166  case Intrinsic::stackprotector:
4167    Assert(isa<AllocaInst>(CS.getArgOperand(1)->stripPointerCasts()),
4168           "llvm.stackprotector parameter #2 must resolve to an alloca.", CS);
4169    break;
4170  case Intrinsic::lifetime_start:
4171  case Intrinsic::lifetime_end:
4172  case Intrinsic::invariant_start:
4173    Assert(isa<ConstantInt>(CS.getArgOperand(0)),
4174           "size argument of memory use markers must be a constant integer",
4175           CS);
4176    break;
4177  case Intrinsic::invariant_end:
4178    Assert(isa<ConstantInt>(CS.getArgOperand(1)),
4179           "llvm.invariant.end parameter #2 must be a constant integer", CS);
4180    break;
4181
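  // llvm.localescape must appear at most once per function, in the entry
  // block, and may only list static allocas (null placeholders are allowed).
  // The argument count recorded in FrameEscapeInfo here is later checked
  // against the indices that llvm.localrecover uses for this function.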
4182  case Intrinsic::localescape: {
4183    BasicBlock *BB = CS.getParent();
4184    Assert(BB == &BB->getParent()->front(),
4185           "llvm.localescape used outside of entry block", CS);
4186    Assert(!SawFrameEscape,
4187           "multiple calls to llvm.localescape in one function", CS);
4188    for (Value *Arg : CS.args()) {
4189      if (isa<ConstantPointerNull>(Arg))
4190        continue; // Null values are allowed as placeholders.
4191      auto *AI = dyn_cast<AllocaInst>(Arg->stripPointerCasts());
4192      Assert(AI && AI->isStaticAlloca(),
4193             "llvm.localescape only accepts static allocas", CS);
4194    }
4195    FrameEscapeInfo[BB->getParent()].first = CS.getNumArgOperands();
4196    SawFrameEscape = true;
4197    break;
4198  }
4199  case Intrinsic::localrecover: {
4200    Value *FnArg = CS.getArgOperand(0)->stripPointerCasts();
4201    Function *Fn = dyn_cast<Function>(FnArg);
4202    Assert(Fn && !Fn->isDeclaration(),
4203           "llvm.localrecover first "
4204           "argument must be function defined in this module",
4205           CS);
4206    auto *IdxArg = dyn_cast<ConstantInt>(CS.getArgOperand(2));
4207    Assert(IdxArg, "idx argument of llvm.localrecover must be a constant int",
4208           CS);
4209    auto &Entry = FrameEscapeInfo[Fn];
4210    Entry.second = unsigned(
4211        std::max(uint64_t(Entry.second), IdxArg->getLimitedValue(~0U) + 1));
4212    break;
4213  }
4214
4215  case Intrinsic::experimental_gc_statepoint:
4216    Assert(!CS.isInlineAsm(),
4217           "gc.statepoint support for inline assembly unimplemented", CS);
4218    Assert(CS.getParent()->getParent()->hasGC(),
4219           "Enclosing function does not use GC.", CS);
4220
4221    verifyStatepoint(CS);
4222    break;
4223  case Intrinsic::experimental_gc_result: {
4224    Assert(CS.getParent()->getParent()->hasGC(),
4225           "Enclosing function does not use GC.", CS);
4226    // Are we tied to a statepoint properly?
4227    CallSite StatepointCS(CS.getArgOperand(0));
4228    const Function *StatepointFn =
4229      StatepointCS.getInstruction() ? StatepointCS.getCalledFunction() : nullptr;
4230    Assert(StatepointFn && StatepointFn->isDeclaration() &&
4231               StatepointFn->getIntrinsicID() ==
4232                   Intrinsic::experimental_gc_statepoint,
4233           "gc.result operand #1 must be from a statepoint", CS,
4234           CS.getArgOperand(0));
4235
4236    // Assert that result type matches wrapped callee.
4237    const Value *Target = StatepointCS.getArgument(2);
4238    auto *PT = cast<PointerType>(Target->getType());
4239    auto *TargetFuncType = cast<FunctionType>(PT->getElementType());
4240    Assert(CS.getType() == TargetFuncType->getReturnType(),
4241           "gc.result result type does not match wrapped callee", CS);
4242    break;
4243  }
4244  case Intrinsic::experimental_gc_relocate: {
4245    Assert(CS.getNumArgOperands() == 3, "wrong number of arguments", CS);
4246
4247    Assert(isa<PointerType>(CS.getType()->getScalarType()),
4248           "gc.relocate must return a pointer or a vector of pointers", CS);
4249
4250    // Check that this relocate is correctly tied to the statepoint
4251
4252    // This is the case of a relocate on the unwinding path of an invoke statepoint.
4253    if (LandingPadInst *LandingPad =
4254          dyn_cast<LandingPadInst>(CS.getArgOperand(0))) {
4255
4256      const BasicBlock *InvokeBB =
4257          LandingPad->getParent()->getUniquePredecessor();
4258
4259      // Landingpad relocates should have exactly one predecessor, terminated
4260      // by an invoke of a statepoint.
4261      Assert(InvokeBB, "safepoints should have unique landingpads",
4262             LandingPad->getParent());
4263      Assert(InvokeBB->getTerminator(), "safepoint block should be well formed",
4264             InvokeBB);
4265      Assert(isStatepoint(InvokeBB->getTerminator()),
4266             "gc relocate should be linked to a statepoint", InvokeBB);
4267    }
4268    else {
4269      // In all other cases the relocate should be tied to the statepoint
4270      // directly. This covers relocates on the normal return path of an invoke
4271      // statepoint and relocates of a call statepoint.
4272      auto Token = CS.getArgOperand(0);
4273      Assert(isa<Instruction>(Token) && isStatepoint(cast<Instruction>(Token)),
4274             "gc relocate is incorrectly tied to the statepoint", CS, Token);
4275    }
4276
4277    // Verify rest of the relocate arguments.
4278
4279    ImmutableCallSite StatepointCS(
4280        cast<GCRelocateInst>(*CS.getInstruction()).getStatepoint());
4281
4282    // Both the base and derived must be piped through the safepoint.
4283    Value* Base = CS.getArgOperand(1);
4284    Assert(isa<ConstantInt>(Base),
4285           "gc.relocate operand #2 must be integer offset", CS);
4286
4287    Value* Derived = CS.getArgOperand(2);
4288    Assert(isa<ConstantInt>(Derived),
4289           "gc.relocate operand #3 must be integer offset", CS);
4290
4291    const int BaseIndex = cast<ConstantInt>(Base)->getZExtValue();
4292    const int DerivedIndex = cast<ConstantInt>(Derived)->getZExtValue();
4293    // Check the bounds
4294    Assert(0 <= BaseIndex && BaseIndex < (int)StatepointCS.arg_size(),
4295           "gc.relocate: statepoint base index out of bounds", CS);
4296    Assert(0 <= DerivedIndex && DerivedIndex < (int)StatepointCS.arg_size(),
4297           "gc.relocate: statepoint derived index out of bounds", CS);
4298
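    // The statepoint's operands are laid out roughly as:
    //   <id>, <num patch bytes>, <target>, <#call args>, <flags>,
    //   [call args...], <#transition args>, [transition args...],
    //   <#deopt args>, [deopt args...], [gc pointer args...]
    // which is the layout the index arithmetic below walks through.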
4299    // Check that BaseIndex and DerivedIndex fall within the 'gc parameters'
4300    // section of the statepoint's argument list.
4301    Assert(StatepointCS.arg_size() > 0,
4302           "gc.statepoint: insufficient arguments");
4303    Assert(isa<ConstantInt>(StatepointCS.getArgument(3)),
4304           "gc.statement: number of call arguments must be constant integer");
4305    const unsigned NumCallArgs =
4306        cast<ConstantInt>(StatepointCS.getArgument(3))->getZExtValue();
4307    Assert(StatepointCS.arg_size() > NumCallArgs + 5,
4308           "gc.statepoint: mismatch in number of call arguments");
4309    Assert(isa<ConstantInt>(StatepointCS.getArgument(NumCallArgs + 5)),
4310           "gc.statepoint: number of transition arguments must be "
4311           "a constant integer");
4312    const int NumTransitionArgs =
4313        cast<ConstantInt>(StatepointCS.getArgument(NumCallArgs + 5))
4314            ->getZExtValue();
4315    const int DeoptArgsStart = 4 + NumCallArgs + 1 + NumTransitionArgs + 1;
4316    Assert(isa<ConstantInt>(StatepointCS.getArgument(DeoptArgsStart)),
4317           "gc.statepoint: number of deoptimization arguments must be "
4318           "a constant integer");
4319    const int NumDeoptArgs =
4320        cast<ConstantInt>(StatepointCS.getArgument(DeoptArgsStart))
4321            ->getZExtValue();
4322    const int GCParamArgsStart = DeoptArgsStart + 1 + NumDeoptArgs;
4323    const int GCParamArgsEnd = StatepointCS.arg_size();
4324    Assert(GCParamArgsStart <= BaseIndex && BaseIndex < GCParamArgsEnd,
4325           "gc.relocate: statepoint base index doesn't fall within the "
4326           "'gc parameters' section of the statepoint call",
4327           CS);
4328    Assert(GCParamArgsStart <= DerivedIndex && DerivedIndex < GCParamArgsEnd,
4329           "gc.relocate: statepoint derived index doesn't fall within the "
4330           "'gc parameters' section of the statepoint call",
4331           CS);
4332
4333    // Relocated value must be either a pointer type or vector-of-pointer type,
4334    // but gc_relocate does not need to return the same pointer type as the
4335    // relocated pointer. It can be cast to the correct type later if desired.
4336    // However, they must have the same address space and 'vectorness'.
4337    GCRelocateInst &Relocate = cast<GCRelocateInst>(*CS.getInstruction());
4338    Assert(Relocate.getDerivedPtr()->getType()->isPtrOrPtrVectorTy(),
4339           "gc.relocate: relocated value must be a gc pointer", CS);
4340
4341    auto ResultType = CS.getType();
4342    auto DerivedType = Relocate.getDerivedPtr()->getType();
4343    Assert(ResultType->isVectorTy() == DerivedType->isVectorTy(),
4344           "gc.relocate: vector relocates to vector and pointer to pointer",
4345           CS);
4346    Assert(
4347        ResultType->getPointerAddressSpace() ==
4348            DerivedType->getPointerAddressSpace(),
4349        "gc.relocate: relocating a pointer shouldn't change its address space",
4350        CS);
4351    break;
4352  }
4353  case Intrinsic::eh_exceptioncode:
4354  case Intrinsic::eh_exceptionpointer: {
4355    Assert(isa<CatchPadInst>(CS.getArgOperand(0)),
4356           "eh.exceptionpointer argument must be a catchpad", CS);
4357    break;
4358  }
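  // The masked load/store intrinsics take (pointer, alignment, mask, passthru)
  // and (value, pointer, alignment, mask) operands respectively.  A well-formed
  // load would look roughly like the following (illustrative values only):
  //   %v = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(
  //            <4 x i32>* %p, i32 4, <4 x i1> %m, <4 x i32> %passthru)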
4359  case Intrinsic::masked_load: {
4360    Assert(CS.getType()->isVectorTy(), "masked_load: must return a vector", CS);
4361
4362    Value *Ptr = CS.getArgOperand(0);
4363    //Value *Alignment = CS.getArgOperand(1);
4364    Value *Mask = CS.getArgOperand(2);
4365    Value *PassThru = CS.getArgOperand(3);
4366    Assert(Mask->getType()->isVectorTy(),
4367           "masked_load: mask must be vector", CS);
4368
4369    // DataTy is the overloaded type
4370    Type *DataTy = cast<PointerType>(Ptr->getType())->getElementType();
4371    Assert(DataTy == CS.getType(),
4372           "masked_load: return must match pointer type", CS);
4373    Assert(PassThru->getType() == DataTy,
4374           "masked_load: pass through and data type must match", CS);
4375    Assert(Mask->getType()->getVectorNumElements() ==
4376           DataTy->getVectorNumElements(),
4377           "masked_load: vector mask must be same length as data", CS);
4378    break;
4379  }
4380  case Intrinsic::masked_store: {
4381    Value *Val = CS.getArgOperand(0);
4382    Value *Ptr = CS.getArgOperand(1);
4383    //Value *Alignment = CS.getArgOperand(2);
4384    Value *Mask = CS.getArgOperand(3);
4385    Assert(Mask->getType()->isVectorTy(),
4386           "masked_store: mask must be vector", CS);
4387
4388    // DataTy is the overloaded type
4389    Type *DataTy = cast<PointerType>(Ptr->getType())->getElementType();
4390    Assert(DataTy == Val->getType(),
4391           "masked_store: storee must match pointer type", CS);
4392    Assert(Mask->getType()->getVectorNumElements() ==
4393           DataTy->getVectorNumElements(),
4394           "masked_store: vector mask must be same length as data", CS);
4395    break;
4396  }
4397
4398  case Intrinsic::experimental_guard: {
4399    Assert(CS.isCall(), "experimental_guard cannot be invoked", CS);
4400    Assert(CS.countOperandBundlesOfType(LLVMContext::OB_deopt) == 1,
4401           "experimental_guard must have exactly one "
4402           "\"deopt\" operand bundle");
4403    break;
4404  }
4405
4406  case Intrinsic::experimental_deoptimize: {
4407    Assert(CS.isCall(), "experimental_deoptimize cannot be invoked", CS);
4408    Assert(CS.countOperandBundlesOfType(LLVMContext::OB_deopt) == 1,
4409           "experimental_deoptimize must have exactly one "
4410           "\"deopt\" operand bundle");
4411    Assert(CS.getType() == CS.getInstruction()->getFunction()->getReturnType(),
4412           "experimental_deoptimize return type must match caller return type");
4413
4414    if (CS.isCall()) {
4415      auto *DeoptCI = CS.getInstruction();
4416      auto *RI = dyn_cast<ReturnInst>(DeoptCI->getNextNode());
4417      Assert(RI,
4418             "calls to experimental_deoptimize must be followed by a return");
4419
4420      if (!CS.getType()->isVoidTy() && RI)
4421        Assert(RI->getReturnValue() == DeoptCI,
4422               "calls to experimental_deoptimize must be followed by a return "
4423               "of the value computed by experimental_deoptimize");
4424    }
4425
4426    break;
4427  }
4428  }
4429}
4430
4431/// \brief Carefully grab the subprogram from a local scope.
4432///
4433/// This carefully grabs the subprogram from a local scope, avoiding the
4434/// built-in assertions that would typically fire.
4435static DISubprogram *getSubprogram(Metadata *LocalScope) {
4436  if (!LocalScope)
4437    return nullptr;
4438
4439  if (auto *SP = dyn_cast<DISubprogram>(LocalScope))
4440    return SP;
4441
4442  if (auto *LB = dyn_cast<DILexicalBlockBase>(LocalScope))
4443    return getSubprogram(LB->getRawScope());
4444
4445  // Just return null; broken scope chains are checked elsewhere.
4446  assert(!isa<DILocalScope>(LocalScope) && "Unknown type of local scope");
4447  return nullptr;
4448}
4449
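/// Every constrained FP intrinsic carries its regular FP operands followed by
/// two metadata operands: the rounding mode and the exception behavior.  The
/// checks below verify the operand count for the unary/binary/ternary forms
/// and that both metadata operands decode to valid values.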
4450void Verifier::visitConstrainedFPIntrinsic(ConstrainedFPIntrinsic &FPI) {
4451  unsigned NumOperands = FPI.getNumArgOperands();
4452  Assert(((NumOperands == 5 && FPI.isTernaryOp()) ||
4453          (NumOperands == 3 && FPI.isUnaryOp()) || (NumOperands == 4)),
4454           "invalid arguments for constrained FP intrinsic", &FPI);
4455  Assert(isa<MetadataAsValue>(FPI.getArgOperand(NumOperands-1)),
4456         "invalid exception behavior argument", &FPI);
4457  Assert(isa<MetadataAsValue>(FPI.getArgOperand(NumOperands-2)),
4458         "invalid rounding mode argument", &FPI);
4459  Assert(FPI.getRoundingMode() != ConstrainedFPIntrinsic::rmInvalid,
4460         "invalid rounding mode argument", &FPI);
4461  Assert(FPI.getExceptionBehavior() != ConstrainedFPIntrinsic::ebInvalid,
4462         "invalid exception behavior argument", &FPI);
4463}
4464
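/// Common checks for llvm.dbg.declare, llvm.dbg.addr and llvm.dbg.value: the
/// first operand must wrap a value (or an empty MDNode placeholder), the
/// variable must be a DILocalVariable, the expression a DIExpression, and the
/// intrinsic's !dbg location must resolve to the same subprogram as the
/// variable's scope.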
4465void Verifier::visitDbgIntrinsic(StringRef Kind, DbgInfoIntrinsic &DII) {
4466  auto *MD = cast<MetadataAsValue>(DII.getArgOperand(0))->getMetadata();
4467  AssertDI(isa<ValueAsMetadata>(MD) ||
4468             (isa<MDNode>(MD) && !cast<MDNode>(MD)->getNumOperands()),
4469         "invalid llvm.dbg." + Kind + " intrinsic address/value", &DII, MD);
4470  AssertDI(isa<DILocalVariable>(DII.getRawVariable()),
4471         "invalid llvm.dbg." + Kind + " intrinsic variable", &DII,
4472         DII.getRawVariable());
4473  AssertDI(isa<DIExpression>(DII.getRawExpression()),
4474         "invalid llvm.dbg." + Kind + " intrinsic expression", &DII,
4475         DII.getRawExpression());
4476
4477  // Ignore broken !dbg attachments; they're checked elsewhere.
4478  if (MDNode *N = DII.getDebugLoc().getAsMDNode())
4479    if (!isa<DILocation>(N))
4480      return;
4481
4482  BasicBlock *BB = DII.getParent();
4483  Function *F = BB ? BB->getParent() : nullptr;
4484
4485  // The scopes for variables and !dbg attachments must agree.
4486  DILocalVariable *Var = DII.getVariable();
4487  DILocation *Loc = DII.getDebugLoc();
4488  Assert(Loc, "llvm.dbg." + Kind + " intrinsic requires a !dbg attachment",
4489         &DII, BB, F);
4490
4491  DISubprogram *VarSP = getSubprogram(Var->getRawScope());
4492  DISubprogram *LocSP = getSubprogram(Loc->getRawScope());
4493  if (!VarSP || !LocSP)
4494    return; // Broken scope chains are checked elsewhere.
4495
4496  AssertDI(VarSP == LocSP, "mismatched subprogram between llvm.dbg." + Kind +
4497                               " variable and !dbg attachment",
4498           &DII, BB, F, Var, Var->getScope()->getSubprogram(), Loc,
4499           Loc->getScope()->getSubprogram());
4500
4501  verifyFnArgs(DII);
4502}
4503
4504void Verifier::verifyFragmentExpression(const DbgInfoIntrinsic &I) {
4505  DILocalVariable *V = dyn_cast_or_null<DILocalVariable>(I.getRawVariable());
4506  DIExpression *E = dyn_cast_or_null<DIExpression>(I.getRawExpression());
4507
4508  // We don't know whether this intrinsic verified correctly.
4509  if (!V || !E || !E->isValid())
4510    return;
4511
4512  // Nothing to do if this isn't a DW_OP_LLVM_fragment expression.
4513  auto Fragment = E->getFragmentInfo();
4514  if (!Fragment)
4515    return;
4516
4517  // The frontend helps out GDB by emitting the members of local anonymous
4518  // unions as artificial local variables with shared storage. When SROA splits
4519  // the storage for artificial local variables that are smaller than the entire
4520  // union, the overhang piece will be outside of the allotted space for the
4521  // variable and this check fails.
4522  // FIXME: Remove this check as soon as clang stops doing this; it hides bugs.
4523  if (V->isArtificial())
4524    return;
4525
4526  verifyFragmentExpression(*V, *Fragment, &I);
4527}
4528
4529template <typename ValueOrMetadata>
4530void Verifier::verifyFragmentExpression(const DIVariable &V,
4531                                        DIExpression::FragmentInfo Fragment,
4532                                        ValueOrMetadata *Desc) {
4533  // If there's no size, the type is broken, but that should be checked
4534  // elsewhere.
4535  auto VarSize = V.getSizeInBits();
4536  if (!VarSize)
4537    return;
4538
4539  unsigned FragSize = Fragment.SizeInBits;
4540  unsigned FragOffset = Fragment.OffsetInBits;
4541  AssertDI(FragSize + FragOffset <= *VarSize,
4542         "fragment is larger than or outside of variable", Desc, &V);
4543  AssertDI(FragSize != *VarSize, "fragment covers entire variable", Desc, &V);
4544}
4545
4546void Verifier::verifyFnArgs(const DbgInfoIntrinsic &I) {
4547  // This function does not take the scope of non-inlined function arguments
4548  // into account. Don't run it if the current function is nodebug, because it
4549  // may contain inlined debug intrinsics.
4550  if (!HasDebugInfo)
4551    return;
4552
4553  // For performance reasons only check non-inlined ones.
4554  if (I.getDebugLoc()->getInlinedAt())
4555    return;
4556
4557  DILocalVariable *Var = I.getVariable();
4558  AssertDI(Var, "dbg intrinsic without variable");
4559
4560  unsigned ArgNo = Var->getArg();
4561  if (!ArgNo)
4562    return;
4563
4564  // Verify there are no duplicate function argument debug info entries.
4565  // These will cause hard-to-debug assertions in the DWARF backend.
4566  if (DebugFnArgs.size() < ArgNo)
4567    DebugFnArgs.resize(ArgNo, nullptr);
4568
4569  auto *Prev = DebugFnArgs[ArgNo - 1];
4570  DebugFnArgs[ArgNo - 1] = Var;
4571  AssertDI(!Prev || (Prev == Var), "conflicting debug info for argument", &I,
4572           Prev, Var);
4573}
4574
4575void Verifier::verifyCompileUnits() {
4576  // When more than one Module is imported into the same context, such as during
4577  // an LTO build before linking the modules, ODR type uniquing may cause types
4578  // to point to a different CU. This check does not make sense in this case.
4579  if (M.getContext().isODRUniquingDebugTypes())
4580    return;
4581  auto *CUs = M.getNamedMetadata("llvm.dbg.cu");
4582  SmallPtrSet<const Metadata *, 2> Listed;
4583  if (CUs)
4584    Listed.insert(CUs->op_begin(), CUs->op_end());
4585  for (auto *CU : CUVisited)
4586    AssertDI(Listed.count(CU), "DICompileUnit not listed in llvm.dbg.cu", CU);
4587  CUVisited.clear();
4588}
4589
4590void Verifier::verifyDeoptimizeCallingConvs() {
4591  if (DeoptimizeDeclarations.empty())
4592    return;
4593
4594  const Function *First = DeoptimizeDeclarations[0];
4595  for (auto *F : makeArrayRef(DeoptimizeDeclarations).slice(1)) {
4596    Assert(First->getCallingConv() == F->getCallingConv(),
4597           "All llvm.experimental.deoptimize declarations must have the same "
4598           "calling convention",
4599           First, F);
4600  }
4601}
4602
4603//===----------------------------------------------------------------------===//
4604//  Implement the public interfaces to this file...
4605//===----------------------------------------------------------------------===//
4606
4607bool llvm::verifyFunction(const Function &f, raw_ostream *OS) {
4608  Function &F = const_cast<Function &>(f);
4609
4610  // Don't use a raw_null_ostream.  Printing IR is expensive.
4611  Verifier V(OS, /*ShouldTreatBrokenDebugInfoAsError=*/true, *f.getParent());
4612
4613  // Note that this function's return value is inverted from what you would
4614  // expect of a function called "verify".
4615  return !V.verify(F);
4616}
4617
4618bool llvm::verifyModule(const Module &M, raw_ostream *OS,
4619                        bool *BrokenDebugInfo) {
4620  // Don't use a raw_null_ostream.  Printing IR is expensive.
4621  Verifier V(OS, /*ShouldTreatBrokenDebugInfoAsError=*/!BrokenDebugInfo, M);
4622
4623  bool Broken = false;
4624  for (const Function &F : M)
4625    Broken |= !V.verify(F);
4626
4627  Broken |= !V.verify();
4628  if (BrokenDebugInfo)
4629    *BrokenDebugInfo = V.hasBrokenDebugInfo();
4630  // Note that this function's return value is inverted from what you would
4631  // expect of a function called "verify".
4632  return Broken;
4633}
4634
4635namespace {
4636
4637struct VerifierLegacyPass : public FunctionPass {
4638  static char ID;
4639
4640  std::unique_ptr<Verifier> V;
4641  bool FatalErrors = true;
4642
4643  VerifierLegacyPass() : FunctionPass(ID) {
4644    initializeVerifierLegacyPassPass(*PassRegistry::getPassRegistry());
4645  }
4646  explicit VerifierLegacyPass(bool FatalErrors)
4647      : FunctionPass(ID),
4648        FatalErrors(FatalErrors) {
4649    initializeVerifierLegacyPassPass(*PassRegistry::getPassRegistry());
4650  }
4651
4652  bool doInitialization(Module &M) override {
4653    V = llvm::make_unique<Verifier>(
4654        &dbgs(), /*ShouldTreatBrokenDebugInfoAsError=*/false, M);
4655    return false;
4656  }
4657
4658  bool runOnFunction(Function &F) override {
4659    if (!V->verify(F) && FatalErrors)
4660      report_fatal_error("Broken function found, compilation aborted!");
4661
4662    return false;
4663  }
4664
4665  bool doFinalization(Module &M) override {
4666    bool HasErrors = false;
4667    for (Function &F : M)
4668      if (F.isDeclaration())
4669        HasErrors |= !V->verify(F);
4670
4671    HasErrors |= !V->verify();
4672    if (FatalErrors && (HasErrors || V->hasBrokenDebugInfo()))
4673      report_fatal_error("Broken module found, compilation aborted!");
4674    return false;
4675  }
4676
4677  void getAnalysisUsage(AnalysisUsage &AU) const override {
4678    AU.setPreservesAll();
4679  }
4680};
4681
4682} // end anonymous namespace
4683
4684/// Helper to issue failure from the TBAA verification
4685template <typename... Tys> void TBAAVerifier::CheckFailed(Tys &&... Args) {
4686  if (Diagnostic)
4687    return Diagnostic->CheckFailed(Args...);
4688}
4689
4690#define AssertTBAA(C, ...)                                                     \
4691  do {                                                                         \
4692    if (!(C)) {                                                                \
4693      CheckFailed(__VA_ARGS__);                                                \
4694      return false;                                                            \
4695    }                                                                          \
4696  } while (false)
4697
4698/// Verify that \p BaseNode can be used as the "base type" in the struct-path
4699/// TBAA scheme.  This means \p BaseNode is either a scalar node, or a
4700/// struct-type node describing an aggregate data structure (like a struct).
4701TBAAVerifier::TBAABaseNodeSummary
4702TBAAVerifier::verifyTBAABaseNode(Instruction &I, const MDNode *BaseNode,
4703                                 bool IsNewFormat) {
4704  if (BaseNode->getNumOperands() < 2) {
4705    CheckFailed("Base nodes must have at least two operands", &I, BaseNode);
4706    return {true, ~0u};
4707  }
4708
4709  auto Itr = TBAABaseNodes.find(BaseNode);
4710  if (Itr != TBAABaseNodes.end())
4711    return Itr->second;
4712
4713  auto Result = verifyTBAABaseNodeImpl(I, BaseNode, IsNewFormat);
4714  auto InsertResult = TBAABaseNodes.insert({BaseNode, Result});
4715  (void)InsertResult;
4716  assert(InsertResult.second && "We just checked!");
4717  return Result;
4718}
4719
4720TBAAVerifier::TBAABaseNodeSummary
4721TBAAVerifier::verifyTBAABaseNodeImpl(Instruction &I, const MDNode *BaseNode,
4722                                     bool IsNewFormat) {
4723  const TBAAVerifier::TBAABaseNodeSummary InvalidNode = {true, ~0u};
4724
4725  if (BaseNode->getNumOperands() == 2) {
4726    // Scalar nodes can only be accessed at offset 0.
4727    return isValidScalarTBAANode(BaseNode)
4728               ? TBAAVerifier::TBAABaseNodeSummary({false, 0})
4729               : InvalidNode;
4730  }
4731
4732  if (IsNewFormat) {
4733    if (BaseNode->getNumOperands() % 3 != 0) {
4734      CheckFailed("Access tag nodes must have the number of operands that is a "
4735                  "multiple of 3!", BaseNode);
4736      return InvalidNode;
4737    }
4738  } else {
4739    if (BaseNode->getNumOperands() % 2 != 1) {
4740      CheckFailed("Struct tag nodes must have an odd number of operands!",
4741                  BaseNode);
4742      return InvalidNode;
4743    }
4744  }
4745
4746  // Check the type size field.
4747  if (IsNewFormat) {
4748    auto *TypeSizeNode = mdconst::dyn_extract_or_null<ConstantInt>(
4749        BaseNode->getOperand(1));
4750    if (!TypeSizeNode) {
4751      CheckFailed("Type size nodes must be constants!", &I, BaseNode);
4752      return InvalidNode;
4753    }
4754  }
4755
4756  // Check the type name field. In the new format it can be anything.
4757  if (!IsNewFormat && !isa<MDString>(BaseNode->getOperand(0))) {
4758    CheckFailed("Struct tag nodes have a string as their first operand",
4759                BaseNode);
4760    return InvalidNode;
4761  }
4762
4763  bool Failed = false;
4764
4765  Optional<APInt> PrevOffset;
4766  unsigned BitWidth = ~0u;
4767
4768  // We've already checked that BaseNode is not a degenerate root node with one
4769  // operand in \c verifyTBAABaseNode, so this loop should run at least once.
4770  unsigned FirstFieldOpNo = IsNewFormat ? 3 : 1;
4771  unsigned NumOpsPerField = IsNewFormat ? 3 : 2;
4772  for (unsigned Idx = FirstFieldOpNo; Idx < BaseNode->getNumOperands();
4773           Idx += NumOpsPerField) {
4774    const MDOperand &FieldTy = BaseNode->getOperand(Idx);
4775    const MDOperand &FieldOffset = BaseNode->getOperand(Idx + 1);
4776    if (!isa<MDNode>(FieldTy)) {
4777      CheckFailed("Incorrect field entry in struct type node!", &I, BaseNode);
4778      Failed = true;
4779      continue;
4780    }
4781
4782    auto *OffsetEntryCI =
4783        mdconst::dyn_extract_or_null<ConstantInt>(FieldOffset);
4784    if (!OffsetEntryCI) {
4785      CheckFailed("Offset entries must be constants!", &I, BaseNode);
4786      Failed = true;
4787      continue;
4788    }
4789
4790    if (BitWidth == ~0u)
4791      BitWidth = OffsetEntryCI->getBitWidth();
4792
4793    if (OffsetEntryCI->getBitWidth() != BitWidth) {
4794      CheckFailed(
4795          "Bitwidth between the offsets and struct type entries must match", &I,
4796          BaseNode);
4797      Failed = true;
4798      continue;
4799    }
4800
4801    // NB! As far as I can tell, we generate a non-strictly increasing offset
4802    // sequence only from structs that have zero-size bit fields.  When
4803    // recursing into a contained struct in \c getFieldNodeFromTBAABaseNode we
4804    // pick the lexically latest field in the struct type metadata node.  This
4805    // mirrors the actual behavior of the alias analysis implementation.
4806    bool IsAscending =
4807        !PrevOffset || PrevOffset->ule(OffsetEntryCI->getValue());
4808
4809    if (!IsAscending) {
4810      CheckFailed("Offsets must be increasing!", &I, BaseNode);
4811      Failed = true;
4812    }
4813
4814    PrevOffset = OffsetEntryCI->getValue();
4815
4816    if (IsNewFormat) {
4817      auto *MemberSizeNode = mdconst::dyn_extract_or_null<ConstantInt>(
4818          BaseNode->getOperand(Idx + 2));
4819      if (!MemberSizeNode) {
4820        CheckFailed("Member size entries must be constants!", &I, BaseNode);
4821        Failed = true;
4822        continue;
4823      }
4824    }
4825  }
4826
4827  return Failed ? InvalidNode
4828                : TBAAVerifier::TBAABaseNodeSummary(false, BitWidth);
4829}
4830
4831static bool IsRootTBAANode(const MDNode *MD) {
4832  return MD->getNumOperands() < 2;
4833}
4834
4835static bool IsScalarTBAANodeImpl(const MDNode *MD,
4836                                 SmallPtrSetImpl<const MDNode *> &Visited) {
4837  if (MD->getNumOperands() != 2 && MD->getNumOperands() != 3)
4838    return false;
4839
4840  if (!isa<MDString>(MD->getOperand(0)))
4841    return false;
4842
4843  if (MD->getNumOperands() == 3) {
4844    auto *Offset = mdconst::dyn_extract<ConstantInt>(MD->getOperand(2));
4845    if (!(Offset && Offset->isZero() && isa<MDString>(MD->getOperand(0))))
4846      return false;
4847  }
4848
4849  auto *Parent = dyn_cast_or_null<MDNode>(MD->getOperand(1));
4850  return Parent && Visited.insert(Parent).second &&
4851         (IsRootTBAANode(Parent) || IsScalarTBAANodeImpl(Parent, Visited));
4852}
4853
4854bool TBAAVerifier::isValidScalarTBAANode(const MDNode *MD) {
4855  auto ResultIt = TBAAScalarNodes.find(MD);
4856  if (ResultIt != TBAAScalarNodes.end())
4857    return ResultIt->second;
4858
4859  SmallPtrSet<const MDNode *, 4> Visited;
4860  bool Result = IsScalarTBAANodeImpl(MD, Visited);
4861  auto InsertResult = TBAAScalarNodes.insert({MD, Result});
4862  (void)InsertResult;
4863  assert(InsertResult.second && "Just checked!");
4864
4865  return Result;
4866}
4867
4868/// Returns the field node at the offset \p Offset in \p BaseNode.  Update \p
4869/// Offset in place to be the offset within the field node returned.
4870///
4871/// We assume we've okayed \p BaseNode via \c verifyTBAABaseNode.
4872MDNode *TBAAVerifier::getFieldNodeFromTBAABaseNode(Instruction &I,
4873                                                   const MDNode *BaseNode,
4874                                                   APInt &Offset,
4875                                                   bool IsNewFormat) {
4876  assert(BaseNode->getNumOperands() >= 2 && "Invalid base node!");
4877
4878  // Scalar nodes have only one possible "field" -- their parent in the access
4879  // hierarchy.  Offset must be zero at this point, but our caller is supposed
4880  // to Assert that.
4881  if (BaseNode->getNumOperands() == 2)
4882    return cast<MDNode>(BaseNode->getOperand(1));
4883
4884  unsigned FirstFieldOpNo = IsNewFormat ? 3 : 1;
4885  unsigned NumOpsPerField = IsNewFormat ? 3 : 2;
4886  for (unsigned Idx = FirstFieldOpNo; Idx < BaseNode->getNumOperands();
4887           Idx += NumOpsPerField) {
4888    auto *OffsetEntryCI =
4889        mdconst::extract<ConstantInt>(BaseNode->getOperand(Idx + 1));
4890    if (OffsetEntryCI->getValue().ugt(Offset)) {
4891      if (Idx == FirstFieldOpNo) {
4892        CheckFailed("Could not find TBAA parent in struct type node", &I,
4893                    BaseNode, &Offset);
4894        return nullptr;
4895      }
4896
4897      unsigned PrevIdx = Idx - NumOpsPerField;
4898      auto *PrevOffsetEntryCI =
4899          mdconst::extract<ConstantInt>(BaseNode->getOperand(PrevIdx + 1));
4900      Offset -= PrevOffsetEntryCI->getValue();
4901      return cast<MDNode>(BaseNode->getOperand(PrevIdx));
4902    }
4903  }
4904
4905  unsigned LastIdx = BaseNode->getNumOperands() - NumOpsPerField;
4906  auto *LastOffsetEntryCI = mdconst::extract<ConstantInt>(
4907      BaseNode->getOperand(LastIdx + 1));
4908  Offset -= LastOffsetEntryCI->getValue();
4909  return cast<MDNode>(BaseNode->getOperand(LastIdx));
4910}
4911
4912static bool isNewFormatTBAATypeNode(llvm::MDNode *Type) {
4913  if (!Type || Type->getNumOperands() < 3)
4914    return false;
4915
4916  // In the new format, type nodes shall have a reference to the parent type
4917  // as their first operand.
4918  MDNode *Parent = dyn_cast_or_null<MDNode>(Type->getOperand(0));
4919  if (!Parent)
4920    return false;
4921
4922  return true;
4923}
4924
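/// A struct-path access tag has the form !{BaseTy, AccessTy, Offset[, Immutable]}.
/// An illustrative (old-format) set of nodes would look roughly like:
///   !0 = !{!"Simple C/C++ TBAA"}           ; root
///   !1 = !{!"omnipotent char", !0, i64 0}  ; scalar type node
///   !2 = !{!"int", !1, i64 0}              ; scalar type node
///   !3 = !{!2, !2, i64 0}                  ; access tag on a load/store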
4925bool TBAAVerifier::visitTBAAMetadata(Instruction &I, const MDNode *MD) {
4926  AssertTBAA(isa<LoadInst>(I) || isa<StoreInst>(I) || isa<CallInst>(I) ||
4927                 isa<VAArgInst>(I) || isa<AtomicRMWInst>(I) ||
4928                 isa<AtomicCmpXchgInst>(I),
4929             "This instruction shall not have a TBAA access tag!", &I);
4930
4931  bool IsStructPathTBAA =
4932      isa<MDNode>(MD->getOperand(0)) && MD->getNumOperands() >= 3;
4933
4934  AssertTBAA(
4935      IsStructPathTBAA,
4936      "Old-style TBAA is no longer allowed, use struct-path TBAA instead", &I);
4937
4938  MDNode *BaseNode = dyn_cast_or_null<MDNode>(MD->getOperand(0));
4939  MDNode *AccessType = dyn_cast_or_null<MDNode>(MD->getOperand(1));
4940
4941  bool IsNewFormat = isNewFormatTBAATypeNode(AccessType);
4942
4943  if (IsNewFormat) {
4944    AssertTBAA(MD->getNumOperands() == 4 || MD->getNumOperands() == 5,
4945               "Access tag metadata must have either 4 or 5 operands", &I, MD);
4946  } else {
4947    AssertTBAA(MD->getNumOperands() < 5,
4948               "Struct tag metadata must have either 3 or 4 operands", &I, MD);
4949  }
4950
4951  // Check the access size field.
4952  if (IsNewFormat) {
4953    auto *AccessSizeNode = mdconst::dyn_extract_or_null<ConstantInt>(
4954        MD->getOperand(3));
4955    AssertTBAA(AccessSizeNode, "Access size field must be a constant", &I, MD);
4956  }
4957
4958  // Check the immutability flag.
4959  unsigned ImmutabilityFlagOpNo = IsNewFormat ? 4 : 3;
4960  if (MD->getNumOperands() == ImmutabilityFlagOpNo + 1) {
4961    auto *IsImmutableCI = mdconst::dyn_extract_or_null<ConstantInt>(
4962        MD->getOperand(ImmutabilityFlagOpNo));
4963    AssertTBAA(IsImmutableCI,
4964               "Immutability tag on struct tag metadata must be a constant",
4965               &I, MD);
4966    AssertTBAA(
4967        IsImmutableCI->isZero() || IsImmutableCI->isOne(),
4968        "Immutability part of the struct tag metadata must be either 0 or 1",
4969        &I, MD);
4970  }
4971
4972  AssertTBAA(BaseNode && AccessType,
4973             "Malformed struct tag metadata: base and access-type "
4974             "should be non-null and point to Metadata nodes",
4975             &I, MD, BaseNode, AccessType);
4976
4977  if (!IsNewFormat) {
4978    AssertTBAA(isValidScalarTBAANode(AccessType),
4979               "Access type node must be a valid scalar type", &I, MD,
4980               AccessType);
4981  }
4982
4983  auto *OffsetCI = mdconst::dyn_extract_or_null<ConstantInt>(MD->getOperand(2));
4984  AssertTBAA(OffsetCI, "Offset must be constant integer", &I, MD);
4985
4986  APInt Offset = OffsetCI->getValue();
4987  bool SeenAccessTypeInPath = false;
4988
4989  SmallPtrSet<MDNode *, 4> StructPath;
4990
4991  for (/* empty */; BaseNode && !IsRootTBAANode(BaseNode);
4992       BaseNode = getFieldNodeFromTBAABaseNode(I, BaseNode, Offset,
4993                                               IsNewFormat)) {
4994    if (!StructPath.insert(BaseNode).second) {
4995      CheckFailed("Cycle detected in struct path", &I, MD);
4996      return false;
4997    }
4998
4999    bool Invalid;
5000    unsigned BaseNodeBitWidth;
5001    std::tie(Invalid, BaseNodeBitWidth) = verifyTBAABaseNode(I, BaseNode,
5002                                                             IsNewFormat);
5003
5004    // If the base node is invalid in itself, then we've already printed all the
5005    // errors we wanted to print.
5006    if (Invalid)
5007      return false;
5008
5009    SeenAccessTypeInPath |= BaseNode == AccessType;
5010
5011    if (isValidScalarTBAANode(BaseNode) || BaseNode == AccessType)
5012      AssertTBAA(Offset == 0, "Offset not zero at the point of scalar access",
5013                 &I, MD, &Offset);
5014
5015    AssertTBAA(BaseNodeBitWidth == Offset.getBitWidth() ||
5016                   (BaseNodeBitWidth == 0 && Offset == 0) ||
5017                   (IsNewFormat && BaseNodeBitWidth == ~0u),
5018               "Access bit-width not the same as description bit-width", &I, MD,
5019               BaseNodeBitWidth, Offset.getBitWidth());
5020
5021    if (IsNewFormat && SeenAccessTypeInPath)
5022      break;
5023  }
5024
5025  AssertTBAA(SeenAccessTypeInPath, "Did not see access type in access path!",
5026             &I, MD);
5027  return true;
5028}
5029
5030char VerifierLegacyPass::ID = 0;
5031INITIALIZE_PASS(VerifierLegacyPass, "verify", "Module Verifier", false, false)
5032
5033FunctionPass *llvm::createVerifierPass(bool FatalErrors) {
5034  return new VerifierLegacyPass(FatalErrors);
5035}
5036
5037AnalysisKey VerifierAnalysis::Key;
5038VerifierAnalysis::Result VerifierAnalysis::run(Module &M,
5039                                               ModuleAnalysisManager &) {
5040  Result Res;
5041  Res.IRBroken = llvm::verifyModule(M, &dbgs(), &Res.DebugInfoBroken);
5042  return Res;
5043}
5044
5045VerifierAnalysis::Result VerifierAnalysis::run(Function &F,
5046                                               FunctionAnalysisManager &) {
5047  return { llvm::verifyFunction(F, &dbgs()), false };
5048}
5049
5050PreservedAnalyses VerifierPass::run(Module &M, ModuleAnalysisManager &AM) {
5051  auto Res = AM.getResult<VerifierAnalysis>(M);
5052  if (FatalErrors && (Res.IRBroken || Res.DebugInfoBroken))
5053    report_fatal_error("Broken module found, compilation aborted!");
5054
5055  return PreservedAnalyses::all();
5056}
5057
5058PreservedAnalyses VerifierPass::run(Function &F, FunctionAnalysisManager &AM) {
5059  auto res = AM.getResult<VerifierAnalysis>(F);
5060  if (res.IRBroken && FatalErrors)
5061    report_fatal_error("Broken function found, compilation aborted!");
5062
5063  return PreservedAnalyses::all();
5064}
5065