Instruction.cpp revision 344779
//===-- Instruction.cpp - Implement the Instruction class -----------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the Instruction class for the IR library.
//
//===----------------------------------------------------------------------===//

#include "llvm/IR/Instruction.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/Type.h"
using namespace llvm;

Instruction::Instruction(Type *ty, unsigned it, Use *Ops, unsigned NumOps,
                         Instruction *InsertBefore)
  : User(ty, Value::InstructionVal + it, Ops, NumOps), Parent(nullptr) {

  // If requested, insert this instruction into a basic block...
  if (InsertBefore) {
    BasicBlock *BB = InsertBefore->getParent();
    assert(BB && "Instruction to insert before is not in a basic block!");
    BB->getInstList().insert(InsertBefore->getIterator(), this);
  }
}

Instruction::Instruction(Type *ty, unsigned it, Use *Ops, unsigned NumOps,
                         BasicBlock *InsertAtEnd)
  : User(ty, Value::InstructionVal + it, Ops, NumOps), Parent(nullptr) {

  // Append this instruction to the end of the basic block.
  assert(InsertAtEnd && "Basic block to append to may not be NULL!");
  InsertAtEnd->getInstList().push_back(this);
}

Instruction::~Instruction() {
  assert(!Parent && "Instruction still linked in the program!");
  if (hasMetadataHashEntry())
    clearMetadataHashEntries();
}


void Instruction::setParent(BasicBlock *P) {
  Parent = P;
}

const Module *Instruction::getModule() const {
  return getParent()->getModule();
}

const Function *Instruction::getFunction() const {
  return getParent()->getParent();
}

void Instruction::removeFromParent() {
  getParent()->getInstList().remove(getIterator());
}

iplist<Instruction>::iterator Instruction::eraseFromParent() {
  return getParent()->getInstList().erase(getIterator());
}

/// Insert an unlinked instruction into a basic block immediately before the
/// specified instruction.
void Instruction::insertBefore(Instruction *InsertPos) {
  InsertPos->getParent()->getInstList().insert(InsertPos->getIterator(), this);
}

/// Insert an unlinked instruction into a basic block immediately after the
/// specified instruction.
void Instruction::insertAfter(Instruction *InsertPos) {
  InsertPos->getParent()->getInstList().insertAfter(InsertPos->getIterator(),
                                                    this);
}

/// Unlink this instruction from its current basic block and insert it into the
/// basic block that MovePos lives in, right before MovePos.
void Instruction::moveBefore(Instruction *MovePos) {
  moveBefore(*MovePos->getParent(), MovePos->getIterator());
}

void Instruction::moveAfter(Instruction *MovePos) {
  moveBefore(*MovePos->getParent(), ++MovePos->getIterator());
}

void Instruction::moveBefore(BasicBlock &BB,
                             SymbolTableList<Instruction>::iterator I) {
  assert(I == BB.end() || I->getParent() == &BB);
  BB.getInstList().splice(I, getParent()->getInstList(), getIterator());
}

void Instruction::setHasNoUnsignedWrap(bool b) {
  cast<OverflowingBinaryOperator>(this)->setHasNoUnsignedWrap(b);
}

void Instruction::setHasNoSignedWrap(bool b) {
  cast<OverflowingBinaryOperator>(this)->setHasNoSignedWrap(b);
}

void Instruction::setIsExact(bool b) {
  cast<PossiblyExactOperator>(this)->setIsExact(b);
}

bool Instruction::hasNoUnsignedWrap() const {
  return cast<OverflowingBinaryOperator>(this)->hasNoUnsignedWrap();
}

bool Instruction::hasNoSignedWrap() const {
  return cast<OverflowingBinaryOperator>(this)->hasNoSignedWrap();
}

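/// Clear any flags on this instruction that can turn a well-defined value into
/// poison: nuw/nsw on add/sub/mul/shl, exact on udiv/sdiv/lshr/ashr, and
/// inbounds on getelementptr.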
void Instruction::dropPoisonGeneratingFlags() {
  switch (getOpcode()) {
  case Instruction::Add:
  case Instruction::Sub:
  case Instruction::Mul:
  case Instruction::Shl:
    cast<OverflowingBinaryOperator>(this)->setHasNoUnsignedWrap(false);
    cast<OverflowingBinaryOperator>(this)->setHasNoSignedWrap(false);
    break;

  case Instruction::UDiv:
  case Instruction::SDiv:
  case Instruction::AShr:
  case Instruction::LShr:
    cast<PossiblyExactOperator>(this)->setIsExact(false);
    break;

  case Instruction::GetElementPtr:
    cast<GetElementPtrInst>(this)->setIsInBounds(false);
    break;
  }
}

bool Instruction::isExact() const {
  return cast<PossiblyExactOperator>(this)->isExact();
}

void Instruction::setFast(bool B) {
  assert(isa<FPMathOperator>(this) && "setting fast-math flag on invalid op");
  cast<FPMathOperator>(this)->setFast(B);
}

void Instruction::setHasAllowReassoc(bool B) {
  assert(isa<FPMathOperator>(this) && "setting fast-math flag on invalid op");
  cast<FPMathOperator>(this)->setHasAllowReassoc(B);
}

void Instruction::setHasNoNaNs(bool B) {
  assert(isa<FPMathOperator>(this) && "setting fast-math flag on invalid op");
  cast<FPMathOperator>(this)->setHasNoNaNs(B);
}

void Instruction::setHasNoInfs(bool B) {
  assert(isa<FPMathOperator>(this) && "setting fast-math flag on invalid op");
  cast<FPMathOperator>(this)->setHasNoInfs(B);
}

void Instruction::setHasNoSignedZeros(bool B) {
  assert(isa<FPMathOperator>(this) && "setting fast-math flag on invalid op");
  cast<FPMathOperator>(this)->setHasNoSignedZeros(B);
}

void Instruction::setHasAllowReciprocal(bool B) {
  assert(isa<FPMathOperator>(this) && "setting fast-math flag on invalid op");
  cast<FPMathOperator>(this)->setHasAllowReciprocal(B);
}

void Instruction::setHasApproxFunc(bool B) {
  assert(isa<FPMathOperator>(this) && "setting fast-math flag on invalid op");
  cast<FPMathOperator>(this)->setHasApproxFunc(B);
}

void Instruction::setFastMathFlags(FastMathFlags FMF) {
  assert(isa<FPMathOperator>(this) && "setting fast-math flag on invalid op");
  cast<FPMathOperator>(this)->setFastMathFlags(FMF);
}

void Instruction::copyFastMathFlags(FastMathFlags FMF) {
  assert(isa<FPMathOperator>(this) && "copying fast-math flag on invalid op");
  cast<FPMathOperator>(this)->copyFastMathFlags(FMF);
}

bool Instruction::isFast() const {
  assert(isa<FPMathOperator>(this) && "getting fast-math flag on invalid op");
  return cast<FPMathOperator>(this)->isFast();
}

bool Instruction::hasAllowReassoc() const {
  assert(isa<FPMathOperator>(this) && "getting fast-math flag on invalid op");
  return cast<FPMathOperator>(this)->hasAllowReassoc();
}

bool Instruction::hasNoNaNs() const {
  assert(isa<FPMathOperator>(this) && "getting fast-math flag on invalid op");
  return cast<FPMathOperator>(this)->hasNoNaNs();
}

bool Instruction::hasNoInfs() const {
  assert(isa<FPMathOperator>(this) && "getting fast-math flag on invalid op");
  return cast<FPMathOperator>(this)->hasNoInfs();
}

bool Instruction::hasNoSignedZeros() const {
  assert(isa<FPMathOperator>(this) && "getting fast-math flag on invalid op");
  return cast<FPMathOperator>(this)->hasNoSignedZeros();
}

bool Instruction::hasAllowReciprocal() const {
  assert(isa<FPMathOperator>(this) && "getting fast-math flag on invalid op");
  return cast<FPMathOperator>(this)->hasAllowReciprocal();
}

bool Instruction::hasAllowContract() const {
  assert(isa<FPMathOperator>(this) && "getting fast-math flag on invalid op");
  return cast<FPMathOperator>(this)->hasAllowContract();
}

bool Instruction::hasApproxFunc() const {
  assert(isa<FPMathOperator>(this) && "getting fast-math flag on invalid op");
  return cast<FPMathOperator>(this)->hasApproxFunc();
}

FastMathFlags Instruction::getFastMathFlags() const {
  assert(isa<FPMathOperator>(this) && "getting fast-math flag on invalid op");
  return cast<FPMathOperator>(this)->getFastMathFlags();
}

void Instruction::copyFastMathFlags(const Instruction *I) {
  copyFastMathFlags(I->getFastMathFlags());
}

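/// Copy the nuw/nsw, exact and fast-math flags from V onto this instruction,
/// for each flag kind that both values support. Wrap flags are only copied
/// when IncludeWrapFlags is true, and the GEP inbounds flag is merged (OR'd)
/// with the destination's existing flag rather than overwritten.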
void Instruction::copyIRFlags(const Value *V, bool IncludeWrapFlags) {
  // Copy the wrapping flags.
  if (IncludeWrapFlags && isa<OverflowingBinaryOperator>(this)) {
    if (auto *OB = dyn_cast<OverflowingBinaryOperator>(V)) {
      setHasNoSignedWrap(OB->hasNoSignedWrap());
      setHasNoUnsignedWrap(OB->hasNoUnsignedWrap());
    }
  }

  // Copy the exact flag.
  if (auto *PE = dyn_cast<PossiblyExactOperator>(V))
    if (isa<PossiblyExactOperator>(this))
      setIsExact(PE->isExact());

  // Copy the fast-math flags.
  if (auto *FP = dyn_cast<FPMathOperator>(V))
    if (isa<FPMathOperator>(this))
      copyFastMathFlags(FP->getFastMathFlags());

  if (auto *SrcGEP = dyn_cast<GetElementPtrInst>(V))
    if (auto *DestGEP = dyn_cast<GetElementPtrInst>(this))
      DestGEP->setIsInBounds(SrcGEP->isInBounds() | DestGEP->isInBounds());
}

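/// Keep only the wrap, exact, fast-math and inbounds flags that are set on
/// both this instruction and V; in other words, intersect (logically AND) the
/// two flag sets.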
void Instruction::andIRFlags(const Value *V) {
  if (auto *OB = dyn_cast<OverflowingBinaryOperator>(V)) {
    if (isa<OverflowingBinaryOperator>(this)) {
      setHasNoSignedWrap(hasNoSignedWrap() & OB->hasNoSignedWrap());
      setHasNoUnsignedWrap(hasNoUnsignedWrap() & OB->hasNoUnsignedWrap());
    }
  }

  if (auto *PE = dyn_cast<PossiblyExactOperator>(V))
    if (isa<PossiblyExactOperator>(this))
      setIsExact(isExact() & PE->isExact());

  if (auto *FP = dyn_cast<FPMathOperator>(V)) {
    if (isa<FPMathOperator>(this)) {
      FastMathFlags FM = getFastMathFlags();
      FM &= FP->getFastMathFlags();
      copyFastMathFlags(FM);
    }
  }

  if (auto *SrcGEP = dyn_cast<GetElementPtrInst>(V))
    if (auto *DestGEP = dyn_cast<GetElementPtrInst>(this))
      DestGEP->setIsInBounds(SrcGEP->isInBounds() & DestGEP->isInBounds());
}

const char *Instruction::getOpcodeName(unsigned OpCode) {
  switch (OpCode) {
  // Terminators
  case Ret:    return "ret";
  case Br:     return "br";
  case Switch: return "switch";
  case IndirectBr: return "indirectbr";
  case Invoke: return "invoke";
  case Resume: return "resume";
  case Unreachable: return "unreachable";
  case CleanupRet: return "cleanupret";
  case CatchRet: return "catchret";
  case CatchPad: return "catchpad";
  case CatchSwitch: return "catchswitch";

  // Standard unary operators...
  case FNeg: return "fneg";

  // Standard binary operators...
  case Add: return "add";
  case FAdd: return "fadd";
  case Sub: return "sub";
  case FSub: return "fsub";
  case Mul: return "mul";
  case FMul: return "fmul";
  case UDiv: return "udiv";
  case SDiv: return "sdiv";
  case FDiv: return "fdiv";
  case URem: return "urem";
  case SRem: return "srem";
  case FRem: return "frem";

  // Logical operators...
  case And: return "and";
  case Or : return "or";
  case Xor: return "xor";

  // Memory instructions...
  case Alloca:        return "alloca";
  case Load:          return "load";
  case Store:         return "store";
  case AtomicCmpXchg: return "cmpxchg";
  case AtomicRMW:     return "atomicrmw";
  case Fence:         return "fence";
  case GetElementPtr: return "getelementptr";

  // Convert instructions...
  case Trunc:         return "trunc";
  case ZExt:          return "zext";
  case SExt:          return "sext";
  case FPTrunc:       return "fptrunc";
  case FPExt:         return "fpext";
  case FPToUI:        return "fptoui";
  case FPToSI:        return "fptosi";
  case UIToFP:        return "uitofp";
  case SIToFP:        return "sitofp";
  case IntToPtr:      return "inttoptr";
  case PtrToInt:      return "ptrtoint";
  case BitCast:       return "bitcast";
  case AddrSpaceCast: return "addrspacecast";

  // Other instructions...
  case ICmp:           return "icmp";
  case FCmp:           return "fcmp";
  case PHI:            return "phi";
  case Select:         return "select";
  case Call:           return "call";
  case Shl:            return "shl";
  case LShr:           return "lshr";
  case AShr:           return "ashr";
  case VAArg:          return "va_arg";
  case ExtractElement: return "extractelement";
  case InsertElement:  return "insertelement";
  case ShuffleVector:  return "shufflevector";
  case ExtractValue:   return "extractvalue";
  case InsertValue:    return "insertvalue";
  case LandingPad:     return "landingpad";
  case CleanupPad:     return "cleanuppad";

  default: return "<Invalid operator> ";
  }
}

/// Return true if both instructions have the same special state. This must be
/// kept in sync with FunctionComparator::cmpOperations in
/// lib/Transforms/IPO/MergeFunctions.cpp.
static bool haveSameSpecialState(const Instruction *I1, const Instruction *I2,
                                 bool IgnoreAlignment = false) {
  assert(I1->getOpcode() == I2->getOpcode() &&
         "Can not compare special state of different instructions");

  if (const AllocaInst *AI = dyn_cast<AllocaInst>(I1))
    return AI->getAllocatedType() == cast<AllocaInst>(I2)->getAllocatedType() &&
           (AI->getAlignment() == cast<AllocaInst>(I2)->getAlignment() ||
            IgnoreAlignment);
  if (const LoadInst *LI = dyn_cast<LoadInst>(I1))
    return LI->isVolatile() == cast<LoadInst>(I2)->isVolatile() &&
           (LI->getAlignment() == cast<LoadInst>(I2)->getAlignment() ||
            IgnoreAlignment) &&
           LI->getOrdering() == cast<LoadInst>(I2)->getOrdering() &&
           LI->getSyncScopeID() == cast<LoadInst>(I2)->getSyncScopeID();
  if (const StoreInst *SI = dyn_cast<StoreInst>(I1))
    return SI->isVolatile() == cast<StoreInst>(I2)->isVolatile() &&
           (SI->getAlignment() == cast<StoreInst>(I2)->getAlignment() ||
            IgnoreAlignment) &&
           SI->getOrdering() == cast<StoreInst>(I2)->getOrdering() &&
           SI->getSyncScopeID() == cast<StoreInst>(I2)->getSyncScopeID();
  if (const CmpInst *CI = dyn_cast<CmpInst>(I1))
    return CI->getPredicate() == cast<CmpInst>(I2)->getPredicate();
  if (const CallInst *CI = dyn_cast<CallInst>(I1))
    return CI->isTailCall() == cast<CallInst>(I2)->isTailCall() &&
           CI->getCallingConv() == cast<CallInst>(I2)->getCallingConv() &&
           CI->getAttributes() == cast<CallInst>(I2)->getAttributes() &&
           CI->hasIdenticalOperandBundleSchema(*cast<CallInst>(I2));
  if (const InvokeInst *CI = dyn_cast<InvokeInst>(I1))
    return CI->getCallingConv() == cast<InvokeInst>(I2)->getCallingConv() &&
           CI->getAttributes() == cast<InvokeInst>(I2)->getAttributes() &&
           CI->hasIdenticalOperandBundleSchema(*cast<InvokeInst>(I2));
  if (const InsertValueInst *IVI = dyn_cast<InsertValueInst>(I1))
    return IVI->getIndices() == cast<InsertValueInst>(I2)->getIndices();
  if (const ExtractValueInst *EVI = dyn_cast<ExtractValueInst>(I1))
    return EVI->getIndices() == cast<ExtractValueInst>(I2)->getIndices();
  if (const FenceInst *FI = dyn_cast<FenceInst>(I1))
    return FI->getOrdering() == cast<FenceInst>(I2)->getOrdering() &&
           FI->getSyncScopeID() == cast<FenceInst>(I2)->getSyncScopeID();
  if (const AtomicCmpXchgInst *CXI = dyn_cast<AtomicCmpXchgInst>(I1))
    return CXI->isVolatile() == cast<AtomicCmpXchgInst>(I2)->isVolatile() &&
           CXI->isWeak() == cast<AtomicCmpXchgInst>(I2)->isWeak() &&
           CXI->getSuccessOrdering() ==
               cast<AtomicCmpXchgInst>(I2)->getSuccessOrdering() &&
           CXI->getFailureOrdering() ==
               cast<AtomicCmpXchgInst>(I2)->getFailureOrdering() &&
           CXI->getSyncScopeID() ==
               cast<AtomicCmpXchgInst>(I2)->getSyncScopeID();
  if (const AtomicRMWInst *RMWI = dyn_cast<AtomicRMWInst>(I1))
    return RMWI->getOperation() == cast<AtomicRMWInst>(I2)->getOperation() &&
           RMWI->isVolatile() == cast<AtomicRMWInst>(I2)->isVolatile() &&
           RMWI->getOrdering() == cast<AtomicRMWInst>(I2)->getOrdering() &&
           RMWI->getSyncScopeID() == cast<AtomicRMWInst>(I2)->getSyncScopeID();

  return true;
}

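/// Two instructions are "identical" when they have the same opcode, type,
/// operands and special state; isIdenticalTo additionally requires the
/// SubclassOptionalData bits (nuw/nsw, exact, fast-math, etc.) to match,
/// whereas isIdenticalToWhenDefined below ignores them.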
bool Instruction::isIdenticalTo(const Instruction *I) const {
  return isIdenticalToWhenDefined(I) &&
         SubclassOptionalData == I->SubclassOptionalData;
}

bool Instruction::isIdenticalToWhenDefined(const Instruction *I) const {
  if (getOpcode() != I->getOpcode() ||
      getNumOperands() != I->getNumOperands() ||
      getType() != I->getType())
    return false;

  // If both instructions have no operands, they are identical.
  if (getNumOperands() == 0 && I->getNumOperands() == 0)
    return haveSameSpecialState(this, I);

  // We have two instructions of identical opcode and #operands.  Check to see
  // if all operands are the same.
  if (!std::equal(op_begin(), op_end(), I->op_begin()))
    return false;

  if (const PHINode *thisPHI = dyn_cast<PHINode>(this)) {
    const PHINode *otherPHI = cast<PHINode>(I);
    return std::equal(thisPHI->block_begin(), thisPHI->block_end(),
                      otherPHI->block_begin());
  }

  return haveSameSpecialState(this, I);
}

// Keep this in sync with FunctionComparator::cmpOperations in
// lib/Transforms/IPO/MergeFunctions.cpp.
bool Instruction::isSameOperationAs(const Instruction *I,
                                    unsigned flags) const {
  bool IgnoreAlignment = flags & CompareIgnoringAlignment;
  bool UseScalarTypes  = flags & CompareUsingScalarTypes;

  if (getOpcode() != I->getOpcode() ||
      getNumOperands() != I->getNumOperands() ||
      (UseScalarTypes ?
       getType()->getScalarType() != I->getType()->getScalarType() :
       getType() != I->getType()))
    return false;

  // We have two instructions of identical opcode and #operands.  Check to see
  // if all operands are the same type.
  for (unsigned i = 0, e = getNumOperands(); i != e; ++i)
    if (UseScalarTypes ?
        getOperand(i)->getType()->getScalarType() !=
          I->getOperand(i)->getType()->getScalarType() :
        getOperand(i)->getType() != I->getOperand(i)->getType())
      return false;

  return haveSameSpecialState(this, I, IgnoreAlignment);
}

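/// Return true if this instruction has a use that lives outside of BB. A use
/// in a PHI node is attributed to the corresponding incoming block rather than
/// to the block containing the PHI itself.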
bool Instruction::isUsedOutsideOfBlock(const BasicBlock *BB) const {
  for (const Use &U : uses()) {
    // PHI nodes use values in their corresponding predecessor block.  For
    // other instructions, just check whether the parent of the use matches up.
    const Instruction *I = cast<Instruction>(U.getUser());
    const PHINode *PN = dyn_cast<PHINode>(I);
    if (!PN) {
      if (I->getParent() != BB)
        return true;
      continue;
    }

    if (PN->getIncomingBlock(U) != BB)
      return true;
  }
  return false;
}

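/// Conservatively determine whether this instruction may read from memory,
/// based on its opcode and, for calls, invokes and stores, on its attributes
/// and atomic ordering.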
bool Instruction::mayReadFromMemory() const {
  switch (getOpcode()) {
  default: return false;
  case Instruction::VAArg:
  case Instruction::Load:
  case Instruction::Fence: // FIXME: refine definition of mayReadFromMemory
  case Instruction::AtomicCmpXchg:
  case Instruction::AtomicRMW:
  case Instruction::CatchPad:
  case Instruction::CatchRet:
    return true;
  case Instruction::Call:
    return !cast<CallInst>(this)->doesNotAccessMemory();
  case Instruction::Invoke:
    return !cast<InvokeInst>(this)->doesNotAccessMemory();
  case Instruction::Store:
    return !cast<StoreInst>(this)->isUnordered();
  }
}

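/// Conservatively determine whether this instruction may write to memory,
/// based on its opcode and, for calls, invokes and loads, on its attributes
/// and atomic ordering.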
bool Instruction::mayWriteToMemory() const {
  switch (getOpcode()) {
  default: return false;
  case Instruction::Fence: // FIXME: refine definition of mayWriteToMemory
  case Instruction::Store:
  case Instruction::VAArg:
  case Instruction::AtomicCmpXchg:
  case Instruction::AtomicRMW:
  case Instruction::CatchPad:
  case Instruction::CatchRet:
    return true;
  case Instruction::Call:
    return !cast<CallInst>(this)->onlyReadsMemory();
  case Instruction::Invoke:
    return !cast<InvokeInst>(this)->onlyReadsMemory();
  case Instruction::Load:
    return !cast<LoadInst>(this)->isUnordered();
  }
}

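/// An instruction is atomic if it is a cmpxchg, atomicrmw or fence, or a load
/// or store that carries an atomic ordering.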
bool Instruction::isAtomic() const {
  switch (getOpcode()) {
  default:
    return false;
  case Instruction::AtomicCmpXchg:
  case Instruction::AtomicRMW:
  case Instruction::Fence:
    return true;
  case Instruction::Load:
    return cast<LoadInst>(this)->getOrdering() != AtomicOrdering::NotAtomic;
  case Instruction::Store:
    return cast<StoreInst>(this)->getOrdering() != AtomicOrdering::NotAtomic;
  }
}

bool Instruction::hasAtomicLoad() const {
  assert(isAtomic());
  switch (getOpcode()) {
  default:
    return false;
  case Instruction::AtomicCmpXchg:
  case Instruction::AtomicRMW:
  case Instruction::Load:
    return true;
  }
}

bool Instruction::hasAtomicStore() const {
  assert(isAtomic());
  switch (getOpcode()) {
  default:
    return false;
  case Instruction::AtomicCmpXchg:
  case Instruction::AtomicRMW:
  case Instruction::Store:
    return true;
  }
}

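/// Return true if this instruction may unwind out of the function: a call that
/// is not marked nounwind, a cleanupret or catchswitch that unwinds to the
/// caller, or a resume.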
bool Instruction::mayThrow() const {
  if (const CallInst *CI = dyn_cast<CallInst>(this))
    return !CI->doesNotThrow();
  if (const auto *CRI = dyn_cast<CleanupReturnInst>(this))
    return CRI->unwindsToCaller();
  if (const auto *CatchSwitch = dyn_cast<CatchSwitchInst>(this))
    return CatchSwitch->unwindsToCaller();
  return isa<ResumeInst>(this);
}

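/// An instruction is safe to remove if it is not a terminator and, in the case
/// of a call, it has no side effects.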
bool Instruction::isSafeToRemove() const {
  return (!isa<CallInst>(this) || !this->mayHaveSideEffects()) &&
         !this->isTerminator();
}

bool Instruction::isLifetimeStartOrEnd() const {
  auto II = dyn_cast<IntrinsicInst>(this);
  if (!II)
    return false;
  Intrinsic::ID ID = II->getIntrinsicID();
  return ID == Intrinsic::lifetime_start || ID == Intrinsic::lifetime_end;
}

const Instruction *Instruction::getNextNonDebugInstruction() const {
  for (const Instruction *I = getNextNode(); I; I = I->getNextNode())
    if (!isa<DbgInfoIntrinsic>(I))
      return I;
  return nullptr;
}

const Instruction *Instruction::getPrevNonDebugInstruction() const {
  for (const Instruction *I = getPrevNode(); I; I = I->getPrevNode())
    if (!isa<DbgInfoIntrinsic>(I))
      return I;
  return nullptr;
}

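/// In addition to the opcodes that are always associative, fadd and fmul count
/// as associative when both the reassoc and nsz fast-math flags are set.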
bool Instruction::isAssociative() const {
  unsigned Opcode = getOpcode();
  if (isAssociative(Opcode))
    return true;

  switch (Opcode) {
  case FMul:
  case FAdd:
    return cast<FPMathOperator>(this)->hasAllowReassoc() &&
           cast<FPMathOperator>(this)->hasNoSignedZeros();
  default:
    return false;
  }
}

unsigned Instruction::getNumSuccessors() const {
  switch (getOpcode()) {
#define HANDLE_TERM_INST(N, OPC, CLASS)                                        \
  case Instruction::OPC:                                                       \
    return static_cast<const CLASS *>(this)->getNumSuccessors();
#include "llvm/IR/Instruction.def"
  default:
    break;
  }
  llvm_unreachable("not a terminator");
}

BasicBlock *Instruction::getSuccessor(unsigned idx) const {
  switch (getOpcode()) {
#define HANDLE_TERM_INST(N, OPC, CLASS)                                        \
  case Instruction::OPC:                                                       \
    return static_cast<const CLASS *>(this)->getSuccessor(idx);
#include "llvm/IR/Instruction.def"
  default:
    break;
  }
  llvm_unreachable("not a terminator");
}

void Instruction::setSuccessor(unsigned idx, BasicBlock *B) {
  switch (getOpcode()) {
#define HANDLE_TERM_INST(N, OPC, CLASS)                                        \
  case Instruction::OPC:                                                       \
    return static_cast<CLASS *>(this)->setSuccessor(idx, B);
#include "llvm/IR/Instruction.def"
  default:
    break;
  }
  llvm_unreachable("not a terminator");
}

Instruction *Instruction::cloneImpl() const {
  llvm_unreachable("Subclass of Instruction failed to implement cloneImpl");
}

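/// If this instruction carries two-way "branch_weights" profile metadata, swap
/// the two weight operands (e.g. when the branch's successors are swapped).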
void Instruction::swapProfMetadata() {
  MDNode *ProfileData = getMetadata(LLVMContext::MD_prof);
  if (!ProfileData || ProfileData->getNumOperands() != 3 ||
      !isa<MDString>(ProfileData->getOperand(0)))
    return;

  MDString *MDName = cast<MDString>(ProfileData->getOperand(0));
  if (MDName->getString() != "branch_weights")
    return;

  // The first operand is the name; rebuild the node with the two weight
  // operands in reversed order.
  Metadata *Ops[] = {ProfileData->getOperand(0), ProfileData->getOperand(2),
                     ProfileData->getOperand(1)};
  setMetadata(LLVMContext::MD_prof,
              MDNode::get(ProfileData->getContext(), Ops));
}

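/// Copy metadata (including the debug location) from SrcInst onto this
/// instruction. If WL is non-empty, only the metadata kinds listed in it are
/// copied.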
void Instruction::copyMetadata(const Instruction &SrcInst,
                               ArrayRef<unsigned> WL) {
  if (!SrcInst.hasMetadata())
    return;

  DenseSet<unsigned> WLS;
  for (unsigned M : WL)
    WLS.insert(M);

  // Enumerate the metadata on the source instruction and copy each kind over,
  // honoring the whitelist if one was given (an empty whitelist means copy
  // everything).
  SmallVector<std::pair<unsigned, MDNode *>, 4> TheMDs;
  SrcInst.getAllMetadataOtherThanDebugLoc(TheMDs);
  for (const auto &MD : TheMDs) {
    if (WL.empty() || WLS.count(MD.first))
      setMetadata(MD.first, MD.second);
  }
  if (WL.empty() || WLS.count(LLVMContext::MD_dbg))
    setDebugLoc(SrcInst.getDebugLoc());
}

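/// Create a copy of this instruction via the subclass cloneImpl hook, then
/// propagate the optional-flags bits and all metadata. The clone is not
/// inserted into any basic block.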
Instruction *Instruction::clone() const {
  Instruction *New = nullptr;
  switch (getOpcode()) {
  default:
    llvm_unreachable("Unhandled Opcode.");
#define HANDLE_INST(num, opc, clas)                                            \
  case Instruction::opc:                                                       \
    New = cast<clas>(this)->cloneImpl();                                       \
    break;
#include "llvm/IR/Instruction.def"
#undef HANDLE_INST
  }

  New->SubclassOptionalData = SubclassOptionalData;
  New->copyMetadata(*this);
  return New;
}

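/// Scale "branch_weights" or value-profile ("VP") metadata by S/T. The counts
/// are widened to 128-bit APInts so the multiplication cannot overflow before
/// the final division.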
void Instruction::updateProfWeight(uint64_t S, uint64_t T) {
  auto *ProfileData = getMetadata(LLVMContext::MD_prof);
  if (ProfileData == nullptr)
    return;

  auto *ProfDataName = dyn_cast<MDString>(ProfileData->getOperand(0));
  if (!ProfDataName || (!ProfDataName->getString().equals("branch_weights") &&
                        !ProfDataName->getString().equals("VP")))
    return;

  MDBuilder MDB(getContext());
  SmallVector<Metadata *, 3> Vals;
  Vals.push_back(ProfileData->getOperand(0));
  APInt APS(128, S), APT(128, T);
  if (ProfDataName->getString().equals("branch_weights"))
    for (unsigned i = 1; i < ProfileData->getNumOperands(); i++) {
      // Using APInt::div may be expensive, but most cases should fit 64 bits.
      APInt Val(128,
                mdconst::dyn_extract<ConstantInt>(ProfileData->getOperand(i))
                    ->getValue()
                    .getZExtValue());
      Val *= APS;
      Vals.push_back(MDB.createConstant(
          ConstantInt::get(Type::getInt64Ty(getContext()),
                           Val.udiv(APT).getLimitedValue())));
    }
  else if (ProfDataName->getString().equals("VP"))
    for (unsigned i = 1; i < ProfileData->getNumOperands(); i += 2) {
      // The first value is the key of the value profile, which will not change.
      Vals.push_back(ProfileData->getOperand(i));
      // Using APInt::div may be expensive, but most cases should fit 64 bits.
      APInt Val(128,
                mdconst::dyn_extract<ConstantInt>(ProfileData->getOperand(i + 1))
                    ->getValue()
                    .getZExtValue());
      Val *= APS;
      Vals.push_back(MDB.createConstant(
          ConstantInt::get(Type::getInt64Ty(getContext()),
                           Val.udiv(APT).getLimitedValue())));
    }
  setMetadata(LLVMContext::MD_prof, MDNode::get(getContext(), Vals));
}

void Instruction::setProfWeight(uint64_t W) {
  assert((isa<CallInst>(this) || isa<InvokeInst>(this)) &&
         "Can only set weights for call and invoke instructions");
  SmallVector<uint32_t, 1> Weights;
  Weights.push_back(W);
  MDBuilder MDB(getContext());
  setMetadata(LLVMContext::MD_prof, MDB.createBranchWeights(Weights));
}
785