// SelectionDAGBuilder.h — LLVM revision 353358
1//===- SelectionDAGBuilder.h - Selection-DAG building -----------*- C++ -*-===// 2// 3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. 4// See https://llvm.org/LICENSE.txt for license information. 5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception 6// 7//===----------------------------------------------------------------------===// 8// 9// This implements routines for translating from LLVM IR into SelectionDAG IR. 10// 11//===----------------------------------------------------------------------===// 12 13#ifndef LLVM_LIB_CODEGEN_SELECTIONDAG_SELECTIONDAGBUILDER_H 14#define LLVM_LIB_CODEGEN_SELECTIONDAG_SELECTIONDAGBUILDER_H 15 16#include "StatepointLowering.h" 17#include "llvm/ADT/APInt.h" 18#include "llvm/ADT/ArrayRef.h" 19#include "llvm/ADT/DenseMap.h" 20#include "llvm/ADT/MapVector.h" 21#include "llvm/ADT/SmallVector.h" 22#include "llvm/Analysis/AliasAnalysis.h" 23#include "llvm/CodeGen/ISDOpcodes.h" 24#include "llvm/CodeGen/SelectionDAG.h" 25#include "llvm/CodeGen/SelectionDAGNodes.h" 26#include "llvm/CodeGen/SwitchLoweringUtils.h" 27#include "llvm/CodeGen/TargetLowering.h" 28#include "llvm/CodeGen/ValueTypes.h" 29#include "llvm/IR/CallSite.h" 30#include "llvm/IR/DebugLoc.h" 31#include "llvm/IR/Instruction.h" 32#include "llvm/IR/Statepoint.h" 33#include "llvm/Support/BranchProbability.h" 34#include "llvm/Support/CodeGen.h" 35#include "llvm/Support/ErrorHandling.h" 36#include "llvm/Support/MachineValueType.h" 37#include <algorithm> 38#include <cassert> 39#include <cstdint> 40#include <utility> 41#include <vector> 42 43namespace llvm { 44 45class AllocaInst; 46class AtomicCmpXchgInst; 47class AtomicRMWInst; 48class BasicBlock; 49class BranchInst; 50class CallInst; 51class CallBrInst; 52class CatchPadInst; 53class CatchReturnInst; 54class CatchSwitchInst; 55class CleanupPadInst; 56class CleanupReturnInst; 57class Constant; 58class ConstantInt; 59class ConstrainedFPIntrinsic; 60class DbgValueInst; 61class DataLayout; 
62class DIExpression; 63class DILocalVariable; 64class DILocation; 65class FenceInst; 66class FunctionLoweringInfo; 67class GCFunctionInfo; 68class GCRelocateInst; 69class GCResultInst; 70class IndirectBrInst; 71class InvokeInst; 72class LandingPadInst; 73class LLVMContext; 74class LoadInst; 75class MachineBasicBlock; 76class PHINode; 77class ResumeInst; 78class ReturnInst; 79class SDDbgValue; 80class StoreInst; 81class SwiftErrorValueTracking; 82class SwitchInst; 83class TargetLibraryInfo; 84class TargetMachine; 85class Type; 86class VAArgInst; 87class UnreachableInst; 88class Use; 89class User; 90class Value; 91 92//===----------------------------------------------------------------------===// 93/// SelectionDAGBuilder - This is the common target-independent lowering 94/// implementation that is parameterized by a TargetLowering object. 95/// 96class SelectionDAGBuilder { 97 /// The current instruction being visited. 98 const Instruction *CurInst = nullptr; 99 100 DenseMap<const Value*, SDValue> NodeMap; 101 102 /// Maps argument value for unused arguments. This is used 103 /// to preserve debug information for incoming arguments. 104 DenseMap<const Value*, SDValue> UnusedArgNodeMap; 105 106 /// Helper type for DanglingDebugInfoMap. 107 class DanglingDebugInfo { 108 const DbgValueInst* DI = nullptr; 109 DebugLoc dl; 110 unsigned SDNodeOrder = 0; 111 112 public: 113 DanglingDebugInfo() = default; 114 DanglingDebugInfo(const DbgValueInst *di, DebugLoc DL, unsigned SDNO) 115 : DI(di), dl(std::move(DL)), SDNodeOrder(SDNO) {} 116 117 const DbgValueInst* getDI() { return DI; } 118 DebugLoc getdl() { return dl; } 119 unsigned getSDNodeOrder() { return SDNodeOrder; } 120 }; 121 122 /// Helper type for DanglingDebugInfoMap. 123 typedef std::vector<DanglingDebugInfo> DanglingDebugInfoVector; 124 125 /// Keeps track of dbg_values for which we have not yet seen the referent. 126 /// We defer handling these until we do see it. 
127 MapVector<const Value*, DanglingDebugInfoVector> DanglingDebugInfoMap; 128 129public: 130 /// Loads are not emitted to the program immediately. We bunch them up and 131 /// then emit token factor nodes when possible. This allows us to get simple 132 /// disambiguation between loads without worrying about alias analysis. 133 SmallVector<SDValue, 8> PendingLoads; 134 135 /// State used while lowering a statepoint sequence (gc_statepoint, 136 /// gc_relocate, and gc_result). See StatepointLowering.hpp/cpp for details. 137 StatepointLoweringState StatepointLowering; 138 139private: 140 /// CopyToReg nodes that copy values to virtual registers for export to other 141 /// blocks need to be emitted before any terminator instruction, but they have 142 /// no other ordering requirements. We bunch them up and the emit a single 143 /// tokenfactor for them just before terminator instructions. 144 SmallVector<SDValue, 8> PendingExports; 145 146 /// A unique monotonically increasing number used to order the SDNodes we 147 /// create. 148 unsigned SDNodeOrder; 149 150 /// Determine the rank by weight of CC in [First,Last]. If CC has more weight 151 /// than each cluster in the range, its rank is 0. 152 unsigned caseClusterRank(const SwitchCG::CaseCluster &CC, 153 SwitchCG::CaseClusterIt First, 154 SwitchCG::CaseClusterIt Last); 155 156 /// Emit comparison and split W into two subtrees. 157 void splitWorkItem(SwitchCG::SwitchWorkList &WorkList, 158 const SwitchCG::SwitchWorkListItem &W, Value *Cond, 159 MachineBasicBlock *SwitchMBB); 160 161 /// Lower W. 
162 void lowerWorkItem(SwitchCG::SwitchWorkListItem W, Value *Cond, 163 MachineBasicBlock *SwitchMBB, 164 MachineBasicBlock *DefaultMBB); 165 166 /// Peel the top probability case if it exceeds the threshold 167 MachineBasicBlock * 168 peelDominantCaseCluster(const SwitchInst &SI, 169 SwitchCG::CaseClusterVector &Clusters, 170 BranchProbability &PeeledCaseProb); 171 172 /// A class which encapsulates all of the information needed to generate a 173 /// stack protector check and signals to isel via its state being initialized 174 /// that a stack protector needs to be generated. 175 /// 176 /// *NOTE* The following is a high level documentation of SelectionDAG Stack 177 /// Protector Generation. The reason that it is placed here is for a lack of 178 /// other good places to stick it. 179 /// 180 /// High Level Overview of SelectionDAG Stack Protector Generation: 181 /// 182 /// Previously, generation of stack protectors was done exclusively in the 183 /// pre-SelectionDAG Codegen LLVM IR Pass "Stack Protector". This necessitated 184 /// splitting basic blocks at the IR level to create the success/failure basic 185 /// blocks in the tail of the basic block in question. As a result of this, 186 /// calls that would have qualified for the sibling call optimization were no 187 /// longer eligible for optimization since said calls were no longer right in 188 /// the "tail position" (i.e. the immediate predecessor of a ReturnInst 189 /// instruction). 190 /// 191 /// Then it was noticed that since the sibling call optimization causes the 192 /// callee to reuse the caller's stack, if we could delay the generation of 193 /// the stack protector check until later in CodeGen after the sibling call 194 /// decision was made, we get both the tail call optimization and the stack 195 /// protector check! 196 /// 197 /// A few goals in solving this problem were: 198 /// 199 /// 1. Preserve the architecture independence of stack protector generation. 200 /// 201 /// 2. 
Preserve the normal IR level stack protector check for platforms like 202 /// OpenBSD for which we support platform-specific stack protector 203 /// generation. 204 /// 205 /// The main problem that guided the present solution is that one can not 206 /// solve this problem in an architecture independent manner at the IR level 207 /// only. This is because: 208 /// 209 /// 1. The decision on whether or not to perform a sibling call on certain 210 /// platforms (for instance i386) requires lower level information 211 /// related to available registers that can not be known at the IR level. 212 /// 213 /// 2. Even if the previous point were not true, the decision on whether to 214 /// perform a tail call is done in LowerCallTo in SelectionDAG which 215 /// occurs after the Stack Protector Pass. As a result, one would need to 216 /// put the relevant callinst into the stack protector check success 217 /// basic block (where the return inst is placed) and then move it back 218 /// later at SelectionDAG/MI time before the stack protector check if the 219 /// tail call optimization failed. The MI level option was nixed 220 /// immediately since it would require platform-specific pattern 221 /// matching. The SelectionDAG level option was nixed because 222 /// SelectionDAG only processes one IR level basic block at a time 223 /// implying one could not create a DAG Combine to move the callinst. 224 /// 225 /// To get around this problem a few things were realized: 226 /// 227 /// 1. While one can not handle multiple IR level basic blocks at the 228 /// SelectionDAG Level, one can generate multiple machine basic blocks 229 /// for one IR level basic block. This is how we handle bit tests and 230 /// switches. 231 /// 232 /// 2. At the MI level, tail calls are represented via a special return 233 /// MIInst called "tcreturn". 
Thus if we know the basic block in which we 234 /// wish to insert the stack protector check, we get the correct behavior 235 /// by always inserting the stack protector check right before the return 236 /// statement. This is a "magical transformation" since no matter where 237 /// the stack protector check intrinsic is, we always insert the stack 238 /// protector check code at the end of the BB. 239 /// 240 /// Given the aforementioned constraints, the following solution was devised: 241 /// 242 /// 1. On platforms that do not support SelectionDAG stack protector check 243 /// generation, allow for the normal IR level stack protector check 244 /// generation to continue. 245 /// 246 /// 2. On platforms that do support SelectionDAG stack protector check 247 /// generation: 248 /// 249 /// a. Use the IR level stack protector pass to decide if a stack 250 /// protector is required/which BB we insert the stack protector check 251 /// in by reusing the logic already therein. If we wish to generate a 252 /// stack protector check in a basic block, we place a special IR 253 /// intrinsic called llvm.stackprotectorcheck right before the BB's 254 /// returninst or if there is a callinst that could potentially be 255 /// sibling call optimized, before the call inst. 256 /// 257 /// b. Then when a BB with said intrinsic is processed, we codegen the BB 258 /// normally via SelectBasicBlock. In said process, when we visit the 259 /// stack protector check, we do not actually emit anything into the 260 /// BB. Instead, we just initialize the stack protector descriptor 261 /// class (which involves stashing information/creating the success 262 /// mbbb and the failure mbb if we have not created one for this 263 /// function yet) and export the guard variable that we are going to 264 /// compare. 265 /// 266 /// c. 
After we finish selecting the basic block, in FinishBasicBlock if 267 /// the StackProtectorDescriptor attached to the SelectionDAGBuilder is 268 /// initialized, we produce the validation code with one of these 269 /// techniques: 270 /// 1) with a call to a guard check function 271 /// 2) with inlined instrumentation 272 /// 273 /// 1) We insert a call to the check function before the terminator. 274 /// 275 /// 2) We first find a splice point in the parent basic block 276 /// before the terminator and then splice the terminator of said basic 277 /// block into the success basic block. Then we code-gen a new tail for 278 /// the parent basic block consisting of the two loads, the comparison, 279 /// and finally two branches to the success/failure basic blocks. We 280 /// conclude by code-gening the failure basic block if we have not 281 /// code-gened it already (all stack protector checks we generate in 282 /// the same function, use the same failure basic block). 283 class StackProtectorDescriptor { 284 public: 285 StackProtectorDescriptor() = default; 286 287 /// Returns true if all fields of the stack protector descriptor are 288 /// initialized implying that we should/are ready to emit a stack protector. 289 bool shouldEmitStackProtector() const { 290 return ParentMBB && SuccessMBB && FailureMBB; 291 } 292 293 bool shouldEmitFunctionBasedCheckStackProtector() const { 294 return ParentMBB && !SuccessMBB && !FailureMBB; 295 } 296 297 /// Initialize the stack protector descriptor structure for a new basic 298 /// block. 299 void initialize(const BasicBlock *BB, MachineBasicBlock *MBB, 300 bool FunctionBasedInstrumentation) { 301 // Make sure we are not initialized yet. 
302 assert(!shouldEmitStackProtector() && "Stack Protector Descriptor is " 303 "already initialized!"); 304 ParentMBB = MBB; 305 if (!FunctionBasedInstrumentation) { 306 SuccessMBB = AddSuccessorMBB(BB, MBB, /* IsLikely */ true); 307 FailureMBB = AddSuccessorMBB(BB, MBB, /* IsLikely */ false, FailureMBB); 308 } 309 } 310 311 /// Reset state that changes when we handle different basic blocks. 312 /// 313 /// This currently includes: 314 /// 315 /// 1. The specific basic block we are generating a 316 /// stack protector for (ParentMBB). 317 /// 318 /// 2. The successor machine basic block that will contain the tail of 319 /// parent mbb after we create the stack protector check (SuccessMBB). This 320 /// BB is visited only on stack protector check success. 321 void resetPerBBState() { 322 ParentMBB = nullptr; 323 SuccessMBB = nullptr; 324 } 325 326 /// Reset state that only changes when we switch functions. 327 /// 328 /// This currently includes: 329 /// 330 /// 1. FailureMBB since we reuse the failure code path for all stack 331 /// protector checks created in an individual function. 332 /// 333 /// 2.The guard variable since the guard variable we are checking against is 334 /// always the same. 335 void resetPerFunctionState() { 336 FailureMBB = nullptr; 337 } 338 339 MachineBasicBlock *getParentMBB() { return ParentMBB; } 340 MachineBasicBlock *getSuccessMBB() { return SuccessMBB; } 341 MachineBasicBlock *getFailureMBB() { return FailureMBB; } 342 343 private: 344 /// The basic block for which we are generating the stack protector. 345 /// 346 /// As a result of stack protector generation, we will splice the 347 /// terminators of this basic block into the successor mbb SuccessMBB and 348 /// replace it with a compare/branch to the successor mbbs 349 /// SuccessMBB/FailureMBB depending on whether or not the stack protector 350 /// was violated. 
351 MachineBasicBlock *ParentMBB = nullptr; 352 353 /// A basic block visited on stack protector check success that contains the 354 /// terminators of ParentMBB. 355 MachineBasicBlock *SuccessMBB = nullptr; 356 357 /// This basic block visited on stack protector check failure that will 358 /// contain a call to __stack_chk_fail(). 359 MachineBasicBlock *FailureMBB = nullptr; 360 361 /// Add a successor machine basic block to ParentMBB. If the successor mbb 362 /// has not been created yet (i.e. if SuccMBB = 0), then the machine basic 363 /// block will be created. Assign a large weight if IsLikely is true. 364 MachineBasicBlock *AddSuccessorMBB(const BasicBlock *BB, 365 MachineBasicBlock *ParentMBB, 366 bool IsLikely, 367 MachineBasicBlock *SuccMBB = nullptr); 368 }; 369 370private: 371 const TargetMachine &TM; 372 373public: 374 /// Lowest valid SDNodeOrder. The special case 0 is reserved for scheduling 375 /// nodes without a corresponding SDNode. 376 static const unsigned LowestSDNodeOrder = 1; 377 378 SelectionDAG &DAG; 379 const DataLayout *DL = nullptr; 380 AliasAnalysis *AA = nullptr; 381 const TargetLibraryInfo *LibInfo; 382 383 class SDAGSwitchLowering : public SwitchCG::SwitchLowering { 384 public: 385 SDAGSwitchLowering(SelectionDAGBuilder *sdb, FunctionLoweringInfo &funcinfo) 386 : SwitchCG::SwitchLowering(funcinfo), SDB(sdb) {} 387 388 virtual void addSuccessorWithProb( 389 MachineBasicBlock *Src, MachineBasicBlock *Dst, 390 BranchProbability Prob = BranchProbability::getUnknown()) override { 391 SDB->addSuccessorWithProb(Src, Dst, Prob); 392 } 393 394 private: 395 SelectionDAGBuilder *SDB; 396 }; 397 398 std::unique_ptr<SDAGSwitchLowering> SL; 399 400 /// A StackProtectorDescriptor structure used to communicate stack protector 401 /// information in between SelectBasicBlock and FinishBasicBlock. 402 StackProtectorDescriptor SPDescriptor; 403 404 // Emit PHI-node-operand constants only once even if used by multiple 405 // PHI nodes. 
406 DenseMap<const Constant *, unsigned> ConstantsOut; 407 408 /// Information about the function as a whole. 409 FunctionLoweringInfo &FuncInfo; 410 411 /// Information about the swifterror values used throughout the function. 412 SwiftErrorValueTracking &SwiftError; 413 414 /// Garbage collection metadata for the function. 415 GCFunctionInfo *GFI; 416 417 /// Map a landing pad to the call site indexes. 418 DenseMap<MachineBasicBlock *, SmallVector<unsigned, 4>> LPadToCallSiteMap; 419 420 /// This is set to true if a call in the current block has been translated as 421 /// a tail call. In this case, no subsequent DAG nodes should be created. 422 bool HasTailCall = false; 423 424 LLVMContext *Context; 425 426 SelectionDAGBuilder(SelectionDAG &dag, FunctionLoweringInfo &funcinfo, 427 SwiftErrorValueTracking &swifterror, CodeGenOpt::Level ol) 428 : SDNodeOrder(LowestSDNodeOrder), TM(dag.getTarget()), DAG(dag), 429 SL(make_unique<SDAGSwitchLowering>(this, funcinfo)), FuncInfo(funcinfo), 430 SwiftError(swifterror) {} 431 432 void init(GCFunctionInfo *gfi, AliasAnalysis *AA, 433 const TargetLibraryInfo *li); 434 435 /// Clear out the current SelectionDAG and the associated state and prepare 436 /// this SelectionDAGBuilder object to be used for a new block. This doesn't 437 /// clear out information about additional blocks that are needed to complete 438 /// switch lowering or PHI node updating; that information is cleared out as 439 /// it is consumed. 440 void clear(); 441 442 /// Clear the dangling debug information map. This function is separated from 443 /// the clear so that debug information that is dangling in a basic block can 444 /// be properly resolved in a different basic block. This allows the 445 /// SelectionDAG to resolve dangling debug information attached to PHI nodes. 446 void clearDanglingDebugInfo(); 447 448 /// Return the current virtual root of the Selection DAG, flushing any 449 /// PendingLoad items. 
This must be done before emitting a store or any other 450 /// node that may need to be ordered after any prior load instructions. 451 SDValue getRoot(); 452 453 /// Similar to getRoot, but instead of flushing all the PendingLoad items, 454 /// flush all the PendingExports items. It is necessary to do this before 455 /// emitting a terminator instruction. 456 SDValue getControlRoot(); 457 458 SDLoc getCurSDLoc() const { 459 return SDLoc(CurInst, SDNodeOrder); 460 } 461 462 DebugLoc getCurDebugLoc() const { 463 return CurInst ? CurInst->getDebugLoc() : DebugLoc(); 464 } 465 466 void CopyValueToVirtualRegister(const Value *V, unsigned Reg); 467 468 void visit(const Instruction &I); 469 470 void visit(unsigned Opcode, const User &I); 471 472 /// If there was virtual register allocated for the value V emit CopyFromReg 473 /// of the specified type Ty. Return empty SDValue() otherwise. 474 SDValue getCopyFromRegs(const Value *V, Type *Ty); 475 476 /// If we have dangling debug info that describes \p Variable, or an 477 /// overlapping part of variable considering the \p Expr, then this method 478 /// will drop that debug info as it isn't valid any longer. 479 void dropDanglingDebugInfo(const DILocalVariable *Variable, 480 const DIExpression *Expr); 481 482 /// If we saw an earlier dbg_value referring to V, generate the debug data 483 /// structures now that we've seen its definition. 484 void resolveDanglingDebugInfo(const Value *V, SDValue Val); 485 486 /// For the given dangling debuginfo record, perform last-ditch efforts to 487 /// resolve the debuginfo to something that is represented in this DAG. If 488 /// this cannot be done, produce an Undef debug value record. 489 void salvageUnresolvedDbgValue(DanglingDebugInfo &DDI); 490 491 /// For a given Value, attempt to create and record a SDDbgValue in the 492 /// SelectionDAG. 
493 bool handleDebugValue(const Value *V, DILocalVariable *Var, 494 DIExpression *Expr, DebugLoc CurDL, 495 DebugLoc InstDL, unsigned Order); 496 497 /// Evict any dangling debug information, attempting to salvage it first. 498 void resolveOrClearDbgInfo(); 499 500 SDValue getValue(const Value *V); 501 bool findValue(const Value *V) const; 502 503 /// Return the SDNode for the specified IR value if it exists. 504 SDNode *getNodeForIRValue(const Value *V) { 505 if (NodeMap.find(V) == NodeMap.end()) 506 return nullptr; 507 return NodeMap[V].getNode(); 508 } 509 510 SDValue getNonRegisterValue(const Value *V); 511 SDValue getValueImpl(const Value *V); 512 513 void setValue(const Value *V, SDValue NewN) { 514 SDValue &N = NodeMap[V]; 515 assert(!N.getNode() && "Already set a value for this node!"); 516 N = NewN; 517 } 518 519 void setUnusedArgValue(const Value *V, SDValue NewN) { 520 SDValue &N = UnusedArgNodeMap[V]; 521 assert(!N.getNode() && "Already set a value for this node!"); 522 N = NewN; 523 } 524 525 void FindMergedConditions(const Value *Cond, MachineBasicBlock *TBB, 526 MachineBasicBlock *FBB, MachineBasicBlock *CurBB, 527 MachineBasicBlock *SwitchBB, 528 Instruction::BinaryOps Opc, BranchProbability TProb, 529 BranchProbability FProb, bool InvertCond); 530 void EmitBranchForMergedCondition(const Value *Cond, MachineBasicBlock *TBB, 531 MachineBasicBlock *FBB, 532 MachineBasicBlock *CurBB, 533 MachineBasicBlock *SwitchBB, 534 BranchProbability TProb, BranchProbability FProb, 535 bool InvertCond); 536 bool ShouldEmitAsBranches(const std::vector<SwitchCG::CaseBlock> &Cases); 537 bool isExportableFromCurrentBlock(const Value *V, const BasicBlock *FromBB); 538 void CopyToExportRegsIfNeeded(const Value *V); 539 void ExportFromCurrentBlock(const Value *V); 540 void LowerCallTo(ImmutableCallSite CS, SDValue Callee, bool IsTailCall, 541 const BasicBlock *EHPadBB = nullptr); 542 543 // Lower range metadata from 0 to N to assert zext to an integer of nearest 544 // 
floor power of two. 545 SDValue lowerRangeToAssertZExt(SelectionDAG &DAG, const Instruction &I, 546 SDValue Op); 547 548 void populateCallLoweringInfo(TargetLowering::CallLoweringInfo &CLI, 549 const CallBase *Call, unsigned ArgIdx, 550 unsigned NumArgs, SDValue Callee, 551 Type *ReturnTy, bool IsPatchPoint); 552 553 std::pair<SDValue, SDValue> 554 lowerInvokable(TargetLowering::CallLoweringInfo &CLI, 555 const BasicBlock *EHPadBB = nullptr); 556 557 /// When an MBB was split during scheduling, update the 558 /// references that need to refer to the last resulting block. 559 void UpdateSplitBlock(MachineBasicBlock *First, MachineBasicBlock *Last); 560 561 /// Describes a gc.statepoint or a gc.statepoint like thing for the purposes 562 /// of lowering into a STATEPOINT node. 563 struct StatepointLoweringInfo { 564 /// Bases[i] is the base pointer for Ptrs[i]. Together they denote the set 565 /// of gc pointers this STATEPOINT has to relocate. 566 SmallVector<const Value *, 16> Bases; 567 SmallVector<const Value *, 16> Ptrs; 568 569 /// The set of gc.relocate calls associated with this gc.statepoint. 570 SmallVector<const GCRelocateInst *, 16> GCRelocates; 571 572 /// The full list of gc arguments to the gc.statepoint being lowered. 573 ArrayRef<const Use> GCArgs; 574 575 /// The gc.statepoint instruction. 576 const Instruction *StatepointInstr = nullptr; 577 578 /// The list of gc transition arguments present in the gc.statepoint being 579 /// lowered. 580 ArrayRef<const Use> GCTransitionArgs; 581 582 /// The ID that the resulting STATEPOINT instruction has to report. 583 unsigned ID = -1; 584 585 /// Information regarding the underlying call instruction. 586 TargetLowering::CallLoweringInfo CLI; 587 588 /// The deoptimization state associated with this gc.statepoint call, if 589 /// any. 590 ArrayRef<const Use> DeoptState; 591 592 /// Flags associated with the meta arguments being lowered. 
593 uint64_t StatepointFlags = -1; 594 595 /// The number of patchable bytes the call needs to get lowered into. 596 unsigned NumPatchBytes = -1; 597 598 /// The exception handling unwind destination, in case this represents an 599 /// invoke of gc.statepoint. 600 const BasicBlock *EHPadBB = nullptr; 601 602 explicit StatepointLoweringInfo(SelectionDAG &DAG) : CLI(DAG) {} 603 }; 604 605 /// Lower \p SLI into a STATEPOINT instruction. 606 SDValue LowerAsSTATEPOINT(StatepointLoweringInfo &SI); 607 608 // This function is responsible for the whole statepoint lowering process. 609 // It uniformly handles invoke and call statepoints. 610 void LowerStatepoint(ImmutableStatepoint ISP, 611 const BasicBlock *EHPadBB = nullptr); 612 613 void LowerCallSiteWithDeoptBundle(const CallBase *Call, SDValue Callee, 614 const BasicBlock *EHPadBB); 615 616 void LowerDeoptimizeCall(const CallInst *CI); 617 void LowerDeoptimizingReturn(); 618 619 void LowerCallSiteWithDeoptBundleImpl(const CallBase *Call, SDValue Callee, 620 const BasicBlock *EHPadBB, 621 bool VarArgDisallowed, 622 bool ForceVoidReturnTy); 623 624 /// Returns the type of FrameIndex and TargetFrameIndex nodes. 625 MVT getFrameIndexTy() { 626 return DAG.getTargetLoweringInfo().getFrameIndexTy(DAG.getDataLayout()); 627 } 628 629private: 630 // Terminator instructions. 
631 void visitRet(const ReturnInst &I); 632 void visitBr(const BranchInst &I); 633 void visitSwitch(const SwitchInst &I); 634 void visitIndirectBr(const IndirectBrInst &I); 635 void visitUnreachable(const UnreachableInst &I); 636 void visitCleanupRet(const CleanupReturnInst &I); 637 void visitCatchSwitch(const CatchSwitchInst &I); 638 void visitCatchRet(const CatchReturnInst &I); 639 void visitCatchPad(const CatchPadInst &I); 640 void visitCleanupPad(const CleanupPadInst &CPI); 641 642 BranchProbability getEdgeProbability(const MachineBasicBlock *Src, 643 const MachineBasicBlock *Dst) const; 644 void addSuccessorWithProb( 645 MachineBasicBlock *Src, MachineBasicBlock *Dst, 646 BranchProbability Prob = BranchProbability::getUnknown()); 647 648public: 649 void visitSwitchCase(SwitchCG::CaseBlock &CB, MachineBasicBlock *SwitchBB); 650 void visitSPDescriptorParent(StackProtectorDescriptor &SPD, 651 MachineBasicBlock *ParentBB); 652 void visitSPDescriptorFailure(StackProtectorDescriptor &SPD); 653 void visitBitTestHeader(SwitchCG::BitTestBlock &B, 654 MachineBasicBlock *SwitchBB); 655 void visitBitTestCase(SwitchCG::BitTestBlock &BB, MachineBasicBlock *NextMBB, 656 BranchProbability BranchProbToNext, unsigned Reg, 657 SwitchCG::BitTestCase &B, MachineBasicBlock *SwitchBB); 658 void visitJumpTable(SwitchCG::JumpTable &JT); 659 void visitJumpTableHeader(SwitchCG::JumpTable &JT, 660 SwitchCG::JumpTableHeader &JTH, 661 MachineBasicBlock *SwitchBB); 662 663private: 664 // These all get lowered before this pass. 
665 void visitInvoke(const InvokeInst &I); 666 void visitCallBr(const CallBrInst &I); 667 void visitResume(const ResumeInst &I); 668 669 void visitUnary(const User &I, unsigned Opcode); 670 void visitFNeg(const User &I) { visitUnary(I, ISD::FNEG); } 671 672 void visitBinary(const User &I, unsigned Opcode); 673 void visitShift(const User &I, unsigned Opcode); 674 void visitAdd(const User &I) { visitBinary(I, ISD::ADD); } 675 void visitFAdd(const User &I) { visitBinary(I, ISD::FADD); } 676 void visitSub(const User &I) { visitBinary(I, ISD::SUB); } 677 void visitFSub(const User &I); 678 void visitMul(const User &I) { visitBinary(I, ISD::MUL); } 679 void visitFMul(const User &I) { visitBinary(I, ISD::FMUL); } 680 void visitURem(const User &I) { visitBinary(I, ISD::UREM); } 681 void visitSRem(const User &I) { visitBinary(I, ISD::SREM); } 682 void visitFRem(const User &I) { visitBinary(I, ISD::FREM); } 683 void visitUDiv(const User &I) { visitBinary(I, ISD::UDIV); } 684 void visitSDiv(const User &I); 685 void visitFDiv(const User &I) { visitBinary(I, ISD::FDIV); } 686 void visitAnd (const User &I) { visitBinary(I, ISD::AND); } 687 void visitOr (const User &I) { visitBinary(I, ISD::OR); } 688 void visitXor (const User &I) { visitBinary(I, ISD::XOR); } 689 void visitShl (const User &I) { visitShift(I, ISD::SHL); } 690 void visitLShr(const User &I) { visitShift(I, ISD::SRL); } 691 void visitAShr(const User &I) { visitShift(I, ISD::SRA); } 692 void visitICmp(const User &I); 693 void visitFCmp(const User &I); 694 // Visit the conversion instructions 695 void visitTrunc(const User &I); 696 void visitZExt(const User &I); 697 void visitSExt(const User &I); 698 void visitFPTrunc(const User &I); 699 void visitFPExt(const User &I); 700 void visitFPToUI(const User &I); 701 void visitFPToSI(const User &I); 702 void visitUIToFP(const User &I); 703 void visitSIToFP(const User &I); 704 void visitPtrToInt(const User &I); 705 void visitIntToPtr(const User &I); 706 void visitBitCast(const 
User &I); 707 void visitAddrSpaceCast(const User &I); 708 709 void visitExtractElement(const User &I); 710 void visitInsertElement(const User &I); 711 void visitShuffleVector(const User &I); 712 713 void visitExtractValue(const User &I); 714 void visitInsertValue(const User &I); 715 void visitLandingPad(const LandingPadInst &LP); 716 717 void visitGetElementPtr(const User &I); 718 void visitSelect(const User &I); 719 720 void visitAlloca(const AllocaInst &I); 721 void visitLoad(const LoadInst &I); 722 void visitStore(const StoreInst &I); 723 void visitMaskedLoad(const CallInst &I, bool IsExpanding = false); 724 void visitMaskedStore(const CallInst &I, bool IsCompressing = false); 725 void visitMaskedGather(const CallInst &I); 726 void visitMaskedScatter(const CallInst &I); 727 void visitAtomicCmpXchg(const AtomicCmpXchgInst &I); 728 void visitAtomicRMW(const AtomicRMWInst &I); 729 void visitFence(const FenceInst &I); 730 void visitPHI(const PHINode &I); 731 void visitCall(const CallInst &I); 732 bool visitMemCmpCall(const CallInst &I); 733 bool visitMemPCpyCall(const CallInst &I); 734 bool visitMemChrCall(const CallInst &I); 735 bool visitStrCpyCall(const CallInst &I, bool isStpcpy); 736 bool visitStrCmpCall(const CallInst &I); 737 bool visitStrLenCall(const CallInst &I); 738 bool visitStrNLenCall(const CallInst &I); 739 bool visitUnaryFloatCall(const CallInst &I, unsigned Opcode); 740 bool visitBinaryFloatCall(const CallInst &I, unsigned Opcode); 741 void visitAtomicLoad(const LoadInst &I); 742 void visitAtomicStore(const StoreInst &I); 743 void visitLoadFromSwiftError(const LoadInst &I); 744 void visitStoreToSwiftError(const StoreInst &I); 745 746 void visitInlineAsm(ImmutableCallSite CS); 747 void visitIntrinsicCall(const CallInst &I, unsigned Intrinsic); 748 void visitTargetIntrinsic(const CallInst &I, unsigned Intrinsic); 749 void visitConstrainedFPIntrinsic(const ConstrainedFPIntrinsic &FPI); 750 751 void visitVAStart(const CallInst &I); 752 void 
visitVAArg(const VAArgInst &I); 753 void visitVAEnd(const CallInst &I); 754 void visitVACopy(const CallInst &I); 755 void visitStackmap(const CallInst &I); 756 void visitPatchpoint(ImmutableCallSite CS, 757 const BasicBlock *EHPadBB = nullptr); 758 759 // These two are implemented in StatepointLowering.cpp 760 void visitGCRelocate(const GCRelocateInst &Relocate); 761 void visitGCResult(const GCResultInst &I); 762 763 void visitVectorReduce(const CallInst &I, unsigned Intrinsic); 764 765 void visitUserOp1(const Instruction &I) { 766 llvm_unreachable("UserOp1 should not exist at instruction selection time!"); 767 } 768 void visitUserOp2(const Instruction &I) { 769 llvm_unreachable("UserOp2 should not exist at instruction selection time!"); 770 } 771 772 void processIntegerCallValue(const Instruction &I, 773 SDValue Value, bool IsSigned); 774 775 void HandlePHINodesInSuccessorBlocks(const BasicBlock *LLVMBB); 776 777 void emitInlineAsmError(ImmutableCallSite CS, const Twine &Message); 778 779 /// If V is an function argument then create corresponding DBG_VALUE machine 780 /// instruction for it now. At the end of instruction selection, they will be 781 /// inserted to the entry BB. 782 bool EmitFuncArgumentDbgValue(const Value *V, DILocalVariable *Variable, 783 DIExpression *Expr, DILocation *DL, 784 bool IsDbgDeclare, const SDValue &N); 785 786 /// Return the next block after MBB, or nullptr if there is none. 787 MachineBasicBlock *NextBlock(MachineBasicBlock *MBB); 788 789 /// Update the DAG and DAG builder with the relevant information after 790 /// a new root node has been created which could be a tail call. 791 void updateDAGForMaybeTailCall(SDValue MaybeTC); 792 793 /// Return the appropriate SDDbgValue based on N. 794 SDDbgValue *getDbgValue(SDValue N, DILocalVariable *Variable, 795 DIExpression *Expr, const DebugLoc &dl, 796 unsigned DbgSDNodeOrder); 797 798 /// Lowers CallInst to an external symbol. 
799 void lowerCallToExternalSymbol(const CallInst &I, const char *FunctionName); 800}; 801 802/// This struct represents the registers (physical or virtual) 803/// that a particular set of values is assigned, and the type information about 804/// the value. The most common situation is to represent one value at a time, 805/// but struct or array values are handled element-wise as multiple values. The 806/// splitting of aggregates is performed recursively, so that we never have 807/// aggregate-typed registers. The values at this point do not necessarily have 808/// legal types, so each value may require one or more registers of some legal 809/// type. 810/// 811struct RegsForValue { 812 /// The value types of the values, which may not be legal, and 813 /// may need be promoted or synthesized from one or more registers. 814 SmallVector<EVT, 4> ValueVTs; 815 816 /// The value types of the registers. This is the same size as ValueVTs and it 817 /// records, for each value, what the type of the assigned register or 818 /// registers are. (Individual values are never synthesized from more than one 819 /// type of register.) 820 /// 821 /// With virtual registers, the contents of RegVTs is redundant with TLI's 822 /// getRegisterType member function, however when with physical registers 823 /// it is necessary to have a separate record of the types. 824 SmallVector<MVT, 4> RegVTs; 825 826 /// This list holds the registers assigned to the values. 827 /// Each legal or promoted value requires one register, and each 828 /// expanded value requires multiple registers. 829 SmallVector<unsigned, 4> Regs; 830 831 /// This list holds the number of registers for each value. 832 SmallVector<unsigned, 4> RegCount; 833 834 /// Records if this value needs to be treated in an ABI dependant manner, 835 /// different to normal type legalization. 
836 Optional<CallingConv::ID> CallConv; 837 838 RegsForValue() = default; 839 RegsForValue(const SmallVector<unsigned, 4> ®s, MVT regvt, EVT valuevt, 840 Optional<CallingConv::ID> CC = None); 841 RegsForValue(LLVMContext &Context, const TargetLowering &TLI, 842 const DataLayout &DL, unsigned Reg, Type *Ty, 843 Optional<CallingConv::ID> CC); 844 845 bool isABIMangled() const { 846 return CallConv.hasValue(); 847 } 848 849 /// Add the specified values to this one. 850 void append(const RegsForValue &RHS) { 851 ValueVTs.append(RHS.ValueVTs.begin(), RHS.ValueVTs.end()); 852 RegVTs.append(RHS.RegVTs.begin(), RHS.RegVTs.end()); 853 Regs.append(RHS.Regs.begin(), RHS.Regs.end()); 854 RegCount.push_back(RHS.Regs.size()); 855 } 856 857 /// Emit a series of CopyFromReg nodes that copies from this value and returns 858 /// the result as a ValueVTs value. This uses Chain/Flag as the input and 859 /// updates them for the output Chain/Flag. If the Flag pointer is NULL, no 860 /// flag is used. 861 SDValue getCopyFromRegs(SelectionDAG &DAG, FunctionLoweringInfo &FuncInfo, 862 const SDLoc &dl, SDValue &Chain, SDValue *Flag, 863 const Value *V = nullptr) const; 864 865 /// Emit a series of CopyToReg nodes that copies the specified value into the 866 /// registers specified by this object. This uses Chain/Flag as the input and 867 /// updates them for the output Chain/Flag. If the Flag pointer is nullptr, no 868 /// flag is used. If V is not nullptr, then it is used in printing better 869 /// diagnostic messages on error. 870 void getCopyToRegs(SDValue Val, SelectionDAG &DAG, const SDLoc &dl, 871 SDValue &Chain, SDValue *Flag, const Value *V = nullptr, 872 ISD::NodeType PreferredExtendType = ISD::ANY_EXTEND) const; 873 874 /// Add this value to the specified inlineasm node operand list. This adds the 875 /// code marker, matching input operand index (if applicable), and includes 876 /// the number of values added into it. 
877 void AddInlineAsmOperands(unsigned Code, bool HasMatching, 878 unsigned MatchingIdx, const SDLoc &dl, 879 SelectionDAG &DAG, std::vector<SDValue> &Ops) const; 880 881 /// Check if the total RegCount is greater than one. 882 bool occupiesMultipleRegs() const { 883 return std::accumulate(RegCount.begin(), RegCount.end(), 0) > 1; 884 } 885 886 /// Return a list of registers and their sizes. 887 SmallVector<std::pair<unsigned, unsigned>, 4> getRegsAndSizes() const; 888}; 889 890} // end namespace llvm 891 892#endif // LLVM_LIB_CODEGEN_SELECTIONDAG_SELECTIONDAGBUILDER_H 893