        if (TotalOffs >= MaxOffs) {
          N = fastEmit_ri_(VT, ISD::ADD, N, NIsKill, TotalOffs, VT);
          if (!N) // Unhandled operand. Halt "fast" selection and bail.
            return false;
          NIsKill = true;
          TotalOffs = 0;
        }
        continue;
      }
      if (TotalOffs) {
        N = fastEmit_ri_(VT, ISD::ADD, N, NIsKill, TotalOffs, VT);
        if (!N) // Unhandled operand. Halt "fast" selection and bail.
          return false;
        NIsKill = true;
        TotalOffs = 0;
      }

      // N = N + Idx * ElementSize;
      uint64_t ElementSize = DL.getTypeAllocSize(Ty);
      std::pair<unsigned, bool> Pair = getRegForGEPIndex(Idx);
      unsigned IdxN = Pair.first;
      bool IdxNIsKill = Pair.second;
      if (!IdxN) // Unhandled operand. Halt "fast" selection and bail.
        return false;

      if (ElementSize != 1) {
        IdxN = fastEmit_ri_(VT, ISD::MUL, IdxN, IdxNIsKill, ElementSize, VT);
        if (!IdxN) // Unhandled operand. Halt "fast" selection and bail.
          return false;
        IdxNIsKill = true;
      }
      N = fastEmit_rr(VT, VT, ISD::ADD, N, NIsKill, IdxN, IdxNIsKill);
      if (!N) // Unhandled operand. Halt "fast" selection and bail.
        return false;
    }
  }
  if (TotalOffs) {
    N = fastEmit_ri_(VT, ISD::ADD, N, NIsKill, TotalOffs, VT);
    if (!N) // Unhandled operand. Halt "fast" selection and bail.
      return false;
  }

  // We successfully emitted code for the given LLVM Instruction.
  updateValueMap(I, N);
  return true;
}

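// For illustration (a sketch of the scheme above, with a made-up GEP): a
// variable-index GEP such as
//
//   %p = getelementptr i32* %base, i64 %i
//
// is lowered roughly as "N = base; IdxN = %i * 4; N = N + IdxN", with the
// multiply emitted via fastEmit_ri_ and the add via fastEmit_rr. Constant
// indices, by contrast, are accumulated into TotalOffs and folded into a
// single ADD-immediate, which is flushed early once it reaches MaxOffs.
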
bool FastISel::addStackMapLiveVars(SmallVectorImpl<MachineOperand> &Ops,
                                   const CallInst *CI, unsigned StartIdx) {
  for (unsigned i = StartIdx, e = CI->getNumArgOperands(); i != e; ++i) {
    Value *Val = CI->getArgOperand(i);
    // Check for constants and encode them with a StackMaps::ConstantOp prefix.
    if (const auto *C = dyn_cast<ConstantInt>(Val)) {
      Ops.push_back(MachineOperand::CreateImm(StackMaps::ConstantOp));
      Ops.push_back(MachineOperand::CreateImm(C->getSExtValue()));
    } else if (isa<ConstantPointerNull>(Val)) {
      Ops.push_back(MachineOperand::CreateImm(StackMaps::ConstantOp));
      Ops.push_back(MachineOperand::CreateImm(0));
    } else if (auto *AI = dyn_cast<AllocaInst>(Val)) {
      // Values coming from a stack location also require a special encoding,
      // but that is added later on by the target-specific frame index
      // elimination implementation.
      auto SI = FuncInfo.StaticAllocaMap.find(AI);
      if (SI != FuncInfo.StaticAllocaMap.end())
        Ops.push_back(MachineOperand::CreateFI(SI->second));
      else
        return false;
    } else {
      unsigned Reg = getRegForValue(Val);
      if (!Reg)
        return false;
      Ops.push_back(MachineOperand::CreateReg(Reg, /*IsDef=*/false));
    }
  }
  return true;
}

bool FastISel::selectStackmap(const CallInst *I) {
  // void @llvm.experimental.stackmap(i64 <id>, i32 <numShadowBytes>,
  //                                  [live variables...])
  assert(I->getCalledFunction()->getReturnType()->isVoidTy() &&
         "Stackmap cannot return a value.");

  // The stackmap intrinsic only records the live variables (the arguments
  // passed to it) and emits NOPs (if requested). Unlike the patchpoint
  // intrinsic, this won't be lowered to a function call. This means we don't
  // have to worry about calling conventions and target-specific lowering code.
  // Instead we perform the call lowering right here.
  //
  // CALLSEQ_START(0)
  // STACKMAP(id, nbytes, ...)
  // CALLSEQ_END(0, 0)
  //
  SmallVector<MachineOperand, 32> Ops;

  // Add the <id> and <numBytes> constants.
  assert(isa<ConstantInt>(I->getOperand(PatchPointOpers::IDPos)) &&
         "Expected a constant integer.");
  const auto *ID = cast<ConstantInt>(I->getOperand(PatchPointOpers::IDPos));
  Ops.push_back(MachineOperand::CreateImm(ID->getZExtValue()));

  assert(isa<ConstantInt>(I->getOperand(PatchPointOpers::NBytesPos)) &&
         "Expected a constant integer.");
  const auto *NumBytes =
      cast<ConstantInt>(I->getOperand(PatchPointOpers::NBytesPos));
  Ops.push_back(MachineOperand::CreateImm(NumBytes->getZExtValue()));

  // Push live variables for the stack map (skipping the first two arguments
  // <id> and <numBytes>).
  if (!addStackMapLiveVars(Ops, I, 2))
    return false;

  // We are not adding any register mask info here, because the stackmap
  // doesn't clobber anything.

  // Add scratch registers as implicit def and early clobber.
  CallingConv::ID CC = I->getCallingConv();
  const MCPhysReg *ScratchRegs = TLI.getScratchRegisters(CC);
  for (unsigned i = 0; ScratchRegs[i]; ++i)
    Ops.push_back(MachineOperand::CreateReg(
        ScratchRegs[i], /*IsDef=*/true, /*IsImp=*/true, /*IsKill=*/false,
        /*IsDead=*/false, /*IsUndef=*/false, /*IsEarlyClobber=*/true));

  // Issue CALLSEQ_START.
  unsigned AdjStackDown = TII.getCallFrameSetupOpcode();
  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AdjStackDown))
      .addImm(0);

  // Issue STACKMAP.
  MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                                    TII.get(TargetOpcode::STACKMAP));
  for (auto const &MO : Ops)
    MIB.addOperand(MO);

  // Issue CALLSEQ_END.
  unsigned AdjStackUp = TII.getCallFrameDestroyOpcode();
  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AdjStackUp))
      .addImm(0)
      .addImm(0);

  // Inform the Frame Information that we have a stackmap in this function.
  FuncInfo.MF->getFrameInfo()->setHasStackMap();

  return true;
}

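// For illustration (a hypothetical call, lowered per the expansion above):
//
//   call void @llvm.experimental.stackmap(i64 42, i32 8, i32 %x)
//
// becomes, roughly,
//
//   CALLSEQ_START(0)
//   STACKMAP(42, 8, <vreg for %x>, <scratch regs>)
//   CALLSEQ_END(0, 0)
//
// where a constant live variable would instead be encoded as a
// StackMaps::ConstantOp/value immediate pair rather than a register operand.
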
/// \brief Lower an argument list according to the target calling convention.
///
/// This is a helper for lowering intrinsics that follow a target calling
/// convention or require stack pointer adjustment. Only a subset of the
/// intrinsic's operands need to participate in the calling convention.
bool FastISel::lowerCallOperands(const CallInst *CI, unsigned ArgIdx,
                                 unsigned NumArgs, const Value *Callee,
                                 bool ForceRetVoidTy, CallLoweringInfo &CLI) {
  ArgListTy Args;
  Args.reserve(NumArgs);

  // Populate the argument list.
  // Attributes for args start at offset 1, after the return attribute.
  ImmutableCallSite CS(CI);
  for (unsigned ArgI = ArgIdx, ArgE = ArgIdx + NumArgs, AttrI = ArgIdx + 1;
       ArgI != ArgE; ++ArgI, ++AttrI) {
    Value *V = CI->getOperand(ArgI);

    assert(!V->getType()->isEmptyTy() && "Empty type passed to intrinsic.");

    ArgListEntry Entry;
    Entry.Val = V;
    Entry.Ty = V->getType();
    Entry.setAttributes(&CS, AttrI);
    Args.push_back(Entry);
  }

  Type *RetTy = ForceRetVoidTy ? Type::getVoidTy(CI->getType()->getContext())
                               : CI->getType();
  CLI.setCallee(CI->getCallingConv(), RetTy, Callee, std::move(Args), NumArgs);

  return lowerCallTo(CLI);
}

bool FastISel::selectPatchpoint(const CallInst *I) {
  // void|i64 @llvm.experimental.patchpoint.void|i64(i64 <id>,
  //                                                 i32 <numBytes>,
  //                                                 i8* <target>,
  //                                                 i32 <numArgs>,
  //                                                 [Args...],
  //                                                 [live variables...])
  CallingConv::ID CC = I->getCallingConv();
  bool IsAnyRegCC = CC == CallingConv::AnyReg;
  bool HasDef = !I->getType()->isVoidTy();
  Value *Callee = I->getOperand(PatchPointOpers::TargetPos);

  // Get the real number of arguments participating in the call <numArgs>.
  assert(isa<ConstantInt>(I->getOperand(PatchPointOpers::NArgPos)) &&
         "Expected a constant integer.");
  const auto *NumArgsVal =
      cast<ConstantInt>(I->getOperand(PatchPointOpers::NArgPos));
  unsigned NumArgs = NumArgsVal->getZExtValue();

  // Skip the four meta args: <id>, <numNopBytes>, <target>, <numArgs>.
  // This includes all meta-operands up to but not including CC.
  unsigned NumMetaOpers = PatchPointOpers::CCPos;
  assert(I->getNumArgOperands() >= NumMetaOpers + NumArgs &&
         "Not enough arguments provided to the patchpoint intrinsic");

  // For AnyRegCC the arguments are lowered later on manually.
  unsigned NumCallArgs = IsAnyRegCC ? 0 : NumArgs;
  CallLoweringInfo CLI;
  CLI.setIsPatchPoint();
  if (!lowerCallOperands(I, NumMetaOpers, NumCallArgs, Callee, IsAnyRegCC, CLI))
    return false;

  assert(CLI.Call && "No call instruction specified.");

  SmallVector<MachineOperand, 32> Ops;

  // Add an explicit result reg if we use the anyreg calling convention.
  if (IsAnyRegCC && HasDef) {
    assert(CLI.NumResultRegs == 0 && "Unexpected result register.");
    CLI.ResultReg = createResultReg(TLI.getRegClassFor(MVT::i64));
    CLI.NumResultRegs = 1;
    Ops.push_back(MachineOperand::CreateReg(CLI.ResultReg, /*IsDef=*/true));
  }

  // Add the <id> and <numBytes> constants.
  assert(isa<ConstantInt>(I->getOperand(PatchPointOpers::IDPos)) &&
         "Expected a constant integer.");
  const auto *ID = cast<ConstantInt>(I->getOperand(PatchPointOpers::IDPos));
  Ops.push_back(MachineOperand::CreateImm(ID->getZExtValue()));

  assert(isa<ConstantInt>(I->getOperand(PatchPointOpers::NBytesPos)) &&
         "Expected a constant integer.");
  const auto *NumBytes =
      cast<ConstantInt>(I->getOperand(PatchPointOpers::NBytesPos));
  Ops.push_back(MachineOperand::CreateImm(NumBytes->getZExtValue()));

  // Assume that the callee is a constant address or null pointer.
  // FIXME: handle function symbols in the future.
  uint64_t CalleeAddr;
  if (const auto *C = dyn_cast<IntToPtrInst>(Callee))
    CalleeAddr = cast<ConstantInt>(C->getOperand(0))->getZExtValue();
  else if (const auto *C = dyn_cast<ConstantExpr>(Callee)) {
    if (C->getOpcode() == Instruction::IntToPtr)
      CalleeAddr = cast<ConstantInt>(C->getOperand(0))->getZExtValue();
    else
      llvm_unreachable("Unsupported ConstantExpr.");
  } else if (isa<ConstantPointerNull>(Callee))
    CalleeAddr = 0;
  else
    llvm_unreachable("Unsupported callee address.");

  Ops.push_back(MachineOperand::CreateImm(CalleeAddr));

  // Adjust <numArgs> to account for any arguments that have been passed on
  // the stack instead.
  unsigned NumCallRegArgs = IsAnyRegCC ? NumArgs : CLI.OutRegs.size();
  Ops.push_back(MachineOperand::CreateImm(NumCallRegArgs));

  // Add the calling convention.
  Ops.push_back(MachineOperand::CreateImm((unsigned)CC));

  // Add the arguments we omitted previously. The register allocator should
  // place these in any free register.
  if (IsAnyRegCC) {
    for (unsigned i = NumMetaOpers, e = NumMetaOpers + NumArgs; i != e; ++i) {
      unsigned Reg = getRegForValue(I->getArgOperand(i));
      if (!Reg)
        return false;
      Ops.push_back(MachineOperand::CreateReg(Reg, /*IsDef=*/false));
    }
  }

  // Push the arguments from the call instruction.
  for (auto Reg : CLI.OutRegs)
    Ops.push_back(MachineOperand::CreateReg(Reg, /*IsDef=*/false));

  // Push live variables for the stack map.
  if (!addStackMapLiveVars(Ops, I, NumMetaOpers + NumArgs))
    return false;

  // Push the register mask info.
  Ops.push_back(MachineOperand::CreateRegMask(TRI.getCallPreservedMask(CC)));

  // Add scratch registers as implicit def and early clobber.
  const MCPhysReg *ScratchRegs = TLI.getScratchRegisters(CC);
  for (unsigned i = 0; ScratchRegs[i]; ++i)
    Ops.push_back(MachineOperand::CreateReg(
        ScratchRegs[i], /*IsDef=*/true, /*IsImp=*/true, /*IsKill=*/false,
        /*IsDead=*/false, /*IsUndef=*/false, /*IsEarlyClobber=*/true));

  // Add implicit defs (return values).
  for (auto Reg : CLI.InRegs)
    Ops.push_back(MachineOperand::CreateReg(Reg, /*IsDef=*/true,
                                            /*IsImp=*/true));

  // Insert the patchpoint instruction before the call generated by the target.
  MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, CLI.Call, DbgLoc,
                                    TII.get(TargetOpcode::PATCHPOINT));

  for (auto &MO : Ops)
    MIB.addOperand(MO);

  MIB->setPhysRegsDeadExcept(CLI.InRegs, TRI);

  // Delete the original call instruction.
  CLI.Call->eraseFromParent();

  // Inform the Frame Information that we have a patchpoint in this function.
  FuncInfo.MF->getFrameInfo()->setHasPatchPoint();

  if (CLI.NumResultRegs)
    updateValueMap(I, CLI.ResultReg, CLI.NumResultRegs);
  return true;
}

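// For illustration: mirroring the order in which operands are pushed onto
// Ops above, a non-anyreg PATCHPOINT instruction carries its operands as
//
//   PATCHPOINT <id>, <numBytes>, <calleeAddr>, <numRegArgs>, <cc>,
//              <call args in registers...>, <live variables...>,
//              <regmask>, <scratch regs (implicit-def, early-clobber)>,
//              <return regs (implicit-def)>
//
// with an explicit i64 result register prepended only for the anyreg
// calling convention.
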
/// Returns an AttributeSet representing the attributes applied to the return
/// value of the given call.
static AttributeSet getReturnAttrs(FastISel::CallLoweringInfo &CLI) {
  SmallVector<Attribute::AttrKind, 2> Attrs;
  if (CLI.RetSExt)
    Attrs.push_back(Attribute::SExt);
  if (CLI.RetZExt)
    Attrs.push_back(Attribute::ZExt);
  if (CLI.IsInReg)
    Attrs.push_back(Attribute::InReg);

  return AttributeSet::get(CLI.RetTy->getContext(), AttributeSet::ReturnIndex,
                           Attrs);
}

bool FastISel::lowerCallTo(const CallInst *CI, const char *SymName,
                           unsigned NumArgs) {
  ImmutableCallSite CS(CI);

  PointerType *PT = cast<PointerType>(CS.getCalledValue()->getType());
  FunctionType *FTy = cast<FunctionType>(PT->getElementType());
  Type *RetTy = FTy->getReturnType();

  ArgListTy Args;
  Args.reserve(NumArgs);

  // Populate the argument list.
  // Attributes for args start at offset 1, after the return attribute.
  for (unsigned ArgI = 0; ArgI != NumArgs; ++ArgI) {
    Value *V = CI->getOperand(ArgI);

    assert(!V->getType()->isEmptyTy() && "Empty type passed to intrinsic.");

    ArgListEntry Entry;
    Entry.Val = V;
    Entry.Ty = V->getType();
    Entry.setAttributes(&CS, ArgI + 1);
    Args.push_back(Entry);
  }

  CallLoweringInfo CLI;
  CLI.setCallee(RetTy, FTy, SymName, std::move(Args), CS, NumArgs);

  return lowerCallTo(CLI);
}

bool FastISel::lowerCallTo(CallLoweringInfo &CLI) {
  // Handle the incoming return values from the call.
  CLI.clearIns();
  SmallVector<EVT, 4> RetTys;
  ComputeValueVTs(TLI, CLI.RetTy, RetTys);

  SmallVector<ISD::OutputArg, 4> Outs;
  GetReturnInfo(CLI.RetTy, getReturnAttrs(CLI), Outs, TLI);

  bool CanLowerReturn = TLI.CanLowerReturn(
      CLI.CallConv, *FuncInfo.MF, CLI.IsVarArg, Outs, CLI.RetTy->getContext());

  // FIXME: sret demotion isn't supported yet; bail out.
  if (!CanLowerReturn)
    return false;

  for (unsigned I = 0, E = RetTys.size(); I != E; ++I) {
    EVT VT = RetTys[I];
    MVT RegisterVT = TLI.getRegisterType(CLI.RetTy->getContext(), VT);
    unsigned NumRegs = TLI.getNumRegisters(CLI.RetTy->getContext(), VT);
    for (unsigned i = 0; i != NumRegs; ++i) {
      ISD::InputArg MyFlags;
      MyFlags.VT = RegisterVT;
      MyFlags.ArgVT = VT;
      MyFlags.Used = CLI.IsReturnValueUsed;
      if (CLI.RetSExt)
        MyFlags.Flags.setSExt();
      if (CLI.RetZExt)
        MyFlags.Flags.setZExt();
      if (CLI.IsInReg)
        MyFlags.Flags.setInReg();
      CLI.Ins.push_back(MyFlags);
    }
  }

  // Handle all of the outgoing arguments.
  CLI.clearOuts();
  for (auto &Arg : CLI.getArgs()) {
    Type *FinalType = Arg.Ty;
    if (Arg.IsByVal)
      FinalType = cast<PointerType>(Arg.Ty)->getElementType();
    bool NeedsRegBlock = TLI.functionArgumentNeedsConsecutiveRegisters(
        FinalType, CLI.CallConv, CLI.IsVarArg);

    ISD::ArgFlagsTy Flags;
    if (Arg.IsZExt)
      Flags.setZExt();
    if (Arg.IsSExt)
      Flags.setSExt();
    if (Arg.IsInReg)
      Flags.setInReg();
    if (Arg.IsSRet)
      Flags.setSRet();
    if (Arg.IsByVal)
      Flags.setByVal();
    if (Arg.IsInAlloca) {
      Flags.setInAlloca();
      // Set the byval flag for CCAssignFn callbacks that don't know about
      // inalloca. This way we can know how many bytes we should've allocated
      // and how many bytes a callee cleanup function will pop. If we port
      // inalloca to more targets, we'll have to add custom inalloca handling
      // in the various CC lowering callbacks.
      Flags.setByVal();
    }
    if (Arg.IsByVal || Arg.IsInAlloca) {
      PointerType *Ty = cast<PointerType>(Arg.Ty);
      Type *ElementTy = Ty->getElementType();
      unsigned FrameSize = DL.getTypeAllocSize(ElementTy);
      // For ByVal, alignment should come from the frontend. The backend will
      // guess if this info is not there, but there are cases it cannot get
      // right.
      unsigned FrameAlign = Arg.Alignment;
      if (!FrameAlign)
        FrameAlign = TLI.getByValTypeAlignment(ElementTy);
      Flags.setByValSize(FrameSize);
      Flags.setByValAlign(FrameAlign);
    }
    if (Arg.IsNest)
      Flags.setNest();
    if (NeedsRegBlock)
      Flags.setInConsecutiveRegs();
    unsigned OriginalAlignment = DL.getABITypeAlignment(Arg.Ty);
    Flags.setOrigAlign(OriginalAlignment);

    CLI.OutVals.push_back(Arg.Val);
    CLI.OutFlags.push_back(Flags);
  }

  if (!fastLowerCall(CLI))
    return false;

  // Set all unused physreg defs as dead.
  assert(CLI.Call && "No call instruction specified.");
  CLI.Call->setPhysRegsDeadExcept(CLI.InRegs, TRI);

  if (CLI.NumResultRegs && CLI.CS)
    updateValueMap(CLI.CS->getInstruction(), CLI.ResultReg, CLI.NumResultRegs);

  return true;
}

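// For illustration: if CLI.RetTy were { i64, i64 } on a target where i64 is
// legal, ComputeValueVTs would yield two i64 EVTs and, at one register each,
// the loop above would push two ISD::InputArg entries into CLI.Ins, so the
// call's result ends up occupying two consecutive result registers.
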
bool FastISel::lowerCall(const CallInst *CI) {
  ImmutableCallSite CS(CI);

  PointerType *PT = cast<PointerType>(CS.getCalledValue()->getType());
  FunctionType *FuncTy = cast<FunctionType>(PT->getElementType());
  Type *RetTy = FuncTy->getReturnType();

  ArgListTy Args;
  ArgListEntry Entry;
  Args.reserve(CS.arg_size());

  for (ImmutableCallSite::arg_iterator i = CS.arg_begin(), e = CS.arg_end();
       i != e; ++i) {
    Value *V = *i;

    // Skip empty types.
    if (V->getType()->isEmptyTy())
      continue;

    Entry.Val = V;
    Entry.Ty = V->getType();

    // Skip the first return-type attribute to get to params.
    Entry.setAttributes(&CS, i - CS.arg_begin() + 1);
    Args.push_back(Entry);
  }

  // Check if target-independent constraints permit a tail call here.
  // Target-dependent constraints are checked within fastLowerCall.
  bool IsTailCall = CI->isTailCall();
  if (IsTailCall && !isInTailCallPosition(CS, TM))
    IsTailCall = false;

  CallLoweringInfo CLI;
  CLI.setCallee(RetTy, FuncTy, CI->getCalledValue(), std::move(Args), CS)
      .setTailCall(IsTailCall);

  return lowerCallTo(CLI);
}

bool FastISel::selectCall(const User *I) {
  const CallInst *Call = cast<CallInst>(I);

  // Handle simple inline asms.
  if (const InlineAsm *IA = dyn_cast<InlineAsm>(Call->getCalledValue())) {
    // If the inline asm has side effects, then make sure that no local value
    // lives across by flushing the local value map.
    if (IA->hasSideEffects())
      flushLocalValueMap();

    // Don't attempt to handle constraints.
    if (!IA->getConstraintString().empty())
      return false;

    unsigned ExtraInfo = 0;
    if (IA->hasSideEffects())
      ExtraInfo |= InlineAsm::Extra_HasSideEffects;
    if (IA->isAlignStack())
      ExtraInfo |= InlineAsm::Extra_IsAlignStack;

    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
            TII.get(TargetOpcode::INLINEASM))
        .addExternalSymbol(IA->getAsmString().c_str())
        .addImm(ExtraInfo);
    return true;
  }

  MachineModuleInfo &MMI = FuncInfo.MF->getMMI();
  ComputeUsesVAFloatArgument(*Call, &MMI);

  // Handle intrinsic function calls.
  if (const auto *II = dyn_cast<IntrinsicInst>(Call))
    return selectIntrinsicCall(II);

  // Usually it does not make sense to initialize a value, make an unrelated
  // function call, and only then use the value, because the value tends to
  // get spilled to the stack across the call. So we move the pointer to the
  // last local value to the beginning of the block, so that all values which
  // have already been materialized appear after the call. It also makes
  // sense to skip intrinsics, since they tend to be inlined.
  flushLocalValueMap();

  return lowerCall(Call);
}

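// For illustration (a hypothetical constraint-free inline asm):
//
//   call void asm sideeffect "nop", ""()
//
// is handled above by emitting a single INLINEASM machine instruction with
// the Extra_HasSideEffects bit set; anything with a non-empty constraint
// string is rejected here and left to SelectionDAG.
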
bool FastISel::selectIntrinsicCall(const IntrinsicInst *II) {
  switch (II->getIntrinsicID()) {
  default:
    break;
  // At -O0 we don't care about the lifetime intrinsics.
  case Intrinsic::lifetime_start:
  case Intrinsic::lifetime_end:
  // The donothing intrinsic does, well, nothing.
  case Intrinsic::donothing:
    return true;
  case Intrinsic::dbg_declare: {
    const DbgDeclareInst *DI = cast<DbgDeclareInst>(II);
    DIVariable DIVar(DI->getVariable());
    assert((!DIVar || DIVar.isVariable()) &&
           "Variable in DbgDeclareInst should be either null or a DIVariable.");
    if (!DIVar || !FuncInfo.MF->getMMI().hasDebugInfo()) {
      DEBUG(dbgs() << "Dropping debug info for " << *DI << "\n");
      return true;
    }

    const Value *Address = DI->getAddress();
    if (!Address || isa<UndefValue>(Address)) {
      DEBUG(dbgs() << "Dropping debug info for " << *DI << "\n");
      return true;
    }

    unsigned Offset = 0;
    Optional<MachineOperand> Op;
    if (const auto *Arg = dyn_cast<Argument>(Address))
      // Some arguments' frame index is recorded during argument lowering.
      Offset = FuncInfo.getArgumentFrameIndex(Arg);
    if (Offset)
      Op = MachineOperand::CreateFI(Offset);
    if (!Op)
      if (unsigned Reg = lookUpRegForValue(Address))
        Op = MachineOperand::CreateReg(Reg, false);

    // If we have a VLA that has a "use" in a metadata node that's then used
    // here but it has no other uses, then we have a problem. E.g.,
    //
    //   int foo (const int *x) {
    //     char a[*x];
    //     return 0;
    //   }
    //
    // If we assign 'a' a vreg and fast isel later on has to use the selection
    // DAG isel, it will want to copy the value to the vreg. However, there
    // are no uses, which goes counter to what selection DAG isel expects.
    if (!Op && !Address->use_empty() && isa<Instruction>(Address) &&
        (!isa<AllocaInst>(Address) ||
         !FuncInfo.StaticAllocaMap.count(cast<AllocaInst>(Address))))
      Op = MachineOperand::CreateReg(FuncInfo.InitializeRegForValue(Address),
                                     false);

    if (Op) {
      if (Op->isReg()) {
        Op->setIsDebug(true);
        BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                TII.get(TargetOpcode::DBG_VALUE), false, Op->getReg(), 0,
                DI->getVariable(), DI->getExpression());
      } else
        BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                TII.get(TargetOpcode::DBG_VALUE))
            .addOperand(*Op)
            .addImm(0)
            .addMetadata(DI->getVariable())
            .addMetadata(DI->getExpression());
    } else {
      // We can't yet handle anything else here because it would require
      // generating code, thus altering codegen because of debug info.
      DEBUG(dbgs() << "Dropping debug info for " << *DI << "\n");
    }
    return true;
  }
  case Intrinsic::dbg_value: {
    // This form of DBG_VALUE is target-independent.
    const DbgValueInst *DI = cast<DbgValueInst>(II);
    const MCInstrDesc &II = TII.get(TargetOpcode::DBG_VALUE);
    const Value *V = DI->getValue();
    if (!V) {
      // Currently the optimizer can produce this; insert an undef to help
      // debugging. Probably the optimizer should not do this.
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
          .addReg(0U)
          .addImm(DI->getOffset())
          .addMetadata(DI->getVariable())
          .addMetadata(DI->getExpression());
    } else if (const auto *CI = dyn_cast<ConstantInt>(V)) {
      if (CI->getBitWidth() > 64)
        BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
            .addCImm(CI)
            .addImm(DI->getOffset())
            .addMetadata(DI->getVariable())
            .addMetadata(DI->getExpression());
      else
        BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
            .addImm(CI->getZExtValue())
            .addImm(DI->getOffset())
            .addMetadata(DI->getVariable())
            .addMetadata(DI->getExpression());
    } else if (const auto *CF = dyn_cast<ConstantFP>(V)) {
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
          .addFPImm(CF)
          .addImm(DI->getOffset())
          .addMetadata(DI->getVariable())
          .addMetadata(DI->getExpression());
    } else if (unsigned Reg = lookUpRegForValue(V)) {
      // FIXME: This does not handle register-indirect values at offset 0.
      bool IsIndirect = DI->getOffset() != 0;
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, IsIndirect, Reg,
              DI->getOffset(), DI->getVariable(), DI->getExpression());
    } else {
      // We can't yet handle anything else here because it would require
      // generating code, thus altering codegen because of debug info.
      DEBUG(dbgs() << "Dropping debug info for " << *DI << "\n");
    }
    return true;
  }
  case Intrinsic::objectsize: {
    ConstantInt *CI = cast<ConstantInt>(II->getArgOperand(1));
    unsigned long long Res = CI->isZero() ? -1ULL : 0;
    Constant *ResCI = ConstantInt::get(II->getType(), Res);
    unsigned ResultReg = getRegForValue(ResCI);
    if (!ResultReg)
      return false;
    updateValueMap(II, ResultReg);
    return true;
  }
  case Intrinsic::expect: {
    unsigned ResultReg = getRegForValue(II->getArgOperand(0));
    if (!ResultReg)
      return false;
    updateValueMap(II, ResultReg);
    return true;
  }
  case Intrinsic::experimental_stackmap:
    return selectStackmap(II);
  case Intrinsic::experimental_patchpoint_void:
  case Intrinsic::experimental_patchpoint_i64:
    return selectPatchpoint(II);
  }

  return fastLowerIntrinsicCall(II);
}

bool FastISel::selectCast(const User *I, unsigned Opcode) {
  EVT SrcVT = TLI.getValueType(I->getOperand(0)->getType());
  EVT DstVT = TLI.getValueType(I->getType());

  if (SrcVT == MVT::Other || !SrcVT.isSimple() || DstVT == MVT::Other ||
      !DstVT.isSimple())
    // Unhandled type. Halt "fast" selection and bail.
    return false;

  // Check if the destination type is legal.
  if (!TLI.isTypeLegal(DstVT))
    return false;

  // Check if the source operand is legal.
  if (!TLI.isTypeLegal(SrcVT))
    return false;

  unsigned InputReg = getRegForValue(I->getOperand(0));
  if (!InputReg)
    // Unhandled operand. Halt "fast" selection and bail.
    return false;

  bool InputRegIsKill = hasTrivialKill(I->getOperand(0));

  unsigned ResultReg = fastEmit_r(SrcVT.getSimpleVT(), DstVT.getSimpleVT(),
                                  Opcode, InputReg, InputRegIsKill);
  if (!ResultReg)
    return false;

  updateValueMap(I, ResultReg);
  return true;
}

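// For illustration: selectCast handles a cast such as
//
//   %y = zext i8 %x to i32
//
// by calling fastEmit_r(MVT::i8, MVT::i32, ISD::ZERO_EXTEND, ...), provided
// both i8 and i32 are legal for the target; otherwise it bails and leaves
// the cast to SelectionDAG.
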
bool FastISel::selectBitCast(const User *I) {
  // If the bitcast doesn't change the type, just use the operand value.
  if (I->getType() == I->getOperand(0)->getType()) {
    unsigned Reg = getRegForValue(I->getOperand(0));
    if (!Reg)
      return false;
    updateValueMap(I, Reg);
    return true;
  }

  // Bitcasts of other values become reg-reg copies or BITCAST operators.
  EVT SrcEVT = TLI.getValueType(I->getOperand(0)->getType());
  EVT DstEVT = TLI.getValueType(I->getType());
  if (SrcEVT == MVT::Other || DstEVT == MVT::Other ||
      !TLI.isTypeLegal(SrcEVT) || !TLI.isTypeLegal(DstEVT))
    // Unhandled type. Halt "fast" selection and bail.
    return false;

  MVT SrcVT = SrcEVT.getSimpleVT();
  MVT DstVT = DstEVT.getSimpleVT();
  unsigned Op0 = getRegForValue(I->getOperand(0));
  if (!Op0) // Unhandled operand. Halt "fast" selection and bail.
    return false;
  bool Op0IsKill = hasTrivialKill(I->getOperand(0));

  // First, try to perform the bitcast by inserting a reg-reg copy.
  unsigned ResultReg = 0;
  if (SrcVT == DstVT) {
    const TargetRegisterClass *SrcClass = TLI.getRegClassFor(SrcVT);
    const TargetRegisterClass *DstClass = TLI.getRegClassFor(DstVT);
    // Don't attempt a cross-class copy. It will likely fail.
    if (SrcClass == DstClass) {
      ResultReg = createResultReg(DstClass);
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
              TII.get(TargetOpcode::COPY), ResultReg).addReg(Op0);
    }
  }

  // If the reg-reg copy failed, select a BITCAST opcode.
  if (!ResultReg)
    ResultReg = fastEmit_r(SrcVT, DstVT, ISD::BITCAST, Op0, Op0IsKill);

  if (!ResultReg)
    return false;

  updateValueMap(I, ResultReg);
  return true;
}

bool FastISel::selectInstruction(const Instruction *I) {
  // Just before the terminator instruction, insert instructions to
  // feed PHI nodes in successor blocks.
  if (isa<TerminatorInst>(I))
    if (!handlePHINodesInSuccessorBlocks(I->getParent()))
      return false;

  DbgLoc = I->getDebugLoc();

  SavedInsertPt = FuncInfo.InsertPt;

  if (const auto *Call = dyn_cast<CallInst>(I)) {
    const Function *F = Call->getCalledFunction();
    LibFunc::Func Func;

    // As a special case, don't handle calls to builtin library functions that
    // may be translated directly to target instructions.
    if (F && !F->hasLocalLinkage() && F->hasName() &&
        LibInfo->getLibFunc(F->getName(), Func) &&
        LibInfo->hasOptimizedCodeGen(Func))
      return false;

    // Don't handle Intrinsic::trap if a trap function is specified.
    if (F && F->getIntrinsicID() == Intrinsic::trap &&
        !TM.Options.getTrapFunctionName().empty())
      return false;
  }

  // First, try doing target-independent selection.
  if (!SkipTargetIndependentISel) {
    if (selectOperator(I, I->getOpcode())) {
      ++NumFastIselSuccessIndependent;
      DbgLoc = DebugLoc();
      return true;
    }
    // Remove dead code.
    recomputeInsertPt();
    if (SavedInsertPt != FuncInfo.InsertPt)
      removeDeadCode(FuncInfo.InsertPt, SavedInsertPt);
    SavedInsertPt = FuncInfo.InsertPt;
  }
  // Next, try calling the target to attempt to handle the instruction.
  if (fastSelectInstruction(I)) {
    ++NumFastIselSuccessTarget;
    DbgLoc = DebugLoc();
    return true;
  }
  // Remove dead code.
  recomputeInsertPt();
  if (SavedInsertPt != FuncInfo.InsertPt)
    removeDeadCode(FuncInfo.InsertPt, SavedInsertPt);

  DbgLoc = DebugLoc();
  // Undo phi node updates, because they will be added again by SelectionDAG.
  if (isa<TerminatorInst>(I))
    FuncInfo.PHINodesToUpdate.resize(FuncInfo.OrigNumPHINodesToUpdate);
  return false;
}

/// Emit an unconditional branch to the given block, unless it is the immediate
/// (fall-through) successor, and update the CFG.
void FastISel::fastEmitBranch(MachineBasicBlock *MSucc, DebugLoc DbgLoc) {
  if (FuncInfo.MBB->getBasicBlock()->size() > 1 &&
      FuncInfo.MBB->isLayoutSuccessor(MSucc)) {
    // The unconditional fall-through case, which needs no instructions. If
    // the branch is the only instruction in the block, we instead emit it
    // below, for more accurate line information.
  } else {
    // The unconditional branch case.
    TII.InsertBranch(*FuncInfo.MBB, MSucc, nullptr,
                     SmallVector<MachineOperand, 0>(), DbgLoc);
  }
  uint32_t BranchWeight = 0;
  if (FuncInfo.BPI)
    BranchWeight = FuncInfo.BPI->getEdgeWeight(FuncInfo.MBB->getBasicBlock(),
                                               MSucc->getBasicBlock());
  FuncInfo.MBB->addSuccessor(MSucc, BranchWeight);
}

/// Emit an FNeg operation.
bool FastISel::selectFNeg(const User *I) {
  unsigned OpReg = getRegForValue(BinaryOperator::getFNegArgument(I));
  if (!OpReg)
    return false;
  bool OpRegIsKill = hasTrivialKill(I);

  // If the target has ISD::FNEG, use it.
  EVT VT = TLI.getValueType(I->getType());
  unsigned ResultReg = fastEmit_r(VT.getSimpleVT(), VT.getSimpleVT(),
                                  ISD::FNEG, OpReg, OpRegIsKill);
  if (ResultReg) {
    updateValueMap(I, ResultReg);
    return true;
  }

  // Bitcast the value to integer, twiddle the sign bit with xor,
  // and then bitcast it back to floating-point.
  if (VT.getSizeInBits() > 64)
    return false;
  EVT IntVT = EVT::getIntegerVT(I->getContext(), VT.getSizeInBits());
  if (!TLI.isTypeLegal(IntVT))
    return false;

  unsigned IntReg = fastEmit_r(VT.getSimpleVT(), IntVT.getSimpleVT(),
                               ISD::BITCAST, OpReg, OpRegIsKill);
  if (!IntReg)
    return false;

  unsigned IntResultReg = fastEmit_ri_(
      IntVT.getSimpleVT(), ISD::XOR, IntReg, /*IsKill=*/true,
      UINT64_C(1) << (VT.getSizeInBits() - 1), IntVT.getSimpleVT());
  if (!IntResultReg)
    return false;

  ResultReg = fastEmit_r(IntVT.getSimpleVT(), VT.getSimpleVT(), ISD::BITCAST,
                         IntResultReg, /*IsKill=*/true);
  if (!ResultReg)
    return false;

  updateValueMap(I, ResultReg);
  return true;
}

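// For illustration: for an f32 FNeg on a target without ISD::FNEG, the
// fallback above emits roughly
//
//   %i   = bitcast f32 %x to i32
//   %neg = xor i32 %i, 0x80000000   ; flip the sign bit (1 << 31)
//   %r   = bitcast i32 %neg to f32
//
// negating the value without any floating-point arithmetic.
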
bool FastISel::selectExtractValue(const User *U) {
  const ExtractValueInst *EVI = dyn_cast<ExtractValueInst>(U);
  if (!EVI)
    return false;

  // Make sure we only try to handle extracts with a legal result. But also
  // allow i1 because it's easy.
  EVT RealVT = TLI.getValueType(EVI->getType(), /*AllowUnknown=*/true);
  if (!RealVT.isSimple())
    return false;
  MVT VT = RealVT.getSimpleVT();
  if (!TLI.isTypeLegal(VT) && VT != MVT::i1)
    return false;

  const Value *Op0 = EVI->getOperand(0);
  Type *AggTy = Op0->getType();

  // Get the base result register.
  unsigned ResultReg;
  DenseMap<const Value *, unsigned>::iterator I = FuncInfo.ValueMap.find(Op0);
  if (I != FuncInfo.ValueMap.end())
    ResultReg = I->second;
  else if (isa<Instruction>(Op0))
    ResultReg = FuncInfo.InitializeRegForValue(Op0);
  else
    return false; // fast-isel can't handle aggregate constants at the moment

  // Get the actual result register, which is an offset from the base register.
  unsigned VTIndex = ComputeLinearIndex(AggTy, EVI->getIndices());

  SmallVector<EVT, 4> AggValueVTs;
  ComputeValueVTs(TLI, AggTy, AggValueVTs);

  for (unsigned i = 0; i < VTIndex; i++)
    ResultReg += TLI.getNumRegisters(FuncInfo.Fn->getContext(), AggValueVTs[i]);

  updateValueMap(EVI, ResultReg);
  return true;
}

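// For illustration: relying on the aggregate's registers being allocated
// consecutively, an extract such as
//
//   %v = extractvalue { i32, i64 } %agg, 1
//
// resolves to "base register of %agg" plus getNumRegisters(i32), i.e. the
// vreg(s) already holding the second member; no new instructions are
// emitted.
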
bool FastISel::selectOperator(const User *I, unsigned Opcode) {
  switch (Opcode) {
  case Instruction::Add:
    return selectBinaryOp(I, ISD::ADD);
  case Instruction::FAdd:
    return selectBinaryOp(I, ISD::FADD);
  case Instruction::Sub:
    return selectBinaryOp(I, ISD::SUB);
  case Instruction::FSub:
    // FNeg is currently represented in LLVM IR as a special case of FSub.
    if (BinaryOperator::isFNeg(I))
      return selectFNeg(I);
    return selectBinaryOp(I, ISD::FSUB);
  case Instruction::Mul:
    return selectBinaryOp(I, ISD::MUL);
  case Instruction::FMul:
    return selectBinaryOp(I, ISD::FMUL);
  case Instruction::SDiv:
    return selectBinaryOp(I, ISD::SDIV);
  case Instruction::UDiv:
    return selectBinaryOp(I, ISD::UDIV);
  case Instruction::FDiv:
    return selectBinaryOp(I, ISD::FDIV);
  case Instruction::SRem:
    return selectBinaryOp(I, ISD::SREM);
  case Instruction::URem:
    return selectBinaryOp(I, ISD::UREM);
  case Instruction::FRem:
    return selectBinaryOp(I, ISD::FREM);
  case Instruction::Shl:
    return selectBinaryOp(I, ISD::SHL);
  case Instruction::LShr:
    return selectBinaryOp(I, ISD::SRL);
  case Instruction::AShr:
    return selectBinaryOp(I, ISD::SRA);
  case Instruction::And:
    return selectBinaryOp(I, ISD::AND);
  case Instruction::Or:
    return selectBinaryOp(I, ISD::OR);
  case Instruction::Xor:
    return selectBinaryOp(I, ISD::XOR);

  case Instruction::GetElementPtr:
    return selectGetElementPtr(I);

  case Instruction::Br: {
    const BranchInst *BI = cast<BranchInst>(I);

    if (BI->isUnconditional()) {
      const BasicBlock *LLVMSucc = BI->getSuccessor(0);
      MachineBasicBlock *MSucc = FuncInfo.MBBMap[LLVMSucc];
      fastEmitBranch(MSucc, BI->getDebugLoc());
      return true;
    }

    // Conditional branches are not handled yet.
    // Halt "fast" selection and bail.
    return false;
  }

  case Instruction::Unreachable:
    if (TM.Options.TrapUnreachable)
      return fastEmit_(MVT::Other, MVT::Other, ISD::TRAP) != 0;
    else
      return true;

  case Instruction::Alloca:
    // FunctionLowering has the static-sized case covered.
    if (FuncInfo.StaticAllocaMap.count(cast<AllocaInst>(I)))
      return true;

    // Dynamic-sized alloca is not handled yet.
    return false;

  case Instruction::Call:
    return selectCall(I);

  case Instruction::BitCast:
    return selectBitCast(I);

  case Instruction::FPToSI:
    return selectCast(I, ISD::FP_TO_SINT);
  case Instruction::ZExt:
    return selectCast(I, ISD::ZERO_EXTEND);
  case Instruction::SExt:
    return selectCast(I, ISD::SIGN_EXTEND);
  case Instruction::Trunc:
    return selectCast(I, ISD::TRUNCATE);
  case Instruction::SIToFP:
    return selectCast(I, ISD::SINT_TO_FP);

  case Instruction::IntToPtr: // Deliberate fall-through.
  case Instruction::PtrToInt: {
    EVT SrcVT = TLI.getValueType(I->getOperand(0)->getType());
    EVT DstVT = TLI.getValueType(I->getType());
    if (DstVT.bitsGT(SrcVT))
      return selectCast(I, ISD::ZERO_EXTEND);
    if (DstVT.bitsLT(SrcVT))
      return selectCast(I, ISD::TRUNCATE);
    unsigned Reg = getRegForValue(I->getOperand(0));
    if (!Reg)
      return false;
    updateValueMap(I, Reg);
    return true;
  }

  case Instruction::ExtractValue:
    return selectExtractValue(I);

  case Instruction::PHI:
    llvm_unreachable("FastISel shouldn't visit PHI nodes!");

  default:
    // Unhandled instruction. Halt "fast" selection and bail.
    return false;
  }
}

FastISel::FastISel(FunctionLoweringInfo &FuncInfo,
                   const TargetLibraryInfo *LibInfo,
                   bool SkipTargetIndependentISel)
    : FuncInfo(FuncInfo), MF(FuncInfo.MF), MRI(FuncInfo.MF->getRegInfo()),
      MFI(*FuncInfo.MF->getFrameInfo()), MCP(*FuncInfo.MF->getConstantPool()),
      TM(FuncInfo.MF->getTarget()), DL(*MF->getSubtarget().getDataLayout()),
      TII(*MF->getSubtarget().getInstrInfo()),
      TLI(*MF->getSubtarget().getTargetLowering()),
      TRI(*MF->getSubtarget().getRegisterInfo()), LibInfo(LibInfo),
      SkipTargetIndependentISel(SkipTargetIndependentISel) {}

FastISel::~FastISel() {}

bool FastISel::fastLowerArguments() { return false; }

bool FastISel::fastLowerCall(CallLoweringInfo & /*CLI*/) { return false; }

bool FastISel::fastLowerIntrinsicCall(const IntrinsicInst * /*II*/) {
  return false;
}

unsigned FastISel::fastEmit_(MVT, MVT, unsigned) { return 0; }

unsigned FastISel::fastEmit_r(MVT, MVT, unsigned, unsigned /*Op0*/,
                              bool /*Op0IsKill*/) {
  return 0;
}

unsigned FastISel::fastEmit_rr(MVT, MVT, unsigned, unsigned /*Op0*/,
                               bool /*Op0IsKill*/, unsigned /*Op1*/,
                               bool /*Op1IsKill*/) {
  return 0;
}

unsigned FastISel::fastEmit_i(MVT, MVT, unsigned, uint64_t /*Imm*/) {
  return 0;
}

unsigned FastISel::fastEmit_f(MVT, MVT, unsigned,
                              const ConstantFP * /*FPImm*/) {
  return 0;
}

unsigned FastISel::fastEmit_ri(MVT, MVT, unsigned, unsigned /*Op0*/,
                               bool /*Op0IsKill*/, uint64_t /*Imm*/) {
  return 0;
}

unsigned FastISel::fastEmit_rf(MVT, MVT, unsigned, unsigned /*Op0*/,
                               bool /*Op0IsKill*/,
                               const ConstantFP * /*FPImm*/) {
  return 0;
}

unsigned FastISel::fastEmit_rri(MVT, MVT, unsigned, unsigned /*Op0*/,
                                bool /*Op0IsKill*/, unsigned /*Op1*/,
                                bool /*Op1IsKill*/, uint64_t /*Imm*/) {
  return 0;
}

/// This method is a wrapper of fastEmit_ri. It first tries to emit an
/// instruction with an immediate operand using fastEmit_ri.
/// If that fails, it materializes the immediate into a register and tries
/// fastEmit_rr instead.
unsigned FastISel::fastEmit_ri_(MVT VT, unsigned Opcode, unsigned Op0,
                                bool Op0IsKill, uint64_t Imm, MVT ImmType) {
  // If this is a multiply by a power of two, emit this as a shift left.
  if (Opcode == ISD::MUL && isPowerOf2_64(Imm)) {
    Opcode = ISD::SHL;
    Imm = Log2_64(Imm);
  } else if (Opcode == ISD::UDIV && isPowerOf2_64(Imm)) {
    // div x, 8 -> srl x, 3
    Opcode = ISD::SRL;
    Imm = Log2_64(Imm);
  }

  // Horrible hack (to be removed), check to make sure shift amounts are
  // in-range.
  if ((Opcode == ISD::SHL || Opcode == ISD::SRA || Opcode == ISD::SRL) &&
      Imm >= VT.getSizeInBits())
    return 0;

  // First check if immediate type is legal. If not, we can't use the ri form.
  unsigned ResultReg = fastEmit_ri(VT, VT, Opcode, Op0, Op0IsKill, Imm);
  if (ResultReg)
    return ResultReg;
  unsigned MaterialReg = fastEmit_i(ImmType, ImmType, ISD::Constant, Imm);
  if (!MaterialReg) {
    // This is a bit ugly/slow, but failing here means falling out of
    // fast-isel, which would be very slow.
    IntegerType *ITy =
        IntegerType::get(FuncInfo.Fn->getContext(), VT.getSizeInBits());
    MaterialReg = getRegForValue(ConstantInt::get(ITy, Imm));
    if (!MaterialReg)
      return 0;
  }
  return fastEmit_rr(VT, VT, Opcode, Op0, Op0IsKill, MaterialReg,
                     /*IsKill=*/true);
}

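// For illustration: fastEmit_ri_(MVT::i32, ISD::MUL, x, true, 8, MVT::i32)
// first strength-reduces the multiply to "shl x, 3". If no target pattern
// accepts the immediate form, the constant is materialized into a register
// via fastEmit_i (or, failing that, through getRegForValue) and the
// operation is retried in register-register form with fastEmit_rr.
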
unsigned FastISel::createResultReg(const TargetRegisterClass *RC) {
  return MRI.createVirtualRegister(RC);
}

unsigned FastISel::constrainOperandRegClass(const MCInstrDesc &II, unsigned Op,
                                            unsigned OpNum) {
  if (TargetRegisterInfo::isVirtualRegister(Op)) {
    const TargetRegisterClass *RegClass =
        TII.getRegClass(II, OpNum, &TRI, *FuncInfo.MF);
    if (!MRI.constrainRegClass(Op, RegClass)) {
      // If it's not legal to COPY between the register classes, something
      // has gone very wrong before we got here.
      unsigned NewOp = createResultReg(RegClass);
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
              TII.get(TargetOpcode::COPY), NewOp).addReg(Op);
      return NewOp;
    }
  }
  return Op;
}

unsigned FastISel::fastEmitInst_(unsigned MachineInstOpcode,
                                 const TargetRegisterClass *RC) {
  unsigned ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg);
  return ResultReg;
}

unsigned FastISel::fastEmitInst_r(unsigned MachineInstOpcode,
                                  const TargetRegisterClass *RC, unsigned Op0,
                                  bool Op0IsKill) {
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  unsigned ResultReg = createResultReg(RC);
  Op0 = constrainOperandRegClass(II, Op0, II.getNumDefs());

  if (II.getNumDefs() >= 1)
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
        .addReg(Op0, getKillRegState(Op0IsKill));
  else {
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
        .addReg(Op0, getKillRegState(Op0IsKill));
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
            TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]);
  }

  return ResultReg;
}

unsigned FastISel::fastEmitInst_rr(unsigned MachineInstOpcode,
                                   const TargetRegisterClass *RC, unsigned Op0,
                                   bool Op0IsKill, unsigned Op1,
                                   bool Op1IsKill) {
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  unsigned ResultReg = createResultReg(RC);
  Op0 = constrainOperandRegClass(II, Op0, II.getNumDefs());
  Op1 = constrainOperandRegClass(II, Op1, II.getNumDefs() + 1);

  if (II.getNumDefs() >= 1)
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
        .addReg(Op0, getKillRegState(Op0IsKill))
        .addReg(Op1, getKillRegState(Op1IsKill));
  else {
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
        .addReg(Op0, getKillRegState(Op0IsKill))
        .addReg(Op1, getKillRegState(Op1IsKill));
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
            TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]);
  }
  return ResultReg;
}

unsigned FastISel::fastEmitInst_rrr(unsigned MachineInstOpcode,
                                    const TargetRegisterClass *RC, unsigned Op0,
                                    bool Op0IsKill, unsigned Op1,
                                    bool Op1IsKill, unsigned Op2,
                                    bool Op2IsKill) {
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  unsigned ResultReg = createResultReg(RC);
  Op0 = constrainOperandRegClass(II, Op0, II.getNumDefs());
  Op1 = constrainOperandRegClass(II, Op1, II.getNumDefs() + 1);
  Op2 = constrainOperandRegClass(II, Op2, II.getNumDefs() + 2);

  if (II.getNumDefs() >= 1)
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
        .addReg(Op0, getKillRegState(Op0IsKill))
        .addReg(Op1, getKillRegState(Op1IsKill))
        .addReg(Op2, getKillRegState(Op2IsKill));
  else {
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
        .addReg(Op0, getKillRegState(Op0IsKill))
        .addReg(Op1, getKillRegState(Op1IsKill))
        .addReg(Op2, getKillRegState(Op2IsKill));
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
            TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]);
  }
  return ResultReg;
}

unsigned FastISel::fastEmitInst_ri(unsigned MachineInstOpcode,
                                   const TargetRegisterClass *RC, unsigned Op0,
                                   bool Op0IsKill, uint64_t Imm) {
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  unsigned ResultReg = createResultReg(RC);
  Op0 = constrainOperandRegClass(II, Op0, II.getNumDefs());

  if (II.getNumDefs() >= 1)
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
        .addReg(Op0, getKillRegState(Op0IsKill))
        .addImm(Imm);
  else {
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
        .addReg(Op0, getKillRegState(Op0IsKill))
        .addImm(Imm);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
            TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]);
  }
  return ResultReg;
}

unsigned FastISel::fastEmitInst_rii(unsigned MachineInstOpcode,
                                    const TargetRegisterClass *RC, unsigned Op0,
                                    bool Op0IsKill, uint64_t Imm1,
                                    uint64_t Imm2) {
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  unsigned ResultReg = createResultReg(RC);
  Op0 = constrainOperandRegClass(II, Op0, II.getNumDefs());

  if (II.getNumDefs() >= 1)
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
        .addReg(Op0, getKillRegState(Op0IsKill))
        .addImm(Imm1)
        .addImm(Imm2);
  else {
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
        .addReg(Op0, getKillRegState(Op0IsKill))
        .addImm(Imm1)
        .addImm(Imm2);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
            TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]);
  }
  return ResultReg;
}

unsigned FastISel::fastEmitInst_rf(unsigned MachineInstOpcode,
                                   const TargetRegisterClass *RC, unsigned Op0,
                                   bool Op0IsKill, const ConstantFP *FPImm) {
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  unsigned ResultReg = createResultReg(RC);
  Op0 = constrainOperandRegClass(II, Op0, II.getNumDefs());

  if (II.getNumDefs() >= 1)
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
        .addReg(Op0, getKillRegState(Op0IsKill))
        .addFPImm(FPImm);
  else {
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
        .addReg(Op0, getKillRegState(Op0IsKill))
        .addFPImm(FPImm);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
            TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]);
  }
  return ResultReg;
}

unsigned FastISel::fastEmitInst_rri(unsigned MachineInstOpcode,
                                    const TargetRegisterClass *RC, unsigned Op0,
                                    bool Op0IsKill, unsigned Op1,
                                    bool Op1IsKill, uint64_t Imm) {
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  unsigned ResultReg = createResultReg(RC);
  Op0 = constrainOperandRegClass(II, Op0, II.getNumDefs());
  Op1 = constrainOperandRegClass(II, Op1, II.getNumDefs() + 1);

  if (II.getNumDefs() >= 1)
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
        .addReg(Op0, getKillRegState(Op0IsKill))
        .addReg(Op1, getKillRegState(Op1IsKill))
        .addImm(Imm);
  else {
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
        .addReg(Op0, getKillRegState(Op0IsKill))
        .addReg(Op1, getKillRegState(Op1IsKill))
        .addImm(Imm);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
            TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]);
  }
  return ResultReg;
}

unsigned FastISel::fastEmitInst_rrii(unsigned MachineInstOpcode,
                                     const TargetRegisterClass *RC,
                                     unsigned Op0, bool Op0IsKill, unsigned Op1,
                                     bool Op1IsKill, uint64_t Imm1,
                                     uint64_t Imm2) {
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  unsigned ResultReg = createResultReg(RC);
  Op0 = constrainOperandRegClass(II, Op0, II.getNumDefs());
  Op1 = constrainOperandRegClass(II, Op1, II.getNumDefs() + 1);

  if (II.getNumDefs() >= 1)
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
        .addReg(Op0, getKillRegState(Op0IsKill))
        .addReg(Op1, getKillRegState(Op1IsKill))
        .addImm(Imm1)
        .addImm(Imm2);
  else {
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
        .addReg(Op0, getKillRegState(Op0IsKill))
        .addReg(Op1, getKillRegState(Op1IsKill))
        .addImm(Imm1)
        .addImm(Imm2);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
            TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]);
  }
  return ResultReg;
}

unsigned FastISel::fastEmitInst_i(unsigned MachineInstOpcode,
                                  const TargetRegisterClass *RC, uint64_t Imm) {
  unsigned ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
        .addImm(Imm);
  else {
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II).addImm(Imm);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
            TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]);
  }
  return ResultReg;
}

unsigned FastISel::fastEmitInst_ii(unsigned MachineInstOpcode,
                                   const TargetRegisterClass *RC, uint64_t Imm1,
                                   uint64_t Imm2) {
  unsigned ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
        .addImm(Imm1)
        .addImm(Imm2);
  else {
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II).addImm(Imm1)
        .addImm(Imm2);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
            TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]);
  }
  return ResultReg;
}

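// For illustration: every fastEmitInst_* helper above follows the same
// pattern. If the MCInstrDesc declares an explicit def, the fresh result
// register is bound to it directly; otherwise the instruction is emitted
// as-is and its first implicit def (II.ImplicitDefs[0]) is copied into the
// result register, so callers always receive a vreg holding the result.
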
unsigned FastISel::fastEmitInst_extractsubreg(MVT RetVT, unsigned Op0,
                                              bool Op0IsKill, uint32_t Idx) {
  unsigned ResultReg = createResultReg(TLI.getRegClassFor(RetVT));
  assert(TargetRegisterInfo::isVirtualRegister(Op0) &&
         "Cannot yet extract from physregs");
  const TargetRegisterClass *RC = MRI.getRegClass(Op0);
  MRI.constrainRegClass(Op0, TRI.getSubClassWithSubReg(RC, Idx));
  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(TargetOpcode::COPY),
          ResultReg).addReg(Op0, getKillRegState(Op0IsKill), Idx);
  return ResultReg;
}

/// Emit MachineInstrs to compute the value of Op with all but the least
/// significant bit set to zero.
unsigned FastISel::fastEmitZExtFromI1(MVT VT, unsigned Op0, bool Op0IsKill) {
  return fastEmit_ri(VT, VT, ISD::AND, Op0, Op0IsKill, 1);
}

/// HandlePHINodesInSuccessorBlocks - Handle PHI nodes in successor blocks.
/// Emit code to ensure constants are copied into registers when needed.
/// Remember the virtual registers that need to be added to the Machine PHI
/// nodes as input. We cannot just directly add them, because expansion
/// might result in multiple MBB's for one BB. As such, the start of the
/// BB might correspond to a different MBB than the end.
bool FastISel::handlePHINodesInSuccessorBlocks(const BasicBlock *LLVMBB) {
  const TerminatorInst *TI = LLVMBB->getTerminator();

  SmallPtrSet<MachineBasicBlock *, 4> SuccsHandled;
  FuncInfo.OrigNumPHINodesToUpdate = FuncInfo.PHINodesToUpdate.size();

  // Check successor nodes' PHI nodes that expect a constant to be available
  // from this block.
  for (unsigned succ = 0, e = TI->getNumSuccessors(); succ != e; ++succ) {
    const BasicBlock *SuccBB = TI->getSuccessor(succ);
    if (!isa<PHINode>(SuccBB->begin()))
      continue;
    MachineBasicBlock *SuccMBB = FuncInfo.MBBMap[SuccBB];

    // If this terminator has multiple identical successors (common for
    // switches), only handle each succ once.
    if (!SuccsHandled.insert(SuccMBB).second)
      continue;

    MachineBasicBlock::iterator MBBI = SuccMBB->begin();

    // At this point we know that there is a 1-1 correspondence between LLVM
    // PHI nodes and Machine PHI nodes, but the incoming operands have not been
    // emitted yet.
    for (BasicBlock::const_iterator I = SuccBB->begin();
         const auto *PN = dyn_cast<PHINode>(I); ++I) {

      // Ignore dead PHI nodes.
      if (PN->use_empty())
        continue;

      // Only handle legal types. Two interesting things to note here. First,
      // by bailing out early, we may leave behind some dead instructions,
      // since SelectionDAG's HandlePHINodesInSuccessorBlocks will insert its
      // own moves. Second, this check is necessary because FastISel doesn't
      // use CreateRegs to create registers, so it always creates
      // exactly one register for each non-void instruction.
      EVT VT = TLI.getValueType(PN->getType(), /*AllowUnknown=*/true);
      if (VT == MVT::Other || !TLI.isTypeLegal(VT)) {
        // Handle integer promotions, though, because they're common and easy.
        if (!(VT == MVT::i1 || VT == MVT::i8 || VT == MVT::i16)) {
          FuncInfo.PHINodesToUpdate.resize(FuncInfo.OrigNumPHINodesToUpdate);
          return false;
        }
      }

      const Value *PHIOp = PN->getIncomingValueForBlock(LLVMBB);

      // Set the DebugLoc for the copy. Prefer the location of the operand
      // if there is one; use the location of the PHI otherwise.
      DbgLoc = PN->getDebugLoc();
      if (const auto *Inst = dyn_cast<Instruction>(PHIOp))
        DbgLoc = Inst->getDebugLoc();

      unsigned Reg = getRegForValue(PHIOp);
      if (!Reg) {
        FuncInfo.PHINodesToUpdate.resize(FuncInfo.OrigNumPHINodesToUpdate);
        return false;
      }
      FuncInfo.PHINodesToUpdate.push_back(std::make_pair(MBBI++, Reg));
      DbgLoc = DebugLoc();
    }
  }

  return true;
}

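// For illustration (a hypothetical successor PHI): given
//
//   %p = phi i32 [ 7, %this_block ], [ %v, %other ]
//
// the loop above materializes the constant 7 into a vreg via getRegForValue
// and records the machine PHI together with that vreg in PHINodesToUpdate;
// the PHI's operands are filled in later, once all blocks have been
// selected.
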
bool FastISel::tryToFoldLoad(const LoadInst *LI, const Instruction *FoldInst) {
  assert(LI->hasOneUse() &&
         "tryToFoldLoad expected a LoadInst with a single use");
  // We know that the load has a single use, but don't know what it is. If it
  // isn't one of the folded instructions, then we can't succeed here. Handle
  // this by scanning the single-use users of the load until we get to
  // FoldInst.
  unsigned MaxUsers = 6; // Don't scan down huge single-use chains of instrs.

  const Instruction *TheUser = LI->user_back();
  while (TheUser != FoldInst && // Scan up until we find FoldInst.
         // Stay in the right block.
         TheUser->getParent() == FoldInst->getParent() &&
         --MaxUsers) { // Don't scan too far.
    // If there are multiple or no uses of this instruction, then bail out.
    if (!TheUser->hasOneUse())
      return false;

    TheUser = TheUser->user_back();
  }

  // If we didn't find the fold instruction, then we failed to collapse the
  // sequence.
  if (TheUser != FoldInst)
    return false;

  // Don't try to fold volatile loads. Target has to deal with alignment
  // constraints.
  if (LI->isVolatile())
    return false;

  // Figure out which vreg this is going into. If there is no assigned vreg yet
  // then there actually was no reference to it. Perhaps the load is referenced
  // by a dead instruction.
  unsigned LoadReg = getRegForValue(LI);
  if (!LoadReg)
    return false;

  // We can't fold if this vreg has no uses or more than one use. Multiple uses
  // may mean that the instruction got lowered to multiple MIs, or the use of
  // the loaded value ended up being multiple operands of the result.
  if (!MRI.hasOneUse(LoadReg))
    return false;

  MachineRegisterInfo::reg_iterator RI = MRI.reg_begin(LoadReg);
  MachineInstr *User = RI->getParent();

  // Set the insertion point properly. Folding the load can cause generation of
  // other random instructions (like sign extends) for addressing modes; make
  // sure they get inserted in a logical place before the new instruction.
  FuncInfo.InsertPt = User;
  FuncInfo.MBB = User->getParent();

  // Ask the target to try folding the load.
  return tryToFoldLoadIntoMI(User, RI.getOperandNo(), LI);
}

bool FastISel::canFoldAddIntoGEP(const User *GEP, const Value *Add) {
  // Must be an add.
  if (!isa<AddOperator>(Add))
    return false;
  // Type size needs to match.
  if (DL.getTypeSizeInBits(GEP->getType()) !=
      DL.getTypeSizeInBits(Add->getType()))
    return false;
  // Must be in the same basic block.
  if (isa<Instruction>(Add) &&
      FuncInfo.MBBMap[cast<Instruction>(Add)->getParent()] != FuncInfo.MBB)
    return false;
  // Must have a constant operand.
  return isa<ConstantInt>(cast<AddOperator>(Add)->getOperand(1));
}

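// For illustration (a hypothetical single-use chain): in
//
//   %v = load i32* %p
//   %s = sext i32 %v to i64
//   %r = add i64 %s, %x     ; FoldInst
//
// tryToFoldLoad walks the single-use chain load -> sext -> add until it
// reaches FoldInst and, if the load's vreg has exactly one machine use,
// asks the target (tryToFoldLoadIntoMI) to turn that use into a memory
// operand.
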
2134 Alignment = DL.getABITypeAlignment(ValTy); 2135 2136 unsigned Size = DL.getTypeStoreSize(ValTy); 2137 2138 if (IsVolatile) 2139 Flags |= MachineMemOperand::MOVolatile; 2140 if (IsNonTemporal) 2141 Flags |= MachineMemOperand::MONonTemporal; 2142 if (IsInvariant) 2143 Flags |= MachineMemOperand::MOInvariant; 2144 2145 return FuncInfo.MF->getMachineMemOperand(MachinePointerInfo(Ptr), Flags, Size, 2146 Alignment, AAInfo, Ranges); 2147} 2148 2149CmpInst::Predicate FastISel::optimizeCmpPredicate(const CmpInst *CI) const { 2150 // If both operands are the same, then try to optimize or fold the cmp. 2151 CmpInst::Predicate Predicate = CI->getPredicate(); 2152 if (CI->getOperand(0) != CI->getOperand(1)) 2153 return Predicate; 2154 2155 switch (Predicate) { 2156 default: llvm_unreachable("Invalid predicate!"); 2157 case CmpInst::FCMP_FALSE: Predicate = CmpInst::FCMP_FALSE; break; 2158 case CmpInst::FCMP_OEQ: Predicate = CmpInst::FCMP_ORD; break; 2159 case CmpInst::FCMP_OGT: Predicate = CmpInst::FCMP_FALSE; break; 2160 case CmpInst::FCMP_OGE: Predicate = CmpInst::FCMP_ORD; break; 2161 case CmpInst::FCMP_OLT: Predicate = CmpInst::FCMP_FALSE; break; 2162 case CmpInst::FCMP_OLE: Predicate = CmpInst::FCMP_ORD; break; 2163 case CmpInst::FCMP_ONE: Predicate = CmpInst::FCMP_FALSE; break; 2164 case CmpInst::FCMP_ORD: Predicate = CmpInst::FCMP_ORD; break; 2165 case CmpInst::FCMP_UNO: Predicate = CmpInst::FCMP_UNO; break; 2166 case CmpInst::FCMP_UEQ: Predicate = CmpInst::FCMP_TRUE; break; 2167 case CmpInst::FCMP_UGT: Predicate = CmpInst::FCMP_UNO; break; 2168 case CmpInst::FCMP_UGE: Predicate = CmpInst::FCMP_TRUE; break; 2169 case CmpInst::FCMP_ULT: Predicate = CmpInst::FCMP_UNO; break; 2170 case CmpInst::FCMP_ULE: Predicate = CmpInst::FCMP_TRUE; break; 2171 case CmpInst::FCMP_UNE: Predicate = CmpInst::FCMP_UNO; break; 2172 case CmpInst::FCMP_TRUE: Predicate = CmpInst::FCMP_TRUE; break; 2173 2174 case CmpInst::ICMP_EQ: Predicate = CmpInst::FCMP_TRUE; break; 2175 case CmpInst::ICMP_NE: Predicate = CmpInst::FCMP_FALSE; break; 2176 case CmpInst::ICMP_UGT: Predicate = CmpInst::FCMP_FALSE; break; 2177 case CmpInst::ICMP_UGE: Predicate = CmpInst::FCMP_TRUE; break; 2178 case CmpInst::ICMP_ULT: Predicate = CmpInst::FCMP_FALSE; break; 2179 case CmpInst::ICMP_ULE: Predicate = CmpInst::FCMP_TRUE; break; 2180 case CmpInst::ICMP_SGT: Predicate = CmpInst::FCMP_FALSE; break; 2181 case CmpInst::ICMP_SGE: Predicate = CmpInst::FCMP_TRUE; break; 2182 case CmpInst::ICMP_SLT: Predicate = CmpInst::FCMP_FALSE; break; 2183 case CmpInst::ICMP_SLE: Predicate = CmpInst::FCMP_TRUE; break; 2184 } 2185 2186 return Predicate; 2187}
| 523 if (TotalOffs >= MaxOffs) { 524 N = fastEmit_ri_(VT, ISD::ADD, N, NIsKill, TotalOffs, VT); 525 if (!N) // Unhandled operand. Halt "fast" selection and bail. 526 return false; 527 NIsKill = true; 528 TotalOffs = 0; 529 } 530 continue; 531 } 532 if (TotalOffs) { 533 N = fastEmit_ri_(VT, ISD::ADD, N, NIsKill, TotalOffs, VT); 534 if (!N) // Unhandled operand. Halt "fast" selection and bail. 535 return false; 536 NIsKill = true; 537 TotalOffs = 0; 538 } 539 540 // N = N + Idx * ElementSize; 541 uint64_t ElementSize = DL.getTypeAllocSize(Ty); 542 std::pair<unsigned, bool> Pair = getRegForGEPIndex(Idx); 543 unsigned IdxN = Pair.first; 544 bool IdxNIsKill = Pair.second; 545 if (!IdxN) // Unhandled operand. Halt "fast" selection and bail. 546 return false; 547 548 if (ElementSize != 1) { 549 IdxN = fastEmit_ri_(VT, ISD::MUL, IdxN, IdxNIsKill, ElementSize, VT); 550 if (!IdxN) // Unhandled operand. Halt "fast" selection and bail. 551 return false; 552 IdxNIsKill = true; 553 } 554 N = fastEmit_rr(VT, VT, ISD::ADD, N, NIsKill, IdxN, IdxNIsKill); 555 if (!N) // Unhandled operand. Halt "fast" selection and bail. 556 return false; 557 } 558 } 559 if (TotalOffs) { 560 N = fastEmit_ri_(VT, ISD::ADD, N, NIsKill, TotalOffs, VT); 561 if (!N) // Unhandled operand. Halt "fast" selection and bail. 562 return false; 563 } 564 565 // We successfully emitted code for the given LLVM Instruction. 566 updateValueMap(I, N); 567 return true; 568} 569 570bool FastISel::addStackMapLiveVars(SmallVectorImpl<MachineOperand> &Ops, 571 const CallInst *CI, unsigned StartIdx) { 572 for (unsigned i = StartIdx, e = CI->getNumArgOperands(); i != e; ++i) { 573 Value *Val = CI->getArgOperand(i); 574 // Check for constants and encode them with a StackMaps::ConstantOp prefix. 575 if (const auto *C = dyn_cast<ConstantInt>(Val)) { 576 Ops.push_back(MachineOperand::CreateImm(StackMaps::ConstantOp)); 577 Ops.push_back(MachineOperand::CreateImm(C->getSExtValue())); 578 } else if (isa<ConstantPointerNull>(Val)) { 579 Ops.push_back(MachineOperand::CreateImm(StackMaps::ConstantOp)); 580 Ops.push_back(MachineOperand::CreateImm(0)); 581 } else if (auto *AI = dyn_cast<AllocaInst>(Val)) { 582 // Values coming from a stack location also require a sepcial encoding, 583 // but that is added later on by the target specific frame index 584 // elimination implementation. 585 auto SI = FuncInfo.StaticAllocaMap.find(AI); 586 if (SI != FuncInfo.StaticAllocaMap.end()) 587 Ops.push_back(MachineOperand::CreateFI(SI->second)); 588 else 589 return false; 590 } else { 591 unsigned Reg = getRegForValue(Val); 592 if (!Reg) 593 return false; 594 Ops.push_back(MachineOperand::CreateReg(Reg, /*IsDef=*/false)); 595 } 596 } 597 return true; 598} 599 600bool FastISel::selectStackmap(const CallInst *I) { 601 // void @llvm.experimental.stackmap(i64 <id>, i32 <numShadowBytes>, 602 // [live variables...]) 603 assert(I->getCalledFunction()->getReturnType()->isVoidTy() && 604 "Stackmap cannot return a value."); 605 606 // The stackmap intrinsic only records the live variables (the arguments 607 // passed to it) and emits NOPS (if requested). Unlike the patchpoint 608 // intrinsic, this won't be lowered to a function call. This means we don't 609 // have to worry about calling conventions and target-specific lowering code. 610 // Instead we perform the call lowering right here. 611 // 612 // CALLSEQ_START(0) 613 // STACKMAP(id, nbytes, ...) 614 // CALLSEQ_END(0, 0) 615 // 616 SmallVector<MachineOperand, 32> Ops; 617 618 // Add the <id> and <numBytes> constants. 
619 assert(isa<ConstantInt>(I->getOperand(PatchPointOpers::IDPos)) && 620 "Expected a constant integer."); 621 const auto *ID = cast<ConstantInt>(I->getOperand(PatchPointOpers::IDPos)); 622 Ops.push_back(MachineOperand::CreateImm(ID->getZExtValue())); 623 624 assert(isa<ConstantInt>(I->getOperand(PatchPointOpers::NBytesPos)) && 625 "Expected a constant integer."); 626 const auto *NumBytes = 627 cast<ConstantInt>(I->getOperand(PatchPointOpers::NBytesPos)); 628 Ops.push_back(MachineOperand::CreateImm(NumBytes->getZExtValue())); 629 630 // Push live variables for the stack map (skipping the first two arguments 631 // <id> and <numBytes>). 632 if (!addStackMapLiveVars(Ops, I, 2)) 633 return false; 634 635 // We are not adding any register mask info here, because the stackmap doesn't 636 // clobber anything. 637 638 // Add scratch registers as implicit def and early clobber. 639 CallingConv::ID CC = I->getCallingConv(); 640 const MCPhysReg *ScratchRegs = TLI.getScratchRegisters(CC); 641 for (unsigned i = 0; ScratchRegs[i]; ++i) 642 Ops.push_back(MachineOperand::CreateReg( 643 ScratchRegs[i], /*IsDef=*/true, /*IsImp=*/true, /*IsKill=*/false, 644 /*IsDead=*/false, /*IsUndef=*/false, /*IsEarlyClobber=*/true)); 645 646 // Issue CALLSEQ_START 647 unsigned AdjStackDown = TII.getCallFrameSetupOpcode(); 648 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AdjStackDown)) 649 .addImm(0); 650 651 // Issue STACKMAP. 652 MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, 653 TII.get(TargetOpcode::STACKMAP)); 654 for (auto const &MO : Ops) 655 MIB.addOperand(MO); 656 657 // Issue CALLSEQ_END 658 unsigned AdjStackUp = TII.getCallFrameDestroyOpcode(); 659 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AdjStackUp)) 660 .addImm(0) 661 .addImm(0); 662 663 // Inform the Frame Information that we have a stackmap in this function. 664 FuncInfo.MF->getFrameInfo()->setHasStackMap(); 665 666 return true; 667} 668 669/// \brief Lower an argument list according to the target calling convention. 670/// 671/// This is a helper for lowering intrinsics that follow a target calling 672/// convention or require stack pointer adjustment. Only a subset of the 673/// intrinsic's operands need to participate in the calling convention. 674bool FastISel::lowerCallOperands(const CallInst *CI, unsigned ArgIdx, 675 unsigned NumArgs, const Value *Callee, 676 bool ForceRetVoidTy, CallLoweringInfo &CLI) { 677 ArgListTy Args; 678 Args.reserve(NumArgs); 679 680 // Populate the argument list. 681 // Attributes for args start at offset 1, after the return attribute. 682 ImmutableCallSite CS(CI); 683 for (unsigned ArgI = ArgIdx, ArgE = ArgIdx + NumArgs, AttrI = ArgIdx + 1; 684 ArgI != ArgE; ++ArgI) { 685 Value *V = CI->getOperand(ArgI); 686 687 assert(!V->getType()->isEmptyTy() && "Empty type passed to intrinsic."); 688 689 ArgListEntry Entry; 690 Entry.Val = V; 691 Entry.Ty = V->getType(); 692 Entry.setAttributes(&CS, AttrI); 693 Args.push_back(Entry); 694 } 695 696 Type *RetTy = ForceRetVoidTy ? 
Type::getVoidTy(CI->getType()->getContext()) 697 : CI->getType(); 698 CLI.setCallee(CI->getCallingConv(), RetTy, Callee, std::move(Args), NumArgs); 699 700 return lowerCallTo(CLI); 701} 702 703bool FastISel::selectPatchpoint(const CallInst *I) { 704 // void|i64 @llvm.experimental.patchpoint.void|i64(i64 <id>, 705 // i32 <numBytes>, 706 // i8* <target>, 707 // i32 <numArgs>, 708 // [Args...], 709 // [live variables...]) 710 CallingConv::ID CC = I->getCallingConv(); 711 bool IsAnyRegCC = CC == CallingConv::AnyReg; 712 bool HasDef = !I->getType()->isVoidTy(); 713 Value *Callee = I->getOperand(PatchPointOpers::TargetPos); 714 715 // Get the real number of arguments participating in the call <numArgs> 716 assert(isa<ConstantInt>(I->getOperand(PatchPointOpers::NArgPos)) && 717 "Expected a constant integer."); 718 const auto *NumArgsVal = 719 cast<ConstantInt>(I->getOperand(PatchPointOpers::NArgPos)); 720 unsigned NumArgs = NumArgsVal->getZExtValue(); 721 722 // Skip the four meta args: <id>, <numNopBytes>, <target>, <numArgs> 723 // This includes all meta-operands up to but not including CC. 724 unsigned NumMetaOpers = PatchPointOpers::CCPos; 725 assert(I->getNumArgOperands() >= NumMetaOpers + NumArgs && 726 "Not enough arguments provided to the patchpoint intrinsic"); 727 728 // For AnyRegCC the arguments are lowered later on manually. 729 unsigned NumCallArgs = IsAnyRegCC ? 0 : NumArgs; 730 CallLoweringInfo CLI; 731 CLI.setIsPatchPoint(); 732 if (!lowerCallOperands(I, NumMetaOpers, NumCallArgs, Callee, IsAnyRegCC, CLI)) 733 return false; 734 735 assert(CLI.Call && "No call instruction specified."); 736 737 SmallVector<MachineOperand, 32> Ops; 738 739 // Add an explicit result reg if we use the anyreg calling convention. 740 if (IsAnyRegCC && HasDef) { 741 assert(CLI.NumResultRegs == 0 && "Unexpected result register."); 742 CLI.ResultReg = createResultReg(TLI.getRegClassFor(MVT::i64)); 743 CLI.NumResultRegs = 1; 744 Ops.push_back(MachineOperand::CreateReg(CLI.ResultReg, /*IsDef=*/true)); 745 } 746 747 // Add the <id> and <numBytes> constants. 748 assert(isa<ConstantInt>(I->getOperand(PatchPointOpers::IDPos)) && 749 "Expected a constant integer."); 750 const auto *ID = cast<ConstantInt>(I->getOperand(PatchPointOpers::IDPos)); 751 Ops.push_back(MachineOperand::CreateImm(ID->getZExtValue())); 752 753 assert(isa<ConstantInt>(I->getOperand(PatchPointOpers::NBytesPos)) && 754 "Expected a constant integer."); 755 const auto *NumBytes = 756 cast<ConstantInt>(I->getOperand(PatchPointOpers::NBytesPos)); 757 Ops.push_back(MachineOperand::CreateImm(NumBytes->getZExtValue())); 758 759 // Assume that the callee is a constant address or null pointer. 760 // FIXME: handle function symbols in the future. 761 uint64_t CalleeAddr; 762 if (const auto *C = dyn_cast<IntToPtrInst>(Callee)) 763 CalleeAddr = cast<ConstantInt>(C->getOperand(0))->getZExtValue(); 764 else if (const auto *C = dyn_cast<ConstantExpr>(Callee)) { 765 if (C->getOpcode() == Instruction::IntToPtr) 766 CalleeAddr = cast<ConstantInt>(C->getOperand(0))->getZExtValue(); 767 else 768 llvm_unreachable("Unsupported ConstantExpr."); 769 } else if (isa<ConstantPointerNull>(Callee)) 770 CalleeAddr = 0; 771 else 772 llvm_unreachable("Unsupported callee address."); 773 774 Ops.push_back(MachineOperand::CreateImm(CalleeAddr)); 775 776 // Adjust <numArgs> to account for any arguments that have been passed on 777 // the stack instead. 778 unsigned NumCallRegArgs = IsAnyRegCC ? 
NumArgs : CLI.OutRegs.size(); 779 Ops.push_back(MachineOperand::CreateImm(NumCallRegArgs)); 780 781 // Add the calling convention 782 Ops.push_back(MachineOperand::CreateImm((unsigned)CC)); 783 784 // Add the arguments we omitted previously. The register allocator should 785 // place these in any free register. 786 if (IsAnyRegCC) { 787 for (unsigned i = NumMetaOpers, e = NumMetaOpers + NumArgs; i != e; ++i) { 788 unsigned Reg = getRegForValue(I->getArgOperand(i)); 789 if (!Reg) 790 return false; 791 Ops.push_back(MachineOperand::CreateReg(Reg, /*IsDef=*/false)); 792 } 793 } 794 795 // Push the arguments from the call instruction. 796 for (auto Reg : CLI.OutRegs) 797 Ops.push_back(MachineOperand::CreateReg(Reg, /*IsDef=*/false)); 798 799 // Push live variables for the stack map. 800 if (!addStackMapLiveVars(Ops, I, NumMetaOpers + NumArgs)) 801 return false; 802 803 // Push the register mask info. 804 Ops.push_back(MachineOperand::CreateRegMask(TRI.getCallPreservedMask(CC))); 805 806 // Add scratch registers as implicit def and early clobber. 807 const MCPhysReg *ScratchRegs = TLI.getScratchRegisters(CC); 808 for (unsigned i = 0; ScratchRegs[i]; ++i) 809 Ops.push_back(MachineOperand::CreateReg( 810 ScratchRegs[i], /*IsDef=*/true, /*IsImp=*/true, /*IsKill=*/false, 811 /*IsDead=*/false, /*IsUndef=*/false, /*IsEarlyClobber=*/true)); 812 813 // Add implicit defs (return values). 814 for (auto Reg : CLI.InRegs) 815 Ops.push_back(MachineOperand::CreateReg(Reg, /*IsDef=*/true, 816 /*IsImpl=*/true)); 817 818 // Insert the patchpoint instruction before the call generated by the target. 819 MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, CLI.Call, DbgLoc, 820 TII.get(TargetOpcode::PATCHPOINT)); 821 822 for (auto &MO : Ops) 823 MIB.addOperand(MO); 824 825 MIB->setPhysRegsDeadExcept(CLI.InRegs, TRI); 826 827 // Delete the original call instruction. 828 CLI.Call->eraseFromParent(); 829 830 // Inform the Frame Information that we have a patchpoint in this function. 831 FuncInfo.MF->getFrameInfo()->setHasPatchPoint(); 832 833 if (CLI.NumResultRegs) 834 updateValueMap(I, CLI.ResultReg, CLI.NumResultRegs); 835 return true; 836} 837 838/// Returns an AttributeSet representing the attributes applied to the return 839/// value of the given call. 840static AttributeSet getReturnAttrs(FastISel::CallLoweringInfo &CLI) { 841 SmallVector<Attribute::AttrKind, 2> Attrs; 842 if (CLI.RetSExt) 843 Attrs.push_back(Attribute::SExt); 844 if (CLI.RetZExt) 845 Attrs.push_back(Attribute::ZExt); 846 if (CLI.IsInReg) 847 Attrs.push_back(Attribute::InReg); 848 849 return AttributeSet::get(CLI.RetTy->getContext(), AttributeSet::ReturnIndex, 850 Attrs); 851} 852 853bool FastISel::lowerCallTo(const CallInst *CI, const char *SymName, 854 unsigned NumArgs) { 855 ImmutableCallSite CS(CI); 856 857 PointerType *PT = cast<PointerType>(CS.getCalledValue()->getType()); 858 FunctionType *FTy = cast<FunctionType>(PT->getElementType()); 859 Type *RetTy = FTy->getReturnType(); 860 861 ArgListTy Args; 862 Args.reserve(NumArgs); 863 864 // Populate the argument list. 865 // Attributes for args start at offset 1, after the return attribute. 
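// For example (illustrative): in a call '@f(i8 zeroext %a, i32 %b)', the attributes of %a sit at attribute index 1 and those of %b at index 2, while index 0 holds the return-value attributes; hence the ArgI + 1 in the loop below.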
866 for (unsigned ArgI = 0; ArgI != NumArgs; ++ArgI) { 867 Value *V = CI->getOperand(ArgI); 868 869 assert(!V->getType()->isEmptyTy() && "Empty type passed to intrinsic."); 870 871 ArgListEntry Entry; 872 Entry.Val = V; 873 Entry.Ty = V->getType(); 874 Entry.setAttributes(&CS, ArgI + 1); 875 Args.push_back(Entry); 876 } 877 878 CallLoweringInfo CLI; 879 CLI.setCallee(RetTy, FTy, SymName, std::move(Args), CS, NumArgs); 880 881 return lowerCallTo(CLI); 882} 883 884bool FastISel::lowerCallTo(CallLoweringInfo &CLI) { 885 // Handle the incoming return values from the call. 886 CLI.clearIns(); 887 SmallVector<EVT, 4> RetTys; 888 ComputeValueVTs(TLI, CLI.RetTy, RetTys); 889 890 SmallVector<ISD::OutputArg, 4> Outs; 891 GetReturnInfo(CLI.RetTy, getReturnAttrs(CLI), Outs, TLI); 892 893 bool CanLowerReturn = TLI.CanLowerReturn( 894 CLI.CallConv, *FuncInfo.MF, CLI.IsVarArg, Outs, CLI.RetTy->getContext()); 895 896 // FIXME: sret demotion isn't supported yet - bail out. 897 if (!CanLowerReturn) 898 return false; 899 900 for (unsigned I = 0, E = RetTys.size(); I != E; ++I) { 901 EVT VT = RetTys[I]; 902 MVT RegisterVT = TLI.getRegisterType(CLI.RetTy->getContext(), VT); 903 unsigned NumRegs = TLI.getNumRegisters(CLI.RetTy->getContext(), VT); 904 for (unsigned i = 0; i != NumRegs; ++i) { 905 ISD::InputArg MyFlags; 906 MyFlags.VT = RegisterVT; 907 MyFlags.ArgVT = VT; 908 MyFlags.Used = CLI.IsReturnValueUsed; 909 if (CLI.RetSExt) 910 MyFlags.Flags.setSExt(); 911 if (CLI.RetZExt) 912 MyFlags.Flags.setZExt(); 913 if (CLI.IsInReg) 914 MyFlags.Flags.setInReg(); 915 CLI.Ins.push_back(MyFlags); 916 } 917 } 918 919 // Handle all of the outgoing arguments. 920 CLI.clearOuts(); 921 for (auto &Arg : CLI.getArgs()) { 922 Type *FinalType = Arg.Ty; 923 if (Arg.IsByVal) 924 FinalType = cast<PointerType>(Arg.Ty)->getElementType(); 925 bool NeedsRegBlock = TLI.functionArgumentNeedsConsecutiveRegisters( 926 FinalType, CLI.CallConv, CLI.IsVarArg); 927 928 ISD::ArgFlagsTy Flags; 929 if (Arg.IsZExt) 930 Flags.setZExt(); 931 if (Arg.IsSExt) 932 Flags.setSExt(); 933 if (Arg.IsInReg) 934 Flags.setInReg(); 935 if (Arg.IsSRet) 936 Flags.setSRet(); 937 if (Arg.IsByVal) 938 Flags.setByVal(); 939 if (Arg.IsInAlloca) { 940 Flags.setInAlloca(); 941 // Set the byval flag for CCAssignFn callbacks that don't know about 942 // inalloca. This way we can know how many bytes we should've allocated 943 // and how many bytes a callee cleanup function will pop. If we port 944 // inalloca to more targets, we'll have to add custom inalloca handling in 945 // the various CC lowering callbacks. 946 Flags.setByVal(); 947 } 948 if (Arg.IsByVal || Arg.IsInAlloca) { 949 PointerType *Ty = cast<PointerType>(Arg.Ty); 950 Type *ElementTy = Ty->getElementType(); 951 unsigned FrameSize = DL.getTypeAllocSize(ElementTy); 952 // For ByVal, alignment should come from FE. BE will guess if this info is 953 // not there, but there are cases it cannot get right. 954 unsigned FrameAlign = Arg.Alignment; 955 if (!FrameAlign) 956 FrameAlign = TLI.getByValTypeAlignment(ElementTy); 957 Flags.setByValSize(FrameSize); 958 Flags.setByValAlign(FrameAlign); 959 } 960 if (Arg.IsNest) 961 Flags.setNest(); 962 if (NeedsRegBlock) 963 Flags.setInConsecutiveRegs(); 964 unsigned OriginalAlignment = DL.getABITypeAlignment(Arg.Ty); 965 Flags.setOrigAlign(OriginalAlignment); 966 967 CLI.OutVals.push_back(Arg.Val); 968 CLI.OutFlags.push_back(Flags); 969 } 970 971 if (!fastLowerCall(CLI)) 972 return false; 973 974 // Set all unused physreg defs as dead. 
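// A call's implicit defs cover everything it may clobber; only the registers in CLI.InRegs actually carry a return value, so every other physreg def is flagged dead below to keep later passes from treating them as live.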
975 assert(CLI.Call && "No call instruction specified."); 976 CLI.Call->setPhysRegsDeadExcept(CLI.InRegs, TRI); 977 978 if (CLI.NumResultRegs && CLI.CS) 979 updateValueMap(CLI.CS->getInstruction(), CLI.ResultReg, CLI.NumResultRegs); 980 981 return true; 982} 983 984bool FastISel::lowerCall(const CallInst *CI) { 985 ImmutableCallSite CS(CI); 986 987 PointerType *PT = cast<PointerType>(CS.getCalledValue()->getType()); 988 FunctionType *FuncTy = cast<FunctionType>(PT->getElementType()); 989 Type *RetTy = FuncTy->getReturnType(); 990 991 ArgListTy Args; 992 ArgListEntry Entry; 993 Args.reserve(CS.arg_size()); 994 995 for (ImmutableCallSite::arg_iterator i = CS.arg_begin(), e = CS.arg_end(); 996 i != e; ++i) { 997 Value *V = *i; 998 999 // Skip empty types 1000 if (V->getType()->isEmptyTy()) 1001 continue; 1002 1003 Entry.Val = V; 1004 Entry.Ty = V->getType(); 1005 1006 // Skip the first return-type Attribute to get to params. 1007 Entry.setAttributes(&CS, i - CS.arg_begin() + 1); 1008 Args.push_back(Entry); 1009 } 1010 1011 // Check if target-independent constraints permit a tail call here. 1012 // Target-dependent constraints are checked within fastLowerCall. 1013 bool IsTailCall = CI->isTailCall(); 1014 if (IsTailCall && !isInTailCallPosition(CS, TM)) 1015 IsTailCall = false; 1016 1017 CallLoweringInfo CLI; 1018 CLI.setCallee(RetTy, FuncTy, CI->getCalledValue(), std::move(Args), CS) 1019 .setTailCall(IsTailCall); 1020 1021 return lowerCallTo(CLI); 1022} 1023 1024bool FastISel::selectCall(const User *I) { 1025 const CallInst *Call = cast<CallInst>(I); 1026 1027 // Handle simple inline asms. 1028 if (const InlineAsm *IA = dyn_cast<InlineAsm>(Call->getCalledValue())) { 1029 // If the inline asm has side effects, then make sure that no local value 1030 // lives across by flushing the local value map. 1031 if (IA->hasSideEffects()) 1032 flushLocalValueMap(); 1033 1034 // Don't attempt to handle constraints. 1035 if (!IA->getConstraintString().empty()) 1036 return false; 1037 1038 unsigned ExtraInfo = 0; 1039 if (IA->hasSideEffects()) 1040 ExtraInfo |= InlineAsm::Extra_HasSideEffects; 1041 if (IA->isAlignStack()) 1042 ExtraInfo |= InlineAsm::Extra_IsAlignStack; 1043 1044 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, 1045 TII.get(TargetOpcode::INLINEASM)) 1046 .addExternalSymbol(IA->getAsmString().c_str()) 1047 .addImm(ExtraInfo); 1048 return true; 1049 } 1050 1051 MachineModuleInfo &MMI = FuncInfo.MF->getMMI(); 1052 ComputeUsesVAFloatArgument(*Call, &MMI); 1053 1054 // Handle intrinsic function calls. 1055 if (const auto *II = dyn_cast<IntrinsicInst>(Call)) 1056 return selectIntrinsicCall(II); 1057 1058 // Usually, it does not make sense to initialize a value, 1059 // make an unrelated function call and use the value, because 1060 // it tends to be spilled on the stack. So, we move the pointer 1061 // to the last local value to the beginning of the block, so that 1062 // all the values which have already been materialized, 1063 // appear after the call. It also makes sense to skip intrinsics 1064 // since they tend to be inlined. 1065 flushLocalValueMap(); 1066 1067 return lowerCall(Call); 1068} 1069 1070bool FastISel::selectIntrinsicCall(const IntrinsicInst *II) { 1071 switch (II->getIntrinsicID()) { 1072 default: 1073 break; 1074 // At -O0 we don't care about the lifetime intrinsics. 1075 case Intrinsic::lifetime_start: 1076 case Intrinsic::lifetime_end: 1077 // The donothing intrinsic does, well, nothing. 
1078 case Intrinsic::donothing: 1079 return true; 1080 case Intrinsic::dbg_declare: { 1081 const DbgDeclareInst *DI = cast<DbgDeclareInst>(II); 1082 DIVariable DIVar(DI->getVariable()); 1083 assert((!DIVar || DIVar.isVariable()) && 1084 "Variable in DbgDeclareInst should be either null or a DIVariable."); 1085 if (!DIVar || !FuncInfo.MF->getMMI().hasDebugInfo()) { 1086 DEBUG(dbgs() << "Dropping debug info for " << *DI << "\n"); 1087 return true; 1088 } 1089 1090 const Value *Address = DI->getAddress(); 1091 if (!Address || isa<UndefValue>(Address)) { 1092 DEBUG(dbgs() << "Dropping debug info for " << *DI << "\n"); 1093 return true; 1094 } 1095 1096 unsigned Offset = 0; 1097 Optional<MachineOperand> Op; 1098 if (const auto *Arg = dyn_cast<Argument>(Address)) 1099 // Some arguments' frame index is recorded during argument lowering. 1100 Offset = FuncInfo.getArgumentFrameIndex(Arg); 1101 if (Offset) 1102 Op = MachineOperand::CreateFI(Offset); 1103 if (!Op) 1104 if (unsigned Reg = lookUpRegForValue(Address)) 1105 Op = MachineOperand::CreateReg(Reg, false); 1106 1107 // If we have a VLA that has a "use" in a metadata node that's then used 1108 // here but it has no other uses, then we have a problem. E.g., 1109 // 1110 // int foo (const int *x) { 1111 // char a[*x]; 1112 // return 0; 1113 // } 1114 // 1115 // If we assign 'a' a vreg and fast isel later on has to use the selection 1116 // DAG isel, it will want to copy the value to the vreg. However, there are 1117 // no uses, which goes counter to what selection DAG isel expects. 1118 if (!Op && !Address->use_empty() && isa<Instruction>(Address) && 1119 (!isa<AllocaInst>(Address) || 1120 !FuncInfo.StaticAllocaMap.count(cast<AllocaInst>(Address)))) 1121 Op = MachineOperand::CreateReg(FuncInfo.InitializeRegForValue(Address), 1122 false); 1123 1124 if (Op) { 1125 if (Op->isReg()) { 1126 Op->setIsDebug(true); 1127 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, 1128 TII.get(TargetOpcode::DBG_VALUE), false, Op->getReg(), 0, 1129 DI->getVariable(), DI->getExpression()); 1130 } else 1131 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, 1132 TII.get(TargetOpcode::DBG_VALUE)) 1133 .addOperand(*Op) 1134 .addImm(0) 1135 .addMetadata(DI->getVariable()) 1136 .addMetadata(DI->getExpression()); 1137 } else { 1138 // We can't yet handle anything else here because it would require 1139 // generating code, thus altering codegen because of debug info. 1140 DEBUG(dbgs() << "Dropping debug info for " << *DI << "\n"); 1141 } 1142 return true; 1143 } 1144 case Intrinsic::dbg_value: { 1145 // This form of DBG_VALUE is target-independent. 1146 const DbgValueInst *DI = cast<DbgValueInst>(II); 1147 const MCInstrDesc &II = TII.get(TargetOpcode::DBG_VALUE); 1148 const Value *V = DI->getValue(); 1149 if (!V) { 1150 // Currently the optimizer can produce this; insert an undef to 1151 // help debugging. Probably the optimizer should not do this. 
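// The result is roughly 'DBG_VALUE %noreg, <offset>, !var, !expr'; register 0 deliberately describes an unknown location rather than dropping the variable entirely.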
1152 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II) 1153 .addReg(0U) 1154 .addImm(DI->getOffset()) 1155 .addMetadata(DI->getVariable()) 1156 .addMetadata(DI->getExpression()); 1157 } else if (const auto *CI = dyn_cast<ConstantInt>(V)) { 1158 if (CI->getBitWidth() > 64) 1159 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II) 1160 .addCImm(CI) 1161 .addImm(DI->getOffset()) 1162 .addMetadata(DI->getVariable()) 1163 .addMetadata(DI->getExpression()); 1164 else 1165 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II) 1166 .addImm(CI->getZExtValue()) 1167 .addImm(DI->getOffset()) 1168 .addMetadata(DI->getVariable()) 1169 .addMetadata(DI->getExpression()); 1170 } else if (const auto *CF = dyn_cast<ConstantFP>(V)) { 1171 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II) 1172 .addFPImm(CF) 1173 .addImm(DI->getOffset()) 1174 .addMetadata(DI->getVariable()) 1175 .addMetadata(DI->getExpression()); 1176 } else if (unsigned Reg = lookUpRegForValue(V)) { 1177 // FIXME: This does not handle register-indirect values at offset 0. 1178 bool IsIndirect = DI->getOffset() != 0; 1179 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, IsIndirect, Reg, 1180 DI->getOffset(), DI->getVariable(), DI->getExpression()); 1181 } else { 1182 // We can't yet handle anything else here because it would require 1183 // generating code, thus altering codegen because of debug info. 1184 DEBUG(dbgs() << "Dropping debug info for " << *DI << "\n"); 1185 } 1186 return true; 1187 } 1188 case Intrinsic::objectsize: { 1189 ConstantInt *CI = cast<ConstantInt>(II->getArgOperand(1)); 1190 unsigned long long Res = CI->isZero() ? -1ULL : 0; 1191 Constant *ResCI = ConstantInt::get(II->getType(), Res); 1192 unsigned ResultReg = getRegForValue(ResCI); 1193 if (!ResultReg) 1194 return false; 1195 updateValueMap(II, ResultReg); 1196 return true; 1197 } 1198 case Intrinsic::expect: { 1199 unsigned ResultReg = getRegForValue(II->getArgOperand(0)); 1200 if (!ResultReg) 1201 return false; 1202 updateValueMap(II, ResultReg); 1203 return true; 1204 } 1205 case Intrinsic::experimental_stackmap: 1206 return selectStackmap(II); 1207 case Intrinsic::experimental_patchpoint_void: 1208 case Intrinsic::experimental_patchpoint_i64: 1209 return selectPatchpoint(II); 1210 } 1211 1212 return fastLowerIntrinsicCall(II); 1213} 1214 1215bool FastISel::selectCast(const User *I, unsigned Opcode) { 1216 EVT SrcVT = TLI.getValueType(I->getOperand(0)->getType()); 1217 EVT DstVT = TLI.getValueType(I->getType()); 1218 1219 if (SrcVT == MVT::Other || !SrcVT.isSimple() || DstVT == MVT::Other || 1220 !DstVT.isSimple()) 1221 // Unhandled type. Halt "fast" selection and bail. 1222 return false; 1223 1224 // Check if the destination type is legal. 1225 if (!TLI.isTypeLegal(DstVT)) 1226 return false; 1227 1228 // Check if the source operand is legal. 1229 if (!TLI.isTypeLegal(SrcVT)) 1230 return false; 1231 1232 unsigned InputReg = getRegForValue(I->getOperand(0)); 1233 if (!InputReg) 1234 // Unhandled operand. Halt "fast" selection and bail. 1235 return false; 1236 1237 bool InputRegIsKill = hasTrivialKill(I->getOperand(0)); 1238 1239 unsigned ResultReg = fastEmit_r(SrcVT.getSimpleVT(), DstVT.getSimpleVT(), 1240 Opcode, InputReg, InputRegIsKill); 1241 if (!ResultReg) 1242 return false; 1243 1244 updateValueMap(I, ResultReg); 1245 return true; 1246} 1247 1248bool FastISel::selectBitCast(const User *I) { 1249 // If the bitcast doesn't change the type, just use the operand value. 
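// This covers identity casts, e.g. 'bitcast i8* %p to i8*', which are legal no-op IR; the operand's existing register is simply reused.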
1250 if (I->getType() == I->getOperand(0)->getType()) { 1251 unsigned Reg = getRegForValue(I->getOperand(0)); 1252 if (!Reg) 1253 return false; 1254 updateValueMap(I, Reg); 1255 return true; 1256 } 1257 1258 // Bitcasts of other values become reg-reg copies or BITCAST operators. 1259 EVT SrcEVT = TLI.getValueType(I->getOperand(0)->getType()); 1260 EVT DstEVT = TLI.getValueType(I->getType()); 1261 if (SrcEVT == MVT::Other || DstEVT == MVT::Other || 1262 !TLI.isTypeLegal(SrcEVT) || !TLI.isTypeLegal(DstEVT)) 1263 // Unhandled type. Halt "fast" selection and bail. 1264 return false; 1265 1266 MVT SrcVT = SrcEVT.getSimpleVT(); 1267 MVT DstVT = DstEVT.getSimpleVT(); 1268 unsigned Op0 = getRegForValue(I->getOperand(0)); 1269 if (!Op0) // Unhandled operand. Halt "fast" selection and bail. 1270 return false; 1271 bool Op0IsKill = hasTrivialKill(I->getOperand(0)); 1272 1273 // First, try to perform the bitcast by inserting a reg-reg copy. 1274 unsigned ResultReg = 0; 1275 if (SrcVT == DstVT) { 1276 const TargetRegisterClass *SrcClass = TLI.getRegClassFor(SrcVT); 1277 const TargetRegisterClass *DstClass = TLI.getRegClassFor(DstVT); 1278 // Don't attempt a cross-class copy. It will likely fail. 1279 if (SrcClass == DstClass) { 1280 ResultReg = createResultReg(DstClass); 1281 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, 1282 TII.get(TargetOpcode::COPY), ResultReg).addReg(Op0); 1283 } 1284 } 1285 1286 // If the reg-reg copy failed, select a BITCAST opcode. 1287 if (!ResultReg) 1288 ResultReg = fastEmit_r(SrcVT, DstVT, ISD::BITCAST, Op0, Op0IsKill); 1289 1290 if (!ResultReg) 1291 return false; 1292 1293 updateValueMap(I, ResultReg); 1294 return true; 1295} 1296 1297bool FastISel::selectInstruction(const Instruction *I) { 1298 // Just before the terminator instruction, insert instructions to 1299 // feed PHI nodes in successor blocks. 1300 if (isa<TerminatorInst>(I)) 1301 if (!handlePHINodesInSuccessorBlocks(I->getParent())) 1302 return false; 1303 1304 DbgLoc = I->getDebugLoc(); 1305 1306 SavedInsertPt = FuncInfo.InsertPt; 1307 1308 if (const auto *Call = dyn_cast<CallInst>(I)) { 1309 const Function *F = Call->getCalledFunction(); 1310 LibFunc::Func Func; 1311 1312 // As a special case, don't handle calls to builtin library functions that 1313 // may be translated directly to target instructions. 1314 if (F && !F->hasLocalLinkage() && F->hasName() && 1315 LibInfo->getLibFunc(F->getName(), Func) && 1316 LibInfo->hasOptimizedCodeGen(Func)) 1317 return false; 1318 1319 // Don't handle Intrinsic::trap if a trap function is specified. 1320 if (F && F->getIntrinsicID() == Intrinsic::trap && 1321 !TM.Options.getTrapFunctionName().empty()) 1322 return false; 1323 } 1324 1325 // First, try doing target-independent selection. 1326 if (!SkipTargetIndependentISel) { 1327 if (selectOperator(I, I->getOpcode())) { 1328 ++NumFastIselSuccessIndependent; 1329 DbgLoc = DebugLoc(); 1330 return true; 1331 } 1332 // Remove dead code. 1333 recomputeInsertPt(); 1334 if (SavedInsertPt != FuncInfo.InsertPt) 1335 removeDeadCode(FuncInfo.InsertPt, SavedInsertPt); 1336 SavedInsertPt = FuncInfo.InsertPt; 1337 } 1338 // Next, try calling the target to attempt to handle the instruction. 1339 if (fastSelectInstruction(I)) { 1340 ++NumFastIselSuccessTarget; 1341 DbgLoc = DebugLoc(); 1342 return true; 1343 } 1344 // Remove dead code.
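// The failed attempt may already have emitted some instructions (constant materializations, extends); everything between the recomputed insert point and SavedInsertPt is dead and gets erased so selection can restart cleanly.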
1345 recomputeInsertPt(); 1346 if (SavedInsertPt != FuncInfo.InsertPt) 1347 removeDeadCode(FuncInfo.InsertPt, SavedInsertPt); 1348 1349 DbgLoc = DebugLoc(); 1350 // Undo phi node updates, because they will be added again by SelectionDAG. 1351 if (isa<TerminatorInst>(I)) 1352 FuncInfo.PHINodesToUpdate.resize(FuncInfo.OrigNumPHINodesToUpdate); 1353 return false; 1354} 1355 1356/// Emit an unconditional branch to the given block, unless it is the immediate 1357/// (fall-through) successor, and update the CFG. 1358void FastISel::fastEmitBranch(MachineBasicBlock *MSucc, DebugLoc DbgLoc) { 1359 if (FuncInfo.MBB->getBasicBlock()->size() > 1 && 1360 FuncInfo.MBB->isLayoutSuccessor(MSucc)) { 1361 // For more accurate line information if this is the only instruction 1362 // in the block then emit it, otherwise we have the unconditional 1363 // fall-through case, which needs no instructions. 1364 } else { 1365 // The unconditional branch case. 1366 TII.InsertBranch(*FuncInfo.MBB, MSucc, nullptr, 1367 SmallVector<MachineOperand, 0>(), DbgLoc); 1368 } 1369 uint32_t BranchWeight = 0; 1370 if (FuncInfo.BPI) 1371 BranchWeight = FuncInfo.BPI->getEdgeWeight(FuncInfo.MBB->getBasicBlock(), 1372 MSucc->getBasicBlock()); 1373 FuncInfo.MBB->addSuccessor(MSucc, BranchWeight); 1374} 1375 1376/// Emit an FNeg operation. 1377bool FastISel::selectFNeg(const User *I) { 1378 unsigned OpReg = getRegForValue(BinaryOperator::getFNegArgument(I)); 1379 if (!OpReg) 1380 return false; 1381 bool OpRegIsKill = hasTrivialKill(I); 1382 1383 // If the target has ISD::FNEG, use it. 1384 EVT VT = TLI.getValueType(I->getType()); 1385 unsigned ResultReg = fastEmit_r(VT.getSimpleVT(), VT.getSimpleVT(), ISD::FNEG, 1386 OpReg, OpRegIsKill); 1387 if (ResultReg) { 1388 updateValueMap(I, ResultReg); 1389 return true; 1390 } 1391 1392 // Bitcast the value to integer, twiddle the sign bit with xor, 1393 // and then bitcast it back to floating-point. 1394 if (VT.getSizeInBits() > 64) 1395 return false; 1396 EVT IntVT = EVT::getIntegerVT(I->getContext(), VT.getSizeInBits()); 1397 if (!TLI.isTypeLegal(IntVT)) 1398 return false; 1399 1400 unsigned IntReg = fastEmit_r(VT.getSimpleVT(), IntVT.getSimpleVT(), 1401 ISD::BITCAST, OpReg, OpRegIsKill); 1402 if (!IntReg) 1403 return false; 1404 1405 unsigned IntResultReg = fastEmit_ri_( 1406 IntVT.getSimpleVT(), ISD::XOR, IntReg, /*IsKill=*/true, 1407 UINT64_C(1) << (VT.getSizeInBits() - 1), IntVT.getSimpleVT()); 1408 if (!IntResultReg) 1409 return false; 1410 1411 ResultReg = fastEmit_r(IntVT.getSimpleVT(), VT.getSimpleVT(), ISD::BITCAST, 1412 IntResultReg, /*IsKill=*/true); 1413 if (!ResultReg) 1414 return false; 1415 1416 updateValueMap(I, ResultReg); 1417 return true; 1418} 1419 1420bool FastISel::selectExtractValue(const User *U) { 1421 const ExtractValueInst *EVI = dyn_cast<ExtractValueInst>(U); 1422 if (!EVI) 1423 return false; 1424 1425 // Make sure we only try to handle extracts with a legal result. But also 1426 // allow i1 because it's easy. 1427 EVT RealVT = TLI.getValueType(EVI->getType(), /*AllowUnknown=*/true); 1428 if (!RealVT.isSimple()) 1429 return false; 1430 MVT VT = RealVT.getSimpleVT(); 1431 if (!TLI.isTypeLegal(VT) && VT != MVT::i1) 1432 return false; 1433 1434 const Value *Op0 = EVI->getOperand(0); 1435 Type *AggTy = Op0->getType(); 1436 1437 // Get the base result register. 
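// An aggregate value occupies a contiguous run of virtual registers, one block per field; e.g. for '{i64, i32}' (illustrative, assuming i64 fits a single register) field 1 lives at the base register plus one.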
1438 unsigned ResultReg; 1439 DenseMap<const Value *, unsigned>::iterator I = FuncInfo.ValueMap.find(Op0); 1440 if (I != FuncInfo.ValueMap.end()) 1441 ResultReg = I->second; 1442 else if (isa<Instruction>(Op0)) 1443 ResultReg = FuncInfo.InitializeRegForValue(Op0); 1444 else 1445 return false; // fast-isel can't handle aggregate constants at the moment 1446 1447 // Get the actual result register, which is an offset from the base register. 1448 unsigned VTIndex = ComputeLinearIndex(AggTy, EVI->getIndices()); 1449 1450 SmallVector<EVT, 4> AggValueVTs; 1451 ComputeValueVTs(TLI, AggTy, AggValueVTs); 1452 1453 for (unsigned i = 0; i < VTIndex; i++) 1454 ResultReg += TLI.getNumRegisters(FuncInfo.Fn->getContext(), AggValueVTs[i]); 1455 1456 updateValueMap(EVI, ResultReg); 1457 return true; 1458} 1459 1460bool FastISel::selectOperator(const User *I, unsigned Opcode) { 1461 switch (Opcode) { 1462 case Instruction::Add: 1463 return selectBinaryOp(I, ISD::ADD); 1464 case Instruction::FAdd: 1465 return selectBinaryOp(I, ISD::FADD); 1466 case Instruction::Sub: 1467 return selectBinaryOp(I, ISD::SUB); 1468 case Instruction::FSub: 1469 // FNeg is currently represented in LLVM IR as a special case of FSub. 1470 if (BinaryOperator::isFNeg(I)) 1471 return selectFNeg(I); 1472 return selectBinaryOp(I, ISD::FSUB); 1473 case Instruction::Mul: 1474 return selectBinaryOp(I, ISD::MUL); 1475 case Instruction::FMul: 1476 return selectBinaryOp(I, ISD::FMUL); 1477 case Instruction::SDiv: 1478 return selectBinaryOp(I, ISD::SDIV); 1479 case Instruction::UDiv: 1480 return selectBinaryOp(I, ISD::UDIV); 1481 case Instruction::FDiv: 1482 return selectBinaryOp(I, ISD::FDIV); 1483 case Instruction::SRem: 1484 return selectBinaryOp(I, ISD::SREM); 1485 case Instruction::URem: 1486 return selectBinaryOp(I, ISD::UREM); 1487 case Instruction::FRem: 1488 return selectBinaryOp(I, ISD::FREM); 1489 case Instruction::Shl: 1490 return selectBinaryOp(I, ISD::SHL); 1491 case Instruction::LShr: 1492 return selectBinaryOp(I, ISD::SRL); 1493 case Instruction::AShr: 1494 return selectBinaryOp(I, ISD::SRA); 1495 case Instruction::And: 1496 return selectBinaryOp(I, ISD::AND); 1497 case Instruction::Or: 1498 return selectBinaryOp(I, ISD::OR); 1499 case Instruction::Xor: 1500 return selectBinaryOp(I, ISD::XOR); 1501 1502 case Instruction::GetElementPtr: 1503 return selectGetElementPtr(I); 1504 1505 case Instruction::Br: { 1506 const BranchInst *BI = cast<BranchInst>(I); 1507 1508 if (BI->isUnconditional()) { 1509 const BasicBlock *LLVMSucc = BI->getSuccessor(0); 1510 MachineBasicBlock *MSucc = FuncInfo.MBBMap[LLVMSucc]; 1511 fastEmitBranch(MSucc, BI->getDebugLoc()); 1512 return true; 1513 } 1514 1515 // Conditional branches are not handled yet. 1516 // Halt "fast" selection and bail. 1517 return false; 1518 } 1519 1520 case Instruction::Unreachable: 1521 if (TM.Options.TrapUnreachable) 1522 return fastEmit_(MVT::Other, MVT::Other, ISD::TRAP) != 0; 1523 else 1524 return true; 1525 1526 case Instruction::Alloca: 1527 // FunctionLowering has the static-sized case covered. 1528 if (FuncInfo.StaticAllocaMap.count(cast<AllocaInst>(I))) 1529 return true; 1530 1531 // Dynamic-sized alloca is not handled yet.
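// (e.g. a VLA-style 'alloca i8, i64 %n' would need run-time stack-pointer arithmetic; such allocas are left for SelectionDAG.)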
1532 return false; 1533 1534 case Instruction::Call: 1535 return selectCall(I); 1536 1537 case Instruction::BitCast: 1538 return selectBitCast(I); 1539 1540 case Instruction::FPToSI: 1541 return selectCast(I, ISD::FP_TO_SINT); 1542 case Instruction::ZExt: 1543 return selectCast(I, ISD::ZERO_EXTEND); 1544 case Instruction::SExt: 1545 return selectCast(I, ISD::SIGN_EXTEND); 1546 case Instruction::Trunc: 1547 return selectCast(I, ISD::TRUNCATE); 1548 case Instruction::SIToFP: 1549 return selectCast(I, ISD::SINT_TO_FP); 1550 1551 case Instruction::IntToPtr: // Deliberate fall-through. 1552 case Instruction::PtrToInt: { 1553 EVT SrcVT = TLI.getValueType(I->getOperand(0)->getType()); 1554 EVT DstVT = TLI.getValueType(I->getType()); 1555 if (DstVT.bitsGT(SrcVT)) 1556 return selectCast(I, ISD::ZERO_EXTEND); 1557 if (DstVT.bitsLT(SrcVT)) 1558 return selectCast(I, ISD::TRUNCATE); 1559 unsigned Reg = getRegForValue(I->getOperand(0)); 1560 if (!Reg) 1561 return false; 1562 updateValueMap(I, Reg); 1563 return true; 1564 } 1565 1566 case Instruction::ExtractValue: 1567 return selectExtractValue(I); 1568 1569 case Instruction::PHI: 1570 llvm_unreachable("FastISel shouldn't visit PHI nodes!"); 1571 1572 default: 1573 // Unhandled instruction. Halt "fast" selection and bail. 1574 return false; 1575 } 1576} 1577 1578FastISel::FastISel(FunctionLoweringInfo &FuncInfo, 1579 const TargetLibraryInfo *LibInfo, 1580 bool SkipTargetIndependentISel) 1581 : FuncInfo(FuncInfo), MF(FuncInfo.MF), MRI(FuncInfo.MF->getRegInfo()), 1582 MFI(*FuncInfo.MF->getFrameInfo()), MCP(*FuncInfo.MF->getConstantPool()), 1583 TM(FuncInfo.MF->getTarget()), DL(*MF->getSubtarget().getDataLayout()), 1584 TII(*MF->getSubtarget().getInstrInfo()), 1585 TLI(*MF->getSubtarget().getTargetLowering()), 1586 TRI(*MF->getSubtarget().getRegisterInfo()), LibInfo(LibInfo), 1587 SkipTargetIndependentISel(SkipTargetIndependentISel) {} 1588 1589FastISel::~FastISel() {} 1590 1591bool FastISel::fastLowerArguments() { return false; } 1592 1593bool FastISel::fastLowerCall(CallLoweringInfo & /*CLI*/) { return false; } 1594 1595bool FastISel::fastLowerIntrinsicCall(const IntrinsicInst * /*II*/) { 1596 return false; 1597} 1598 1599unsigned FastISel::fastEmit_(MVT, MVT, unsigned) { return 0; } 1600 1601unsigned FastISel::fastEmit_r(MVT, MVT, unsigned, unsigned /*Op0*/, 1602 bool /*Op0IsKill*/) { 1603 return 0; 1604} 1605 1606unsigned FastISel::fastEmit_rr(MVT, MVT, unsigned, unsigned /*Op0*/, 1607 bool /*Op0IsKill*/, unsigned /*Op1*/, 1608 bool /*Op1IsKill*/) { 1609 return 0; 1610} 1611 1612unsigned FastISel::fastEmit_i(MVT, MVT, unsigned, uint64_t /*Imm*/) { 1613 return 0; 1614} 1615 1616unsigned FastISel::fastEmit_f(MVT, MVT, unsigned, 1617 const ConstantFP * /*FPImm*/) { 1618 return 0; 1619} 1620 1621unsigned FastISel::fastEmit_ri(MVT, MVT, unsigned, unsigned /*Op0*/, 1622 bool /*Op0IsKill*/, uint64_t /*Imm*/) { 1623 return 0; 1624} 1625 1626unsigned FastISel::fastEmit_rf(MVT, MVT, unsigned, unsigned /*Op0*/, 1627 bool /*Op0IsKill*/, 1628 const ConstantFP * /*FPImm*/) { 1629 return 0; 1630} 1631 1632unsigned FastISel::fastEmit_rri(MVT, MVT, unsigned, unsigned /*Op0*/, 1633 bool /*Op0IsKill*/, unsigned /*Op1*/, 1634 bool /*Op1IsKill*/, uint64_t /*Imm*/) { 1635 return 0; 1636} 1637 1638/// This method is a wrapper of fastEmit_ri. It first tries to emit an 1639/// instruction with an immediate operand using fastEmit_ri. 1640/// If that fails, it materializes the immediate into a register and tries 1641/// fastEmit_rr instead.
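/// For example (illustrative): 'mul %x, 16' is first rewritten as SHL by 4; if the target rejects the immediate form, the constant is materialized into a register and the shift is retried as a register-register operation.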
1642unsigned FastISel::fastEmit_ri_(MVT VT, unsigned Opcode, unsigned Op0, 1643 bool Op0IsKill, uint64_t Imm, MVT ImmType) { 1644 // If this is a multiply by a power of two, emit this as a shift left. 1645 if (Opcode == ISD::MUL && isPowerOf2_64(Imm)) { 1646 Opcode = ISD::SHL; 1647 Imm = Log2_64(Imm); 1648 } else if (Opcode == ISD::UDIV && isPowerOf2_64(Imm)) { 1649 // udiv x, 8 -> srl x, 3 1650 Opcode = ISD::SRL; 1651 Imm = Log2_64(Imm); 1652 } 1653 1654 // Horrible hack (to be removed): check to make sure shift amounts are 1655 // in-range. 1656 if ((Opcode == ISD::SHL || Opcode == ISD::SRA || Opcode == ISD::SRL) && 1657 Imm >= VT.getSizeInBits()) 1658 return 0; 1659 1660 // First check if immediate type is legal. If not, we can't use the ri form. 1661 unsigned ResultReg = fastEmit_ri(VT, VT, Opcode, Op0, Op0IsKill, Imm); 1662 if (ResultReg) 1663 return ResultReg; 1664 unsigned MaterialReg = fastEmit_i(ImmType, ImmType, ISD::Constant, Imm); 1665 if (!MaterialReg) { 1666 // This is a bit ugly/slow, but failing here means falling out of 1667 // fast-isel, which would be very slow. 1668 IntegerType *ITy = 1669 IntegerType::get(FuncInfo.Fn->getContext(), VT.getSizeInBits()); 1670 MaterialReg = getRegForValue(ConstantInt::get(ITy, Imm)); 1671 if (!MaterialReg) 1672 return 0; 1673 } 1674 return fastEmit_rr(VT, VT, Opcode, Op0, Op0IsKill, MaterialReg, 1675 /*IsKill=*/true); 1676} 1677 1678unsigned FastISel::createResultReg(const TargetRegisterClass *RC) { 1679 return MRI.createVirtualRegister(RC); 1680} 1681 1682unsigned FastISel::constrainOperandRegClass(const MCInstrDesc &II, unsigned Op, 1683 unsigned OpNum) { 1684 if (TargetRegisterInfo::isVirtualRegister(Op)) { 1685 const TargetRegisterClass *RegClass = 1686 TII.getRegClass(II, OpNum, &TRI, *FuncInfo.MF); 1687 if (!MRI.constrainRegClass(Op, RegClass)) { 1688 // If it's not legal to COPY between the register classes, something 1689 // has gone very wrong before we got here.
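// constrainRegClass fails when the vreg already has uses demanding an incompatible class; rather than constrain it in place, copy into a fresh vreg of the class this operand requires and use that instead.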
1690 unsigned NewOp = createResultReg(RegClass); 1691 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, 1692 TII.get(TargetOpcode::COPY), NewOp).addReg(Op); 1693 return NewOp; 1694 } 1695 } 1696 return Op; 1697} 1698 1699unsigned FastISel::fastEmitInst_(unsigned MachineInstOpcode, 1700 const TargetRegisterClass *RC) { 1701 unsigned ResultReg = createResultReg(RC); 1702 const MCInstrDesc &II = TII.get(MachineInstOpcode); 1703 1704 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg); 1705 return ResultReg; 1706} 1707 1708unsigned FastISel::fastEmitInst_r(unsigned MachineInstOpcode, 1709 const TargetRegisterClass *RC, unsigned Op0, 1710 bool Op0IsKill) { 1711 const MCInstrDesc &II = TII.get(MachineInstOpcode); 1712 1713 unsigned ResultReg = createResultReg(RC); 1714 Op0 = constrainOperandRegClass(II, Op0, II.getNumDefs()); 1715 1716 if (II.getNumDefs() >= 1) 1717 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg) 1718 .addReg(Op0, getKillRegState(Op0IsKill)); 1719 else { 1720 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II) 1721 .addReg(Op0, getKillRegState(Op0IsKill)); 1722 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, 1723 TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]); 1724 } 1725 1726 return ResultReg; 1727} 1728 1729unsigned FastISel::fastEmitInst_rr(unsigned MachineInstOpcode, 1730 const TargetRegisterClass *RC, unsigned Op0, 1731 bool Op0IsKill, unsigned Op1, 1732 bool Op1IsKill) { 1733 const MCInstrDesc &II = TII.get(MachineInstOpcode); 1734 1735 unsigned ResultReg = createResultReg(RC); 1736 Op0 = constrainOperandRegClass(II, Op0, II.getNumDefs()); 1737 Op1 = constrainOperandRegClass(II, Op1, II.getNumDefs() + 1); 1738 1739 if (II.getNumDefs() >= 1) 1740 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg) 1741 .addReg(Op0, getKillRegState(Op0IsKill)) 1742 .addReg(Op1, getKillRegState(Op1IsKill)); 1743 else { 1744 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II) 1745 .addReg(Op0, getKillRegState(Op0IsKill)) 1746 .addReg(Op1, getKillRegState(Op1IsKill)); 1747 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, 1748 TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]); 1749 } 1750 return ResultReg; 1751} 1752 1753unsigned FastISel::fastEmitInst_rrr(unsigned MachineInstOpcode, 1754 const TargetRegisterClass *RC, unsigned Op0, 1755 bool Op0IsKill, unsigned Op1, 1756 bool Op1IsKill, unsigned Op2, 1757 bool Op2IsKill) { 1758 const MCInstrDesc &II = TII.get(MachineInstOpcode); 1759 1760 unsigned ResultReg = createResultReg(RC); 1761 Op0 = constrainOperandRegClass(II, Op0, II.getNumDefs()); 1762 Op1 = constrainOperandRegClass(II, Op1, II.getNumDefs() + 1); 1763 Op2 = constrainOperandRegClass(II, Op2, II.getNumDefs() + 2); 1764 1765 if (II.getNumDefs() >= 1) 1766 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg) 1767 .addReg(Op0, getKillRegState(Op0IsKill)) 1768 .addReg(Op1, getKillRegState(Op1IsKill)) 1769 .addReg(Op2, getKillRegState(Op2IsKill)); 1770 else { 1771 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II) 1772 .addReg(Op0, getKillRegState(Op0IsKill)) 1773 .addReg(Op1, getKillRegState(Op1IsKill)) 1774 .addReg(Op2, getKillRegState(Op2IsKill)); 1775 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, 1776 TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]); 1777 } 1778 return ResultReg; 1779} 1780 1781unsigned FastISel::fastEmitInst_ri(unsigned MachineInstOpcode, 1782 const TargetRegisterClass *RC, unsigned Op0, 1783 bool Op0IsKill, uint64_t Imm) { 1784 const MCInstrDesc 
&II = TII.get(MachineInstOpcode); 1785 1786 unsigned ResultReg = createResultReg(RC); 1787 Op0 = constrainOperandRegClass(II, Op0, II.getNumDefs()); 1788 1789 if (II.getNumDefs() >= 1) 1790 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg) 1791 .addReg(Op0, getKillRegState(Op0IsKill)) 1792 .addImm(Imm); 1793 else { 1794 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II) 1795 .addReg(Op0, getKillRegState(Op0IsKill)) 1796 .addImm(Imm); 1797 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, 1798 TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]); 1799 } 1800 return ResultReg; 1801} 1802 1803unsigned FastISel::fastEmitInst_rii(unsigned MachineInstOpcode, 1804 const TargetRegisterClass *RC, unsigned Op0, 1805 bool Op0IsKill, uint64_t Imm1, 1806 uint64_t Imm2) { 1807 const MCInstrDesc &II = TII.get(MachineInstOpcode); 1808 1809 unsigned ResultReg = createResultReg(RC); 1810 Op0 = constrainOperandRegClass(II, Op0, II.getNumDefs()); 1811 1812 if (II.getNumDefs() >= 1) 1813 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg) 1814 .addReg(Op0, getKillRegState(Op0IsKill)) 1815 .addImm(Imm1) 1816 .addImm(Imm2); 1817 else { 1818 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II) 1819 .addReg(Op0, getKillRegState(Op0IsKill)) 1820 .addImm(Imm1) 1821 .addImm(Imm2); 1822 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, 1823 TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]); 1824 } 1825 return ResultReg; 1826} 1827 1828unsigned FastISel::fastEmitInst_rf(unsigned MachineInstOpcode, 1829 const TargetRegisterClass *RC, unsigned Op0, 1830 bool Op0IsKill, const ConstantFP *FPImm) { 1831 const MCInstrDesc &II = TII.get(MachineInstOpcode); 1832 1833 unsigned ResultReg = createResultReg(RC); 1834 Op0 = constrainOperandRegClass(II, Op0, II.getNumDefs()); 1835 1836 if (II.getNumDefs() >= 1) 1837 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg) 1838 .addReg(Op0, getKillRegState(Op0IsKill)) 1839 .addFPImm(FPImm); 1840 else { 1841 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II) 1842 .addReg(Op0, getKillRegState(Op0IsKill)) 1843 .addFPImm(FPImm); 1844 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, 1845 TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]); 1846 } 1847 return ResultReg; 1848} 1849 1850unsigned FastISel::fastEmitInst_rri(unsigned MachineInstOpcode, 1851 const TargetRegisterClass *RC, unsigned Op0, 1852 bool Op0IsKill, unsigned Op1, 1853 bool Op1IsKill, uint64_t Imm) { 1854 const MCInstrDesc &II = TII.get(MachineInstOpcode); 1855 1856 unsigned ResultReg = createResultReg(RC); 1857 Op0 = constrainOperandRegClass(II, Op0, II.getNumDefs()); 1858 Op1 = constrainOperandRegClass(II, Op1, II.getNumDefs() + 1); 1859 1860 if (II.getNumDefs() >= 1) 1861 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg) 1862 .addReg(Op0, getKillRegState(Op0IsKill)) 1863 .addReg(Op1, getKillRegState(Op1IsKill)) 1864 .addImm(Imm); 1865 else { 1866 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II) 1867 .addReg(Op0, getKillRegState(Op0IsKill)) 1868 .addReg(Op1, getKillRegState(Op1IsKill)) 1869 .addImm(Imm); 1870 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, 1871 TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]); 1872 } 1873 return ResultReg; 1874} 1875 1876unsigned FastISel::fastEmitInst_rrii(unsigned MachineInstOpcode, 1877 const TargetRegisterClass *RC, 1878 unsigned Op0, bool Op0IsKill, unsigned Op1, 1879 bool Op1IsKill, uint64_t Imm1, 1880 uint64_t Imm2) { 1881 const MCInstrDesc &II = 
TII.get(MachineInstOpcode); 1882 1883 unsigned ResultReg = createResultReg(RC); 1884 Op0 = constrainOperandRegClass(II, Op0, II.getNumDefs()); 1885 Op1 = constrainOperandRegClass(II, Op1, II.getNumDefs() + 1); 1886 1887 if (II.getNumDefs() >= 1) 1888 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg) 1889 .addReg(Op0, getKillRegState(Op0IsKill)) 1890 .addReg(Op1, getKillRegState(Op1IsKill)) 1891 .addImm(Imm1) 1892 .addImm(Imm2); 1893 else { 1894 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II) 1895 .addReg(Op0, getKillRegState(Op0IsKill)) 1896 .addReg(Op1, getKillRegState(Op1IsKill)) 1897 .addImm(Imm1) 1898 .addImm(Imm2); 1899 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, 1900 TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]); 1901 } 1902 return ResultReg; 1903} 1904 1905unsigned FastISel::fastEmitInst_i(unsigned MachineInstOpcode, 1906 const TargetRegisterClass *RC, uint64_t Imm) { 1907 unsigned ResultReg = createResultReg(RC); 1908 const MCInstrDesc &II = TII.get(MachineInstOpcode); 1909 1910 if (II.getNumDefs() >= 1) 1911 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg) 1912 .addImm(Imm); 1913 else { 1914 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II).addImm(Imm); 1915 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, 1916 TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]); 1917 } 1918 return ResultReg; 1919} 1920 1921unsigned FastISel::fastEmitInst_ii(unsigned MachineInstOpcode, 1922 const TargetRegisterClass *RC, uint64_t Imm1, 1923 uint64_t Imm2) { 1924 unsigned ResultReg = createResultReg(RC); 1925 const MCInstrDesc &II = TII.get(MachineInstOpcode); 1926 1927 if (II.getNumDefs() >= 1) 1928 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg) 1929 .addImm(Imm1) 1930 .addImm(Imm2); 1931 else { 1932 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II).addImm(Imm1) 1933 .addImm(Imm2); 1934 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, 1935 TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]); 1936 } 1937 return ResultReg; 1938} 1939 1940unsigned FastISel::fastEmitInst_extractsubreg(MVT RetVT, unsigned Op0, 1941 bool Op0IsKill, uint32_t Idx) { 1942 unsigned ResultReg = createResultReg(TLI.getRegClassFor(RetVT)); 1943 assert(TargetRegisterInfo::isVirtualRegister(Op0) && 1944 "Cannot yet extract from physregs"); 1945 const TargetRegisterClass *RC = MRI.getRegClass(Op0); 1946 MRI.constrainRegClass(Op0, TRI.getSubClassWithSubReg(RC, Idx)); 1947 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(TargetOpcode::COPY), 1948 ResultReg).addReg(Op0, getKillRegState(Op0IsKill), Idx); 1949 return ResultReg; 1950} 1951 1952/// Emit MachineInstrs to compute the value of Op with all but the least 1953/// significant bit set to zero. 1954unsigned FastISel::fastEmitZExtFromI1(MVT VT, unsigned Op0, bool Op0IsKill) { 1955 return fastEmit_ri(VT, VT, ISD::AND, Op0, Op0IsKill, 1); 1956} 1957 1958/// HandlePHINodesInSuccessorBlocks - Handle PHI nodes in successor blocks. 1959/// Emit code to ensure constants are copied into registers when needed. 1960/// Remember the virtual registers that need to be added to the Machine PHI 1961/// nodes as input. We cannot just directly add them, because expansion 1962/// might result in multiple MBB's for one BB. As such, the start of the 1963/// BB might correspond to a different MBB than the end. 
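/// The (machine PHI operand, vreg) pairs recorded in PHINodesToUpdate are wired into the machine PHI nodes later, once every block has been selected.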
1964bool FastISel::handlePHINodesInSuccessorBlocks(const BasicBlock *LLVMBB) { 1965 const TerminatorInst *TI = LLVMBB->getTerminator(); 1966 1967 SmallPtrSet<MachineBasicBlock *, 4> SuccsHandled; 1968 FuncInfo.OrigNumPHINodesToUpdate = FuncInfo.PHINodesToUpdate.size(); 1969 1970 // Check successor nodes' PHI nodes that expect a constant to be available 1971 // from this block. 1972 for (unsigned succ = 0, e = TI->getNumSuccessors(); succ != e; ++succ) { 1973 const BasicBlock *SuccBB = TI->getSuccessor(succ); 1974 if (!isa<PHINode>(SuccBB->begin())) 1975 continue; 1976 MachineBasicBlock *SuccMBB = FuncInfo.MBBMap[SuccBB]; 1977 1978 // If this terminator has multiple identical successors (common for 1979 // switches), only handle each succ once. 1980 if (!SuccsHandled.insert(SuccMBB).second) 1981 continue; 1982 1983 MachineBasicBlock::iterator MBBI = SuccMBB->begin(); 1984 1985 // At this point we know that there is a 1-1 correspondence between LLVM PHI 1986 // nodes and Machine PHI nodes, but the incoming operands have not been 1987 // emitted yet. 1988 for (BasicBlock::const_iterator I = SuccBB->begin(); 1989 const auto *PN = dyn_cast<PHINode>(I); ++I) { 1990 1991 // Ignore dead PHIs. 1992 if (PN->use_empty()) 1993 continue; 1994 1995 // Only handle legal types. Two interesting things to note here. First, 1996 // by bailing out early, we may leave behind some dead instructions, 1997 // since SelectionDAG's HandlePHINodesInSuccessorBlocks will insert its 1998 // own moves. Second, this check is necessary because FastISel doesn't 1999 // use CreateRegs to create registers, so it always creates 2000 // exactly one register for each non-void instruction. 2001 EVT VT = TLI.getValueType(PN->getType(), /*AllowUnknown=*/true); 2002 if (VT == MVT::Other || !TLI.isTypeLegal(VT)) { 2003 // Handle integer promotions, though, because they're common and easy. 2004 if (!(VT == MVT::i1 || VT == MVT::i8 || VT == MVT::i16)) { 2005 FuncInfo.PHINodesToUpdate.resize(FuncInfo.OrigNumPHINodesToUpdate); 2006 return false; 2007 } 2008 } 2009 2010 const Value *PHIOp = PN->getIncomingValueForBlock(LLVMBB); 2011 2012 // Set the DebugLoc for the copy. Prefer the location of the operand 2013 // if there is one; use the location of the PHI otherwise. 2014 DbgLoc = PN->getDebugLoc(); 2015 if (const auto *Inst = dyn_cast<Instruction>(PHIOp)) 2016 DbgLoc = Inst->getDebugLoc(); 2017 2018 unsigned Reg = getRegForValue(PHIOp); 2019 if (!Reg) { 2020 FuncInfo.PHINodesToUpdate.resize(FuncInfo.OrigNumPHINodesToUpdate); 2021 return false; 2022 } 2023 FuncInfo.PHINodesToUpdate.push_back(std::make_pair(MBBI++, Reg)); 2024 DbgLoc = DebugLoc(); 2025 } 2026 } 2027 2028 return true; 2029} 2030 2031bool FastISel::tryToFoldLoad(const LoadInst *LI, const Instruction *FoldInst) { 2032 assert(LI->hasOneUse() && 2033 "tryToFoldLoad expected a LoadInst with a single use"); 2034 // We know that the load has a single use, but don't know what it is. If it 2035 // isn't one of the folded instructions, then we can't succeed here. Handle 2036 // this by scanning the single-use users of the load until we get to FoldInst. 2037 unsigned MaxUsers = 6; // Don't scan down huge single-use chains of instrs. 2038 2039 const Instruction *TheUser = LI->user_back(); 2040 while (TheUser != FoldInst && // Scan up until we find FoldInst. 2041 // Stay in the right block. 2042 TheUser->getParent() == FoldInst->getParent() && 2043 --MaxUsers) { // Don't scan too far. 2044 // If there are multiple or no uses of this instruction, then bail out.
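// (hasOneUse() is also false for zero uses; each hop of the walk, e.g. load -> sext -> add, must have exactly one user to stay unambiguous.)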
bool FastISel::tryToFoldLoad(const LoadInst *LI, const Instruction *FoldInst) {
  assert(LI->hasOneUse() &&
         "tryToFoldLoad expected a LoadInst with a single use");
  // We know that the load has a single use, but don't know what it is. If it
  // isn't one of the folded instructions, then we can't succeed here. Handle
  // this by scanning the single-use users of the load until we get to
  // FoldInst.
  unsigned MaxUsers = 6; // Don't scan down huge single-use chains of instrs.

  const Instruction *TheUser = LI->user_back();
  while (TheUser != FoldInst && // Scan up until we find FoldInst.
         // Stay in the right block.
         TheUser->getParent() == FoldInst->getParent() &&
         --MaxUsers) { // Don't scan too far.
    // If there are multiple or no uses of this instruction, then bail out.
    if (!TheUser->hasOneUse())
      return false;

    TheUser = TheUser->user_back();
  }

  // If we didn't find the fold instruction, then we failed to collapse the
  // sequence.
  if (TheUser != FoldInst)
    return false;

  // Don't try to fold volatile loads. The target has to deal with alignment
  // constraints.
  if (LI->isVolatile())
    return false;

  // Figure out which vreg this is going into. If there is no assigned vreg yet
  // then there actually was no reference to it. Perhaps the load is referenced
  // by a dead instruction.
  unsigned LoadReg = getRegForValue(LI);
  if (!LoadReg)
    return false;

  // We can't fold if this vreg has no uses or more than one use. Multiple uses
  // may mean that the instruction got lowered to multiple MIs, or the use of
  // the loaded value ended up being multiple operands of the result.
  if (!MRI.hasOneUse(LoadReg))
    return false;

  MachineRegisterInfo::reg_iterator RI = MRI.reg_begin(LoadReg);
  MachineInstr *User = RI->getParent();

  // Set the insertion point properly. Folding the load can cause generation of
  // other random instructions (like sign extends) for addressing modes; make
  // sure they get inserted in a logical place before the new instruction.
  FuncInfo.InsertPt = User;
  FuncInfo.MBB = User->getParent();

  // Ask the target to try folding the load.
  return tryToFoldLoadIntoMI(User, RI.getOperandNo(), LI);
}

bool FastISel::canFoldAddIntoGEP(const User *GEP, const Value *Add) {
  // Must be an add.
  if (!isa<AddOperator>(Add))
    return false;
  // Type size needs to match.
  if (DL.getTypeSizeInBits(GEP->getType()) !=
      DL.getTypeSizeInBits(Add->getType()))
    return false;
  // Must be in the same basic block.
  if (isa<Instruction>(Add) &&
      FuncInfo.MBBMap[cast<Instruction>(Add)->getParent()] != FuncInfo.MBB)
    return false;
  // Must have a constant operand.
  return isa<ConstantInt>(cast<AddOperator>(Add)->getOperand(1));
}
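
// IR shape that canFoldAddIntoGEP above accepts (illustrative, assumed
// example):
//
//   %sum = add i64 %base, 16                  ; constant right-hand operand
//   %gep = getelementptr i8* %p, i64 %sum     ; same block, matching width
//
// The constant 16 can then be folded into the GEP's immediate offset by the
// target's address computation instead of costing a separate add instruction.
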
MachineMemOperand *
FastISel::createMachineMemOperandFor(const Instruction *I) const {
  const Value *Ptr;
  Type *ValTy;
  unsigned Alignment;
  unsigned Flags;
  bool IsVolatile;

  if (const auto *LI = dyn_cast<LoadInst>(I)) {
    Alignment = LI->getAlignment();
    IsVolatile = LI->isVolatile();
    Flags = MachineMemOperand::MOLoad;
    Ptr = LI->getPointerOperand();
    ValTy = LI->getType();
  } else if (const auto *SI = dyn_cast<StoreInst>(I)) {
    Alignment = SI->getAlignment();
    IsVolatile = SI->isVolatile();
    Flags = MachineMemOperand::MOStore;
    Ptr = SI->getPointerOperand();
    ValTy = SI->getValueOperand()->getType();
  } else
    return nullptr;

  bool IsNonTemporal = I->getMetadata(LLVMContext::MD_nontemporal) != nullptr;
  bool IsInvariant = I->getMetadata(LLVMContext::MD_invariant_load) != nullptr;
  const MDNode *Ranges = I->getMetadata(LLVMContext::MD_range);

  AAMDNodes AAInfo;
  I->getAAMetadata(AAInfo);

  if (Alignment == 0) // Ensure that codegen never sees alignment 0.
    Alignment = DL.getABITypeAlignment(ValTy);

  unsigned Size = DL.getTypeStoreSize(ValTy);

  if (IsVolatile)
    Flags |= MachineMemOperand::MOVolatile;
  if (IsNonTemporal)
    Flags |= MachineMemOperand::MONonTemporal;
  if (IsInvariant)
    Flags |= MachineMemOperand::MOInvariant;

  return FuncInfo.MF->getMachineMemOperand(MachinePointerInfo(Ptr), Flags, Size,
                                           Alignment, AAInfo, Ranges);
}

CmpInst::Predicate FastISel::optimizeCmpPredicate(const CmpInst *CI) const {
  // If both operands are the same, then try to optimize or fold the cmp.
  CmpInst::Predicate Predicate = CI->getPredicate();
  if (CI->getOperand(0) != CI->getOperand(1))
    return Predicate;

  switch (Predicate) {
  default: llvm_unreachable("Invalid predicate!");
  case CmpInst::FCMP_FALSE: Predicate = CmpInst::FCMP_FALSE; break;
  case CmpInst::FCMP_OEQ:   Predicate = CmpInst::FCMP_ORD;   break;
  case CmpInst::FCMP_OGT:   Predicate = CmpInst::FCMP_FALSE; break;
  case CmpInst::FCMP_OGE:   Predicate = CmpInst::FCMP_ORD;   break;
  case CmpInst::FCMP_OLT:   Predicate = CmpInst::FCMP_FALSE; break;
  case CmpInst::FCMP_OLE:   Predicate = CmpInst::FCMP_ORD;   break;
  case CmpInst::FCMP_ONE:   Predicate = CmpInst::FCMP_FALSE; break;
  case CmpInst::FCMP_ORD:   Predicate = CmpInst::FCMP_ORD;   break;
  case CmpInst::FCMP_UNO:   Predicate = CmpInst::FCMP_UNO;   break;
  case CmpInst::FCMP_UEQ:   Predicate = CmpInst::FCMP_TRUE;  break;
  case CmpInst::FCMP_UGT:   Predicate = CmpInst::FCMP_UNO;   break;
  case CmpInst::FCMP_UGE:   Predicate = CmpInst::FCMP_TRUE;  break;
  case CmpInst::FCMP_ULT:   Predicate = CmpInst::FCMP_UNO;   break;
  case CmpInst::FCMP_ULE:   Predicate = CmpInst::FCMP_TRUE;  break;
  case CmpInst::FCMP_UNE:   Predicate = CmpInst::FCMP_UNO;   break;
  case CmpInst::FCMP_TRUE:  Predicate = CmpInst::FCMP_TRUE;  break;

  case CmpInst::ICMP_EQ:    Predicate = CmpInst::FCMP_TRUE;  break;
  case CmpInst::ICMP_NE:    Predicate = CmpInst::FCMP_FALSE; break;
  case CmpInst::ICMP_UGT:   Predicate = CmpInst::FCMP_FALSE; break;
  case CmpInst::ICMP_UGE:   Predicate = CmpInst::FCMP_TRUE;  break;
  case CmpInst::ICMP_ULT:   Predicate = CmpInst::FCMP_FALSE; break;
  case CmpInst::ICMP_ULE:   Predicate = CmpInst::FCMP_TRUE;  break;
  case CmpInst::ICMP_SGT:   Predicate = CmpInst::FCMP_FALSE; break;
  case CmpInst::ICMP_SGE:   Predicate = CmpInst::FCMP_TRUE;  break;
  case CmpInst::ICMP_SLT:   Predicate = CmpInst::FCMP_FALSE; break;
  case CmpInst::ICMP_SLE:   Predicate = CmpInst::FCMP_TRUE;  break;
  }

  return Predicate;
}
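
// Worked example (illustrative): with identical operands an FP compare only
// depends on whether the value is NaN, and an integer compare folds to a
// constant, so optimizeCmpPredicate maps, for instance:
//
//   fcmp oeq double %x, %x  -->  fcmp ord double %x, %x   ; true unless NaN
//   fcmp une double %x, %x  -->  fcmp uno double %x, %x   ; true only if NaN
//   icmp eq  i32 %x, %x     -->  FCMP_TRUE  (always true)
//   icmp slt i32 %x, %x     -->  FCMP_FALSE (always false)
//
// Callers can then emit a cheaper ordered/unordered check or a constant
// instead of a full comparison.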