// WebAssemblyISelLowering.cpp (LLVM revision 321369)
//=- WebAssemblyISelLowering.cpp - WebAssembly DAG Lowering Implementation -==//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
///
/// \file
/// \brief This file implements the WebAssemblyTargetLowering class.
///
//===----------------------------------------------------------------------===//

#include "WebAssemblyISelLowering.h"
#include "MCTargetDesc/WebAssemblyMCTargetDesc.h"
#include "WebAssemblyMachineFunctionInfo.h"
#include "WebAssemblySubtarget.h"
#include "WebAssemblyTargetMachine.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/MachineJumpTableInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/DiagnosticPrinter.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetOptions.h"
using namespace llvm;

#define DEBUG_TYPE "wasm-lower"

/// Constructor: declare to the target-independent legalization framework
/// which operations and types WebAssembly supports natively (Legal), which
/// must be rewritten by the legalizer (Expand/Promote), and which this class
/// lowers itself (Custom). The order of these calls is not significant, but
/// every declaration here has a matching lowering path either in isel
/// patterns or in LowerOperation below.
WebAssemblyTargetLowering::WebAssemblyTargetLowering(
    const TargetMachine &TM, const WebAssemblySubtarget &STI)
    : TargetLowering(TM), Subtarget(&STI) {
  // Pointer width depends on the wasm32/wasm64 subtarget feature.
  auto MVTPtr = Subtarget->hasAddr64() ? MVT::i64 : MVT::i32;

  // Booleans always contain 0 or 1.
  setBooleanContents(ZeroOrOneBooleanContent);
  // WebAssembly does not produce floating-point exceptions on normal floating
  // point operations.
  setHasFloatingPointExceptions(false);
  // We don't know the microarchitecture here, so just reduce register pressure.
  setSchedulingPreference(Sched::RegPressure);
  // Tell ISel that we have a stack pointer.
  setStackPointerRegisterToSaveRestore(
      Subtarget->hasAddr64() ? WebAssembly::SP64 : WebAssembly::SP32);
  // Set up the register classes.
  addRegisterClass(MVT::i32, &WebAssembly::I32RegClass);
  addRegisterClass(MVT::i64, &WebAssembly::I64RegClass);
  addRegisterClass(MVT::f32, &WebAssembly::F32RegClass);
  addRegisterClass(MVT::f64, &WebAssembly::F64RegClass);
  // All 128-bit vector shapes share the single V128 register class.
  if (Subtarget->hasSIMD128()) {
    addRegisterClass(MVT::v16i8, &WebAssembly::V128RegClass);
    addRegisterClass(MVT::v8i16, &WebAssembly::V128RegClass);
    addRegisterClass(MVT::v4i32, &WebAssembly::V128RegClass);
    addRegisterClass(MVT::v4f32, &WebAssembly::V128RegClass);
  }
  // Compute derived properties from the register classes.
  computeRegisterProperties(Subtarget->getRegisterInfo());

  // Address-forming nodes are custom-lowered (see the Lower* hooks below).
  setOperationAction(ISD::GlobalAddress, MVTPtr, Custom);
  setOperationAction(ISD::ExternalSymbol, MVTPtr, Custom);
  setOperationAction(ISD::JumpTable, MVTPtr, Custom);
  setOperationAction(ISD::BlockAddress, MVTPtr, Custom);
  setOperationAction(ISD::BRIND, MVT::Other, Custom);

  // Take the default expansion for va_arg, va_copy, and va_end. There is no
  // default action for va_start, so we do that custom.
  setOperationAction(ISD::VASTART, MVT::Other, Custom);
  setOperationAction(ISD::VAARG, MVT::Other, Expand);
  setOperationAction(ISD::VACOPY, MVT::Other, Expand);
  setOperationAction(ISD::VAEND, MVT::Other, Expand);

  for (auto T : {MVT::f32, MVT::f64}) {
    // Don't expand the floating-point types to constant pools.
    setOperationAction(ISD::ConstantFP, T, Legal);
    // Expand floating-point comparisons.
    for (auto CC : {ISD::SETO, ISD::SETUO, ISD::SETUEQ, ISD::SETONE,
                    ISD::SETULT, ISD::SETULE, ISD::SETUGT, ISD::SETUGE})
      setCondCodeAction(CC, T, Expand);
    // Expand floating-point library function operators.
    for (auto Op : {ISD::FSIN, ISD::FCOS, ISD::FSINCOS, ISD::FPOW, ISD::FREM,
                    ISD::FMA})
      setOperationAction(Op, T, Expand);
    // Note supported floating-point library function operators that otherwise
    // default to expand.
    for (auto Op :
         {ISD::FCEIL, ISD::FFLOOR, ISD::FTRUNC, ISD::FNEARBYINT, ISD::FRINT})
      setOperationAction(Op, T, Legal);
    // Support minnan and maxnan, which otherwise default to expand.
    setOperationAction(ISD::FMINNAN, T, Legal);
    setOperationAction(ISD::FMAXNAN, T, Legal);
    // WebAssembly currently has no builtin f16 support.
    setOperationAction(ISD::FP16_TO_FP, T, Expand);
    setOperationAction(ISD::FP_TO_FP16, T, Expand);
    setLoadExtAction(ISD::EXTLOAD, T, MVT::f16, Expand);
    setTruncStoreAction(T, MVT::f16, Expand);
  }

  for (auto T : {MVT::i32, MVT::i64}) {
    // Expand unavailable integer operations.
    for (auto Op :
         {ISD::BSWAP, ISD::SMUL_LOHI, ISD::UMUL_LOHI,
          ISD::MULHS, ISD::MULHU, ISD::SDIVREM, ISD::UDIVREM, ISD::SHL_PARTS,
          ISD::SRA_PARTS, ISD::SRL_PARTS, ISD::ADDC, ISD::ADDE, ISD::SUBC,
          ISD::SUBE}) {
      setOperationAction(Op, T, Expand);
    }
  }

  // As a special case, these operators use the type to mean the type to
  // sign-extend from.
  for (auto T : {MVT::i1, MVT::i8, MVT::i16, MVT::i32})
    setOperationAction(ISD::SIGN_EXTEND_INREG, T, Expand);

  // Dynamic stack allocation: use the default expansion.
  setOperationAction(ISD::STACKSAVE, MVT::Other, Expand);
  setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand);
  setOperationAction(ISD::DYNAMIC_STACKALLOC, MVTPtr, Expand);

  // FrameIndex and CopyToReg need custom handling; see LowerFrameIndex and
  // LowerCopyToReg below.
  setOperationAction(ISD::FrameIndex, MVT::i32, Custom);
  setOperationAction(ISD::CopyToReg, MVT::Other, Custom);

  // Expand these forms; we pattern-match the forms that we can handle in isel.
  for (auto T : {MVT::i32, MVT::i64, MVT::f32, MVT::f64})
    for (auto Op : {ISD::BR_CC, ISD::SELECT_CC})
      setOperationAction(Op, T, Expand);

  // We have custom switch handling.
  setOperationAction(ISD::BR_JT, MVT::Other, Custom);

  // WebAssembly doesn't have:
  // - Floating-point extending loads.
  // - Floating-point truncating stores.
  // - i1 extending loads.
  setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f32, Expand);
  setTruncStoreAction(MVT::f64, MVT::f32, Expand);
  for (auto T : MVT::integer_valuetypes())
    for (auto Ext : {ISD::EXTLOAD, ISD::ZEXTLOAD, ISD::SEXTLOAD})
      setLoadExtAction(Ext, T, MVT::i1, Promote);

  // Trap lowers to wasm unreachable
  setOperationAction(ISD::TRAP, MVT::Other, Legal);
}

/// Create the target-specific FastISel instance; simply delegates to the
/// WebAssembly FastISel factory.
FastISel *WebAssemblyTargetLowering::createFastISel(
    FunctionLoweringInfo &FuncInfo, const TargetLibraryInfo *LibInfo) const {
  return WebAssembly::createFastISel(FuncInfo, LibInfo);
}

/// WebAssembly can fold a constant offset into any global address.
bool WebAssemblyTargetLowering::isOffsetFoldingLegal(
    const GlobalAddressSDNode * /*GA*/) const {
  // All offsets can be folded.
  return true;
}

/// Pick the integer type used for shift amounts on a value of type VT:
/// the width of VT rounded up to a power of two (minimum 8 bits), except
/// that shifts wider than 64 bits become libcalls, whose count operand is
/// always i32.
MVT WebAssemblyTargetLowering::getScalarShiftAmountTy(const DataLayout & /*DL*/,
                                                      EVT VT) const {
  unsigned BitWidth = NextPowerOf2(VT.getSizeInBits() - 1);
  if (BitWidth > 1 && BitWidth < 8) BitWidth = 8;

  if (BitWidth > 64) {
    // The shift will be lowered to a libcall, and compiler-rt libcalls expect
    // the count to be an i32.
    BitWidth = 32;
    assert(BitWidth >= Log2_32_Ceil(VT.getSizeInBits()) &&
           "32-bit shift counts ought to be enough for anyone");
  }

  MVT Result = MVT::getIntegerVT(BitWidth);
  assert(Result != MVT::INVALID_SIMPLE_VALUE_TYPE &&
         "Unable to represent scalar shift amount type");
  return Result;
}

/// Map a WebAssemblyISD opcode to its name for debug output. The cases are
/// generated from WebAssemblyISD.def; unknown opcodes return nullptr.
const char *WebAssemblyTargetLowering::getTargetNodeName(
    unsigned Opcode) const {
  switch (static_cast<WebAssemblyISD::NodeType>(Opcode)) {
    case WebAssemblyISD::FIRST_NUMBER:
      break;
#define HANDLE_NODETYPE(NODE) \
  case WebAssemblyISD::NODE:  \
    return "WebAssemblyISD::" #NODE;
#include "WebAssemblyISD.def"
#undef HANDLE_NODETYPE
  }
  return nullptr;
}

/// Resolve the inline-asm 'r' constraint to a WebAssembly register class
/// based on the operand type; anything else falls through to the default
/// TargetLowering handling.
std::pair<unsigned, const TargetRegisterClass *>
WebAssemblyTargetLowering::getRegForInlineAsmConstraint(
    const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const {
  // First, see if this is a constraint that directly corresponds to a
  // WebAssembly register class.
  if (Constraint.size() == 1) {
    switch (Constraint[0]) {
      case 'r':
        assert(VT != MVT::iPTR && "Pointer MVT not expected here");
        if (Subtarget->hasSIMD128() && VT.isVector()) {
          if (VT.getSizeInBits() == 128)
            return std::make_pair(0U, &WebAssembly::V128RegClass);
        }
        if (VT.isInteger() && !VT.isVector()) {
          if (VT.getSizeInBits() <= 32)
            return std::make_pair(0U, &WebAssembly::I32RegClass);
          if (VT.getSizeInBits() <= 64)
            return std::make_pair(0U, &WebAssembly::I64RegClass);
        }
        break;
      default:
        break;
    }
  }

  return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
}

bool WebAssemblyTargetLowering::isCheapToSpeculateCttz() const {
  // Assume ctz is a relatively cheap operation.
  return true;
}

bool WebAssemblyTargetLowering::isCheapToSpeculateCtlz() const {
  // Assume clz is a relatively cheap operation.
  return true;
}

/// Accept only addressing modes WebAssembly can encode: a base plus a
/// non-negative constant offset, with no scaled index register.
bool WebAssemblyTargetLowering::isLegalAddressingMode(const DataLayout &DL,
                                                      const AddrMode &AM,
                                                      Type *Ty,
                                                      unsigned AS) const {
  // WebAssembly offsets are added as unsigned without wrapping. The
  // isLegalAddressingMode gives us no way to determine if wrapping could be
  // happening, so we approximate this by accepting only non-negative offsets.
  if (AM.BaseOffs < 0) return false;

  // WebAssembly has no scale register operands.
  if (AM.Scale != 0) return false;

  // Everything else is legal.
  return true;
}

bool WebAssemblyTargetLowering::allowsMisalignedMemoryAccesses(
    EVT /*VT*/, unsigned /*AddrSpace*/, unsigned /*Align*/, bool *Fast) const {
  // WebAssembly supports unaligned accesses, though it should be declared
  // with the p2align attribute on loads and stores which do so, and there
  // may be a performance impact. We tell LLVM they're "fast" because
  // for the kinds of things that LLVM uses this for (merging adjacent stores
  // of constants, etc.), WebAssembly implementations will either want the
  // unaligned access or they'll split anyway.
  if (Fast) *Fast = true;
  return true;
}

bool WebAssemblyTargetLowering::isIntDivCheap(EVT VT,
                                              AttributeList Attr) const {
  // The current thinking is that wasm engines will perform this optimization,
  // so we can save on code size.
  return true;
}

//===----------------------------------------------------------------------===//
// WebAssembly Lowering private implementation.
//===----------------------------------------------------------------------===//

//===----------------------------------------------------------------------===//
// Lowering Code
//===----------------------------------------------------------------------===//

/// Emit a diagnostic for an unsupported construct. Note this does not abort
/// compilation by itself; callers typically continue and produce best-effort
/// (but incorrect) output after diagnosing.
static void fail(const SDLoc &DL, SelectionDAG &DAG, const char *msg) {
  MachineFunction &MF = DAG.getMachineFunction();
  DAG.getContext()->diagnose(
      DiagnosticInfoUnsupported(*MF.getFunction(), msg, DL.getDebugLoc()));
}

// Test whether the given calling convention is supported.
static bool CallingConvSupported(CallingConv::ID CallConv) {
  // We currently support the language-independent target-independent
  // conventions. We don't yet have a way to annotate calls with properties
  // like "cold", and we don't have any call-clobbered registers, so these are
  // mostly all handled the same.
  return CallConv == CallingConv::C || CallConv == CallingConv::Fast ||
         CallConv == CallingConv::Cold ||
         CallConv == CallingConv::PreserveMost ||
         CallConv == CallingConv::PreserveAll ||
         CallConv == CallingConv::CXX_FAST_TLS;
}

/// Lower an outgoing call to a WebAssemblyISD::CALL0/CALL1 node. Fixed
/// arguments become direct operands of the call node; for varargs calls the
/// non-fixed arguments are spilled to a caller-allocated stack buffer whose
/// address is passed as a trailing operand. At most one value may be
/// returned (diagnosed via fail() otherwise).
SDValue WebAssemblyTargetLowering::LowerCall(
    CallLoweringInfo &CLI, SmallVectorImpl<SDValue> &InVals) const {
  SelectionDAG &DAG = CLI.DAG;
  SDLoc DL = CLI.DL;
  SDValue Chain = CLI.Chain;
  SDValue Callee = CLI.Callee;
  MachineFunction &MF = DAG.getMachineFunction();
  auto Layout = MF.getDataLayout();

  CallingConv::ID CallConv = CLI.CallConv;
  if (!CallingConvSupported(CallConv))
    fail(DL, DAG,
         "WebAssembly doesn't support language-specific or target-specific "
         "calling conventions yet");
  if (CLI.IsPatchPoint)
    fail(DL, DAG, "WebAssembly doesn't support patch point yet");

  // WebAssembly doesn't currently support explicit tail calls. If they are
  // required, fail. Otherwise, just disable them.
  if ((CallConv == CallingConv::Fast && CLI.IsTailCall &&
       MF.getTarget().Options.GuaranteedTailCallOpt) ||
      (CLI.CS && CLI.CS->isMustTailCall()))
    fail(DL, DAG, "WebAssembly doesn't support tail call yet");
  CLI.IsTailCall = false;

  SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
  if (Ins.size() > 1)
    fail(DL, DAG, "WebAssembly doesn't support more than 1 returned value yet");

  SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
  SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
  for (unsigned i = 0; i < Outs.size(); ++i) {
    const ISD::OutputArg &Out = Outs[i];
    SDValue &OutVal = OutVals[i];
    if (Out.Flags.isNest())
      fail(DL, DAG, "WebAssembly hasn't implemented nest arguments");
    if (Out.Flags.isInAlloca())
      fail(DL, DAG, "WebAssembly hasn't implemented inalloca arguments");
    if (Out.Flags.isInConsecutiveRegs())
      fail(DL, DAG, "WebAssembly hasn't implemented cons regs arguments");
    if (Out.Flags.isInConsecutiveRegsLast())
      fail(DL, DAG, "WebAssembly hasn't implemented cons regs last arguments");
    if (Out.Flags.isByVal() && Out.Flags.getByValSize() != 0) {
      // byval: make a caller-local copy of the pointee on the stack and pass
      // the address of the copy instead of the original pointer.
      auto &MFI = MF.getFrameInfo();
      int FI = MFI.CreateStackObject(Out.Flags.getByValSize(),
                                     Out.Flags.getByValAlign(),
                                     /*isSS=*/false);
      SDValue SizeNode =
          DAG.getConstant(Out.Flags.getByValSize(), DL, MVT::i32);
      SDValue FINode = DAG.getFrameIndex(FI, getPointerTy(Layout));
      Chain = DAG.getMemcpy(
          Chain, DL, FINode, OutVal, SizeNode, Out.Flags.getByValAlign(),
          /*isVolatile*/ false, /*AlwaysInline=*/false,
          /*isTailCall*/ false, MachinePointerInfo(), MachinePointerInfo());
      OutVal = FINode;
    }
  }

  bool IsVarArg = CLI.IsVarArg;
  unsigned NumFixedArgs = CLI.NumFixedArgs;

  auto PtrVT = getPointerTy(Layout);

  // Analyze operands of the call, assigning locations to each operand.
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext());

  if (IsVarArg) {
    // Outgoing non-fixed arguments are placed in a buffer. First
    // compute their offsets and the total amount of buffer space needed.
    for (SDValue Arg :
         make_range(OutVals.begin() + NumFixedArgs, OutVals.end())) {
      EVT VT = Arg.getValueType();
      assert(VT != MVT::iPTR && "Legalized args should be concrete");
      Type *Ty = VT.getTypeForEVT(*DAG.getContext());
      unsigned Offset = CCInfo.AllocateStack(Layout.getTypeAllocSize(Ty),
                                             Layout.getABITypeAlignment(Ty));
      CCInfo.addLoc(CCValAssign::getMem(ArgLocs.size(), VT.getSimpleVT(),
                                        Offset, VT.getSimpleVT(),
                                        CCValAssign::Full));
    }
  }

  unsigned NumBytes = CCInfo.getAlignedCallFrameSize();

  SDValue FINode;
  if (IsVarArg && NumBytes) {
    // For non-fixed arguments, next emit stores to store the argument values
    // to the stack buffer at the offsets computed above.
    int FI = MF.getFrameInfo().CreateStackObject(NumBytes,
                                                 Layout.getStackAlignment(),
                                                 /*isSS=*/false);
    unsigned ValNo = 0;
    SmallVector<SDValue, 8> Chains;
    for (SDValue Arg :
         make_range(OutVals.begin() + NumFixedArgs, OutVals.end())) {
      assert(ArgLocs[ValNo].getValNo() == ValNo &&
             "ArgLocs should remain in order and only hold varargs args");
      unsigned Offset = ArgLocs[ValNo++].getLocMemOffset();
      FINode = DAG.getFrameIndex(FI, getPointerTy(Layout));
      SDValue Add = DAG.getNode(ISD::ADD, DL, PtrVT, FINode,
                                DAG.getConstant(Offset, DL, PtrVT));
      Chains.push_back(DAG.getStore(
          Chain, DL, Arg, Add,
          MachinePointerInfo::getFixedStack(MF, FI, Offset), 0));
    }
    // Join the independent stores into a single chain for the call.
    if (!Chains.empty())
      Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Chains);
  } else if (IsVarArg) {
    // Varargs call with an empty buffer: pass a null buffer pointer.
    FINode = DAG.getIntPtrConstant(0, DL);
  }

  // Compute the operands for the CALLn node.
  SmallVector<SDValue, 16> Ops;
  Ops.push_back(Chain);
  Ops.push_back(Callee);

  // Add all fixed arguments. Note that for non-varargs calls, NumFixedArgs
  // isn't reliable.
  Ops.append(OutVals.begin(),
             IsVarArg ? OutVals.begin() + NumFixedArgs : OutVals.end());
  // Add a pointer to the vararg buffer.
  if (IsVarArg) Ops.push_back(FINode);

  SmallVector<EVT, 8> InTys;
  for (const auto &In : Ins) {
    assert(!In.Flags.isByVal() && "byval is not valid for return values");
    assert(!In.Flags.isNest() && "nest is not valid for return values");
    if (In.Flags.isInAlloca())
      fail(DL, DAG, "WebAssembly hasn't implemented inalloca return values");
    if (In.Flags.isInConsecutiveRegs())
      fail(DL, DAG, "WebAssembly hasn't implemented cons regs return values");
    if (In.Flags.isInConsecutiveRegsLast())
      fail(DL, DAG,
           "WebAssembly hasn't implemented cons regs last return values");
    // Ignore In.getOrigAlign() because all our arguments are passed in
    // registers.
    InTys.push_back(In.VT);
  }
  InTys.push_back(MVT::Other);
  SDVTList InTyList = DAG.getVTList(InTys);
  // CALL0 produces only a chain; CALL1 produces one value plus a chain.
  SDValue Res =
      DAG.getNode(Ins.empty() ? WebAssemblyISD::CALL0 : WebAssemblyISD::CALL1,
                  DL, InTyList, Ops);
  if (Ins.empty()) {
    Chain = Res;
  } else {
    InVals.push_back(Res);
    Chain = Res.getValue(1);
  }

  return Chain;
}

/// Returns true as long as there is at most one return value.
bool WebAssemblyTargetLowering::CanLowerReturn(
    CallingConv::ID /*CallConv*/, MachineFunction & /*MF*/, bool /*IsVarArg*/,
    const SmallVectorImpl<ISD::OutputArg> &Outs,
    LLVMContext & /*Context*/) const {
  // WebAssembly can't currently handle returning tuples.
  return Outs.size() <= 1;
}

/// Lower a return: the (at most one) return value becomes an operand of a
/// WebAssemblyISD::RETURN node.
SDValue WebAssemblyTargetLowering::LowerReturn(
    SDValue Chain, CallingConv::ID CallConv, bool /*IsVarArg*/,
    const SmallVectorImpl<ISD::OutputArg> &Outs,
    const SmallVectorImpl<SDValue> &OutVals, const SDLoc &DL,
    SelectionDAG &DAG) const {
  assert(Outs.size() <= 1 && "WebAssembly can only return up to one value");
  if (!CallingConvSupported(CallConv))
    fail(DL, DAG, "WebAssembly doesn't support non-C calling conventions");

  SmallVector<SDValue, 4> RetOps(1, Chain);
  RetOps.append(OutVals.begin(), OutVals.end());
  Chain = DAG.getNode(WebAssemblyISD::RETURN, DL, MVT::Other, RetOps);

  // Record the number and types of the return values.
  for (const ISD::OutputArg &Out : Outs) {
    assert(!Out.Flags.isByVal() && "byval is not valid for return values");
    assert(!Out.Flags.isNest() && "nest is not valid for return values");
    assert(Out.IsFixed && "non-fixed return value is not valid");
    if (Out.Flags.isInAlloca())
      fail(DL, DAG, "WebAssembly hasn't implemented inalloca results");
    if (Out.Flags.isInConsecutiveRegs())
      fail(DL, DAG, "WebAssembly hasn't implemented cons regs results");
    if (Out.Flags.isInConsecutiveRegsLast())
      fail(DL, DAG, "WebAssembly hasn't implemented cons regs last results");
  }

  return Chain;
}

/// Lower incoming arguments: each used argument becomes a
/// WebAssemblyISD::ARGUMENT node indexed by position, and the function's
/// parameter/result types are recorded in WebAssemblyFunctionInfo. For
/// varargs functions an extra trailing pointer argument locates the
/// caller-allocated vararg buffer.
SDValue WebAssemblyTargetLowering::LowerFormalArguments(
    SDValue Chain, CallingConv::ID CallConv, bool IsVarArg,
    const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
    SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
  if (!CallingConvSupported(CallConv))
    fail(DL, DAG, "WebAssembly doesn't support non-C calling conventions");

  MachineFunction &MF = DAG.getMachineFunction();
  auto *MFI = MF.getInfo<WebAssemblyFunctionInfo>();

  // Set up the incoming ARGUMENTS value, which serves to represent the
  // liveness of the incoming values before they're represented by virtual
  // registers.
  MF.getRegInfo().addLiveIn(WebAssembly::ARGUMENTS);

  for (const ISD::InputArg &In : Ins) {
    if (In.Flags.isInAlloca())
      fail(DL, DAG, "WebAssembly hasn't implemented inalloca arguments");
    if (In.Flags.isNest())
      fail(DL, DAG, "WebAssembly hasn't implemented nest arguments");
    if (In.Flags.isInConsecutiveRegs())
      fail(DL, DAG, "WebAssembly hasn't implemented cons regs arguments");
    if (In.Flags.isInConsecutiveRegsLast())
      fail(DL, DAG, "WebAssembly hasn't implemented cons regs last arguments");
    // Ignore In.getOrigAlign() because all our arguments are passed in
    // registers.
    InVals.push_back(
        In.Used
            ? DAG.getNode(WebAssemblyISD::ARGUMENT, DL, In.VT,
                          DAG.getTargetConstant(InVals.size(), DL, MVT::i32))
            : DAG.getUNDEF(In.VT));

    // Record the number and types of arguments.
    MFI->addParam(In.VT);
  }

  // Varargs are copied into a buffer allocated by the caller, and a pointer to
  // the buffer is passed as an argument.
  if (IsVarArg) {
    MVT PtrVT = getPointerTy(MF.getDataLayout());
    unsigned VarargVreg =
        MF.getRegInfo().createVirtualRegister(getRegClassFor(PtrVT));
    MFI->setVarargBufferVreg(VarargVreg);
    Chain = DAG.getCopyToReg(
        Chain, DL, VarargVreg,
        DAG.getNode(WebAssemblyISD::ARGUMENT, DL, PtrVT,
                    DAG.getTargetConstant(Ins.size(), DL, MVT::i32)));
    MFI->addParam(PtrVT);
  }

  // Record the number and types of results.
  SmallVector<MVT, 4> Params;
  SmallVector<MVT, 4> Results;
  ComputeSignatureVTs(*MF.getFunction(), DAG.getTarget(), Params, Results);
  for (MVT VT : Results)
    MFI->addResult(VT);

  return Chain;
}

//===----------------------------------------------------------------------===//
// Custom lowering hooks.
//===----------------------------------------------------------------------===//

/// Dispatch for all operations declared Custom in the constructor. Any
/// opcode not listed here indicates a mismatch between the constructor's
/// setOperationAction calls and this switch, hence the unreachable.
SDValue WebAssemblyTargetLowering::LowerOperation(SDValue Op,
                                                  SelectionDAG &DAG) const {
  SDLoc DL(Op);
  switch (Op.getOpcode()) {
    default:
      llvm_unreachable("unimplemented operation lowering");
      return SDValue();
    case ISD::FrameIndex:
      return LowerFrameIndex(Op, DAG);
    case ISD::GlobalAddress:
      return LowerGlobalAddress(Op, DAG);
    case ISD::ExternalSymbol:
      return LowerExternalSymbol(Op, DAG);
    case ISD::JumpTable:
      return LowerJumpTable(Op, DAG);
    case ISD::BR_JT:
      return LowerBR_JT(Op, DAG);
    case ISD::VASTART:
      return LowerVASTART(Op, DAG);
    case ISD::BlockAddress:
    case ISD::BRIND:
      fail(DL, DAG, "WebAssembly hasn't implemented computed gotos");
      return SDValue();
    case ISD::RETURNADDR:  // Probably nothing meaningful can be returned here.
      fail(DL, DAG, "WebAssembly hasn't implemented __builtin_return_address");
      return SDValue();
    case ISD::FRAMEADDR:
      return LowerFRAMEADDR(Op, DAG);
    case ISD::CopyToReg:
      return LowerCopyToReg(Op, DAG);
  }
}

SDValue WebAssemblyTargetLowering::LowerCopyToReg(SDValue Op,
                                                  SelectionDAG &DAG) const {
  SDValue Src = Op.getOperand(2);
  if (isa<FrameIndexSDNode>(Src.getNode())) {
    // CopyToReg nodes don't support FrameIndex operands. Other targets select
    // the FI to some LEA-like instruction, but since we don't have that, we
    // need to insert some kind of instruction that can take an FI operand and
    // produces a value usable by CopyToReg (i.e. in a vreg). So insert a dummy
    // copy_local between Op and its FI operand.
    SDValue Chain = Op.getOperand(0);
    SDLoc DL(Op);
    unsigned Reg = cast<RegisterSDNode>(Op.getOperand(1))->getReg();
    EVT VT = Src.getValueType();
    // NOTE(review): only i32/i64 are distinguished here; presumably a
    // FrameIndex is always pointer-typed so other types can't occur — confirm.
    SDValue Copy(
        DAG.getMachineNode(VT == MVT::i32 ? WebAssembly::COPY_I32
                                          : WebAssembly::COPY_I64,
                           DL, VT, Src),
        0);
    // Preserve the optional glue operand (operand 3) if the original
    // CopyToReg carried one.
    return Op.getNode()->getNumValues() == 1
               ? DAG.getCopyToReg(Chain, DL, Reg, Copy)
               : DAG.getCopyToReg(Chain, DL, Reg, Copy, Op.getNumOperands() == 4
                                                            ? Op.getOperand(3)
                                                            : SDValue());
  }
  return SDValue();
}

/// Replace a FrameIndex with an equivalent TargetFrameIndex so isel patterns
/// can match it directly.
SDValue WebAssemblyTargetLowering::LowerFrameIndex(SDValue Op,
                                                   SelectionDAG &DAG) const {
  int FI = cast<FrameIndexSDNode>(Op)->getIndex();
  return DAG.getTargetFrameIndex(FI, Op.getValueType());
}

SDValue WebAssemblyTargetLowering::LowerFRAMEADDR(SDValue Op,
                                                  SelectionDAG &DAG) const {
  // Non-zero depths are not supported by WebAssembly currently. Use the
  // legalizer's default expansion, which is to return 0 (what this function is
  // documented to do).
  if (Op.getConstantOperandVal(0) > 0)
    return SDValue();

  DAG.getMachineFunction().getFrameInfo().setFrameAddressIsTaken(true);
  EVT VT = Op.getValueType();
  unsigned FP =
      Subtarget->getRegisterInfo()->getFrameRegister(DAG.getMachineFunction());
  return DAG.getCopyFromReg(DAG.getEntryNode(), SDLoc(Op), FP, VT);
}

/// Wrap a global address in a WebAssemblyISD::Wrapper node. Only address
/// space 0 is supported (diagnosed otherwise).
SDValue WebAssemblyTargetLowering::LowerGlobalAddress(SDValue Op,
                                                      SelectionDAG &DAG) const {
  SDLoc DL(Op);
  const auto *GA = cast<GlobalAddressSDNode>(Op);
  EVT VT = Op.getValueType();
  assert(GA->getTargetFlags() == 0 &&
         "Unexpected target flags on generic GlobalAddressSDNode");
  if (GA->getAddressSpace() != 0)
    fail(DL, DAG, "WebAssembly only expects the 0 address space");
  return DAG.getNode(
      WebAssemblyISD::Wrapper, DL, VT,
      DAG.getTargetGlobalAddress(GA->getGlobal(), DL, VT, GA->getOffset()));
}

SDValue WebAssemblyTargetLowering::LowerExternalSymbol(
    SDValue Op, SelectionDAG &DAG) const {
  SDLoc DL(Op);
  const auto *ES = cast<ExternalSymbolSDNode>(Op);
  EVT VT = Op.getValueType();
  assert(ES->getTargetFlags() == 0 &&
         "Unexpected target flags on generic ExternalSymbolSDNode");
  // Set the TargetFlags to 0x1 which indicates that this is a "function"
  // symbol rather than a data symbol. We do this unconditionally even though
  // we don't know anything about the symbol other than its name, because all
  // external symbols used in target-independent SelectionDAG code are for
  // functions.
  return DAG.getNode(WebAssemblyISD::Wrapper, DL, VT,
                     DAG.getTargetExternalSymbol(ES->getSymbol(), VT,
                                                 /*TargetFlags=*/0x1));
}

SDValue WebAssemblyTargetLowering::LowerJumpTable(SDValue Op,
                                                  SelectionDAG &DAG) const {
  // There's no need for a Wrapper node because we always incorporate a jump
  // table operand into a BR_TABLE instruction, rather than ever
  // materializing it in a register.
  const JumpTableSDNode *JT = cast<JumpTableSDNode>(Op);
  return DAG.getTargetJumpTable(JT->getIndex(), Op.getValueType(),
                                JT->getTargetFlags());
}

/// Lower BR_JT to a WebAssemblyISD::BR_TABLE node whose operands are the
/// chain, the index, one basic block per jump-table entry, and a trailing
/// default destination.
SDValue WebAssemblyTargetLowering::LowerBR_JT(SDValue Op,
                                              SelectionDAG &DAG) const {
  SDLoc DL(Op);
  SDValue Chain = Op.getOperand(0);
  const auto *JT = cast<JumpTableSDNode>(Op.getOperand(1));
  SDValue Index = Op.getOperand(2);
  assert(JT->getTargetFlags() == 0 && "WebAssembly doesn't set target flags");

  SmallVector<SDValue, 8> Ops;
  Ops.push_back(Chain);
  Ops.push_back(Index);

  MachineJumpTableInfo *MJTI = DAG.getMachineFunction().getJumpTableInfo();
  const auto &MBBs = MJTI->getJumpTables()[JT->getIndex()].MBBs;

  // Add an operand for each case.
  for (auto MBB : MBBs) Ops.push_back(DAG.getBasicBlock(MBB));

  // TODO: For now, we just pick something arbitrary for a default case for
  // now. We really want to sniff out the guard and put in the real default
  // case (and delete the guard).
  Ops.push_back(DAG.getBasicBlock(MBBs[0]));

  return DAG.getNode(WebAssemblyISD::BR_TABLE, DL, MVT::Other, Ops);
}

/// Lower va_start: store the address of the vararg buffer (kept in the vreg
/// recorded by LowerFormalArguments) into the va_list object.
SDValue WebAssemblyTargetLowering::LowerVASTART(SDValue Op,
                                                SelectionDAG &DAG) const {
  SDLoc DL(Op);
  EVT PtrVT = getPointerTy(DAG.getMachineFunction().getDataLayout());

  auto *MFI = DAG.getMachineFunction().getInfo<WebAssemblyFunctionInfo>();
  const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();

  SDValue ArgN = DAG.getCopyFromReg(DAG.getEntryNode(), DL,
                                    MFI->getVarargBufferVreg(), PtrVT);
  return DAG.getStore(Op.getOperand(0), DL, ArgN, Op.getOperand(1),
                      MachinePointerInfo(SV), 0);
}

//===----------------------------------------------------------------------===//
// WebAssembly Optimization Hooks
//===----------------------------------------------------------------------===//
