// XCoreISelLowering.cpp -- listing of LLVM revision 199989
1//===-- XCoreISelLowering.cpp - XCore DAG Lowering Implementation ------===// 2// 3// The LLVM Compiler Infrastructure 4// 5// This file is distributed under the University of Illinois Open Source 6// License. See LICENSE.TXT for details. 7// 8//===----------------------------------------------------------------------===// 9// 10// This file implements the XCoreTargetLowering class. 11// 12//===----------------------------------------------------------------------===// 13 14#define DEBUG_TYPE "xcore-lower" 15 16#include "XCoreISelLowering.h" 17#include "XCoreMachineFunctionInfo.h" 18#include "XCore.h" 19#include "XCoreTargetObjectFile.h" 20#include "XCoreTargetMachine.h" 21#include "XCoreSubtarget.h" 22#include "llvm/DerivedTypes.h" 23#include "llvm/Function.h" 24#include "llvm/Intrinsics.h" 25#include "llvm/CallingConv.h" 26#include "llvm/GlobalVariable.h" 27#include "llvm/GlobalAlias.h" 28#include "llvm/CodeGen/CallingConvLower.h" 29#include "llvm/CodeGen/MachineFrameInfo.h" 30#include "llvm/CodeGen/MachineFunction.h" 31#include "llvm/CodeGen/MachineInstrBuilder.h" 32#include "llvm/CodeGen/MachineRegisterInfo.h" 33#include "llvm/CodeGen/SelectionDAGISel.h" 34#include "llvm/CodeGen/ValueTypes.h" 35#include "llvm/Support/Debug.h" 36#include "llvm/Support/ErrorHandling.h" 37#include "llvm/Support/raw_ostream.h" 38#include "llvm/ADT/VectorExtras.h" 39#include <queue> 40#include <set> 41using namespace llvm; 42 43const char *XCoreTargetLowering:: 44getTargetNodeName(unsigned Opcode) const 45{ 46 switch (Opcode) 47 { 48 case XCoreISD::BL : return "XCoreISD::BL"; 49 case XCoreISD::PCRelativeWrapper : return "XCoreISD::PCRelativeWrapper"; 50 case XCoreISD::DPRelativeWrapper : return "XCoreISD::DPRelativeWrapper"; 51 case XCoreISD::CPRelativeWrapper : return "XCoreISD::CPRelativeWrapper"; 52 case XCoreISD::STWSP : return "XCoreISD::STWSP"; 53 case XCoreISD::RETSP : return "XCoreISD::RETSP"; 54 case XCoreISD::LADD : return "XCoreISD::LADD"; 55 case XCoreISD::LSUB : return 
"XCoreISD::LSUB"; 56 default : return NULL; 57 } 58} 59 60XCoreTargetLowering::XCoreTargetLowering(XCoreTargetMachine &XTM) 61 : TargetLowering(XTM, new XCoreTargetObjectFile()), 62 TM(XTM), 63 Subtarget(*XTM.getSubtargetImpl()) { 64 65 // Set up the register classes. 66 addRegisterClass(MVT::i32, XCore::GRRegsRegisterClass); 67 68 // Compute derived properties from the register classes 69 computeRegisterProperties(); 70 71 // Division is expensive 72 setIntDivIsCheap(false); 73 74 setShiftAmountType(MVT::i32); 75 setStackPointerRegisterToSaveRestore(XCore::SP); 76 77 setSchedulingPreference(SchedulingForRegPressure); 78 79 // Use i32 for setcc operations results (slt, sgt, ...). 80 setBooleanContents(ZeroOrOneBooleanContent); 81 82 // XCore does not have the NodeTypes below. 83 setOperationAction(ISD::BR_CC, MVT::Other, Expand); 84 setOperationAction(ISD::SELECT_CC, MVT::i32, Custom); 85 setOperationAction(ISD::ADDC, MVT::i32, Expand); 86 setOperationAction(ISD::ADDE, MVT::i32, Expand); 87 setOperationAction(ISD::SUBC, MVT::i32, Expand); 88 setOperationAction(ISD::SUBE, MVT::i32, Expand); 89 90 // Stop the combiner recombining select and set_cc 91 setOperationAction(ISD::SELECT_CC, MVT::Other, Expand); 92 93 // 64bit 94 setOperationAction(ISD::ADD, MVT::i64, Custom); 95 setOperationAction(ISD::SUB, MVT::i64, Custom); 96 setOperationAction(ISD::MULHS, MVT::i32, Expand); 97 setOperationAction(ISD::MULHU, MVT::i32, Expand); 98 setOperationAction(ISD::SHL_PARTS, MVT::i32, Expand); 99 setOperationAction(ISD::SRA_PARTS, MVT::i32, Expand); 100 setOperationAction(ISD::SRL_PARTS, MVT::i32, Expand); 101 102 // Bit Manipulation 103 setOperationAction(ISD::CTPOP, MVT::i32, Expand); 104 setOperationAction(ISD::ROTL , MVT::i32, Expand); 105 setOperationAction(ISD::ROTR , MVT::i32, Expand); 106 107 setOperationAction(ISD::TRAP, MVT::Other, Legal); 108 109 // Expand jump tables for now 110 setOperationAction(ISD::BR_JT, MVT::Other, Expand); 111 setOperationAction(ISD::JumpTable, 
MVT::i32, Custom); 112 113 setOperationAction(ISD::GlobalAddress, MVT::i32, Custom); 114 setOperationAction(ISD::BlockAddress, MVT::i32 , Custom); 115 116 // Thread Local Storage 117 setOperationAction(ISD::GlobalTLSAddress, MVT::i32, Custom); 118 119 // Conversion of i64 -> double produces constantpool nodes 120 setOperationAction(ISD::ConstantPool, MVT::i32, Custom); 121 122 // Loads 123 setLoadExtAction(ISD::EXTLOAD, MVT::i1, Promote); 124 setLoadExtAction(ISD::ZEXTLOAD, MVT::i1, Promote); 125 setLoadExtAction(ISD::SEXTLOAD, MVT::i1, Promote); 126 127 setLoadExtAction(ISD::SEXTLOAD, MVT::i8, Expand); 128 setLoadExtAction(ISD::ZEXTLOAD, MVT::i16, Expand); 129 130 // Custom expand misaligned loads / stores. 131 setOperationAction(ISD::LOAD, MVT::i32, Custom); 132 setOperationAction(ISD::STORE, MVT::i32, Custom); 133 134 // Varargs 135 setOperationAction(ISD::VAEND, MVT::Other, Expand); 136 setOperationAction(ISD::VACOPY, MVT::Other, Expand); 137 setOperationAction(ISD::VAARG, MVT::Other, Custom); 138 setOperationAction(ISD::VASTART, MVT::Other, Custom); 139 140 // Dynamic stack 141 setOperationAction(ISD::STACKSAVE, MVT::Other, Expand); 142 setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand); 143 setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32, Expand); 144 145 maxStoresPerMemset = 4; 146 maxStoresPerMemmove = maxStoresPerMemcpy = 2; 147 148 // We have target-specific dag combine patterns for the following nodes: 149 setTargetDAGCombine(ISD::STORE); 150} 151 152SDValue XCoreTargetLowering:: 153LowerOperation(SDValue Op, SelectionDAG &DAG) { 154 switch (Op.getOpcode()) 155 { 156 case ISD::GlobalAddress: return LowerGlobalAddress(Op, DAG); 157 case ISD::GlobalTLSAddress: return LowerGlobalTLSAddress(Op, DAG); 158 case ISD::BlockAddress: return LowerBlockAddress(Op, DAG); 159 case ISD::ConstantPool: return LowerConstantPool(Op, DAG); 160 case ISD::JumpTable: return LowerJumpTable(Op, DAG); 161 case ISD::LOAD: return LowerLOAD(Op, DAG); 162 case ISD::STORE: 
return LowerSTORE(Op, DAG); 163 case ISD::SELECT_CC: return LowerSELECT_CC(Op, DAG); 164 case ISD::VAARG: return LowerVAARG(Op, DAG); 165 case ISD::VASTART: return LowerVASTART(Op, DAG); 166 // FIXME: Remove these when LegalizeDAGTypes lands. 167 case ISD::ADD: 168 case ISD::SUB: return ExpandADDSUB(Op.getNode(), DAG); 169 case ISD::FRAMEADDR: return LowerFRAMEADDR(Op, DAG); 170 default: 171 llvm_unreachable("unimplemented operand"); 172 return SDValue(); 173 } 174} 175 176/// ReplaceNodeResults - Replace the results of node with an illegal result 177/// type with new values built out of custom code. 178void XCoreTargetLowering::ReplaceNodeResults(SDNode *N, 179 SmallVectorImpl<SDValue>&Results, 180 SelectionDAG &DAG) { 181 switch (N->getOpcode()) { 182 default: 183 llvm_unreachable("Don't know how to custom expand this!"); 184 return; 185 case ISD::ADD: 186 case ISD::SUB: 187 Results.push_back(ExpandADDSUB(N, DAG)); 188 return; 189 } 190} 191 192/// getFunctionAlignment - Return the Log2 alignment of this function. 
193unsigned XCoreTargetLowering:: 194getFunctionAlignment(const Function *) const { 195 return 1; 196} 197 198//===----------------------------------------------------------------------===// 199// Misc Lower Operation implementation 200//===----------------------------------------------------------------------===// 201 202SDValue XCoreTargetLowering:: 203LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) 204{ 205 DebugLoc dl = Op.getDebugLoc(); 206 SDValue Cond = DAG.getNode(ISD::SETCC, dl, MVT::i32, Op.getOperand(2), 207 Op.getOperand(3), Op.getOperand(4)); 208 return DAG.getNode(ISD::SELECT, dl, MVT::i32, Cond, Op.getOperand(0), 209 Op.getOperand(1)); 210} 211 212SDValue XCoreTargetLowering:: 213getGlobalAddressWrapper(SDValue GA, GlobalValue *GV, SelectionDAG &DAG) 214{ 215 // FIXME there is no actual debug info here 216 DebugLoc dl = GA.getDebugLoc(); 217 if (isa<Function>(GV)) { 218 return DAG.getNode(XCoreISD::PCRelativeWrapper, dl, MVT::i32, GA); 219 } 220 const GlobalVariable *GVar = dyn_cast<GlobalVariable>(GV); 221 if (!GVar) { 222 // If GV is an alias then use the aliasee to determine constness 223 if (const GlobalAlias *GA = dyn_cast<GlobalAlias>(GV)) 224 GVar = dyn_cast_or_null<GlobalVariable>(GA->resolveAliasedGlobal()); 225 } 226 bool isConst = GVar && GVar->isConstant(); 227 if (isConst) { 228 return DAG.getNode(XCoreISD::CPRelativeWrapper, dl, MVT::i32, GA); 229 } 230 return DAG.getNode(XCoreISD::DPRelativeWrapper, dl, MVT::i32, GA); 231} 232 233SDValue XCoreTargetLowering:: 234LowerGlobalAddress(SDValue Op, SelectionDAG &DAG) 235{ 236 GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal(); 237 SDValue GA = DAG.getTargetGlobalAddress(GV, MVT::i32); 238 // If it's a debug information descriptor, don't mess with it. 
239 if (DAG.isVerifiedDebugInfoDesc(Op)) 240 return GA; 241 return getGlobalAddressWrapper(GA, GV, DAG); 242} 243 244static inline SDValue BuildGetId(SelectionDAG &DAG, DebugLoc dl) { 245 return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::i32, 246 DAG.getConstant(Intrinsic::xcore_getid, MVT::i32)); 247} 248 249static inline bool isZeroLengthArray(const Type *Ty) { 250 const ArrayType *AT = dyn_cast_or_null<ArrayType>(Ty); 251 return AT && (AT->getNumElements() == 0); 252} 253 254SDValue XCoreTargetLowering:: 255LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) 256{ 257 // FIXME there isn't really debug info here 258 DebugLoc dl = Op.getDebugLoc(); 259 // transform to label + getid() * size 260 GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal(); 261 SDValue GA = DAG.getTargetGlobalAddress(GV, MVT::i32); 262 const GlobalVariable *GVar = dyn_cast<GlobalVariable>(GV); 263 if (!GVar) { 264 // If GV is an alias then use the aliasee to determine size 265 if (const GlobalAlias *GA = dyn_cast<GlobalAlias>(GV)) 266 GVar = dyn_cast_or_null<GlobalVariable>(GA->resolveAliasedGlobal()); 267 } 268 if (! 
GVar) { 269 llvm_unreachable("Thread local object not a GlobalVariable?"); 270 return SDValue(); 271 } 272 const Type *Ty = cast<PointerType>(GV->getType())->getElementType(); 273 if (!Ty->isSized() || isZeroLengthArray(Ty)) { 274#ifndef NDEBUG 275 errs() << "Size of thread local object " << GVar->getName() 276 << " is unknown\n"; 277#endif 278 llvm_unreachable(0); 279 } 280 SDValue base = getGlobalAddressWrapper(GA, GV, DAG); 281 const TargetData *TD = TM.getTargetData(); 282 unsigned Size = TD->getTypeAllocSize(Ty); 283 SDValue offset = DAG.getNode(ISD::MUL, dl, MVT::i32, BuildGetId(DAG, dl), 284 DAG.getConstant(Size, MVT::i32)); 285 return DAG.getNode(ISD::ADD, dl, MVT::i32, base, offset); 286} 287 288SDValue XCoreTargetLowering:: 289LowerBlockAddress(SDValue Op, SelectionDAG &DAG) 290{ 291 DebugLoc DL = Op.getDebugLoc(); 292 293 BlockAddress *BA = cast<BlockAddressSDNode>(Op)->getBlockAddress(); 294 SDValue Result = DAG.getBlockAddress(BA, getPointerTy(), /*isTarget=*/true); 295 296 return DAG.getNode(XCoreISD::PCRelativeWrapper, DL, getPointerTy(), Result); 297} 298 299SDValue XCoreTargetLowering:: 300LowerConstantPool(SDValue Op, SelectionDAG &DAG) 301{ 302 ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op); 303 // FIXME there isn't really debug info here 304 DebugLoc dl = CP->getDebugLoc(); 305 EVT PtrVT = Op.getValueType(); 306 SDValue Res; 307 if (CP->isMachineConstantPoolEntry()) { 308 Res = DAG.getTargetConstantPool(CP->getMachineCPVal(), PtrVT, 309 CP->getAlignment()); 310 } else { 311 Res = DAG.getTargetConstantPool(CP->getConstVal(), PtrVT, 312 CP->getAlignment()); 313 } 314 return DAG.getNode(XCoreISD::CPRelativeWrapper, dl, MVT::i32, Res); 315} 316 317SDValue XCoreTargetLowering:: 318LowerJumpTable(SDValue Op, SelectionDAG &DAG) 319{ 320 // FIXME there isn't really debug info here 321 DebugLoc dl = Op.getDebugLoc(); 322 EVT PtrVT = Op.getValueType(); 323 JumpTableSDNode *JT = cast<JumpTableSDNode>(Op); 324 SDValue JTI = 
DAG.getTargetJumpTable(JT->getIndex(), PtrVT); 325 return DAG.getNode(XCoreISD::DPRelativeWrapper, dl, MVT::i32, JTI); 326} 327 328static bool 329IsWordAlignedBasePlusConstantOffset(SDValue Addr, SDValue &AlignedBase, 330 int64_t &Offset) 331{ 332 if (Addr.getOpcode() != ISD::ADD) { 333 return false; 334 } 335 ConstantSDNode *CN = 0; 336 if (!(CN = dyn_cast<ConstantSDNode>(Addr.getOperand(1)))) { 337 return false; 338 } 339 int64_t off = CN->getSExtValue(); 340 const SDValue &Base = Addr.getOperand(0); 341 const SDValue *Root = &Base; 342 if (Base.getOpcode() == ISD::ADD && 343 Base.getOperand(1).getOpcode() == ISD::SHL) { 344 ConstantSDNode *CN = dyn_cast<ConstantSDNode>(Base.getOperand(1) 345 .getOperand(1)); 346 if (CN && (CN->getSExtValue() >= 2)) { 347 Root = &Base.getOperand(0); 348 } 349 } 350 if (isa<FrameIndexSDNode>(*Root)) { 351 // All frame indicies are word aligned 352 AlignedBase = Base; 353 Offset = off; 354 return true; 355 } 356 if (Root->getOpcode() == XCoreISD::DPRelativeWrapper || 357 Root->getOpcode() == XCoreISD::CPRelativeWrapper) { 358 // All dp / cp relative addresses are word aligned 359 AlignedBase = Base; 360 Offset = off; 361 return true; 362 } 363 return false; 364} 365 366SDValue XCoreTargetLowering:: 367LowerLOAD(SDValue Op, SelectionDAG &DAG) 368{ 369 LoadSDNode *LD = cast<LoadSDNode>(Op); 370 assert(LD->getExtensionType() == ISD::NON_EXTLOAD && 371 "Unexpected extension type"); 372 assert(LD->getMemoryVT() == MVT::i32 && "Unexpected load EVT"); 373 if (allowsUnalignedMemoryAccesses(LD->getMemoryVT())) { 374 return SDValue(); 375 } 376 unsigned ABIAlignment = getTargetData()-> 377 getABITypeAlignment(LD->getMemoryVT().getTypeForEVT(*DAG.getContext())); 378 // Leave aligned load alone. 
379 if (LD->getAlignment() >= ABIAlignment) { 380 return SDValue(); 381 } 382 SDValue Chain = LD->getChain(); 383 SDValue BasePtr = LD->getBasePtr(); 384 DebugLoc dl = Op.getDebugLoc(); 385 386 SDValue Base; 387 int64_t Offset; 388 if (!LD->isVolatile() && 389 IsWordAlignedBasePlusConstantOffset(BasePtr, Base, Offset)) { 390 if (Offset % 4 == 0) { 391 // We've managed to infer better alignment information than the load 392 // already has. Use an aligned load. 393 return DAG.getLoad(getPointerTy(), dl, Chain, BasePtr, NULL, 4); 394 } 395 // Lower to 396 // ldw low, base[offset >> 2] 397 // ldw high, base[(offset >> 2) + 1] 398 // shr low_shifted, low, (offset & 0x3) * 8 399 // shl high_shifted, high, 32 - (offset & 0x3) * 8 400 // or result, low_shifted, high_shifted 401 SDValue LowOffset = DAG.getConstant(Offset & ~0x3, MVT::i32); 402 SDValue HighOffset = DAG.getConstant((Offset & ~0x3) + 4, MVT::i32); 403 SDValue LowShift = DAG.getConstant((Offset & 0x3) * 8, MVT::i32); 404 SDValue HighShift = DAG.getConstant(32 - (Offset & 0x3) * 8, MVT::i32); 405 406 SDValue LowAddr = DAG.getNode(ISD::ADD, dl, MVT::i32, Base, LowOffset); 407 SDValue HighAddr = DAG.getNode(ISD::ADD, dl, MVT::i32, Base, HighOffset); 408 409 SDValue Low = DAG.getLoad(getPointerTy(), dl, Chain, 410 LowAddr, NULL, 4); 411 SDValue High = DAG.getLoad(getPointerTy(), dl, Chain, 412 HighAddr, NULL, 4); 413 SDValue LowShifted = DAG.getNode(ISD::SRL, dl, MVT::i32, Low, LowShift); 414 SDValue HighShifted = DAG.getNode(ISD::SHL, dl, MVT::i32, High, HighShift); 415 SDValue Result = DAG.getNode(ISD::OR, dl, MVT::i32, LowShifted, HighShifted); 416 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Low.getValue(1), 417 High.getValue(1)); 418 SDValue Ops[] = { Result, Chain }; 419 return DAG.getMergeValues(Ops, 2, dl); 420 } 421 422 if (LD->getAlignment() == 2) { 423 int SVOffset = LD->getSrcValueOffset(); 424 SDValue Low = DAG.getExtLoad(ISD::ZEXTLOAD, dl, MVT::i32, Chain, 425 BasePtr, LD->getSrcValue(), 
SVOffset, MVT::i16, 426 LD->isVolatile(), 2); 427 SDValue HighAddr = DAG.getNode(ISD::ADD, dl, MVT::i32, BasePtr, 428 DAG.getConstant(2, MVT::i32)); 429 SDValue High = DAG.getExtLoad(ISD::EXTLOAD, dl, MVT::i32, Chain, 430 HighAddr, LD->getSrcValue(), SVOffset + 2, 431 MVT::i16, LD->isVolatile(), 2); 432 SDValue HighShifted = DAG.getNode(ISD::SHL, dl, MVT::i32, High, 433 DAG.getConstant(16, MVT::i32)); 434 SDValue Result = DAG.getNode(ISD::OR, dl, MVT::i32, Low, HighShifted); 435 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Low.getValue(1), 436 High.getValue(1)); 437 SDValue Ops[] = { Result, Chain }; 438 return DAG.getMergeValues(Ops, 2, dl); 439 } 440 441 // Lower to a call to __misaligned_load(BasePtr). 442 const Type *IntPtrTy = getTargetData()->getIntPtrType(*DAG.getContext()); 443 TargetLowering::ArgListTy Args; 444 TargetLowering::ArgListEntry Entry; 445 446 Entry.Ty = IntPtrTy; 447 Entry.Node = BasePtr; 448 Args.push_back(Entry); 449 450 std::pair<SDValue, SDValue> CallResult = 451 LowerCallTo(Chain, IntPtrTy, false, false, 452 false, false, 0, CallingConv::C, false, 453 /*isReturnValueUsed=*/true, 454 DAG.getExternalSymbol("__misaligned_load", getPointerTy()), 455 Args, DAG, dl); 456 457 SDValue Ops[] = 458 { CallResult.first, CallResult.second }; 459 460 return DAG.getMergeValues(Ops, 2, dl); 461} 462 463SDValue XCoreTargetLowering:: 464LowerSTORE(SDValue Op, SelectionDAG &DAG) 465{ 466 StoreSDNode *ST = cast<StoreSDNode>(Op); 467 assert(!ST->isTruncatingStore() && "Unexpected store type"); 468 assert(ST->getMemoryVT() == MVT::i32 && "Unexpected store EVT"); 469 if (allowsUnalignedMemoryAccesses(ST->getMemoryVT())) { 470 return SDValue(); 471 } 472 unsigned ABIAlignment = getTargetData()-> 473 getABITypeAlignment(ST->getMemoryVT().getTypeForEVT(*DAG.getContext())); 474 // Leave aligned store alone. 
475 if (ST->getAlignment() >= ABIAlignment) { 476 return SDValue(); 477 } 478 SDValue Chain = ST->getChain(); 479 SDValue BasePtr = ST->getBasePtr(); 480 SDValue Value = ST->getValue(); 481 DebugLoc dl = Op.getDebugLoc(); 482 483 if (ST->getAlignment() == 2) { 484 int SVOffset = ST->getSrcValueOffset(); 485 SDValue Low = Value; 486 SDValue High = DAG.getNode(ISD::SRL, dl, MVT::i32, Value, 487 DAG.getConstant(16, MVT::i32)); 488 SDValue StoreLow = DAG.getTruncStore(Chain, dl, Low, BasePtr, 489 ST->getSrcValue(), SVOffset, MVT::i16, 490 ST->isVolatile(), 2); 491 SDValue HighAddr = DAG.getNode(ISD::ADD, dl, MVT::i32, BasePtr, 492 DAG.getConstant(2, MVT::i32)); 493 SDValue StoreHigh = DAG.getTruncStore(Chain, dl, High, HighAddr, 494 ST->getSrcValue(), SVOffset + 2, 495 MVT::i16, ST->isVolatile(), 2); 496 return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, StoreLow, StoreHigh); 497 } 498 499 // Lower to a call to __misaligned_store(BasePtr, Value). 500 const Type *IntPtrTy = getTargetData()->getIntPtrType(*DAG.getContext()); 501 TargetLowering::ArgListTy Args; 502 TargetLowering::ArgListEntry Entry; 503 504 Entry.Ty = IntPtrTy; 505 Entry.Node = BasePtr; 506 Args.push_back(Entry); 507 508 Entry.Node = Value; 509 Args.push_back(Entry); 510 511 std::pair<SDValue, SDValue> CallResult = 512 LowerCallTo(Chain, Type::getVoidTy(*DAG.getContext()), false, false, 513 false, false, 0, CallingConv::C, false, 514 /*isReturnValueUsed=*/true, 515 DAG.getExternalSymbol("__misaligned_store", getPointerTy()), 516 Args, DAG, dl); 517 518 return CallResult.second; 519} 520 521SDValue XCoreTargetLowering:: 522ExpandADDSUB(SDNode *N, SelectionDAG &DAG) 523{ 524 assert(N->getValueType(0) == MVT::i64 && 525 (N->getOpcode() == ISD::ADD || N->getOpcode() == ISD::SUB) && 526 "Unknown operand to lower!"); 527 DebugLoc dl = N->getDebugLoc(); 528 529 // Extract components 530 SDValue LHSL = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, 531 N->getOperand(0), DAG.getConstant(0, MVT::i32)); 532 
SDValue LHSH = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, 533 N->getOperand(0), DAG.getConstant(1, MVT::i32)); 534 SDValue RHSL = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, 535 N->getOperand(1), DAG.getConstant(0, MVT::i32)); 536 SDValue RHSH = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, 537 N->getOperand(1), DAG.getConstant(1, MVT::i32)); 538 539 // Expand 540 unsigned Opcode = (N->getOpcode() == ISD::ADD) ? XCoreISD::LADD : 541 XCoreISD::LSUB; 542 SDValue Zero = DAG.getConstant(0, MVT::i32); 543 SDValue Carry = DAG.getNode(Opcode, dl, DAG.getVTList(MVT::i32, MVT::i32), 544 LHSL, RHSL, Zero); 545 SDValue Lo(Carry.getNode(), 1); 546 547 SDValue Ignored = DAG.getNode(Opcode, dl, DAG.getVTList(MVT::i32, MVT::i32), 548 LHSH, RHSH, Carry); 549 SDValue Hi(Ignored.getNode(), 1); 550 // Merge the pieces 551 return DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Lo, Hi); 552} 553 554SDValue XCoreTargetLowering:: 555LowerVAARG(SDValue Op, SelectionDAG &DAG) 556{ 557 llvm_unreachable("unimplemented"); 558 // FIX Arguments passed by reference need a extra dereference. 
559 SDNode *Node = Op.getNode(); 560 DebugLoc dl = Node->getDebugLoc(); 561 const Value *V = cast<SrcValueSDNode>(Node->getOperand(2))->getValue(); 562 EVT VT = Node->getValueType(0); 563 SDValue VAList = DAG.getLoad(getPointerTy(), dl, Node->getOperand(0), 564 Node->getOperand(1), V, 0); 565 // Increment the pointer, VAList, to the next vararg 566 SDValue Tmp3 = DAG.getNode(ISD::ADD, dl, getPointerTy(), VAList, 567 DAG.getConstant(VT.getSizeInBits(), 568 getPointerTy())); 569 // Store the incremented VAList to the legalized pointer 570 Tmp3 = DAG.getStore(VAList.getValue(1), dl, Tmp3, Node->getOperand(1), V, 0); 571 // Load the actual argument out of the pointer VAList 572 return DAG.getLoad(VT, dl, Tmp3, VAList, NULL, 0); 573} 574 575SDValue XCoreTargetLowering:: 576LowerVASTART(SDValue Op, SelectionDAG &DAG) 577{ 578 DebugLoc dl = Op.getDebugLoc(); 579 // vastart stores the address of the VarArgsFrameIndex slot into the 580 // memory location argument 581 MachineFunction &MF = DAG.getMachineFunction(); 582 XCoreFunctionInfo *XFI = MF.getInfo<XCoreFunctionInfo>(); 583 SDValue Addr = DAG.getFrameIndex(XFI->getVarArgsFrameIndex(), MVT::i32); 584 const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue(); 585 return DAG.getStore(Op.getOperand(0), dl, Addr, Op.getOperand(1), SV, 0); 586} 587 588SDValue XCoreTargetLowering::LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) { 589 DebugLoc dl = Op.getDebugLoc(); 590 // Depths > 0 not supported yet! 
591 if (cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue() > 0) 592 return SDValue(); 593 594 MachineFunction &MF = DAG.getMachineFunction(); 595 const TargetRegisterInfo *RegInfo = getTargetMachine().getRegisterInfo(); 596 return DAG.getCopyFromReg(DAG.getEntryNode(), dl, 597 RegInfo->getFrameRegister(MF), MVT::i32); 598} 599 600//===----------------------------------------------------------------------===// 601// Calling Convention Implementation 602//===----------------------------------------------------------------------===// 603 604#include "XCoreGenCallingConv.inc" 605 606//===----------------------------------------------------------------------===// 607// Call Calling Convention Implementation 608//===----------------------------------------------------------------------===// 609 610/// XCore call implementation 611SDValue 612XCoreTargetLowering::LowerCall(SDValue Chain, SDValue Callee, 613 CallingConv::ID CallConv, bool isVarArg, 614 bool isTailCall, 615 const SmallVectorImpl<ISD::OutputArg> &Outs, 616 const SmallVectorImpl<ISD::InputArg> &Ins, 617 DebugLoc dl, SelectionDAG &DAG, 618 SmallVectorImpl<SDValue> &InVals) { 619 620 // For now, only CallingConv::C implemented 621 switch (CallConv) 622 { 623 default: 624 llvm_unreachable("Unsupported calling convention"); 625 case CallingConv::Fast: 626 case CallingConv::C: 627 return LowerCCCCallTo(Chain, Callee, CallConv, isVarArg, isTailCall, 628 Outs, Ins, dl, DAG, InVals); 629 } 630} 631 632/// LowerCCCCallTo - functions arguments are copied from virtual 633/// regs to (physical regs)/(stack frame), CALLSEQ_START and 634/// CALLSEQ_END are emitted. 635/// TODO: isTailCall, sret. 
636SDValue 637XCoreTargetLowering::LowerCCCCallTo(SDValue Chain, SDValue Callee, 638 CallingConv::ID CallConv, bool isVarArg, 639 bool isTailCall, 640 const SmallVectorImpl<ISD::OutputArg> &Outs, 641 const SmallVectorImpl<ISD::InputArg> &Ins, 642 DebugLoc dl, SelectionDAG &DAG, 643 SmallVectorImpl<SDValue> &InVals) { 644 645 // Analyze operands of the call, assigning locations to each operand. 646 SmallVector<CCValAssign, 16> ArgLocs; 647 CCState CCInfo(CallConv, isVarArg, getTargetMachine(), 648 ArgLocs, *DAG.getContext()); 649 650 // The ABI dictates there should be one stack slot available to the callee 651 // on function entry (for saving lr). 652 CCInfo.AllocateStack(4, 4); 653 654 CCInfo.AnalyzeCallOperands(Outs, CC_XCore); 655 656 // Get a count of how many bytes are to be pushed on the stack. 657 unsigned NumBytes = CCInfo.getNextStackOffset(); 658 659 Chain = DAG.getCALLSEQ_START(Chain,DAG.getConstant(NumBytes, 660 getPointerTy(), true)); 661 662 SmallVector<std::pair<unsigned, SDValue>, 4> RegsToPass; 663 SmallVector<SDValue, 12> MemOpChains; 664 665 // Walk the register/memloc assignments, inserting copies/loads. 666 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) { 667 CCValAssign &VA = ArgLocs[i]; 668 SDValue Arg = Outs[i].Val; 669 670 // Promote the value if needed. 
671 switch (VA.getLocInfo()) { 672 default: llvm_unreachable("Unknown loc info!"); 673 case CCValAssign::Full: break; 674 case CCValAssign::SExt: 675 Arg = DAG.getNode(ISD::SIGN_EXTEND, dl, VA.getLocVT(), Arg); 676 break; 677 case CCValAssign::ZExt: 678 Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, VA.getLocVT(), Arg); 679 break; 680 case CCValAssign::AExt: 681 Arg = DAG.getNode(ISD::ANY_EXTEND, dl, VA.getLocVT(), Arg); 682 break; 683 } 684 685 // Arguments that can be passed on register must be kept at 686 // RegsToPass vector 687 if (VA.isRegLoc()) { 688 RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg)); 689 } else { 690 assert(VA.isMemLoc()); 691 692 int Offset = VA.getLocMemOffset(); 693 694 MemOpChains.push_back(DAG.getNode(XCoreISD::STWSP, dl, MVT::Other, 695 Chain, Arg, 696 DAG.getConstant(Offset/4, MVT::i32))); 697 } 698 } 699 700 // Transform all store nodes into one single node because 701 // all store nodes are independent of each other. 702 if (!MemOpChains.empty()) 703 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, 704 &MemOpChains[0], MemOpChains.size()); 705 706 // Build a sequence of copy-to-reg nodes chained together with token 707 // chain and flag operands which copy the outgoing args into registers. 708 // The InFlag in necessary since all emited instructions must be 709 // stuck together. 710 SDValue InFlag; 711 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) { 712 Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first, 713 RegsToPass[i].second, InFlag); 714 InFlag = Chain.getValue(1); 715 } 716 717 // If the callee is a GlobalAddress node (quite common, every direct call is) 718 // turn it into a TargetGlobalAddress node so that legalize doesn't hack it. 719 // Likewise ExternalSymbol -> TargetExternalSymbol. 
720 if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) 721 Callee = DAG.getTargetGlobalAddress(G->getGlobal(), MVT::i32); 722 else if (ExternalSymbolSDNode *E = dyn_cast<ExternalSymbolSDNode>(Callee)) 723 Callee = DAG.getTargetExternalSymbol(E->getSymbol(), MVT::i32); 724 725 // XCoreBranchLink = #chain, #target_address, #opt_in_flags... 726 // = Chain, Callee, Reg#1, Reg#2, ... 727 // 728 // Returns a chain & a flag for retval copy to use. 729 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Flag); 730 SmallVector<SDValue, 8> Ops; 731 Ops.push_back(Chain); 732 Ops.push_back(Callee); 733 734 // Add argument registers to the end of the list so that they are 735 // known live into the call. 736 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) 737 Ops.push_back(DAG.getRegister(RegsToPass[i].first, 738 RegsToPass[i].second.getValueType())); 739 740 if (InFlag.getNode()) 741 Ops.push_back(InFlag); 742 743 Chain = DAG.getNode(XCoreISD::BL, dl, NodeTys, &Ops[0], Ops.size()); 744 InFlag = Chain.getValue(1); 745 746 // Create the CALLSEQ_END node. 747 Chain = DAG.getCALLSEQ_END(Chain, 748 DAG.getConstant(NumBytes, getPointerTy(), true), 749 DAG.getConstant(0, getPointerTy(), true), 750 InFlag); 751 InFlag = Chain.getValue(1); 752 753 // Handle result values, copying them out of physregs into vregs that we 754 // return. 755 return LowerCallResult(Chain, InFlag, CallConv, isVarArg, 756 Ins, dl, DAG, InVals); 757} 758 759/// LowerCallResult - Lower the result values of a call into the 760/// appropriate copies out of appropriate physical registers. 761SDValue 762XCoreTargetLowering::LowerCallResult(SDValue Chain, SDValue InFlag, 763 CallingConv::ID CallConv, bool isVarArg, 764 const SmallVectorImpl<ISD::InputArg> &Ins, 765 DebugLoc dl, SelectionDAG &DAG, 766 SmallVectorImpl<SDValue> &InVals) { 767 768 // Assign locations to each value returned by this call. 
769 SmallVector<CCValAssign, 16> RVLocs; 770 CCState CCInfo(CallConv, isVarArg, getTargetMachine(), 771 RVLocs, *DAG.getContext()); 772 773 CCInfo.AnalyzeCallResult(Ins, RetCC_XCore); 774 775 // Copy all of the result registers out of their specified physreg. 776 for (unsigned i = 0; i != RVLocs.size(); ++i) { 777 Chain = DAG.getCopyFromReg(Chain, dl, RVLocs[i].getLocReg(), 778 RVLocs[i].getValVT(), InFlag).getValue(1); 779 InFlag = Chain.getValue(2); 780 InVals.push_back(Chain.getValue(0)); 781 } 782 783 return Chain; 784} 785 786//===----------------------------------------------------------------------===// 787// Formal Arguments Calling Convention Implementation 788//===----------------------------------------------------------------------===// 789 790/// XCore formal arguments implementation 791SDValue 792XCoreTargetLowering::LowerFormalArguments(SDValue Chain, 793 CallingConv::ID CallConv, 794 bool isVarArg, 795 const SmallVectorImpl<ISD::InputArg> &Ins, 796 DebugLoc dl, 797 SelectionDAG &DAG, 798 SmallVectorImpl<SDValue> &InVals) { 799 switch (CallConv) 800 { 801 default: 802 llvm_unreachable("Unsupported calling convention"); 803 case CallingConv::C: 804 case CallingConv::Fast: 805 return LowerCCCArguments(Chain, CallConv, isVarArg, 806 Ins, dl, DAG, InVals); 807 } 808} 809 810/// LowerCCCArguments - transform physical registers into 811/// virtual registers and generate load operations for 812/// arguments places on the stack. 813/// TODO: sret 814SDValue 815XCoreTargetLowering::LowerCCCArguments(SDValue Chain, 816 CallingConv::ID CallConv, 817 bool isVarArg, 818 const SmallVectorImpl<ISD::InputArg> 819 &Ins, 820 DebugLoc dl, 821 SelectionDAG &DAG, 822 SmallVectorImpl<SDValue> &InVals) { 823 MachineFunction &MF = DAG.getMachineFunction(); 824 MachineFrameInfo *MFI = MF.getFrameInfo(); 825 MachineRegisterInfo &RegInfo = MF.getRegInfo(); 826 827 // Assign locations to all of the incoming arguments. 
828 SmallVector<CCValAssign, 16> ArgLocs; 829 CCState CCInfo(CallConv, isVarArg, getTargetMachine(), 830 ArgLocs, *DAG.getContext()); 831 832 CCInfo.AnalyzeFormalArguments(Ins, CC_XCore); 833 834 unsigned StackSlotSize = XCoreFrameInfo::stackSlotSize(); 835 836 unsigned LRSaveSize = StackSlotSize; 837 838 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) { 839 840 CCValAssign &VA = ArgLocs[i]; 841 842 if (VA.isRegLoc()) { 843 // Arguments passed in registers 844 EVT RegVT = VA.getLocVT(); 845 switch (RegVT.getSimpleVT().SimpleTy) { 846 default: 847 { 848#ifndef NDEBUG 849 errs() << "LowerFormalArguments Unhandled argument type: " 850 << RegVT.getSimpleVT().SimpleTy << "\n"; 851#endif 852 llvm_unreachable(0); 853 } 854 case MVT::i32: 855 unsigned VReg = RegInfo.createVirtualRegister( 856 XCore::GRRegsRegisterClass); 857 RegInfo.addLiveIn(VA.getLocReg(), VReg); 858 InVals.push_back(DAG.getCopyFromReg(Chain, dl, VReg, RegVT)); 859 } 860 } else { 861 // sanity check 862 assert(VA.isMemLoc()); 863 // Load the argument to a virtual register 864 unsigned ObjSize = VA.getLocVT().getSizeInBits()/8; 865 if (ObjSize > StackSlotSize) { 866 errs() << "LowerFormalArguments Unhandled argument type: " 867 << (unsigned)VA.getLocVT().getSimpleVT().SimpleTy 868 << "\n"; 869 } 870 // Create the frame index object for this incoming parameter... 
      // Fixed object at the argument's stack offset, adjusted past the
      // link-register save slot.
      int FI = MFI->CreateFixedObject(ObjSize,
                                      LRSaveSize + VA.getLocMemOffset(),
                                      true, false);

      // Create the SelectionDAG nodes corresponding to a load
      // from this parameter.
      SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
      InVals.push_back(DAG.getLoad(VA.getLocVT(), dl, Chain, FIN, NULL, 0));
    }
  }

  if (isVarArg) {
    /* Argument registers */
    static const unsigned ArgRegs[] = {
      XCore::R0, XCore::R1, XCore::R2, XCore::R3
    };
    XCoreFunctionInfo *XFI = MF.getInfo<XCoreFunctionInfo>();
    unsigned FirstVAReg = CCInfo.getFirstUnallocated(ArgRegs,
                                                     array_lengthof(ArgRegs));
    if (FirstVAReg < array_lengthof(ArgRegs)) {
      SmallVector<SDValue, 4> MemOps;
      int offset = 0;
      // Save remaining registers, storing higher register numbers at a higher
      // address.
      // NOTE(review): 'i' is unsigned, so if FirstVAReg were ever 0 (a
      // varargs function with no fixed register args) the condition
      // 'i >= FirstVAReg' can never fail once '--i' wraps past zero —
      // confirm callers guarantee FirstVAReg >= 1.
      for (unsigned i = array_lengthof(ArgRegs) - 1; i >= FirstVAReg; --i) {
        // Create a stack slot for this register's spill.
        int FI = MFI->CreateFixedObject(4, offset, true, false);
        if (i == FirstVAReg) {
          // The lowest spilled register marks where va_start begins.
          XFI->setVarArgsFrameIndex(FI);
        }
        offset -= StackSlotSize;
        SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
        // Move argument from phys reg -> virt reg.
        unsigned VReg = RegInfo.createVirtualRegister(
                          XCore::GRRegsRegisterClass);
        RegInfo.addLiveIn(ArgRegs[i], VReg);
        SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i32);
        // Move argument from virt reg -> stack.
        SDValue Store = DAG.getStore(Val.getValue(1), dl, Val, FIN, NULL, 0);
        MemOps.push_back(Store);
      }
      if (!MemOps.empty())
        // Join all the spill stores into a single chain token.
        Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
                            &MemOps[0], MemOps.size());
    } else {
      // This will point to the next argument passed via stack.
      // All four argument registers held fixed arguments, so varargs start
      // in the caller's stack area, just past the incoming arguments.
      XFI->setVarArgsFrameIndex(
        MFI->CreateFixedObject(4, LRSaveSize + CCInfo.getNextStackOffset(),
                               true, false));
    }
  }

  return Chain;
}

//===----------------------------------------------------------------------===//
//               Return Value Calling Convention Implementation
//===----------------------------------------------------------------------===//

/// Returns true iff every return value can be assigned a location by the
/// XCore return convention (RetCC_XCore); otherwise the caller must demote
/// the return to an sret-style out-parameter.
bool XCoreTargetLowering::
CanLowerReturn(CallingConv::ID CallConv, bool isVarArg,
               const SmallVectorImpl<EVT> &OutTys,
               const SmallVectorImpl<ISD::ArgFlagsTy> &ArgsFlags,
               SelectionDAG &DAG) {
  SmallVector<CCValAssign, 16> RVLocs;
  CCState CCInfo(CallConv, isVarArg, getTargetMachine(),
                 RVLocs, *DAG.getContext());
  return CCInfo.CheckReturn(OutTys, ArgsFlags, RetCC_XCore);
}

/// Lower an IR 'ret' into copies to the return registers followed by an
/// XCoreISD::RETSP node ("retsp 0").
SDValue
XCoreTargetLowering::LowerReturn(SDValue Chain,
                                 CallingConv::ID CallConv, bool isVarArg,
                                 const SmallVectorImpl<ISD::OutputArg> &Outs,
                                 DebugLoc dl, SelectionDAG &DAG) {

  // CCValAssign - represent the assignment of
  // the return value to a location.
  SmallVector<CCValAssign, 16> RVLocs;

  // CCState - Info about the registers and stack slot.
  CCState CCInfo(CallConv, isVarArg, getTargetMachine(),
                 RVLocs, *DAG.getContext());

  // Analyze return values.
  CCInfo.AnalyzeReturn(Outs, RetCC_XCore);

  // If this is the first return lowered for this function, add
  // the regs to the liveout set for the function.
  if (DAG.getMachineFunction().getRegInfo().liveout_empty()) {
    for (unsigned i = 0; i != RVLocs.size(); ++i)
      if (RVLocs[i].isRegLoc())
        DAG.getMachineFunction().getRegInfo().addLiveOut(RVLocs[i].getLocReg());
  }

  SDValue Flag;

  // Copy the result values into the output registers.
  for (unsigned i = 0; i != RVLocs.size(); ++i) {
    CCValAssign &VA = RVLocs[i];
    assert(VA.isRegLoc() && "Can only return in registers!");

    Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(),
                             Outs[i].Val, Flag);

    // Guarantee that all emitted copies are stuck together (glued via the
    // flag result) so nothing can be scheduled between them.
    Flag = Chain.getValue(1);
  }

  // Return on XCore is always a "retsp 0"; glue the flag in only when at
  // least one value was copied, otherwise emit a plain void return.
  if (Flag.getNode())
    return DAG.getNode(XCoreISD::RETSP, dl, MVT::Other,
                       Chain, DAG.getConstant(0, MVT::i32), Flag);
  else // Return Void
    return DAG.getNode(XCoreISD::RETSP, dl, MVT::Other,
                       Chain, DAG.getConstant(0, MVT::i32));
}

//===----------------------------------------------------------------------===//
//  Other Lowering Code
//===----------------------------------------------------------------------===//

/// Expand the SELECT_CC pseudo-instruction into an explicit diamond of
/// basic blocks: a conditional branch, a false-value fallthrough block,
/// and a sink block containing a PHI that merges the two values.
MachineBasicBlock *
XCoreTargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
                                                 MachineBasicBlock *BB,
                   DenseMap<MachineBasicBlock*, MachineBasicBlock*> *EM) const {
  const TargetInstrInfo &TII = *getTargetMachine().getInstrInfo();
  DebugLoc dl = MI->getDebugLoc();
  // SELECT_CC is the only opcode registered for custom insertion.
  assert((MI->getOpcode() == XCore::SELECT_CC) &&
         "Unexpected instr type to insert");

  // To "insert" a SELECT_CC instruction, we actually have to insert the diamond
  // control-flow pattern.  The incoming instruction knows the destination vreg
  // to set, the condition code register to branch on, the true/false values to
  // select between, and a branch opcode to use.
  const BasicBlock *LLVM_BB = BB->getBasicBlock();
  MachineFunction::iterator It = BB;
  ++It;

  // thisMBB:
  //  ...
  //   TrueVal = ...
  //   cmpTY ccX, r1, r2
  //   bCC sinkMBB      (branch-if-true skips the false-value block)
  //   fallthrough --> copy0MBB
  MachineBasicBlock *thisMBB = BB;
  MachineFunction *F = BB->getParent();
  MachineBasicBlock *copy0MBB = F->CreateMachineBasicBlock(LLVM_BB);
  MachineBasicBlock *sinkMBB = F->CreateMachineBasicBlock(LLVM_BB);
  // Branch on the condition register (operand 1); when true, the true
  // value already flows from thisMBB, so jump straight to the sink.
  BuildMI(BB, dl, TII.get(XCore::BRFT_lru6))
    .addReg(MI->getOperand(1).getReg()).addMBB(sinkMBB);
  F->insert(It, copy0MBB);
  F->insert(It, sinkMBB);
  // Update machine-CFG edges by first adding all successors of the current
  // block to the new block which will contain the Phi node for the select.
  // Also inform sdisel of the edge changes.
  for (MachineBasicBlock::succ_iterator I = BB->succ_begin(),
         E = BB->succ_end(); I != E; ++I) {
    EM->insert(std::make_pair(*I, sinkMBB));
    sinkMBB->addSuccessor(*I);
  }
  // Next, remove all successors of the current block, and add the true
  // and fallthrough blocks as its successors.
  while (!BB->succ_empty())
    BB->removeSuccessor(BB->succ_begin());
  // Next, add the true and fallthrough blocks as its successors.
  BB->addSuccessor(copy0MBB);
  BB->addSuccessor(sinkMBB);

  // copy0MBB:
  //   %FalseValue = ...
  //   # fallthrough to sinkMBB
  BB = copy0MBB;

  // Update machine-CFG edges.
  BB->addSuccessor(sinkMBB);

  // sinkMBB:
  //   %Result = phi [ %FalseValue, copy0MBB ], [ %TrueValue, thisMBB ]
  //  ...
  // Operand 0 is the destination vreg; operands 2/3 are the true/false
  // values of the original SELECT_CC.
  BB = sinkMBB;
  BuildMI(BB, dl, TII.get(XCore::PHI), MI->getOperand(0).getReg())
    .addReg(MI->getOperand(3).getReg()).addMBB(copy0MBB)
    .addReg(MI->getOperand(2).getReg()).addMBB(thisMBB);

  F->DeleteMachineInstr(MI);   // The pseudo instruction is gone now.
  return BB;
}

//===----------------------------------------------------------------------===//
// Target Optimization Hooks
//===----------------------------------------------------------------------===//

/// Target-specific DAG combine: currently only rewrites an unaligned store
/// of an unaligned load (same size/alignment, load single-use, no side
/// effects between them) into a memmove, which the XCore can lower better
/// than an unaligned load/store pair.
SDValue XCoreTargetLowering::PerformDAGCombine(SDNode *N,
                                             DAGCombinerInfo &DCI) const {
  SelectionDAG &DAG = DCI.DAG;
  DebugLoc dl = N->getDebugLoc();
  switch (N->getOpcode()) {
  default: break;
  case ISD::STORE: {
    // Replace unaligned store of unaligned load with memmove.
    StoreSDNode *ST = cast<StoreSDNode>(N);
    // Only before legalization, and only for plain stores that the target
    // cannot already perform unaligned.
    if (!DCI.isBeforeLegalize() ||
        allowsUnalignedMemoryAccesses(ST->getMemoryVT()) ||
        ST->isVolatile() || ST->isIndexed()) {
      break;
    }
    SDValue Chain = ST->getChain();

    // A memmove can only move whole bytes.
    unsigned StoreBits = ST->getMemoryVT().getStoreSizeInBits();
    if (StoreBits % 8) {
      break;
    }
    // Only fire when the store is genuinely under-aligned for its type.
    unsigned ABIAlignment = getTargetData()->getABITypeAlignment(
        ST->getMemoryVT().getTypeForEVT(*DCI.DAG.getContext()));
    unsigned Alignment = ST->getAlignment();
    if (Alignment >= ABIAlignment) {
      break;
    }

    if (LoadSDNode *LD = dyn_cast<LoadSDNode>(ST->getValue())) {
      // The load's value must feed only this store, match the store's type
      // and alignment, and its chain must reach the store without any
      // intervening side effects — otherwise a memmove would reorder memory
      // operations.
      if (LD->hasNUsesOfValue(1, 0) && ST->getMemoryVT() == LD->getMemoryVT() &&
          LD->getAlignment() == Alignment &&
          !LD->isVolatile() && !LD->isIndexed() &&
          Chain.reachesChainWithoutSideEffects(SDValue(LD, 1))) {
        return DAG.getMemmove(Chain, dl, ST->getBasePtr(),
                              LD->getBasePtr(),
                              DAG.getConstant(StoreBits/8, MVT::i32),
                              Alignment, ST->getSrcValue(),
                              ST->getSrcValueOffset(), LD->getSrcValue(),
                              LD->getSrcValueOffset());
      }
    }
    break;
  }
  }
  return SDValue();
}

//===----------------------------------------------------------------------===//
//  Addressing mode description hooks
//===----------------------------------------------------------------------===//

/// True iff val fits the XCore short unsigned immediate field ("us": 0..11).
static inline bool isImmUs(int64_t val)
1116{ 1117 return (val >= 0 && val <= 11); 1118} 1119 1120static inline bool isImmUs2(int64_t val) 1121{ 1122 return (val%2 == 0 && isImmUs(val/2)); 1123} 1124 1125static inline bool isImmUs4(int64_t val) 1126{ 1127 return (val%4 == 0 && isImmUs(val/4)); 1128} 1129 1130/// isLegalAddressingMode - Return true if the addressing mode represented 1131/// by AM is legal for this target, for a load/store of the specified type. 1132bool 1133XCoreTargetLowering::isLegalAddressingMode(const AddrMode &AM, 1134 const Type *Ty) const { 1135 // Be conservative with void 1136 // FIXME: Can we be more aggressive? 1137 if (Ty->getTypeID() == Type::VoidTyID) 1138 return false; 1139 1140 const TargetData *TD = TM.getTargetData(); 1141 unsigned Size = TD->getTypeAllocSize(Ty); 1142 if (AM.BaseGV) { 1143 return Size >= 4 && !AM.HasBaseReg && AM.Scale == 0 && 1144 AM.BaseOffs%4 == 0; 1145 } 1146 1147 switch (Size) { 1148 case 1: 1149 // reg + imm 1150 if (AM.Scale == 0) { 1151 return isImmUs(AM.BaseOffs); 1152 } 1153 // reg + reg 1154 return AM.Scale == 1 && AM.BaseOffs == 0; 1155 case 2: 1156 case 3: 1157 // reg + imm 1158 if (AM.Scale == 0) { 1159 return isImmUs2(AM.BaseOffs); 1160 } 1161 // reg + reg<<1 1162 return AM.Scale == 2 && AM.BaseOffs == 0; 1163 default: 1164 // reg + imm 1165 if (AM.Scale == 0) { 1166 return isImmUs4(AM.BaseOffs); 1167 } 1168 // reg + reg<<2 1169 return AM.Scale == 4 && AM.BaseOffs == 0; 1170 } 1171 1172 return false; 1173} 1174 1175//===----------------------------------------------------------------------===// 1176// XCore Inline Assembly Support 1177//===----------------------------------------------------------------------===// 1178 1179std::vector<unsigned> XCoreTargetLowering:: 1180getRegClassForInlineAsmConstraint(const std::string &Constraint, 1181 EVT VT) const 1182{ 1183 if (Constraint.size() != 1) 1184 return std::vector<unsigned>(); 1185 1186 switch (Constraint[0]) { 1187 default : break; 1188 case 'r': 1189 return 
make_vector<unsigned>(XCore::R0, XCore::R1, XCore::R2, 1190 XCore::R3, XCore::R4, XCore::R5, 1191 XCore::R6, XCore::R7, XCore::R8, 1192 XCore::R9, XCore::R10, XCore::R11, 0); 1193 break; 1194 } 1195 return std::vector<unsigned>(); 1196} 1197