//===-- AArch64ISelDAGToDAG.cpp - A dag to dag inst selector for AArch64 --===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines an instruction selector for the AArch64 target.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "aarch64-isel"
#include "AArch64.h"
#include "AArch64InstrInfo.h"
#include "AArch64Subtarget.h"
#include "AArch64TargetMachine.h"
#include "Utils/AArch64BaseInfo.h"
#include "llvm/ADT/APSInt.h"
#include "llvm/CodeGen/SelectionDAGISel.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

//===--------------------------------------------------------------------===//
/// AArch64 specific code to select AArch64 machine instructions for
/// SelectionDAG operations.
///
namespace {

class AArch64DAGToDAGISel : public SelectionDAGISel {
  AArch64TargetMachine &TM;
  const AArch64InstrInfo *TII;

  /// Keep a pointer to the AArch64Subtarget around so that we can
  /// make the right decision when generating code for different targets.
  const AArch64Subtarget *Subtarget;

public:
  explicit AArch64DAGToDAGISel(AArch64TargetMachine &tm,
                               CodeGenOpt::Level OptLevel)
    : SelectionDAGISel(tm, OptLevel), TM(tm),
      TII(static_cast<const AArch64InstrInfo*>(TM.getInstrInfo())),
      Subtarget(&TM.getSubtarget<AArch64Subtarget>()) {
  }

  virtual const char *getPassName() const {
    return "AArch64 Instruction Selection";
  }

  // Include the pieces autogenerated from the target description.
#include "AArch64GenDAGISel.inc"

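  /// Match a constant usable as the unsigned 12-bit immediate offset of a
  /// load/store. The hardware scales this field by the access size, so the
  /// constant must be a multiple of MemSize and the scaled value must fit in
  /// 12 bits (i.e. be at most 0xfff).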
  template<unsigned MemSize>
  bool SelectOffsetUImm12(SDValue N, SDValue &UImm12) {
    const ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N);
    if (!CN || CN->getZExtValue() % MemSize != 0
        || CN->getZExtValue() / MemSize > 0xfff)
      return false;

    UImm12 = CurDAG->getTargetConstant(CN->getZExtValue() / MemSize, MVT::i64);
    return true;
  }

  template<unsigned RegWidth>
  bool SelectCVTFixedPosOperand(SDValue N, SDValue &FixedPos) {
    return SelectCVTFixedPosOperand(N, FixedPos, RegWidth);
  }

  /// Used for pre-lowered address-reference nodes, so we already know
  /// the fields match. This operand's job is simply to add an
  /// appropriate shift operand (i.e. 0) to the MOVZ/MOVK instruction.
  bool SelectMOVWAddressRef(SDValue N, SDValue &Imm, SDValue &Shift) {
    Imm = N;
    Shift = CurDAG->getTargetConstant(0, MVT::i32);
    return true;
  }

  bool SelectFPZeroOperand(SDValue N, SDValue &Dummy);

  bool SelectCVTFixedPosOperand(SDValue N, SDValue &FixedPos,
                                unsigned RegWidth);

  bool SelectInlineAsmMemoryOperand(const SDValue &Op,
                                    char ConstraintCode,
                                    std::vector<SDValue> &OutOps);

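  /// Match an immediate expressible in the bitfield-style "logical immediate"
  /// format accepted by AND, ORR, EOR and friends; on success Imm holds the
  /// encoded form produced by A64Imms::isLogicalImm.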
  bool SelectLogicalImm(SDValue N, SDValue &Imm);

  template<unsigned RegWidth>
  bool SelectTSTBOperand(SDValue N, SDValue &FixedPos) {
    return SelectTSTBOperand(N, FixedPos, RegWidth);
  }

  bool SelectTSTBOperand(SDValue N, SDValue &FixedPos, unsigned RegWidth);

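  /// Select one of the four given pseudo-instruction opcodes for an atomic
  /// operation, chosen by the width of the memory access.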
  SDNode *SelectAtomic(SDNode *N, unsigned Op8, unsigned Op16, unsigned Op32,
                       unsigned Op64);

  /// Put the given constant into a pool and return a DAG which will give its
  /// address.
  SDValue getConstantPoolItemAddress(DebugLoc DL, const Constant *CV);

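  /// Try to materialise an integer constant with a single MOVZ, MOVN or
  /// ORR-from-zero-register instruction; returns NULL if no single
  /// instruction can encode the value.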
  SDNode *TrySelectToMoveImm(SDNode *N);
  SDNode *LowerToFPLitPool(SDNode *Node);
  SDNode *SelectToLitPool(SDNode *N);

  SDNode *Select(SDNode *N);
};
}

bool
AArch64DAGToDAGISel::SelectCVTFixedPosOperand(SDValue N, SDValue &FixedPos,
                                              unsigned RegWidth) {
  const ConstantFPSDNode *CN = dyn_cast<ConstantFPSDNode>(N);
  if (!CN) return false;

  // An FCVT[SU] instruction performs: convertToInt(Val * 2^fbits) where fbits
  // is between 1 and 32 for a destination w-register, or 1 and 64 for an
  // x-register.
  //
  // By this stage, we've detected (fp_to_[su]int (fmul Val, THIS_NODE)) so we
  // want THIS_NODE to be 2^fbits. This is much easier to deal with using
  // integers.
  bool IsExact;

  // fbits is between 1 and 64 in the worst case, which means the fmul
  // could have 2^64 as an actual operand. Need 65 bits of precision.
  APSInt IntVal(65, true);
  CN->getValueAPF().convertToInteger(IntVal, APFloat::rmTowardZero, &IsExact);

  // N.b. isPowerOf2 also checks for > 0.
  if (!IsExact || !IntVal.isPowerOf2()) return false;
  unsigned FBits = IntVal.logBase2();

  // Checks above should have guaranteed that we haven't lost information in
  // finding FBits, but it must still be in range.
  if (FBits == 0 || FBits > RegWidth) return false;

  FixedPos = CurDAG->getTargetConstant(64 - FBits, MVT::i32);
  return true;
}

bool
AArch64DAGToDAGISel::SelectInlineAsmMemoryOperand(const SDValue &Op,
                                                  char ConstraintCode,
                                                  std::vector<SDValue> &OutOps) {
  switch (ConstraintCode) {
  default: llvm_unreachable("Unrecognised AArch64 memory constraint");
  case 'm':
    // FIXME: more freedom is actually permitted for 'm'. We can go
    // hunting for a base and an offset if we want. Of course, since
    // we don't really know how the operand is going to be used we're
    // probably restricted to the load/store pair's simm7 as an offset
    // range anyway.
  case 'Q':
    OutOps.push_back(Op);
  }

  return false;
}

bool
AArch64DAGToDAGISel::SelectFPZeroOperand(SDValue N, SDValue &Dummy) {
  ConstantFPSDNode *Imm = dyn_cast<ConstantFPSDNode>(N);
  if (!Imm || !Imm->getValueAPF().isPosZero())
    return false;

  // Doesn't actually carry any information, but keeps TableGen quiet.
  Dummy = CurDAG->getTargetConstant(0, MVT::i32);
  return true;
}

bool AArch64DAGToDAGISel::SelectLogicalImm(SDValue N, SDValue &Imm) {
  uint32_t Bits;
  uint32_t RegWidth = N.getValueType().getSizeInBits();

  ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N);
  if (!CN) return false;

  if (!A64Imms::isLogicalImm(RegWidth, CN->getZExtValue(), Bits))
    return false;

  Imm = CurDAG->getTargetConstant(Bits, MVT::i32);
  return true;
}

SDNode *AArch64DAGToDAGISel::TrySelectToMoveImm(SDNode *Node) {
  SDNode *ResNode;
  DebugLoc dl = Node->getDebugLoc();
  EVT DestType = Node->getValueType(0);
  unsigned DestWidth = DestType.getSizeInBits();

  unsigned MOVOpcode;
  EVT MOVType;
  int UImm16, Shift;
  uint32_t LogicalBits;

  uint64_t BitPat = cast<ConstantSDNode>(Node)->getZExtValue();
  if (A64Imms::isMOVZImm(DestWidth, BitPat, UImm16, Shift)) {
    MOVType = DestType;
    MOVOpcode = DestWidth == 64 ? AArch64::MOVZxii : AArch64::MOVZwii;
  } else if (A64Imms::isMOVNImm(DestWidth, BitPat, UImm16, Shift)) {
    MOVType = DestType;
    MOVOpcode = DestWidth == 64 ? AArch64::MOVNxii : AArch64::MOVNwii;
  } else if (DestWidth == 64 && A64Imms::isMOVNImm(32, BitPat, UImm16, Shift)) {
    // To get something like 0x0000_0000_ffff_1234 into a 64-bit register we
    // can use a 32-bit instruction: "movn w0, 0xedcb".
    MOVType = MVT::i32;
    MOVOpcode = AArch64::MOVNwii;
  } else if (A64Imms::isLogicalImm(DestWidth, BitPat, LogicalBits)) {
    MOVOpcode = DestWidth == 64 ? AArch64::ORRxxi : AArch64::ORRwwi;
    uint16_t ZR = DestWidth == 64 ? AArch64::XZR : AArch64::WZR;

    return CurDAG->getMachineNode(MOVOpcode, dl, DestType,
                              CurDAG->getRegister(ZR, DestType),
                              CurDAG->getTargetConstant(LogicalBits, MVT::i32));
  } else {
    // Can't handle it in one instruction. There's scope for permitting two (or
    // more) instructions, but that'll need more thought.
    return NULL;
  }

  ResNode = CurDAG->getMachineNode(MOVOpcode, dl, MOVType,
                                   CurDAG->getTargetConstant(UImm16, MVT::i32),
                                   CurDAG->getTargetConstant(Shift, MVT::i32));

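  // A 32-bit move leaves the upper 32 bits of the destination register
  // zeroed, so widen the result back to 64 bits with SUBREG_TO_REG.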
  if (MOVType != DestType) {
    ResNode = CurDAG->getMachineNode(TargetOpcode::SUBREG_TO_REG, dl,
                          MVT::i64, MVT::i32, MVT::Other,
                          CurDAG->getTargetConstant(0, MVT::i64),
                          SDValue(ResNode, 0),
                          CurDAG->getTargetConstant(AArch64::sub_32, MVT::i32));
  }

  return ResNode;
}

SDValue
AArch64DAGToDAGISel::getConstantPoolItemAddress(DebugLoc DL,
                                                const Constant *CV) {
  EVT PtrVT = TLI.getPointerTy();

  switch (TLI.getTargetMachine().getCodeModel()) {
  case CodeModel::Small: {
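    // In the small code model the entry is addressed as a page address plus
    // a :lo12: offset; WrapperSmall carries both halves (and the alignment)
    // until they are matched into an ADRP-based sequence.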
    unsigned Alignment =
        TLI.getDataLayout()->getABITypeAlignment(CV->getType());
    return CurDAG->getNode(
        AArch64ISD::WrapperSmall, DL, PtrVT,
        CurDAG->getTargetConstantPool(CV, PtrVT, 0, 0, AArch64II::MO_NO_FLAG),
        CurDAG->getTargetConstantPool(CV, PtrVT, 0, 0, AArch64II::MO_LO12),
        CurDAG->getConstant(Alignment, MVT::i32));
  }
  case CodeModel::Large: {
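    // The large code model builds the absolute 64-bit address 16 bits at a
    // time: MOVZ sets bits 48-63 (G3), then three MOVKs insert the remaining
    // quarters (G2, G1, G0).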
    SDNode *LitAddr;
    LitAddr = CurDAG->getMachineNode(
        AArch64::MOVZxii, DL, PtrVT,
        CurDAG->getTargetConstantPool(CV, PtrVT, 0, 0, AArch64II::MO_ABS_G3),
        CurDAG->getTargetConstant(0, MVT::i32));
    LitAddr = CurDAG->getMachineNode(
        AArch64::MOVKxii, DL, PtrVT, SDValue(LitAddr, 0),
        CurDAG->getTargetConstantPool(CV, PtrVT, 0, 0, AArch64II::MO_ABS_G2_NC),
        CurDAG->getTargetConstant(0, MVT::i32));
    LitAddr = CurDAG->getMachineNode(
        AArch64::MOVKxii, DL, PtrVT, SDValue(LitAddr, 0),
        CurDAG->getTargetConstantPool(CV, PtrVT, 0, 0, AArch64II::MO_ABS_G1_NC),
        CurDAG->getTargetConstant(0, MVT::i32));
    LitAddr = CurDAG->getMachineNode(
        AArch64::MOVKxii, DL, PtrVT, SDValue(LitAddr, 0),
        CurDAG->getTargetConstantPool(CV, PtrVT, 0, 0, AArch64II::MO_ABS_G0_NC),
        CurDAG->getTargetConstant(0, MVT::i32));
    return SDValue(LitAddr, 0);
  }
  default:
    llvm_unreachable("Only small and large code models supported now");
  }
}

SDNode *AArch64DAGToDAGISel::SelectToLitPool(SDNode *Node) {
  DebugLoc DL = Node->getDebugLoc();
  uint64_t UnsignedVal = cast<ConstantSDNode>(Node)->getZExtValue();
  int64_t SignedVal = cast<ConstantSDNode>(Node)->getSExtValue();
  EVT DestType = Node->getValueType(0);

  // Since we may end up loading a 64-bit constant from a 32-bit entry the
  // constant in the pool may have a different type to the eventual node.
  ISD::LoadExtType Extension;
  EVT MemType;

  assert((DestType == MVT::i64 || DestType == MVT::i32)
         && "Only expect integer constants at the moment");

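  // Prefer a 32-bit pool entry whenever the value survives the round trip,
  // using a zero- or sign-extending load to rebuild the 64-bit value.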
  if (DestType == MVT::i32) {
    Extension = ISD::NON_EXTLOAD;
    MemType = MVT::i32;
  } else if (UnsignedVal <= UINT32_MAX) {
    Extension = ISD::ZEXTLOAD;
    MemType = MVT::i32;
  } else if (SignedVal >= INT32_MIN && SignedVal <= INT32_MAX) {
    Extension = ISD::SEXTLOAD;
    MemType = MVT::i32;
  } else {
    Extension = ISD::NON_EXTLOAD;
    MemType = MVT::i64;
  }

  Constant *CV = ConstantInt::get(Type::getIntNTy(*CurDAG->getContext(),
                                                  MemType.getSizeInBits()),
                                  UnsignedVal);
  SDValue PoolAddr = getConstantPoolItemAddress(DL, CV);
  unsigned Alignment = TLI.getDataLayout()->getABITypeAlignment(CV->getType());

  return CurDAG->getExtLoad(Extension, DL, DestType, CurDAG->getEntryNode(),
                            PoolAddr,
                            MachinePointerInfo::getConstantPool(), MemType,
                            /* isVolatile = */ false,
                            /* isNonTemporal = */ false,
                            Alignment).getNode();
}

SDNode *AArch64DAGToDAGISel::LowerToFPLitPool(SDNode *Node) {
  DebugLoc DL = Node->getDebugLoc();
  const ConstantFP *FV = cast<ConstantFPSDNode>(Node)->getConstantFPValue();
  EVT DestType = Node->getValueType(0);

  unsigned Alignment = TLI.getDataLayout()->getABITypeAlignment(FV->getType());
  SDValue PoolAddr = getConstantPoolItemAddress(DL, FV);

  return CurDAG->getLoad(DestType, DL, CurDAG->getEntryNode(), PoolAddr,
                         MachinePointerInfo::getConstantPool(),
                         /* isVolatile = */ false,
                         /* isNonTemporal = */ false,
                         /* isInvariant = */ true,
                         Alignment).getNode();
}

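/// Match an immediate with exactly one bit set, as used by the TBZ/TBNZ
/// bit-test branches; FixedPos receives the index of the tested bit.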
bool
AArch64DAGToDAGISel::SelectTSTBOperand(SDValue N, SDValue &FixedPos,
                                       unsigned RegWidth) {
  const ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N);
  if (!CN) return false;

  uint64_t Val = CN->getZExtValue();

  if (!isPowerOf2_64(Val)) return false;

  unsigned TestedBit = Log2_64(Val);
  // Checks above should have guaranteed that we haven't lost information in
  // finding TestedBit, but it must still be in range.
  if (TestedBit >= RegWidth) return false;

  FixedPos = CurDAG->getTargetConstant(TestedBit, MVT::i64);
  return true;
}

SDNode *AArch64DAGToDAGISel::SelectAtomic(SDNode *Node, unsigned Op8,
                                          unsigned Op16, unsigned Op32,
                                          unsigned Op64) {
  // Mostly a direct translation to the given operations, except that we
  // preserve the AtomicOrdering for use later on.
  AtomicSDNode *AN = cast<AtomicSDNode>(Node);
  EVT VT = AN->getMemoryVT();

  unsigned Op;
  if (VT == MVT::i8)
    Op = Op8;
  else if (VT == MVT::i16)
    Op = Op16;
  else if (VT == MVT::i32)
    Op = Op32;
  else if (VT == MVT::i64)
    Op = Op64;
  else
    llvm_unreachable("Unexpected atomic operation");

  SmallVector<SDValue, 4> Ops;
  for (unsigned i = 1; i < AN->getNumOperands(); ++i)
    Ops.push_back(AN->getOperand(i));

  Ops.push_back(CurDAG->getTargetConstant(AN->getOrdering(), MVT::i32));
  Ops.push_back(AN->getOperand(0)); // Chain moves to the end

  return CurDAG->SelectNodeTo(Node, Op,
                              AN->getValueType(0), MVT::Other,
                              &Ops[0], Ops.size());
}

SDNode *AArch64DAGToDAGISel::Select(SDNode *Node) {
  // Dump information about the Node being selected
  DEBUG(dbgs() << "Selecting: "; Node->dump(CurDAG); dbgs() << "\n");

  if (Node->isMachineOpcode()) {
    DEBUG(dbgs() << "== "; Node->dump(CurDAG); dbgs() << "\n");
    Node->setNodeId(-1);
    return NULL;
  }

  switch (Node->getOpcode()) {
  case ISD::ATOMIC_LOAD_ADD:
    return SelectAtomic(Node,
                        AArch64::ATOMIC_LOAD_ADD_I8,
                        AArch64::ATOMIC_LOAD_ADD_I16,
                        AArch64::ATOMIC_LOAD_ADD_I32,
                        AArch64::ATOMIC_LOAD_ADD_I64);
  case ISD::ATOMIC_LOAD_SUB:
    return SelectAtomic(Node,
                        AArch64::ATOMIC_LOAD_SUB_I8,
                        AArch64::ATOMIC_LOAD_SUB_I16,
                        AArch64::ATOMIC_LOAD_SUB_I32,
                        AArch64::ATOMIC_LOAD_SUB_I64);
  case ISD::ATOMIC_LOAD_AND:
    return SelectAtomic(Node,
                        AArch64::ATOMIC_LOAD_AND_I8,
                        AArch64::ATOMIC_LOAD_AND_I16,
                        AArch64::ATOMIC_LOAD_AND_I32,
                        AArch64::ATOMIC_LOAD_AND_I64);
  case ISD::ATOMIC_LOAD_OR:
    return SelectAtomic(Node,
                        AArch64::ATOMIC_LOAD_OR_I8,
                        AArch64::ATOMIC_LOAD_OR_I16,
                        AArch64::ATOMIC_LOAD_OR_I32,
                        AArch64::ATOMIC_LOAD_OR_I64);
  case ISD::ATOMIC_LOAD_XOR:
    return SelectAtomic(Node,
                        AArch64::ATOMIC_LOAD_XOR_I8,
                        AArch64::ATOMIC_LOAD_XOR_I16,
                        AArch64::ATOMIC_LOAD_XOR_I32,
                        AArch64::ATOMIC_LOAD_XOR_I64);
  case ISD::ATOMIC_LOAD_NAND:
    return SelectAtomic(Node,
                        AArch64::ATOMIC_LOAD_NAND_I8,
                        AArch64::ATOMIC_LOAD_NAND_I16,
                        AArch64::ATOMIC_LOAD_NAND_I32,
                        AArch64::ATOMIC_LOAD_NAND_I64);
  case ISD::ATOMIC_LOAD_MIN:
    return SelectAtomic(Node,
                        AArch64::ATOMIC_LOAD_MIN_I8,
                        AArch64::ATOMIC_LOAD_MIN_I16,
                        AArch64::ATOMIC_LOAD_MIN_I32,
                        AArch64::ATOMIC_LOAD_MIN_I64);
  case ISD::ATOMIC_LOAD_MAX:
    return SelectAtomic(Node,
                        AArch64::ATOMIC_LOAD_MAX_I8,
                        AArch64::ATOMIC_LOAD_MAX_I16,
                        AArch64::ATOMIC_LOAD_MAX_I32,
                        AArch64::ATOMIC_LOAD_MAX_I64);
  case ISD::ATOMIC_LOAD_UMIN:
    return SelectAtomic(Node,
                        AArch64::ATOMIC_LOAD_UMIN_I8,
                        AArch64::ATOMIC_LOAD_UMIN_I16,
                        AArch64::ATOMIC_LOAD_UMIN_I32,
                        AArch64::ATOMIC_LOAD_UMIN_I64);
  case ISD::ATOMIC_LOAD_UMAX:
    return SelectAtomic(Node,
                        AArch64::ATOMIC_LOAD_UMAX_I8,
                        AArch64::ATOMIC_LOAD_UMAX_I16,
                        AArch64::ATOMIC_LOAD_UMAX_I32,
                        AArch64::ATOMIC_LOAD_UMAX_I64);
  case ISD::ATOMIC_SWAP:
    return SelectAtomic(Node,
                        AArch64::ATOMIC_SWAP_I8,
                        AArch64::ATOMIC_SWAP_I16,
                        AArch64::ATOMIC_SWAP_I32,
                        AArch64::ATOMIC_SWAP_I64);
  case ISD::ATOMIC_CMP_SWAP:
    return SelectAtomic(Node,
                        AArch64::ATOMIC_CMP_SWAP_I8,
                        AArch64::ATOMIC_CMP_SWAP_I16,
                        AArch64::ATOMIC_CMP_SWAP_I32,
                        AArch64::ATOMIC_CMP_SWAP_I64);
  case ISD::FrameIndex: {
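    // The frame address becomes "ADD xd, <fi>, #0"; the frame index and the
    // zero immediate are rewritten to the real base register and offset once
    // the stack frame has been laid out.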
    int FI = cast<FrameIndexSDNode>(Node)->getIndex();
    EVT PtrTy = TLI.getPointerTy();
    SDValue TFI = CurDAG->getTargetFrameIndex(FI, PtrTy);
    return CurDAG->SelectNodeTo(Node, AArch64::ADDxxi_lsl0_s, PtrTy,
                                TFI, CurDAG->getTargetConstant(0, PtrTy));
  }
  case ISD::ConstantPool: {
    // Constant pools are fine, just create a Target entry.
    ConstantPoolSDNode *CN = cast<ConstantPoolSDNode>(Node);
    const Constant *C = CN->getConstVal();
    SDValue CP = CurDAG->getTargetConstantPool(C, CN->getValueType(0));

    ReplaceUses(SDValue(Node, 0), CP);
    return NULL;
  }
  case ISD::Constant: {
    SDNode *ResNode = NULL;
    if (cast<ConstantSDNode>(Node)->getZExtValue() == 0) {
      // XZR and WZR are probably even better than an actual move: most of the
      // time they can be folded into another instruction with *no* cost.

      EVT Ty = Node->getValueType(0);
      assert((Ty == MVT::i32 || Ty == MVT::i64) && "unexpected type");
      uint16_t Register = Ty == MVT::i32 ? AArch64::WZR : AArch64::XZR;
      ResNode = CurDAG->getCopyFromReg(CurDAG->getEntryNode(),
                                       Node->getDebugLoc(),
                                       Register, Ty).getNode();
    }

    // The next best option is a move-immediate; see if we can do that.
    if (!ResNode) {
      ResNode = TrySelectToMoveImm(Node);
    }

    if (ResNode)
      return ResNode;

    // If even that fails, we fall back to a lit-pool entry at the moment.
    // Future tuning may change this to a sequence of MOVZ/MOVN/MOVK
    // instructions.
    ResNode = SelectToLitPool(Node);
    assert(ResNode && "We need *some* way to materialise a constant");

    // We want to continue selection at this point since the litpool access
    // we generated used generic nodes for simplicity.
    ReplaceUses(SDValue(Node, 0), SDValue(ResNode, 0));
    Node = ResNode;
    break;
  }
  case ISD::ConstantFP: {
    if (A64Imms::isFPImm(cast<ConstantFPSDNode>(Node)->getValueAPF())) {
      // An FMOV from the TableGen patterns will take care of it.
      break;
    }

    SDNode *ResNode = LowerToFPLitPool(Node);
    ReplaceUses(SDValue(Node, 0), SDValue(ResNode, 0));

    // We want to continue selection at this point since the litpool access
    // we generated used generic nodes for simplicity.
    Node = ResNode;
    break;
  }
  default:
    break; // Let generic code handle it
  }

  SDNode *ResNode = SelectCode(Node);

  DEBUG(dbgs() << "=> ";
        if (ResNode == NULL || ResNode == Node)
          Node->dump(CurDAG);
        else
          ResNode->dump(CurDAG);
        dbgs() << "\n");

  return ResNode;
}

/// This pass converts a legalized DAG into an AArch64-specific DAG, ready for
/// instruction scheduling.
FunctionPass *llvm::createAArch64ISelDAG(AArch64TargetMachine &TM,
                                         CodeGenOpt::Level OptLevel) {
  return new AArch64DAGToDAGISel(TM, OptLevel);
}