1//===-- llvm/Target/TargetLowering.h - Target Lowering Info -----*- C++ -*-===//
2//
3//                     The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9///
10/// \file
/// This file describes how to lower LLVM code to machine code.  This has
/// three main components:
13///
14///  1. Which ValueTypes are natively supported by the target.
15///  2. Which operations are supported for supported ValueTypes.
16///  3. Cost thresholds for alternative implementations of certain operations.
17///
18/// In addition it has a few other components, like information about FP
19/// immediates.
20///
21//===----------------------------------------------------------------------===//
22
23#ifndef LLVM_TARGET_TARGETLOWERING_H
24#define LLVM_TARGET_TARGETLOWERING_H
25
26#include "llvm/ADT/DenseMap.h"
27#include "llvm/CodeGen/DAGCombine.h"
28#include "llvm/CodeGen/RuntimeLibcalls.h"
29#include "llvm/CodeGen/SelectionDAGNodes.h"
30#include "llvm/IR/Attributes.h"
31#include "llvm/IR/CallingConv.h"
32#include "llvm/IR/InlineAsm.h"
33#include "llvm/Support/CallSite.h"
34#include "llvm/Target/TargetCallingConv.h"
35#include "llvm/Target/TargetMachine.h"
36#include <climits>
37#include <map>
38#include <vector>
39
40namespace llvm {
41  class CallInst;
42  class CCState;
43  class FastISel;
44  class FunctionLoweringInfo;
45  class ImmutableCallSite;
46  class IntrinsicInst;
47  class MachineBasicBlock;
48  class MachineFunction;
49  class MachineInstr;
50  class MachineJumpTableInfo;
51  class MCContext;
52  class MCExpr;
53  template<typename T> class SmallVectorImpl;
54  class DataLayout;
55  class TargetRegisterClass;
56  class TargetLibraryInfo;
57  class TargetLoweringObjectFile;
58  class Value;
59
60  namespace Sched {
61    enum Preference {
62      None,             // No preference
63      Source,           // Follow source order.
64      RegPressure,      // Scheduling for lowest register pressure.
65      Hybrid,           // Scheduling for both latency and register pressure.
66      ILP,              // Scheduling for ILP in low register pressure mode.
67      VLIW              // Scheduling for VLIW targets.
68    };
69  }
70
71/// This base class for TargetLowering contains the SelectionDAG-independent
72/// parts that can be used from the rest of CodeGen.
73class TargetLoweringBase {
74  TargetLoweringBase(const TargetLoweringBase&) LLVM_DELETED_FUNCTION;
75  void operator=(const TargetLoweringBase&) LLVM_DELETED_FUNCTION;
76
77public:
78  /// This enum indicates whether operations are valid for a target, and if not,
79  /// what action should be used to make them valid.
80  enum LegalizeAction {
81    Legal,      // The target natively supports this operation.
82    Promote,    // This operation should be executed in a larger type.
83    Expand,     // Try to expand this to other ops, otherwise use a libcall.
84    Custom      // Use the LowerOperation hook to implement custom lowering.
85  };
86
  /// This enum indicates whether a type is legal for a target, and if not,
  /// what action should be used to make it legal.
89  enum LegalizeTypeAction {
90    TypeLegal,           // The target natively supports this type.
91    TypePromoteInteger,  // Replace this integer with a larger one.
92    TypeExpandInteger,   // Split this integer into two of half the size.
93    TypeSoftenFloat,     // Convert this float to a same size integer type.
94    TypeExpandFloat,     // Split this float into two of half the size.
95    TypeScalarizeVector, // Replace this one-element vector with its element.
96    TypeSplitVector,     // Split this vector into two of half the size.
97    TypeWidenVector      // This vector should be widened into a larger vector.
98  };
99
  /// LegalizeKind holds the legalization kind that needs to happen to an EVT
  /// in order to type-legalize it.
102  typedef std::pair<LegalizeTypeAction, EVT> LegalizeKind;
103
104  /// Enum that describes how the target represents true/false values.
105  enum BooleanContent {
106    UndefinedBooleanContent,    // Only bit 0 counts, the rest can hold garbage.
107    ZeroOrOneBooleanContent,        // All bits zero except for bit 0.
108    ZeroOrNegativeOneBooleanContent // All bits equal to bit 0.
109  };
110
111  /// Enum that describes what type of support for selects the target has.
112  enum SelectSupportKind {
113    ScalarValSelect,      // The target supports scalar selects (ex: cmov).
114    ScalarCondVectorVal,  // The target supports selects with a scalar condition
115                          // and vector values (ex: cmov).
116    VectorMaskSelect      // The target supports vector selects with a vector
117                          // mask (ex: x86 blends).
118  };
119
120  static ISD::NodeType getExtendForContent(BooleanContent Content) {
121    switch (Content) {
122    case UndefinedBooleanContent:
123      // Extend by adding rubbish bits.
124      return ISD::ANY_EXTEND;
125    case ZeroOrOneBooleanContent:
126      // Extend by adding zero bits.
127      return ISD::ZERO_EXTEND;
128    case ZeroOrNegativeOneBooleanContent:
129      // Extend by copying the sign bit.
130      return ISD::SIGN_EXTEND;
131    }
132    llvm_unreachable("Invalid content kind");
133  }
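
  // Illustrative sketch (not part of this interface): a legalizer widening an
  // i1 boolean to i32 could pick the extension opcode from the target's
  // boolean contents. TLI, DAG, DL and Narrow are assumed to come from the
  // caller.
  //
  //   ISD::NodeType ExtOp =
  //       TargetLoweringBase::getExtendForContent(
  //           TLI.getBooleanContents(/*isVec=*/false));
  //   SDValue Wide = DAG.getNode(ExtOp, DL, MVT::i32, Narrow);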
134
135  /// NOTE: The constructor takes ownership of TLOF.
136  explicit TargetLoweringBase(const TargetMachine &TM,
137                              const TargetLoweringObjectFile *TLOF);
138  virtual ~TargetLoweringBase();
139
140protected:
141  /// \brief Initialize all of the actions to default values.
142  void initActions();
143
144public:
145  const TargetMachine &getTargetMachine() const { return TM; }
146  const DataLayout *getDataLayout() const { return TD; }
147  const TargetLoweringObjectFile &getObjFileLowering() const { return TLOF; }
148
149  bool isBigEndian() const { return !IsLittleEndian; }
150  bool isLittleEndian() const { return IsLittleEndian; }
151
152  /// Return the pointer type for the given address space, defaults to
153  /// the pointer type from the data layout.
154  /// FIXME: The default needs to be removed once all the code is updated.
155  virtual MVT getPointerTy(uint32_t /*AS*/ = 0) const;
156  unsigned getPointerSizeInBits(uint32_t AS = 0) const;
157  unsigned getPointerTypeSizeInBits(Type *Ty) const;
158  virtual MVT getScalarShiftAmountTy(EVT LHSTy) const;
159
160  EVT getShiftAmountTy(EVT LHSTy) const;
161
162  /// Returns the type to be used for the index operand of:
163  /// ISD::INSERT_VECTOR_ELT, ISD::EXTRACT_VECTOR_ELT,
164  /// ISD::INSERT_SUBVECTOR, and ISD::EXTRACT_SUBVECTOR
165  virtual MVT getVectorIdxTy() const {
166    return getPointerTy();
167  }
168
169  /// Return true if the select operation is expensive for this target.
170  bool isSelectExpensive() const { return SelectIsExpensive; }
171
172  virtual bool isSelectSupported(SelectSupportKind /*kind*/) const {
173    return true;
174  }
175
176  /// Return true if a vector of the given type should be split
177  /// (TypeSplitVector) instead of promoted (TypePromoteInteger) during type
178  /// legalization.
179  virtual bool shouldSplitVectorElementType(EVT /*VT*/) const { return false; }
180
181  /// Return true if integer divide is usually cheaper than a sequence of
182  /// several shifts, adds, and multiplies for this target.
183  bool isIntDivCheap() const { return IntDivIsCheap; }
184
  /// Returns true if the target has indicated that at least one type should be
  /// bypassed.
186  bool isSlowDivBypassed() const { return !BypassSlowDivWidths.empty(); }
187
188  /// Returns map of slow types for division or remainder with corresponding
189  /// fast types
190  const DenseMap<unsigned int, unsigned int> &getBypassSlowDivWidths() const {
191    return BypassSlowDivWidths;
192  }
193
194  /// Return true if pow2 div is cheaper than a chain of srl/add/sra.
195  bool isPow2DivCheap() const { return Pow2DivIsCheap; }
196
197  /// Return true if Flow Control is an expensive operation that should be
198  /// avoided.
199  bool isJumpExpensive() const { return JumpIsExpensive; }
200
201  /// Return true if selects are only cheaper than branches if the branch is
202  /// unlikely to be predicted right.
203  bool isPredictableSelectExpensive() const {
204    return PredictableSelectIsExpensive;
205  }
206
  /// Return true if the following transform is beneficial:
  ///   fold (conv (load x)) -> (load (conv*)x)
  /// On architectures that don't natively support some vector loads
  /// efficiently, casting the load to a smaller vector of larger types and
  /// loading is more efficient; however, this can be undone by optimizations
  /// in the DAG combiner.
214  virtual bool isLoadBitCastBeneficial(EVT /* Load */, EVT /* Bitcast */) const {
215    return true;
216  }
217
218  /// Return the ValueType of the result of SETCC operations.  Also used to
219  /// obtain the target's preferred type for the condition operand of SELECT and
220  /// BRCOND nodes.  In the case of BRCOND the argument passed is MVT::Other
221  /// since there are no other operands to get a type hint from.
222  virtual EVT getSetCCResultType(LLVMContext &Context, EVT VT) const;
223
  /// Return the ValueType for comparison libcalls. Comparison libcalls include
  /// floating point comparison calls, and Ordered/Unordered check calls on
  /// floating point numbers.
227  virtual
228  MVT::SimpleValueType getCmpLibcallReturnType() const;
229
230  /// For targets without i1 registers, this gives the nature of the high-bits
231  /// of boolean values held in types wider than i1.
232  ///
233  /// "Boolean values" are special true/false values produced by nodes like
234  /// SETCC and consumed (as the condition) by nodes like SELECT and BRCOND.
235  /// Not to be confused with general values promoted from i1.  Some cpus
236  /// distinguish between vectors of boolean and scalars; the isVec parameter
237  /// selects between the two kinds.  For example on X86 a scalar boolean should
238  /// be zero extended from i1, while the elements of a vector of booleans
239  /// should be sign extended from i1.
240  BooleanContent getBooleanContents(bool isVec) const {
241    return isVec ? BooleanVectorContents : BooleanContents;
242  }
243
244  /// Return target scheduling preference.
245  Sched::Preference getSchedulingPreference() const {
246    return SchedPreferenceInfo;
247  }
248
  /// Some schedulers, e.g. hybrid, can switch to different scheduling
  /// heuristics for different nodes. This function returns the preference (or
  /// none) for the given node.
252  virtual Sched::Preference getSchedulingPreference(SDNode *) const {
253    return Sched::None;
254  }
255
256  /// Return the register class that should be used for the specified value
257  /// type.
258  virtual const TargetRegisterClass *getRegClassFor(MVT VT) const {
259    const TargetRegisterClass *RC = RegClassForVT[VT.SimpleTy];
260    assert(RC && "This value type is not natively supported!");
261    return RC;
262  }
263
264  /// Return the 'representative' register class for the specified value
265  /// type.
266  ///
267  /// The 'representative' register class is the largest legal super-reg
268  /// register class for the register class of the value type.  For example, on
269  /// i386 the rep register class for i8, i16, and i32 are GR32; while the rep
270  /// register class is GR64 on x86_64.
271  virtual const TargetRegisterClass *getRepRegClassFor(MVT VT) const {
272    const TargetRegisterClass *RC = RepRegClassForVT[VT.SimpleTy];
273    return RC;
274  }
275
276  /// Return the cost of the 'representative' register class for the specified
277  /// value type.
278  virtual uint8_t getRepRegClassCostFor(MVT VT) const {
279    return RepRegClassCostForVT[VT.SimpleTy];
280  }
281
282  /// Return true if the target has native support for the specified value type.
283  /// This means that it has a register that directly holds it without
284  /// promotions or expansions.
285  bool isTypeLegal(EVT VT) const {
286    assert(!VT.isSimple() ||
287           (unsigned)VT.getSimpleVT().SimpleTy < array_lengthof(RegClassForVT));
288    return VT.isSimple() && RegClassForVT[VT.getSimpleVT().SimpleTy] != 0;
289  }
290
291  class ValueTypeActionImpl {
292    /// ValueTypeActions - For each value type, keep a LegalizeTypeAction enum
293    /// that indicates how instruction selection should deal with the type.
294    uint8_t ValueTypeActions[MVT::LAST_VALUETYPE];
295
296  public:
297    ValueTypeActionImpl() {
298      std::fill(ValueTypeActions, array_endof(ValueTypeActions), 0);
299    }
300
301    LegalizeTypeAction getTypeAction(MVT VT) const {
302      return (LegalizeTypeAction)ValueTypeActions[VT.SimpleTy];
303    }
304
305    void setTypeAction(MVT VT, LegalizeTypeAction Action) {
306      unsigned I = VT.SimpleTy;
307      ValueTypeActions[I] = Action;
308    }
309  };
310
311  const ValueTypeActionImpl &getValueTypeActions() const {
312    return ValueTypeActions;
313  }
314
315  /// Return how we should legalize values of this type, either it is already
316  /// legal (return 'Legal') or we need to promote it to a larger type (return
317  /// 'Promote'), or we need to expand it into multiple registers of smaller
318  /// integer type (return 'Expand').  'Custom' is not an option.
319  LegalizeTypeAction getTypeAction(LLVMContext &Context, EVT VT) const {
320    return getTypeConversion(Context, VT).first;
321  }
322  LegalizeTypeAction getTypeAction(MVT VT) const {
323    return ValueTypeActions.getTypeAction(VT);
324  }
325
326  /// For types supported by the target, this is an identity function.  For
327  /// types that must be promoted to larger types, this returns the larger type
328  /// to promote to.  For integer types that are larger than the largest integer
329  /// register, this contains one step in the expansion to get to the smaller
330  /// register. For illegal floating point types, this returns the integer type
331  /// to transform to.
332  EVT getTypeToTransformTo(LLVMContext &Context, EVT VT) const {
333    return getTypeConversion(Context, VT).second;
334  }
335
336  /// For types supported by the target, this is an identity function.  For
337  /// types that must be expanded (i.e. integer types that are larger than the
338  /// largest integer register or illegal floating point types), this returns
339  /// the largest legal type it will be expanded to.
340  EVT getTypeToExpandTo(LLVMContext &Context, EVT VT) const {
341    assert(!VT.isVector());
342    while (true) {
343      switch (getTypeAction(Context, VT)) {
344      case TypeLegal:
345        return VT;
346      case TypeExpandInteger:
347        VT = getTypeToTransformTo(Context, VT);
348        break;
349      default:
350        llvm_unreachable("Type is not legal nor is it to be expanded!");
351      }
352    }
353  }
354
355  /// Vector types are broken down into some number of legal first class types.
356  /// For example, EVT::v8f32 maps to 2 EVT::v4f32 with Altivec or SSE1, or 8
357  /// promoted EVT::f64 values with the X86 FP stack.  Similarly, EVT::v2i64
358  /// turns into 4 EVT::i32 values with both PPC and X86.
359  ///
360  /// This method returns the number of registers needed, and the VT for each
361  /// register.  It also returns the VT and quantity of the intermediate values
362  /// before they are promoted/expanded.
363  unsigned getVectorTypeBreakdown(LLVMContext &Context, EVT VT,
364                                  EVT &IntermediateVT,
365                                  unsigned &NumIntermediates,
366                                  MVT &RegisterVT) const;
367
368  struct IntrinsicInfo {
369    unsigned     opc;         // target opcode
370    EVT          memVT;       // memory VT
371    const Value* ptrVal;      // value representing memory location
372    int          offset;      // offset off of ptrVal
373    unsigned     align;       // alignment
374    bool         vol;         // is volatile?
375    bool         readMem;     // reads memory?
376    bool         writeMem;    // writes memory?
377  };
378
  /// Given an intrinsic, checks whether on this target the intrinsic will need
  /// to map to a MemIntrinsicNode (touches memory). If this is the case, it
  /// returns true and stores the intrinsic information into the IntrinsicInfo
  /// that was passed to the function.
383  virtual bool getTgtMemIntrinsic(IntrinsicInfo &, const CallInst &,
384                                  unsigned /*Intrinsic*/) const {
385    return false;
386  }
387
388  /// Returns true if the target can instruction select the specified FP
389  /// immediate natively. If false, the legalizer will materialize the FP
390  /// immediate as a load from a constant pool.
391  virtual bool isFPImmLegal(const APFloat &/*Imm*/, EVT /*VT*/) const {
392    return false;
393  }
394
395  /// Targets can use this to indicate that they only support *some*
396  /// VECTOR_SHUFFLE operations, those with specific masks.  By default, if a
397  /// target supports the VECTOR_SHUFFLE node, all mask values are assumed to be
398  /// legal.
399  virtual bool isShuffleMaskLegal(const SmallVectorImpl<int> &/*Mask*/,
400                                  EVT /*VT*/) const {
401    return true;
402  }
403
404  /// Returns true if the operation can trap for the value type.
405  ///
406  /// VT must be a legal type. By default, we optimistically assume most
407  /// operations don't trap except for divide and remainder.
408  virtual bool canOpTrap(unsigned Op, EVT VT) const;
409
  /// Similar to isShuffleMaskLegal. Targets can use this to indicate if there
  /// is a suitable VECTOR_SHUFFLE that can be used to replace a VAND with a
  /// constant pool entry.
413  virtual bool isVectorClearMaskLegal(const SmallVectorImpl<int> &/*Mask*/,
414                                      EVT /*VT*/) const {
415    return false;
416  }
417
418  /// Return how this operation should be treated: either it is legal, needs to
419  /// be promoted to a larger size, needs to be expanded to some other code
420  /// sequence, or the target has a custom expander for it.
421  LegalizeAction getOperationAction(unsigned Op, EVT VT) const {
422    if (VT.isExtended()) return Expand;
423    // If a target-specific SDNode requires legalization, require the target
424    // to provide custom legalization for it.
425    if (Op > array_lengthof(OpActions[0])) return Custom;
426    unsigned I = (unsigned) VT.getSimpleVT().SimpleTy;
427    return (LegalizeAction)OpActions[I][Op];
428  }
429
430  /// Return true if the specified operation is legal on this target or can be
431  /// made legal with custom lowering. This is used to help guide high-level
432  /// lowering decisions.
433  bool isOperationLegalOrCustom(unsigned Op, EVT VT) const {
434    return (VT == MVT::Other || isTypeLegal(VT)) &&
435      (getOperationAction(Op, VT) == Legal ||
436       getOperationAction(Op, VT) == Custom);
437  }
438
439  /// Return true if the specified operation is legal on this target or can be
440  /// made legal using promotion. This is used to help guide high-level lowering
441  /// decisions.
442  bool isOperationLegalOrPromote(unsigned Op, EVT VT) const {
443    return (VT == MVT::Other || isTypeLegal(VT)) &&
444      (getOperationAction(Op, VT) == Legal ||
445       getOperationAction(Op, VT) == Promote);
446  }
447
448  /// Return true if the specified operation is illegal on this target or
449  /// unlikely to be made legal with custom lowering. This is used to help guide
450  /// high-level lowering decisions.
451  bool isOperationExpand(unsigned Op, EVT VT) const {
452    return (!isTypeLegal(VT) || getOperationAction(Op, VT) == Expand);
453  }
454
455  /// Return true if the specified operation is legal on this target.
456  bool isOperationLegal(unsigned Op, EVT VT) const {
457    return (VT == MVT::Other || isTypeLegal(VT)) &&
458           getOperationAction(Op, VT) == Legal;
459  }
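
  // Example (a hedged sketch, not code from this file): a DAG combine that
  // wants to form an FMA node would typically guard on these predicates; TLI,
  // DAG, DL, VT and the operands are assumed to come from the caller.
  //
  //   if (TLI.isOperationLegalOrCustom(ISD::FMA, VT))
  //     return DAG.getNode(ISD::FMA, DL, VT, A, B, C);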
460
461  /// Return how this load with extension should be treated: either it is legal,
462  /// needs to be promoted to a larger size, needs to be expanded to some other
463  /// code sequence, or the target has a custom expander for it.
464  LegalizeAction getLoadExtAction(unsigned ExtType, MVT VT) const {
465    assert(ExtType < ISD::LAST_LOADEXT_TYPE && VT < MVT::LAST_VALUETYPE &&
466           "Table isn't big enough!");
467    return (LegalizeAction)LoadExtActions[VT.SimpleTy][ExtType];
468  }
469
470  /// Return true if the specified load with extension is legal on this target.
471  bool isLoadExtLegal(unsigned ExtType, EVT VT) const {
472    return VT.isSimple() &&
473      getLoadExtAction(ExtType, VT.getSimpleVT()) == Legal;
474  }
475
476  /// Return how this store with truncation should be treated: either it is
477  /// legal, needs to be promoted to a larger size, needs to be expanded to some
478  /// other code sequence, or the target has a custom expander for it.
479  LegalizeAction getTruncStoreAction(MVT ValVT, MVT MemVT) const {
480    assert(ValVT < MVT::LAST_VALUETYPE && MemVT < MVT::LAST_VALUETYPE &&
481           "Table isn't big enough!");
482    return (LegalizeAction)TruncStoreActions[ValVT.SimpleTy]
483                                            [MemVT.SimpleTy];
484  }
485
486  /// Return true if the specified store with truncation is legal on this
487  /// target.
488  bool isTruncStoreLegal(EVT ValVT, EVT MemVT) const {
489    return isTypeLegal(ValVT) && MemVT.isSimple() &&
490      getTruncStoreAction(ValVT.getSimpleVT(), MemVT.getSimpleVT()) == Legal;
491  }
492
493  /// Return how the indexed load should be treated: either it is legal, needs
494  /// to be promoted to a larger size, needs to be expanded to some other code
495  /// sequence, or the target has a custom expander for it.
496  LegalizeAction
497  getIndexedLoadAction(unsigned IdxMode, MVT VT) const {
498    assert(IdxMode < ISD::LAST_INDEXED_MODE && VT < MVT::LAST_VALUETYPE &&
499           "Table isn't big enough!");
500    unsigned Ty = (unsigned)VT.SimpleTy;
501    return (LegalizeAction)((IndexedModeActions[Ty][IdxMode] & 0xf0) >> 4);
502  }
503
504  /// Return true if the specified indexed load is legal on this target.
505  bool isIndexedLoadLegal(unsigned IdxMode, EVT VT) const {
506    return VT.isSimple() &&
507      (getIndexedLoadAction(IdxMode, VT.getSimpleVT()) == Legal ||
508       getIndexedLoadAction(IdxMode, VT.getSimpleVT()) == Custom);
509  }
510
511  /// Return how the indexed store should be treated: either it is legal, needs
512  /// to be promoted to a larger size, needs to be expanded to some other code
513  /// sequence, or the target has a custom expander for it.
514  LegalizeAction
515  getIndexedStoreAction(unsigned IdxMode, MVT VT) const {
516    assert(IdxMode < ISD::LAST_INDEXED_MODE && VT < MVT::LAST_VALUETYPE &&
517           "Table isn't big enough!");
518    unsigned Ty = (unsigned)VT.SimpleTy;
519    return (LegalizeAction)(IndexedModeActions[Ty][IdxMode] & 0x0f);
520  }
521
  /// Return true if the specified indexed store is legal on this target.
523  bool isIndexedStoreLegal(unsigned IdxMode, EVT VT) const {
524    return VT.isSimple() &&
525      (getIndexedStoreAction(IdxMode, VT.getSimpleVT()) == Legal ||
526       getIndexedStoreAction(IdxMode, VT.getSimpleVT()) == Custom);
527  }
528
529  /// Return how the condition code should be treated: either it is legal, needs
530  /// to be expanded to some other code sequence, or the target has a custom
531  /// expander for it.
532  LegalizeAction
533  getCondCodeAction(ISD::CondCode CC, MVT VT) const {
534    assert((unsigned)CC < array_lengthof(CondCodeActions) &&
535           ((unsigned)VT.SimpleTy >> 4) < array_lengthof(CondCodeActions[0]) &&
536           "Table isn't big enough!");
537    // See setCondCodeAction for how this is encoded.
538    uint32_t Shift = 2 * (VT.SimpleTy & 0xF);
539    uint32_t Value = CondCodeActions[CC][VT.SimpleTy >> 4];
540    LegalizeAction Action = (LegalizeAction) ((Value >> Shift) & 0x3);
541    assert(Action != Promote && "Can't promote condition code!");
542    return Action;
543  }
544
545  /// Return true if the specified condition code is legal on this target.
546  bool isCondCodeLegal(ISD::CondCode CC, MVT VT) const {
547    return
548      getCondCodeAction(CC, VT) == Legal ||
549      getCondCodeAction(CC, VT) == Custom;
550  }
551
552
553  /// If the action for this operation is to promote, this method returns the
554  /// ValueType to promote to.
555  MVT getTypeToPromoteTo(unsigned Op, MVT VT) const {
556    assert(getOperationAction(Op, VT) == Promote &&
557           "This operation isn't promoted!");
558
559    // See if this has an explicit type specified.
560    std::map<std::pair<unsigned, MVT::SimpleValueType>,
561             MVT::SimpleValueType>::const_iterator PTTI =
562      PromoteToType.find(std::make_pair(Op, VT.SimpleTy));
563    if (PTTI != PromoteToType.end()) return PTTI->second;
564
565    assert((VT.isInteger() || VT.isFloatingPoint()) &&
566           "Cannot autopromote this type, add it with AddPromotedToType.");
567
568    MVT NVT = VT;
569    do {
570      NVT = (MVT::SimpleValueType)(NVT.SimpleTy+1);
571      assert(NVT.isInteger() == VT.isInteger() && NVT != MVT::isVoid &&
572             "Didn't find type to promote to!");
573    } while (!isTypeLegal(NVT) ||
574              getOperationAction(Op, NVT) == Promote);
575    return NVT;
576  }
577
578  /// Return the EVT corresponding to this LLVM type.  This is fixed by the LLVM
579  /// operations except for the pointer size.  If AllowUnknown is true, this
580  /// will return MVT::Other for types with no EVT counterpart (e.g. structs),
581  /// otherwise it will assert.
582  EVT getValueType(Type *Ty, bool AllowUnknown = false) const {
583    // Lower scalar pointers to native pointer types.
584    if (PointerType *PTy = dyn_cast<PointerType>(Ty))
585      return getPointerTy(PTy->getAddressSpace());
586
587    if (Ty->isVectorTy()) {
588      VectorType *VTy = cast<VectorType>(Ty);
589      Type *Elm = VTy->getElementType();
590      // Lower vectors of pointers to native pointer types.
591      if (PointerType *PT = dyn_cast<PointerType>(Elm)) {
592        EVT PointerTy(getPointerTy(PT->getAddressSpace()));
593        Elm = PointerTy.getTypeForEVT(Ty->getContext());
594      }
595
596      return EVT::getVectorVT(Ty->getContext(), EVT::getEVT(Elm, false),
597                       VTy->getNumElements());
598    }
599    return EVT::getEVT(Ty, AllowUnknown);
600  }
601
602  /// Return the MVT corresponding to this LLVM type. See getValueType.
603  MVT getSimpleValueType(Type *Ty, bool AllowUnknown = false) const {
604    return getValueType(Ty, AllowUnknown).getSimpleVT();
605  }
606
607  /// Return the desired alignment for ByVal aggregate function arguments in the
608  /// caller parameter area.  This is the actual alignment, not its logarithm.
609  virtual unsigned getByValTypeAlignment(Type *Ty) const;
610
611  /// Return the type of registers that this ValueType will eventually require.
612  MVT getRegisterType(MVT VT) const {
613    assert((unsigned)VT.SimpleTy < array_lengthof(RegisterTypeForVT));
614    return RegisterTypeForVT[VT.SimpleTy];
615  }
616
617  /// Return the type of registers that this ValueType will eventually require.
618  MVT getRegisterType(LLVMContext &Context, EVT VT) const {
619    if (VT.isSimple()) {
620      assert((unsigned)VT.getSimpleVT().SimpleTy <
621                array_lengthof(RegisterTypeForVT));
622      return RegisterTypeForVT[VT.getSimpleVT().SimpleTy];
623    }
624    if (VT.isVector()) {
625      EVT VT1;
626      MVT RegisterVT;
627      unsigned NumIntermediates;
628      (void)getVectorTypeBreakdown(Context, VT, VT1,
629                                   NumIntermediates, RegisterVT);
630      return RegisterVT;
631    }
632    if (VT.isInteger()) {
633      return getRegisterType(Context, getTypeToTransformTo(Context, VT));
634    }
635    llvm_unreachable("Unsupported extended type!");
636  }
637
638  /// Return the number of registers that this ValueType will eventually
639  /// require.
640  ///
641  /// This is one for any types promoted to live in larger registers, but may be
642  /// more than one for types (like i64) that are split into pieces.  For types
643  /// like i140, which are first promoted then expanded, it is the number of
644  /// registers needed to hold all the bits of the original type.  For an i140
645  /// on a 32 bit machine this means 5 registers.
646  unsigned getNumRegisters(LLVMContext &Context, EVT VT) const {
647    if (VT.isSimple()) {
648      assert((unsigned)VT.getSimpleVT().SimpleTy <
649                array_lengthof(NumRegistersForVT));
650      return NumRegistersForVT[VT.getSimpleVT().SimpleTy];
651    }
652    if (VT.isVector()) {
653      EVT VT1;
654      MVT VT2;
655      unsigned NumIntermediates;
656      return getVectorTypeBreakdown(Context, VT, VT1, NumIntermediates, VT2);
657    }
658    if (VT.isInteger()) {
659      unsigned BitWidth = VT.getSizeInBits();
660      unsigned RegWidth = getRegisterType(Context, VT).getSizeInBits();
661      return (BitWidth + RegWidth - 1) / RegWidth;
662    }
663    llvm_unreachable("Unsupported extended type!");
664  }
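
  // Worked example of the comment above: for i140 on a 32-bit target,
  // getRegisterType(Context, EVT::getIntegerVT(Context, 140)) is MVT::i32 and
  // getNumRegisters returns (140 + 32 - 1) / 32 == 5 registers.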
665
666  /// If true, then instruction selection should seek to shrink the FP constant
667  /// of the specified type to a smaller type in order to save space and / or
668  /// reduce runtime.
669  virtual bool ShouldShrinkFPConstant(EVT) const { return true; }
670
671  /// If true, the target has custom DAG combine transformations that it can
672  /// perform for the specified node.
673  bool hasTargetDAGCombine(ISD::NodeType NT) const {
674    assert(unsigned(NT >> 3) < array_lengthof(TargetDAGCombineArray));
675    return TargetDAGCombineArray[NT >> 3] & (1 << (NT&7));
676  }
677
678  /// \brief Get maximum # of store operations permitted for llvm.memset
679  ///
680  /// This function returns the maximum number of store operations permitted
681  /// to replace a call to llvm.memset. The value is set by the target at the
682  /// performance threshold for such a replacement. If OptSize is true,
683  /// return the limit for functions that have OptSize attribute.
684  unsigned getMaxStoresPerMemset(bool OptSize) const {
685    return OptSize ? MaxStoresPerMemsetOptSize : MaxStoresPerMemset;
686  }
687
688  /// \brief Get maximum # of store operations permitted for llvm.memcpy
689  ///
690  /// This function returns the maximum number of store operations permitted
691  /// to replace a call to llvm.memcpy. The value is set by the target at the
692  /// performance threshold for such a replacement. If OptSize is true,
693  /// return the limit for functions that have OptSize attribute.
694  unsigned getMaxStoresPerMemcpy(bool OptSize) const {
695    return OptSize ? MaxStoresPerMemcpyOptSize : MaxStoresPerMemcpy;
696  }
697
698  /// \brief Get maximum # of store operations permitted for llvm.memmove
699  ///
700  /// This function returns the maximum number of store operations permitted
701  /// to replace a call to llvm.memmove. The value is set by the target at the
702  /// performance threshold for such a replacement. If OptSize is true,
703  /// return the limit for functions that have OptSize attribute.
704  unsigned getMaxStoresPerMemmove(bool OptSize) const {
705    return OptSize ? MaxStoresPerMemmoveOptSize : MaxStoresPerMemmove;
706  }
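
  // Illustrative sketch: targets usually tune these limits directly in their
  // TargetLowering constructor; the values below are hypothetical, not
  // recommendations.
  //
  //   MaxStoresPerMemset  = 8;  MaxStoresPerMemsetOptSize  = 4;
  //   MaxStoresPerMemcpy  = 8;  MaxStoresPerMemcpyOptSize  = 4;
  //   MaxStoresPerMemmove = 8;  MaxStoresPerMemmoveOptSize = 4;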
707
708  /// \brief Determine if the target supports unaligned memory accesses.
709  ///
  /// This function returns true if the target allows unaligned memory accesses
  /// of the specified type. If true, it also returns whether the unaligned
  /// memory access is "fast" in the second argument by reference. This is used,
  /// for example, in situations where an array copy/move/set is converted to a
  /// sequence of store operations. Its use helps to ensure that such
  /// replacements don't generate code that causes an alignment error (trap) on
  /// the target machine.
717  virtual bool allowsUnalignedMemoryAccesses(EVT, bool * /*Fast*/ = 0) const {
718    return false;
719  }
720
721  /// Returns the target specific optimal type for load and store operations as
722  /// a result of memset, memcpy, and memmove lowering.
723  ///
  /// If DstAlign is zero, the destination can be assumed to satisfy any
  /// alignment constraint. Similarly, if SrcAlign is zero there is no need to
  /// check it against an alignment requirement, probably because the
727  /// source does not need to be loaded. If 'IsMemset' is true, that means it's
728  /// expanding a memset. If 'ZeroMemset' is true, that means it's a memset of
729  /// zero. 'MemcpyStrSrc' indicates whether the memcpy source is constant so it
730  /// does not need to be loaded.  It returns EVT::Other if the type should be
731  /// determined using generic target-independent logic.
732  virtual EVT getOptimalMemOpType(uint64_t /*Size*/,
733                                  unsigned /*DstAlign*/, unsigned /*SrcAlign*/,
734                                  bool /*IsMemset*/,
735                                  bool /*ZeroMemset*/,
736                                  bool /*MemcpyStrSrc*/,
737                                  MachineFunction &/*MF*/) const {
738    return MVT::Other;
739  }
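
  // A hypothetical override (hedged sketch; MyTargetLowering is not a real
  // class): prefer 128-bit vector stores for large, well-aligned copies and
  // otherwise defer to the generic logic.
  //
  //   EVT MyTargetLowering::getOptimalMemOpType(
  //       uint64_t Size, unsigned DstAlign, unsigned SrcAlign, bool IsMemset,
  //       bool ZeroMemset, bool MemcpyStrSrc, MachineFunction &MF) const {
  //     if (Size >= 16 && (DstAlign == 0 || DstAlign >= 16))
  //       return MVT::v4i32;
  //     return MVT::Other;
  //   }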
740
741  /// Returns true if it's safe to use load / store of the specified type to
742  /// expand memcpy / memset inline.
743  ///
744  /// This is mostly true for all types except for some special cases. For
745  /// example, on X86 targets without SSE2 f64 load / store are done with fldl /
746  /// fstpl which also does type conversion. Note the specified type doesn't
747  /// have to be legal as the hook is used before type legalization.
748  virtual bool isSafeMemOpType(MVT /*VT*/) const { return true; }
749
750  /// Determine if we should use _setjmp or setjmp to implement llvm.setjmp.
751  bool usesUnderscoreSetJmp() const {
752    return UseUnderscoreSetJmp;
753  }
754
755  /// Determine if we should use _longjmp or longjmp to implement llvm.longjmp.
756  bool usesUnderscoreLongJmp() const {
757    return UseUnderscoreLongJmp;
758  }
759
760  /// Return whether the target can generate code for jump tables.
761  bool supportJumpTables() const {
762    return SupportJumpTables;
763  }
764
  /// Return the integer threshold on the number of blocks at which to use jump
  /// tables rather than an if sequence.
767  int getMinimumJumpTableEntries() const {
768    return MinimumJumpTableEntries;
769  }
770
  /// If a physical register, this specifies the register that
  /// llvm.stacksave/llvm.stackrestore should save and restore.
773  unsigned getStackPointerRegisterToSaveRestore() const {
774    return StackPointerRegisterToSaveRestore;
775  }
776
777  /// If a physical register, this returns the register that receives the
778  /// exception address on entry to a landing pad.
779  unsigned getExceptionPointerRegister() const {
780    return ExceptionPointerRegister;
781  }
782
783  /// If a physical register, this returns the register that receives the
784  /// exception typeid on entry to a landing pad.
785  unsigned getExceptionSelectorRegister() const {
786    return ExceptionSelectorRegister;
787  }
788
789  /// Returns the target's jmp_buf size in bytes (if never set, the default is
790  /// 200)
791  unsigned getJumpBufSize() const {
792    return JumpBufSize;
793  }
794
795  /// Returns the target's jmp_buf alignment in bytes (if never set, the default
796  /// is 0)
797  unsigned getJumpBufAlignment() const {
798    return JumpBufAlignment;
799  }
800
801  /// Return the minimum stack alignment of an argument.
802  unsigned getMinStackArgumentAlignment() const {
803    return MinStackArgumentAlignment;
804  }
805
806  /// Return the minimum function alignment.
807  unsigned getMinFunctionAlignment() const {
808    return MinFunctionAlignment;
809  }
810
811  /// Return the preferred function alignment.
812  unsigned getPrefFunctionAlignment() const {
813    return PrefFunctionAlignment;
814  }
815
816  /// Return the preferred loop alignment.
817  unsigned getPrefLoopAlignment() const {
818    return PrefLoopAlignment;
819  }
820
821  /// Return whether the DAG builder should automatically insert fences and
822  /// reduce ordering for atomics.
823  bool getInsertFencesForAtomic() const {
824    return InsertFencesForAtomic;
825  }
826
827  /// Return true if the target stores stack protector cookies at a fixed offset
828  /// in some non-standard address space, and populates the address space and
829  /// offset as appropriate.
830  virtual bool getStackCookieLocation(unsigned &/*AddressSpace*/,
831                                      unsigned &/*Offset*/) const {
832    return false;
833  }
834
835  /// Returns the maximal possible offset which can be used for loads / stores
836  /// from the global.
837  virtual unsigned getMaximalGlobalOffset() const {
838    return 0;
839  }
840
841  /// Returns true if a cast between SrcAS and DestAS is a noop.
842  virtual bool isNoopAddrSpaceCast(unsigned SrcAS, unsigned DestAS) const {
843    return false;
844  }
845
846  //===--------------------------------------------------------------------===//
847  /// \name Helpers for TargetTransformInfo implementations
848  /// @{
849
850  /// Get the ISD node that corresponds to the Instruction class opcode.
851  int InstructionOpcodeToISD(unsigned Opcode) const;
852
853  /// Estimate the cost of type-legalization and the legalized type.
854  std::pair<unsigned, MVT> getTypeLegalizationCost(Type *Ty) const;
855
856  /// @}
857
858  //===--------------------------------------------------------------------===//
859  // TargetLowering Configuration Methods - These methods should be invoked by
860  // the derived class constructor to configure this object for the target.
861  //
862
863  /// \brief Reset the operation actions based on target options.
864  virtual void resetOperationActions() {}
865
866protected:
867  /// Specify how the target extends the result of a boolean value from i1 to a
868  /// wider type.  See getBooleanContents.
869  void setBooleanContents(BooleanContent Ty) { BooleanContents = Ty; }
870
871  /// Specify how the target extends the result of a vector boolean value from a
872  /// vector of i1 to a wider type.  See getBooleanContents.
873  void setBooleanVectorContents(BooleanContent Ty) {
874    BooleanVectorContents = Ty;
875  }
876
877  /// Specify the target scheduling preference.
878  void setSchedulingPreference(Sched::Preference Pref) {
879    SchedPreferenceInfo = Pref;
880  }
881
  /// Indicate whether this target prefers to use _setjmp to implement
  /// llvm.setjmp or the version without the underscore.  Defaults to false.
884  void setUseUnderscoreSetJmp(bool Val) {
885    UseUnderscoreSetJmp = Val;
886  }
887
  /// Indicate whether this target prefers to use _longjmp to implement
  /// llvm.longjmp or the version without the underscore.  Defaults to false.
890  void setUseUnderscoreLongJmp(bool Val) {
891    UseUnderscoreLongJmp = Val;
892  }
893
894  /// Indicate whether the target can generate code for jump tables.
895  void setSupportJumpTables(bool Val) {
896    SupportJumpTables = Val;
897  }
898
  /// Indicate the minimum number of blocks for which a jump table should be
  /// generated rather than an if sequence.
901  void setMinimumJumpTableEntries(int Val) {
902    MinimumJumpTableEntries = Val;
903  }
904
  /// If set to a physical register, this specifies the register that
  /// llvm.stacksave/llvm.stackrestore should save and restore.
907  void setStackPointerRegisterToSaveRestore(unsigned R) {
908    StackPointerRegisterToSaveRestore = R;
909  }
910
911  /// If set to a physical register, this sets the register that receives the
912  /// exception address on entry to a landing pad.
913  void setExceptionPointerRegister(unsigned R) {
914    ExceptionPointerRegister = R;
915  }
916
917  /// If set to a physical register, this sets the register that receives the
918  /// exception typeid on entry to a landing pad.
919  void setExceptionSelectorRegister(unsigned R) {
920    ExceptionSelectorRegister = R;
921  }
922
923  /// Tells the code generator not to expand operations into sequences that use
924  /// the select operations if possible.
925  void setSelectIsExpensive(bool isExpensive = true) {
926    SelectIsExpensive = isExpensive;
927  }
928
  /// Tells the code generator not to expand a sequence of operations into
  /// separate sequences that increase the amount of flow control.
931  void setJumpIsExpensive(bool isExpensive = true) {
932    JumpIsExpensive = isExpensive;
933  }
934
  /// Tells the code generator whether integer divide is usually cheaper than a
  /// sequence of several shifts, adds, and multiplies for this target; if not,
  /// integer divides should be replaced by such a sequence where possible.
938  void setIntDivIsCheap(bool isCheap = true) { IntDivIsCheap = isCheap; }
939
940  /// Tells the code generator which bitwidths to bypass.
941  void addBypassSlowDiv(unsigned int SlowBitWidth, unsigned int FastBitWidth) {
942    BypassSlowDivWidths[SlowBitWidth] = FastBitWidth;
943  }
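
  // Example (illustrative, mirroring the BypassSlowDivWidths documentation):
  // bypass 32-bit div/rem with 8-bit unsigned div/rem when the operands fit.
  //
  //   addBypassSlowDiv(32, 8);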
944
945  /// Tells the code generator that it shouldn't generate srl/add/sra for a
946  /// signed divide by power of two, and let the target handle it.
947  void setPow2DivIsCheap(bool isCheap = true) { Pow2DivIsCheap = isCheap; }
948
949  /// Add the specified register class as an available regclass for the
950  /// specified value type. This indicates the selector can handle values of
951  /// that class natively.
952  void addRegisterClass(MVT VT, const TargetRegisterClass *RC) {
953    assert((unsigned)VT.SimpleTy < array_lengthof(RegClassForVT));
954    AvailableRegClasses.push_back(std::make_pair(VT, RC));
955    RegClassForVT[VT.SimpleTy] = RC;
956  }
957
958  /// Remove all register classes.
959  void clearRegisterClasses() {
960    memset(RegClassForVT, 0,MVT::LAST_VALUETYPE * sizeof(TargetRegisterClass*));
961
962    AvailableRegClasses.clear();
963  }
964
965  /// \brief Remove all operation actions.
966  void clearOperationActions() {
967  }
968
969  /// Return the largest legal super-reg register class of the register class
970  /// for the specified type and its associated "cost".
971  virtual std::pair<const TargetRegisterClass*, uint8_t>
972  findRepresentativeClass(MVT VT) const;
973
974  /// Once all of the register classes are added, this allows us to compute
975  /// derived properties we expose.
976  void computeRegisterProperties();
977
978  /// Indicate that the specified operation does not work with the specified
979  /// type and indicate what to do about it.
980  void setOperationAction(unsigned Op, MVT VT,
981                          LegalizeAction Action) {
982    assert(Op < array_lengthof(OpActions[0]) && "Table isn't big enough!");
983    OpActions[(unsigned)VT.SimpleTy][Op] = (uint8_t)Action;
984  }
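
  // Putting the configuration hooks together, a hypothetical target's
  // TargetLowering constructor might look like the sketch below
  // (MyGPRRegClass is an assumed register class and the chosen actions are
  // illustrative only).
  //
  //   addRegisterClass(MVT::i32, &MyGPRRegClass);
  //   computeRegisterProperties();
  //
  //   setOperationAction(ISD::SDIV,   MVT::i32, Expand);  // no hw divider
  //   setOperationAction(ISD::SELECT, MVT::i32, Custom);  // LowerOperation
  //   setLoadExtAction(ISD::SEXTLOAD, MVT::i1,  Promote);
  //   setTruncStoreAction(MVT::f64, MVT::f32, Expand);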
985
986  /// Indicate that the specified load with extension does not work with the
987  /// specified type and indicate what to do about it.
988  void setLoadExtAction(unsigned ExtType, MVT VT,
989                        LegalizeAction Action) {
990    assert(ExtType < ISD::LAST_LOADEXT_TYPE && VT < MVT::LAST_VALUETYPE &&
991           "Table isn't big enough!");
992    LoadExtActions[VT.SimpleTy][ExtType] = (uint8_t)Action;
993  }
994
995  /// Indicate that the specified truncating store does not work with the
996  /// specified type and indicate what to do about it.
997  void setTruncStoreAction(MVT ValVT, MVT MemVT,
998                           LegalizeAction Action) {
999    assert(ValVT < MVT::LAST_VALUETYPE && MemVT < MVT::LAST_VALUETYPE &&
1000           "Table isn't big enough!");
1001    TruncStoreActions[ValVT.SimpleTy][MemVT.SimpleTy] = (uint8_t)Action;
1002  }
1003
1004  /// Indicate that the specified indexed load does or does not work with the
  /// specified type and indicate what to do about it.
1006  ///
1007  /// NOTE: All indexed mode loads are initialized to Expand in
1008  /// TargetLowering.cpp
1009  void setIndexedLoadAction(unsigned IdxMode, MVT VT,
1010                            LegalizeAction Action) {
1011    assert(VT < MVT::LAST_VALUETYPE && IdxMode < ISD::LAST_INDEXED_MODE &&
1012           (unsigned)Action < 0xf && "Table isn't big enough!");
1013    // Load action are kept in the upper half.
1014    IndexedModeActions[(unsigned)VT.SimpleTy][IdxMode] &= ~0xf0;
1015    IndexedModeActions[(unsigned)VT.SimpleTy][IdxMode] |= ((uint8_t)Action) <<4;
1016  }
1017
1018  /// Indicate that the specified indexed store does or does not work with the
1019  /// specified type and indicate what to do about it.
1020  ///
1021  /// NOTE: All indexed mode stores are initialized to Expand in
1022  /// TargetLowering.cpp
1023  void setIndexedStoreAction(unsigned IdxMode, MVT VT,
1024                             LegalizeAction Action) {
1025    assert(VT < MVT::LAST_VALUETYPE && IdxMode < ISD::LAST_INDEXED_MODE &&
1026           (unsigned)Action < 0xf && "Table isn't big enough!");
1027    // Store action are kept in the lower half.
1028    IndexedModeActions[(unsigned)VT.SimpleTy][IdxMode] &= ~0x0f;
1029    IndexedModeActions[(unsigned)VT.SimpleTy][IdxMode] |= ((uint8_t)Action);
1030  }
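
  // Example (hedged sketch): a target with pre-increment loads and stores for
  // i32 might mark them Legal; everything else stays at the Expand default
  // mentioned in the notes above.
  //
  //   setIndexedLoadAction(ISD::PRE_INC,  MVT::i32, Legal);
  //   setIndexedStoreAction(ISD::PRE_INC, MVT::i32, Legal);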
1031
1032  /// Indicate that the specified condition code is or isn't supported on the
1033  /// target and indicate what to do about it.
1034  void setCondCodeAction(ISD::CondCode CC, MVT VT,
1035                         LegalizeAction Action) {
1036    assert(VT < MVT::LAST_VALUETYPE &&
1037           (unsigned)CC < array_lengthof(CondCodeActions) &&
1038           "Table isn't big enough!");
    // The lower 4 bits of SimpleTy select the Nth 2-bit slot within the 32-bit
    // value, and the remaining upper bits index into the second dimension of
    // the array to select which 32-bit value to use.
1042    uint32_t Shift = 2 * (VT.SimpleTy & 0xF);
1043    CondCodeActions[CC][VT.SimpleTy >> 4] &= ~((uint32_t)0x3 << Shift);
1044    CondCodeActions[CC][VT.SimpleTy >> 4] |= (uint32_t)Action << Shift;
1045  }
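
  // Example (illustrative): a target without native unordered floating point
  // compares could request expansion; the call updates the 2-bit slot selected
  // by the low 4 bits of MVT::f32's SimpleTy, as described above.
  //
  //   setCondCodeAction(ISD::SETUO, MVT::f32, Expand);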
1046
1047  /// If Opc/OrigVT is specified as being promoted, the promotion code defaults
1048  /// to trying a larger integer/fp until it can find one that works. If that
1049  /// default is insufficient, this method can be used by the target to override
1050  /// the default.
1051  void AddPromotedToType(unsigned Opc, MVT OrigVT, MVT DestVT) {
1052    PromoteToType[std::make_pair(Opc, OrigVT.SimpleTy)] = DestVT.SimpleTy;
1053  }
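
  // Example (hypothetical opcode/type pair): promote i8 multiplies and force
  // them to be performed in i32.
  //
  //   setOperationAction(ISD::MUL, MVT::i8, Promote);
  //   AddPromotedToType(ISD::MUL,  MVT::i8, MVT::i32);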
1054
1055  /// Targets should invoke this method for each target independent node that
1056  /// they want to provide a custom DAG combiner for by implementing the
1057  /// PerformDAGCombine virtual method.
1058  void setTargetDAGCombine(ISD::NodeType NT) {
1059    assert(unsigned(NT >> 3) < array_lengthof(TargetDAGCombineArray));
1060    TargetDAGCombineArray[NT >> 3] |= 1 << (NT&7);
1061  }
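
  // Example (sketch): a target that wants a chance to combine ADD and
  // SIGN_EXTEND nodes registers them here and then handles them in its
  // PerformDAGCombine override on the derived TargetLowering class.
  //
  //   setTargetDAGCombine(ISD::ADD);
  //   setTargetDAGCombine(ISD::SIGN_EXTEND);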
1062
1063  /// Set the target's required jmp_buf buffer size (in bytes); default is 200
1064  void setJumpBufSize(unsigned Size) {
1065    JumpBufSize = Size;
1066  }
1067
1068  /// Set the target's required jmp_buf buffer alignment (in bytes); default is
1069  /// 0
1070  void setJumpBufAlignment(unsigned Align) {
1071    JumpBufAlignment = Align;
1072  }
1073
1074  /// Set the target's minimum function alignment (in log2(bytes))
1075  void setMinFunctionAlignment(unsigned Align) {
1076    MinFunctionAlignment = Align;
1077  }
1078
1079  /// Set the target's preferred function alignment.  This should be set if
1080  /// there is a performance benefit to higher-than-minimum alignment (in
1081  /// log2(bytes))
1082  void setPrefFunctionAlignment(unsigned Align) {
1083    PrefFunctionAlignment = Align;
1084  }
1085
1086  /// Set the target's preferred loop alignment. Default alignment is zero, it
1087  /// means the target does not care about loop alignment.  The alignment is
1088  /// specified in log2(bytes).
1089  void setPrefLoopAlignment(unsigned Align) {
1090    PrefLoopAlignment = Align;
1091  }
1092
1093  /// Set the minimum stack alignment of an argument (in log2(bytes)).
1094  void setMinStackArgumentAlignment(unsigned Align) {
1095    MinStackArgumentAlignment = Align;
1096  }
1097
1098  /// Set if the DAG builder should automatically insert fences and reduce the
1099  /// order of atomic memory operations to Monotonic.
1100  void setInsertFencesForAtomic(bool fence) {
1101    InsertFencesForAtomic = fence;
1102  }
1103
1104public:
1105  //===--------------------------------------------------------------------===//
1106  // Addressing mode description hooks (used by LSR etc).
1107  //
1108
1109  /// CodeGenPrepare sinks address calculations into the same BB as Load/Store
1110  /// instructions reading the address. This allows as much computation as
1111  /// possible to be done in the address mode for that operand. This hook lets
1112  /// targets also pass back when this should be done on intrinsics which
1113  /// load/store.
1114  virtual bool GetAddrModeArguments(IntrinsicInst * /*I*/,
1115                                    SmallVectorImpl<Value*> &/*Ops*/,
1116                                    Type *&/*AccessTy*/) const {
1117    return false;
1118  }
1119
1120  /// This represents an addressing mode of:
1121  ///    BaseGV + BaseOffs + BaseReg + Scale*ScaleReg
1122  /// If BaseGV is null,  there is no BaseGV.
1123  /// If BaseOffs is zero, there is no base offset.
1124  /// If HasBaseReg is false, there is no base register.
1125  /// If Scale is zero, there is no ScaleReg.  Scale of 1 indicates a reg with
1126  /// no scale.
1127  struct AddrMode {
1128    GlobalValue *BaseGV;
1129    int64_t      BaseOffs;
1130    bool         HasBaseReg;
1131    int64_t      Scale;
1132    AddrMode() : BaseGV(0), BaseOffs(0), HasBaseReg(false), Scale(0) {}
1133  };
1134
1135  /// Return true if the addressing mode represented by AM is legal for this
1136  /// target, for a load/store of the specified type.
1137  ///
1138  /// The type may be VoidTy, in which case only return true if the addressing
1139  /// mode is legal for a load/store of any legal type.  TODO: Handle
1140  /// pre/postinc as well.
1141  virtual bool isLegalAddressingMode(const AddrMode &AM, Type *Ty) const;
1142
1143  /// \brief Return the cost of the scaling factor used in the addressing mode
1144  /// represented by AM for this target, for a load/store of the specified type.
1145  ///
1146  /// If the AM is supported, the return value must be >= 0.
1147  /// If the AM is not supported, it returns a negative value.
1148  /// TODO: Handle pre/postinc as well.
1149  virtual int getScalingFactorCost(const AddrMode &AM, Type *Ty) const {
1150    // Default: assume that any scaling factor used in a legal AM is free.
1151    if (isLegalAddressingMode(AM, Ty)) return 0;
1152    return -1;
1153  }
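
  // Example (hedged sketch): querying whether an x86-style address of the form
  // [BaseReg + 4*IndexReg + 16] is legal for an i32 access. TLI and Int32Ty
  // are assumed to come from the caller.
  //
  //   TargetLoweringBase::AddrMode AM;
  //   AM.HasBaseReg = true;   // base register present
  //   AM.Scale      = 4;      // plus 4 * index register
  //   AM.BaseOffs   = 16;     // plus a constant displacement
  //   bool Legal = TLI.isLegalAddressingMode(AM, Int32Ty);
  //   int  Cost  = Legal ? TLI.getScalingFactorCost(AM, Int32Ty) : -1;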
1154
  /// Return true if the specified immediate is a legal icmp immediate, that is
1156  /// the target has icmp instructions which can compare a register against the
1157  /// immediate without having to materialize the immediate into a register.
1158  virtual bool isLegalICmpImmediate(int64_t) const {
1159    return true;
1160  }
1161
  /// Return true if the specified immediate is a legal add immediate, that is the
1163  /// target has add instructions which can add a register with the immediate
1164  /// without having to materialize the immediate into a register.
1165  virtual bool isLegalAddImmediate(int64_t) const {
1166    return true;
1167  }
1168
1169  /// Return true if it's free to truncate a value of type Ty1 to type
  /// Ty2. e.g. On x86 it's free to truncate an i32 value in register EAX to i16
1171  /// by referencing its sub-register AX.
1172  virtual bool isTruncateFree(Type * /*Ty1*/, Type * /*Ty2*/) const {
1173    return false;
1174  }
1175
1176  /// Return true if a truncation from Ty1 to Ty2 is permitted when deciding
1177  /// whether a call is in tail position. Typically this means that both results
1178  /// would be assigned to the same register or stack slot, but it could mean
1179  /// the target performs adequate checks of its own before proceeding with the
1180  /// tail call.
1181  virtual bool allowTruncateForTailCall(Type * /*Ty1*/, Type * /*Ty2*/) const {
1182    return false;
1183  }
1184
1185  virtual bool isTruncateFree(EVT /*VT1*/, EVT /*VT2*/) const {
1186    return false;
1187  }
1188
1189  /// Return true if any actual instruction that defines a value of type Ty1
1190  /// implicitly zero-extends the value to Ty2 in the result register.
1191  ///
1192  /// This does not necessarily include registers defined in unknown ways, such
1193  /// as incoming arguments, or copies from unknown virtual registers. Also, if
1194  /// isTruncateFree(Ty2, Ty1) is true, this does not necessarily apply to
1195  /// truncate instructions. e.g. on x86-64, all instructions that define 32-bit
  /// values implicitly zero-extend the result out to 64 bits.
1197  virtual bool isZExtFree(Type * /*Ty1*/, Type * /*Ty2*/) const {
1198    return false;
1199  }
1200
1201  virtual bool isZExtFree(EVT /*VT1*/, EVT /*VT2*/) const {
1202    return false;
1203  }
1204
  /// Return true if the target can combine two values of type LoadedType,
  /// loaded next to each other in memory, into a single paired load.
  /// RequiredAlignment gives the minimal alignment constraints that must be met
  /// to be able to select this paired load.
1209  ///
1210  /// This information is *not* used to generate actual paired loads, but it is
1211  /// used to generate a sequence of loads that is easier to combine into a
1212  /// paired load.
1213  /// For instance, something like this:
1214  /// a = load i64* addr
1215  /// b = trunc i64 a to i32
1216  /// c = lshr i64 a, 32
1217  /// d = trunc i64 c to i32
1218  /// will be optimized into:
1219  /// b = load i32* addr1
1220  /// d = load i32* addr2
1221  /// Where addr1 = addr2 +/- sizeof(i32).
1222  ///
1223  /// In other words, unless the target performs a post-isel load combining,
1224  /// this information should not be provided because it will generate more
1225  /// loads.
1226  virtual bool hasPairedLoad(Type * /*LoadedType*/,
                             unsigned & /*RequiredAlignment*/) const {
1228    return false;
1229  }
1230
1231  virtual bool hasPairedLoad(EVT /*LoadedType*/,
                             unsigned & /*RequiredAlignment*/) const {
1233    return false;
1234  }
1235
1236  /// Return true if zero-extending the specific node Val to type VT2 is free
1237  /// (either because it's implicitly zero-extended such as ARM ldrb / ldrh or
1238  /// because it's folded such as X86 zero-extending loads).
1239  virtual bool isZExtFree(SDValue Val, EVT VT2) const {
1240    return isZExtFree(Val.getValueType(), VT2);
1241  }
1242
1243  /// Return true if an fneg operation is free to the point where it is never
1244  /// worthwhile to replace it with a bitwise operation.
1245  virtual bool isFNegFree(EVT VT) const {
1246    assert(VT.isFloatingPoint());
1247    return false;
1248  }
1249
1250  /// Return true if an fabs operation is free to the point where it is never
1251  /// worthwhile to replace it with a bitwise operation.
1252  virtual bool isFAbsFree(EVT VT) const {
1253    assert(VT.isFloatingPoint());
1254    return false;
1255  }
1256
1257  /// Return true if an FMA operation is faster than a pair of fmul and fadd
1258  /// instructions. fmuladd intrinsics will be expanded to FMAs when this method
1259  /// returns true, otherwise fmuladd is expanded to fmul + fadd.
1260  ///
1261  /// NOTE: This may be called before legalization on types for which FMAs are
1262  /// not legal, but should return true if those types will eventually legalize
1263  /// to types that support FMAs. After legalization, it will only be called on
1264  /// types that support FMAs (via Legal or Custom actions)
1265  virtual bool isFMAFasterThanFMulAndFAdd(EVT) const {
1266    return false;
1267  }
1268
1269  /// Return true if it's profitable to narrow operations of type VT1 to
1270  /// VT2. e.g. on x86, it's profitable to narrow from i32 to i8 but not from
1271  /// i32 to i16.
1272  virtual bool isNarrowingProfitable(EVT /*VT1*/, EVT /*VT2*/) const {
1273    return false;
1274  }
1275
1276  //===--------------------------------------------------------------------===//
1277  // Runtime Library hooks
1278  //
1279
1280  /// Rename the default libcall routine name for the specified libcall.
1281  void setLibcallName(RTLIB::Libcall Call, const char *Name) {
1282    LibcallRoutineNames[Call] = Name;
1283  }
1284
1285  /// Get the libcall routine name for the specified libcall.
1286  const char *getLibcallName(RTLIB::Libcall Call) const {
1287    return LibcallRoutineNames[Call];
1288  }
1289
1290  /// Override the default CondCode to be used to test the result of the
1291  /// comparison libcall against zero.
1292  void setCmpLibcallCC(RTLIB::Libcall Call, ISD::CondCode CC) {
1293    CmpLibcallCCs[Call] = CC;
1294  }
1295
1296  /// Get the CondCode that's to be used to test the result of the comparison
1297  /// libcall against zero.
1298  ISD::CondCode getCmpLibcallCC(RTLIB::Libcall Call) const {
1299    return CmpLibcallCCs[Call];
1300  }
1301
1302  /// Set the CallingConv that should be used for the specified libcall.
1303  void setLibcallCallingConv(RTLIB::Libcall Call, CallingConv::ID CC) {
1304    LibcallCallingConvs[Call] = CC;
1305  }
1306
1307  /// Get the CallingConv that should be used for the specified libcall.
1308  CallingConv::ID getLibcallCallingConv(RTLIB::Libcall Call) const {
1309    return LibcallCallingConvs[Call];
1310  }
1311
1312private:
1313  const TargetMachine &TM;
1314  const DataLayout *TD;
1315  const TargetLoweringObjectFile &TLOF;
1316
1317  /// True if this is a little endian target.
1318  bool IsLittleEndian;
1319
1320  /// Tells the code generator not to expand operations into sequences that use
1321  /// select operations, if possible.
1322  bool SelectIsExpensive;
1323
1324  /// Tells the code generator not to expand integer divides by constants into a
1325  /// sequence of muls, adds, and shifts.  This is a hack until a real cost
1326  /// model is in place.  If we ever optimize for size, this will be set to true
1327  /// unconditionally.
1328  bool IntDivIsCheap;
1329
1330  /// Tells the code generator to bypass slow divide or remainder
1331  /// instructions. For example, a BypassSlowDivWidths entry mapping 32 to 8
1332  /// tells the code generator to bypass 32-bit integer div/rem with an 8-bit
1333  /// unsigned integer div/rem when both operands are positive and less than 256.
1334  DenseMap<unsigned int, unsigned int> BypassSlowDivWidths;
1335
1336  /// Tells the code generator that it shouldn't generate srl/add/sra for a
1337  /// signed divide by a power of two, and should let the target handle it.
1338  bool Pow2DivIsCheap;
1339
1340  /// Tells the code generator that it shouldn't generate extra flow control
1341  /// instructions and should attempt to combine flow control instructions via
1342  /// predication.
1343  bool JumpIsExpensive;
1344
1345  /// This target prefers to use _setjmp to implement llvm.setjmp.
1346  ///
1347  /// Defaults to false.
1348  bool UseUnderscoreSetJmp;
1349
1350  /// This target prefers to use _longjmp to implement llvm.longjmp.
1351  ///
1352  /// Defaults to false.
1353  bool UseUnderscoreLongJmp;
1354
1355  /// Whether the target can generate code for jump tables.  If false, then
1356  /// each jump table must be lowered into a chain of if-then-else's.
1357  bool SupportJumpTables;
1358
1359  /// Minimum number of switch entries required to emit a jump table.
1360  int MinimumJumpTableEntries;
1361
1362  /// Information about the contents of the high-bits in boolean values held in
1363  /// a type wider than i1. See getBooleanContents.
1364  BooleanContent BooleanContents;
1365
1366  /// Information about the contents of the high-bits in boolean vector values
1367  /// when the element type is wider than i1. See getBooleanContents.
1368  BooleanContent BooleanVectorContents;
1369
1370  /// The target scheduling preference: shortest possible total cycles or lowest
1371  /// register usage.
1372  Sched::Preference SchedPreferenceInfo;
1373
1374  /// The size, in bytes, of the target's jmp_buf buffers
1375  unsigned JumpBufSize;
1376
1377  /// The alignment, in bytes, of the target's jmp_buf buffers
1378  unsigned JumpBufAlignment;
1379
1380  /// The minimum alignment that any argument on the stack needs to have.
1381  unsigned MinStackArgumentAlignment;
1382
1383  /// The minimum function alignment (used when optimizing for size, and to
1384  /// prevent explicitly provided alignment from leading to incorrect code).
1385  unsigned MinFunctionAlignment;
1386
1387  /// The preferred function alignment (used when alignment unspecified and
1388  /// optimizing for speed).
1389  unsigned PrefFunctionAlignment;
1390
1391  /// The preferred loop alignment.
1392  unsigned PrefLoopAlignment;
1393
1394  /// Whether the DAG builder should automatically insert fences and reduce
1395  /// ordering for atomics.  (This will be set for most architectures with
1396  /// weak memory ordering.)
1397  bool InsertFencesForAtomic;
1398
1399  /// If set to a physical register, this specifies the register that
1400  /// llvm.stacksave/llvm.stackrestore should save and restore.
1401  unsigned StackPointerRegisterToSaveRestore;
1402
1403  /// If set to a physical register, this specifies the register that receives
1404  /// the exception address on entry to a landing pad.
1405  unsigned ExceptionPointerRegister;
1406
1407  /// If set to a physical register, this specifies the register that receives
1408  /// the exception typeid on entry to a landing pad.
1409  unsigned ExceptionSelectorRegister;
1410
1411  /// This indicates the default register class to use for each ValueType the
1412  /// target supports natively.
1413  const TargetRegisterClass *RegClassForVT[MVT::LAST_VALUETYPE];
1414  unsigned char NumRegistersForVT[MVT::LAST_VALUETYPE];
1415  MVT RegisterTypeForVT[MVT::LAST_VALUETYPE];
1416
1417  /// This indicates the "representative" register class to use for each
1418  /// ValueType the target supports natively. This information is used by the
1419  /// scheduler to track register pressure. By default, the representative
1420  /// register class is the largest legal super-reg register class of the
1421  /// register class of the specified type. e.g. On x86, i8, i16, and i32's
1422  /// representative class would be GR32.
1423  const TargetRegisterClass *RepRegClassForVT[MVT::LAST_VALUETYPE];
1424
1425  /// This indicates the "cost" of the "representative" register class for each
1426  /// ValueType. The cost is used by the scheduler to approximate register
1427  /// pressure.
1428  uint8_t RepRegClassCostForVT[MVT::LAST_VALUETYPE];
1429
1430  /// For any value types we are promoting or expanding, this contains the value
1431  /// type that we are changing to.  For Expanded types, this contains one step
1432  /// of the expand (e.g. i64 -> i32), even if there are multiple steps required
1433  /// (e.g. i64 -> i16).  For types natively supported by the system, this holds
1434  /// the same type (e.g. i32 -> i32).
1435  MVT TransformToType[MVT::LAST_VALUETYPE];
1436
1437  /// For each operation and each value type, keep a LegalizeAction that
1438  /// indicates how instruction selection should deal with the operation.  Most
1439  /// operations are Legal (aka, supported natively by the target), but
1440  /// operations that are not should be described.  Note that operations on
1441  /// non-legal value types are not described here.
1442  uint8_t OpActions[MVT::LAST_VALUETYPE][ISD::BUILTIN_OP_END];
1443
1444  /// For each load extension type and each value type, keep a LegalizeAction
1445  /// that indicates how instruction selection should deal with a load of a
1446  /// specific value type and extension type.
1447  uint8_t LoadExtActions[MVT::LAST_VALUETYPE][ISD::LAST_LOADEXT_TYPE];
1448
1449  /// For each value type pair keep a LegalizeAction that indicates whether a
1450  /// truncating store of a specific value type and truncating type is legal.
1451  uint8_t TruncStoreActions[MVT::LAST_VALUETYPE][MVT::LAST_VALUETYPE];
1452
1453  /// For each indexed mode and each value type, keep a pair of LegalizeAction
1454  /// that indicates how instruction selection should deal with the load /
1455  /// store.
1456  ///
1457  /// The first dimension is the value_type for the reference. The second
1458  /// dimension represents the various modes for load store.
1459  uint8_t IndexedModeActions[MVT::LAST_VALUETYPE][ISD::LAST_INDEXED_MODE];
1460
1461  /// For each condition code (ISD::CondCode) keep a LegalizeAction that
1462  /// indicates how instruction selection should deal with the condition code.
1463  ///
1464  /// Because each CC action takes up 2 bits, we need to have the array size be
1465  /// large enough to fit all of the value types. This can be done by rounding
1466  /// up the MVT::LAST_VALUETYPE value to the next multiple of 16.
1467  uint32_t CondCodeActions[ISD::SETCC_INVALID][(MVT::LAST_VALUETYPE + 15) / 16];
1468
1469  ValueTypeActionImpl ValueTypeActions;
1470
1471public:
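  /// Return the (LegalizeTypeAction, EVT) pair describing how the given type
  /// should be legalized: the action to take and the type to convert to in a
  /// single legalization step.  For example, on a target whose widest legal
  /// vector type is v2i64, an illegal <4 x i64> would typically yield
  /// (TypeSplitVector, <2 x i64>).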
1472  LegalizeKind
1473  getTypeConversion(LLVMContext &Context, EVT VT) const {
1474    // If this is a simple type, use the ComputeRegisterProp mechanism.
1475    if (VT.isSimple()) {
1476      MVT SVT = VT.getSimpleVT();
1477      assert((unsigned)SVT.SimpleTy < array_lengthof(TransformToType));
1478      MVT NVT = TransformToType[SVT.SimpleTy];
1479      LegalizeTypeAction LA = ValueTypeActions.getTypeAction(SVT);
1480
1481      assert(
1482        (LA == TypeLegal ||
1483         ValueTypeActions.getTypeAction(NVT) != TypePromoteInteger)
1484         && "Promote may not follow Expand or Promote");
1485
1486      if (LA == TypeSplitVector)
1487        return LegalizeKind(LA, EVT::getVectorVT(Context,
1488                                                 SVT.getVectorElementType(),
1489                                                 SVT.getVectorNumElements()/2));
1490      if (LA == TypeScalarizeVector)
1491        return LegalizeKind(LA, SVT.getVectorElementType());
1492      return LegalizeKind(LA, NVT);
1493    }
1494
1495    // Handle Extended Scalar Types.
1496    if (!VT.isVector()) {
1497      assert(VT.isInteger() && "Float types must be simple");
1498      unsigned BitSize = VT.getSizeInBits();
1499      // First promote to a power-of-two size, then expand if necessary.
1500      if (BitSize < 8 || !isPowerOf2_32(BitSize)) {
1501        EVT NVT = VT.getRoundIntegerType(Context);
1502        assert(NVT != VT && "Unable to round integer VT");
1503        LegalizeKind NextStep = getTypeConversion(Context, NVT);
1504        // Avoid multi-step promotion.
1505        if (NextStep.first == TypePromoteInteger) return NextStep;
1506        // Return rounded integer type.
1507        return LegalizeKind(TypePromoteInteger, NVT);
1508      }
1509
1510      return LegalizeKind(TypeExpandInteger,
1511                          EVT::getIntegerVT(Context, VT.getSizeInBits()/2));
1512    }
1513
1514    // Handle vector types.
1515    unsigned NumElts = VT.getVectorNumElements();
1516    EVT EltVT = VT.getVectorElementType();
1517
1518    // Vectors with only one element are always scalarized.
1519    if (NumElts == 1)
1520      return LegalizeKind(TypeScalarizeVector, EltVT);
1521
1522    // Try to widen the vector until the number of elements is a power of two,
1523    // then promote the element type to a legal type later on, for example:
1524    // <3 x i8> -> <4 x i8> -> <4 x i32>
1525    if (EltVT.isInteger()) {
1526      // Vectors with a number of elements that is not a power of two are always
1527      // widened, for example <3 x i8> -> <4 x i8>.
1528      if (!VT.isPow2VectorType()) {
1529        NumElts = (unsigned)NextPowerOf2(NumElts);
1530        EVT NVT = EVT::getVectorVT(Context, EltVT, NumElts);
1531        return LegalizeKind(TypeWidenVector, NVT);
1532      }
1533
1534      // Examine the element type.
1535      LegalizeKind LK = getTypeConversion(Context, EltVT);
1536
1537      // If type is to be expanded, split the vector.
1538      //  <4 x i140> -> <2 x i140>
1539      if (LK.first == TypeExpandInteger)
1540        return LegalizeKind(TypeSplitVector,
1541                            EVT::getVectorVT(Context, EltVT, NumElts / 2));
1542
1543      // Promote the integer element types until a legal vector type is found
1544      // or until the element integer type is too big. If a legal type was not
1545      // found, fall back to the usual mechanism of widening/splitting the
1546      // vector.
1547      EVT OldEltVT = EltVT;
1548      while (1) {
1549        // Increase the bitwidth of the element to the next pow-of-two
1550        // (which is greater than 8 bits).
1551        EltVT = EVT::getIntegerVT(Context, 1 + EltVT.getSizeInBits()
1552                                 ).getRoundIntegerType(Context);
1553
1554        // Stop trying when getting a non-simple element type.
1555        // Note that vector elements may be greater than legal vector element
1556        // types. Example: X86 XMM registers hold 64-bit elements on 32-bit
1557        // systems.
1558        if (!EltVT.isSimple()) break;
1559
1560        // Build a new vector type and check if it is legal.
1561        MVT NVT = MVT::getVectorVT(EltVT.getSimpleVT(), NumElts);
1562        // Found a legal promoted vector type.
1563        if (NVT != MVT() && ValueTypeActions.getTypeAction(NVT) == TypeLegal)
1564          return LegalizeKind(TypePromoteInteger,
1565                              EVT::getVectorVT(Context, EltVT, NumElts));
1566      }
1567
1568      // Reset the type to the unexpanded type if we did not find a legal vector
1569      // type with a promoted vector element type.
1570      EltVT = OldEltVT;
1571    }
1572
1573    // Try to widen the vector until a legal type is found.
1574    // If there is no wider legal type, split the vector.
1575    while (1) {
1576      // Round up to the next power of 2.
1577      NumElts = (unsigned)NextPowerOf2(NumElts);
1578
1579      // If there is no simple vector type with this many elements then there
1580      // cannot be a larger legal vector type.  Note that this assumes that
1581      // there are no skipped intermediate vector types in the simple types.
1582      if (!EltVT.isSimple()) break;
1583      MVT LargerVector = MVT::getVectorVT(EltVT.getSimpleVT(), NumElts);
1584      if (LargerVector == MVT()) break;
1585
1586      // If this type is legal then widen the vector.
1587      if (ValueTypeActions.getTypeAction(LargerVector) == TypeLegal)
1588        return LegalizeKind(TypeWidenVector, LargerVector);
1589    }
1590
1591    // Widen odd vectors to next power of two.
1592    if (!VT.isPow2VectorType()) {
1593      EVT NVT = VT.getPow2VectorType(Context);
1594      return LegalizeKind(TypeWidenVector, NVT);
1595    }
1596
1597    // Vectors with illegal element types are expanded.
1598    EVT NVT = EVT::getVectorVT(Context, EltVT, VT.getVectorNumElements() / 2);
1599    return LegalizeKind(TypeSplitVector, NVT);
1600  }
1601
1602private:
1603  std::vector<std::pair<MVT, const TargetRegisterClass*> > AvailableRegClasses;
1604
1605  /// Targets can specify ISD nodes that they would like PerformDAGCombine
1606  /// callbacks for by calling setTargetDAGCombine(), which sets a bit in this
1607  /// array.
1608  unsigned char
1609  TargetDAGCombineArray[(ISD::BUILTIN_OP_END+CHAR_BIT-1)/CHAR_BIT];
1610
1611  /// For operations that must be promoted to a specific type, this holds the
1612  /// destination type.  This map should be sparse, so don't hold it as an
1613  /// array.
1614  ///
1615  /// Targets add entries to this map with AddPromotedToType(..), clients access
1616  /// this with getTypeToPromoteTo(..).
1617  std::map<std::pair<unsigned, MVT::SimpleValueType>, MVT::SimpleValueType>
1618    PromoteToType;
1619
1620  /// Stores the name of each libcall.
1621  const char *LibcallRoutineNames[RTLIB::UNKNOWN_LIBCALL];
1622
1623  /// The ISD::CondCode that should be used to test the result of each
1624  /// comparison libcall against zero.
1625  ISD::CondCode CmpLibcallCCs[RTLIB::UNKNOWN_LIBCALL];
1626
1627  /// Stores the CallingConv that should be used for each libcall.
1628  CallingConv::ID LibcallCallingConvs[RTLIB::UNKNOWN_LIBCALL];
1629
1630protected:
1631  /// \brief Specify maximum number of store instructions per memset call.
1632  ///
1633  /// When lowering \@llvm.memset this field specifies the maximum number of
1634  /// store operations that may be substituted for the call to memset. Targets
1635  /// must set this value based on the cost threshold for that target. Targets
1636  /// should assume that the memset will be done using as many of the largest
1637  /// store operations first, followed by smaller ones, if necessary, per
1638  /// alignment restrictions. For example, storing 9 bytes on a 32-bit machine
1639  /// with 16-bit alignment would result in four 2-byte stores and one 1-byte
1640  /// store.  This only applies to setting a constant array of a constant size.
1641  unsigned MaxStoresPerMemset;
1642
1643  /// Maximum number of store operations that may be substituted for the call
1644  /// to memset, used for functions with OptSize attribute.
1645  unsigned MaxStoresPerMemsetOptSize;
1646
1647  /// \brief Specify maximum number of store instructions per memcpy call.
1648  ///
1649  /// When lowering \@llvm.memcpy this field specifies the maximum number of
1650  /// store operations that may be substituted for a call to memcpy. Targets
1651  /// must set this value based on the cost threshold for that target. Targets
1652  /// should assume that the memcpy will be done using as many of the largest
1653  /// store operations first, followed by smaller ones, if necessary, per
1654  /// alignment restrictions. For example, storing 7 bytes on a 32-bit machine
1655  /// with 32-bit alignment would result in one 4-byte store, one 2-byte store,
1656  /// and one 1-byte store. This only applies to copying a constant array of
1657  /// constant size.
1658  unsigned MaxStoresPerMemcpy;
1659
1660  /// Maximum number of store operations that may be substituted for a call to
1661  /// memcpy, used for functions with OptSize attribute.
1662  unsigned MaxStoresPerMemcpyOptSize;
1663
1664  /// \brief Specify maximum number of store instructions per memmove call.
1665  ///
1666  /// When lowering \@llvm.memmove this field specifies the maximum number of
1667  /// store instructions that may be substituted for a call to memmove. Targets
1668  /// must set this value based on the cost threshold for that target. Targets
1669  /// should assume that the memmove will be done using as many of the largest
1670  /// store operations first, followed by smaller ones, if necessary, per
1671  /// alignment restrictions. For example, moving 9 bytes on a 32-bit machine
1672  /// with 8-bit alignment would result in nine 1-byte stores.  This only
1673  /// applies to copying a constant array of constant size.
1674  unsigned MaxStoresPerMemmove;
1675
1676  /// Maximum number of store instructions that may be substituted for a call to
1677  /// memmove, used for functions with OptSize attribute.
1678  unsigned MaxStoresPerMemmoveOptSize;
1679
1680  /// Tells the code generator that select is more expensive than a branch if
1681  /// the branch is usually predicted right.
1682  bool PredictableSelectIsExpensive;
1683
1684protected:
1685  /// Return true if the value types that can be represented by the specified
1686  /// register class are all legal.
1687  bool isLegalRC(const TargetRegisterClass *RC) const;
1688};
1689
1690/// This class defines information used to lower LLVM code to legal SelectionDAG
1691/// operators that the target instruction selector can accept natively.
1692///
1693/// This class also defines callbacks that targets must implement to lower
1694/// target-specific constructs to SelectionDAG operators.
1695class TargetLowering : public TargetLoweringBase {
1696  TargetLowering(const TargetLowering&) LLVM_DELETED_FUNCTION;
1697  void operator=(const TargetLowering&) LLVM_DELETED_FUNCTION;
1698
1699public:
1700  /// NOTE: The constructor takes ownership of TLOF.
1701  explicit TargetLowering(const TargetMachine &TM,
1702                          const TargetLoweringObjectFile *TLOF);
1703
1704  /// Returns true (and sets the base pointer, offset pointer and addressing
1705  /// mode by reference) if the node's address can be legally represented as a
1706  /// pre-indexed load / store address.
1707  virtual bool getPreIndexedAddressParts(SDNode * /*N*/, SDValue &/*Base*/,
1708                                         SDValue &/*Offset*/,
1709                                         ISD::MemIndexedMode &/*AM*/,
1710                                         SelectionDAG &/*DAG*/) const {
1711    return false;
1712  }
1713
1714  /// Returns true (and sets the base pointer, offset pointer and addressing
1715  /// mode by reference) if this node can be combined with a load / store to
1716  /// form a post-indexed load / store.
1717  virtual bool getPostIndexedAddressParts(SDNode * /*N*/, SDNode * /*Op*/,
1718                                          SDValue &/*Base*/,
1719                                          SDValue &/*Offset*/,
1720                                          ISD::MemIndexedMode &/*AM*/,
1721                                          SelectionDAG &/*DAG*/) const {
1722    return false;
1723  }
1724
1725  /// Return the entry encoding for a jump table in the current function.  The
1726  /// returned value is a member of the MachineJumpTableInfo::JTEntryKind enum.
1727  virtual unsigned getJumpTableEncoding() const;
1728
1729  virtual const MCExpr *
1730  LowerCustomJumpTableEntry(const MachineJumpTableInfo * /*MJTI*/,
1731                            const MachineBasicBlock * /*MBB*/, unsigned /*uid*/,
1732                            MCContext &/*Ctx*/) const {
1733    llvm_unreachable("Need to implement this hook if target has custom JTIs");
1734  }
1735
1736  /// Returns relocation base for the given PIC jumptable.
1737  virtual SDValue getPICJumpTableRelocBase(SDValue Table,
1738                                           SelectionDAG &DAG) const;
1739
1740  /// This returns the relocation base for the given PIC jumptable, the same as
1741  /// getPICJumpTableRelocBase, but as an MCExpr.
1742  virtual const MCExpr *
1743  getPICJumpTableRelocBaseExpr(const MachineFunction *MF,
1744                               unsigned JTI, MCContext &Ctx) const;
1745
1746  /// Return true if folding a constant offset with the given GlobalAddress is
1747  /// legal.  It is frequently not legal in PIC relocation models.
1748  virtual bool isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const;
1749
1750  bool isInTailCallPosition(SelectionDAG &DAG, SDNode *Node,
1751                            SDValue &Chain) const;
1752
1753  void softenSetCCOperands(SelectionDAG &DAG, EVT VT,
1754                           SDValue &NewLHS, SDValue &NewRHS,
1755                           ISD::CondCode &CCCode, SDLoc DL) const;
1756
1757  /// Returns a pair of (return value, chain).
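  ///
  /// For example, lowering an f64 addition to a call to the ADD_F64 runtime
  /// routine might look roughly like this (sketch; LHS, RHS and dl come from
  /// the node being lowered):
  /// \code
  ///   SDValue Ops[2] = { LHS, RHS };
  ///   std::pair<SDValue, SDValue> CallInfo =
  ///       makeLibCall(DAG, RTLIB::ADD_F64, MVT::f64, Ops, 2,
  ///                   /*isSigned=*/false, dl);
  ///   SDValue Result = CallInfo.first;    // return value
  ///   SDValue OutChain = CallInfo.second; // output chain
  /// \endcode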
1758  std::pair<SDValue, SDValue> makeLibCall(SelectionDAG &DAG, RTLIB::Libcall LC,
1759                                          EVT RetVT, const SDValue *Ops,
1760                                          unsigned NumOps, bool isSigned,
1761                                          SDLoc dl, bool doesNotReturn = false,
1762                                          bool isReturnValueUsed = true) const;
1763
1764  //===--------------------------------------------------------------------===//
1765  // TargetLowering Optimization Methods
1766  //
1767
1768  /// A convenience struct that encapsulates a DAG, and two SDValues for
1769  /// returning information from TargetLowering to its clients that want to
1770  /// combine.
1771  struct TargetLoweringOpt {
1772    SelectionDAG &DAG;
1773    bool LegalTys;
1774    bool LegalOps;
1775    SDValue Old;
1776    SDValue New;
1777
1778    explicit TargetLoweringOpt(SelectionDAG &InDAG,
1779                               bool LT, bool LO) :
1780      DAG(InDAG), LegalTys(LT), LegalOps(LO) {}
1781
1782    bool LegalTypes() const { return LegalTys; }
1783    bool LegalOperations() const { return LegalOps; }
1784
1785    bool CombineTo(SDValue O, SDValue N) {
1786      Old = O;
1787      New = N;
1788      return true;
1789    }
1790
1791    /// Check to see if the specified operand of the specified instruction is a
1792    /// constant integer.  If so, check to see if there are any bits set in the
1793    /// constant that are not demanded.  If so, shrink the constant and return
1794    /// true.
1795    bool ShrinkDemandedConstant(SDValue Op, const APInt &Demanded);
1796
1797    /// Convert x+y to (VT)((SmallVT)x+(SmallVT)y) if the casts are free.  This
1798    /// uses isZExtFree and ZERO_EXTEND for the widening cast, but it could be
1799    /// generalized for targets with other types of implicit widening casts.
1800    bool ShrinkDemandedOp(SDValue Op, unsigned BitWidth, const APInt &Demanded,
1801                          SDLoc dl);
1802  };
1803
1804  /// Look at Op.  At this point, we know that only the DemandedMask bits of the
1805  /// result of Op are ever used downstream.  If we can use this information to
1806  /// simplify Op, create a new simplified DAG node and return true, returning
1807  /// the original and new nodes in Old and New.  Otherwise, analyze the
1808  /// expression and return a mask of KnownOne and KnownZero bits for the
1809  /// expression (used to simplify the caller).  The KnownZero/One bits may only
1810  /// be accurate for those bits in the DemandedMask.
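  ///
  /// A target's PerformDAGCombine might use this roughly as follows (sketch;
  /// here only the low 8 bits of N's first operand are assumed to be demanded):
  /// \code
  ///   TargetLowering::TargetLoweringOpt TLO(DCI.DAG, !DCI.isBeforeLegalize(),
  ///                                         !DCI.isBeforeLegalizeOps());
  ///   APInt Demanded = APInt::getLowBitsSet(32, 8);
  ///   APInt KnownZero, KnownOne;
  ///   if (SimplifyDemandedBits(N->getOperand(0), Demanded,
  ///                            KnownZero, KnownOne, TLO))
  ///     DCI.CommitTargetLoweringOpt(TLO);
  /// \endcode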
1811  bool SimplifyDemandedBits(SDValue Op, const APInt &DemandedMask,
1812                            APInt &KnownZero, APInt &KnownOne,
1813                            TargetLoweringOpt &TLO, unsigned Depth = 0) const;
1814
1815  /// Determine which of the bits specified in Mask are known to be either zero
1816  /// or one and return them in the KnownZero/KnownOne bitsets.
1817  virtual void computeMaskedBitsForTargetNode(const SDValue Op,
1818                                              APInt &KnownZero,
1819                                              APInt &KnownOne,
1820                                              const SelectionDAG &DAG,
1821                                              unsigned Depth = 0) const;
1822
1823  /// This method can be implemented by targets that want to expose additional
1824  /// information about sign bits to the DAG Combiner.
1825  virtual unsigned ComputeNumSignBitsForTargetNode(SDValue Op,
1826                                                   unsigned Depth = 0) const;
1827
1828  struct DAGCombinerInfo {
1829    void *DC;  // The DAG Combiner object.
1830    CombineLevel Level;
1831    bool CalledByLegalizer;
1832  public:
1833    SelectionDAG &DAG;
1834
1835    DAGCombinerInfo(SelectionDAG &dag, CombineLevel level,  bool cl, void *dc)
1836      : DC(dc), Level(level), CalledByLegalizer(cl), DAG(dag) {}
1837
1838    bool isBeforeLegalize() const { return Level == BeforeLegalizeTypes; }
1839    bool isBeforeLegalizeOps() const { return Level < AfterLegalizeVectorOps; }
1840    bool isAfterLegalizeVectorOps() const {
1841      return Level == AfterLegalizeDAG;
1842    }
1843    CombineLevel getDAGCombineLevel() { return Level; }
1844    bool isCalledByLegalizer() const { return CalledByLegalizer; }
1845
1846    void AddToWorklist(SDNode *N);
1847    void RemoveFromWorklist(SDNode *N);
1848    SDValue CombineTo(SDNode *N, const std::vector<SDValue> &To,
1849                      bool AddTo = true);
1850    SDValue CombineTo(SDNode *N, SDValue Res, bool AddTo = true);
1851    SDValue CombineTo(SDNode *N, SDValue Res0, SDValue Res1, bool AddTo = true);
1852
1853    void CommitTargetLoweringOpt(const TargetLoweringOpt &TLO);
1854  };
1855
1856  /// Try to simplify a setcc built with the specified operands and cc. If it is
1857  /// unable to simplify it, return a null SDValue.
1858  SDValue SimplifySetCC(EVT VT, SDValue N0, SDValue N1,
1859                          ISD::CondCode Cond, bool foldBooleans,
1860                          DAGCombinerInfo &DCI, SDLoc dl) const;
1861
1862  /// Returns true (and the GlobalValue and the offset) if the node is a
1863  /// GlobalAddress + offset.
1864  virtual bool
1865  isGAPlusOffset(SDNode *N, const GlobalValue* &GA, int64_t &Offset) const;
1866
1867  /// This method will be invoked for all target nodes and for any
1868  /// target-independent nodes that the target has registered (via
1869  /// setTargetDAGCombine) to have this method invoked for.
1870  ///
1871  /// The semantics are as follows:
1872  /// Return Value:
1873  ///   SDValue.Val == 0   - No change was made
1874  ///   SDValue.Val == N   - N was replaced, is dead, and is already handled.
1875  ///   otherwise          - N should be replaced by the returned Operand.
1876  ///
1877  /// In addition, methods provided by DAGCombinerInfo may be used to perform
1878  /// more complex transformations.
1879  ///
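  /// A minimal override might look like this (sketch for a hypothetical
  /// target; the class name is illustrative):
  /// \code
  ///   SDValue MyTargetLowering::PerformDAGCombine(SDNode *N,
  ///                                               DAGCombinerInfo &DCI) const {
  ///     switch (N->getOpcode()) {
  ///     default: break;
  ///     // ... target-specific combines that return a replacement SDValue ...
  ///     }
  ///     return SDValue();   // no change was made
  ///   }
  /// \endcode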
1880  virtual SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const;
1881
1882  /// Return true if the target has native support for the specified value type
1883  /// and it is 'desirable' to use the type for the given node type. e.g. On x86
1884  /// i16 is legal, but undesirable since i16 instruction encodings are longer
1885  /// and some i16 instructions are slow.
1886  virtual bool isTypeDesirableForOp(unsigned /*Opc*/, EVT VT) const {
1887    // By default, assume all legal types are desirable.
1888    return isTypeLegal(VT);
1889  }
1890
1891  /// Return true if it is profitable for the DAG combiner to transform a
1892  /// floating point op of the specified opcode to an equivalent op of an
1893  /// integer type. e.g. f32 load -> i32 load can be profitable on ARM.
1894  virtual bool isDesirableToTransformToIntegerOp(unsigned /*Opc*/,
1895                                                 EVT /*VT*/) const {
1896    return false;
1897  }
1898
1899  /// This method queries the target whether it is beneficial for the DAG
1900  /// combiner to promote the specified node. If true, it should return the
1901  /// desired promotion type by reference.
1902  virtual bool IsDesirableToPromoteOp(SDValue /*Op*/, EVT &/*PVT*/) const {
1903    return false;
1904  }
1905
1906  //===--------------------------------------------------------------------===//
1907  // Lowering methods - These methods must be implemented by targets so that
1908  // the SelectionDAGBuilder code knows how to lower these.
1909  //
1910
1911  /// This hook must be implemented to lower the incoming (formal) arguments,
1912  /// described by the Ins array, into the specified DAG. The implementation
1913  /// should fill in the InVals array with legal-type argument values, and
1914  /// return the resulting token chain value.
1915  ///
1916  virtual SDValue
1917    LowerFormalArguments(SDValue /*Chain*/, CallingConv::ID /*CallConv*/,
1918                         bool /*isVarArg*/,
1919                         const SmallVectorImpl<ISD::InputArg> &/*Ins*/,
1920                         SDLoc /*dl*/, SelectionDAG &/*DAG*/,
1921                         SmallVectorImpl<SDValue> &/*InVals*/) const {
1922    llvm_unreachable("Not Implemented");
1923  }
1924
1925  struct ArgListEntry {
1926    SDValue Node;
1927    Type* Ty;
1928    bool isSExt     : 1;
1929    bool isZExt     : 1;
1930    bool isInReg    : 1;
1931    bool isSRet     : 1;
1932    bool isNest     : 1;
1933    bool isByVal    : 1;
1934    bool isReturned : 1;
1935    uint16_t Alignment;
1936
1937    ArgListEntry() : isSExt(false), isZExt(false), isInReg(false),
1938      isSRet(false), isNest(false), isByVal(false), isReturned(false),
1939      Alignment(0) { }
1940
1941    void setAttributes(ImmutableCallSite *CS, unsigned AttrIdx);
1942  };
1943  typedef std::vector<ArgListEntry> ArgListTy;
1944
1945  /// This structure contains all information that is necessary for lowering
1946  /// calls. It is passed to TLI::LowerCallTo when the SelectionDAG builder
1947  /// needs to lower a call, and targets will see this struct in their LowerCall
1948  /// implementation.
1949  struct CallLoweringInfo {
1950    SDValue Chain;
1951    Type *RetTy;
1952    bool RetSExt           : 1;
1953    bool RetZExt           : 1;
1954    bool IsVarArg          : 1;
1955    bool IsInReg           : 1;
1956    bool DoesNotReturn     : 1;
1957    bool IsReturnValueUsed : 1;
1958
1959    // IsTailCall should be modified by implementations of
1960    // TargetLowering::LowerCall that perform tail call conversions.
1961    bool IsTailCall;
1962
1963    unsigned NumFixedArgs;
1964    CallingConv::ID CallConv;
1965    SDValue Callee;
1966    ArgListTy &Args;
1967    SelectionDAG &DAG;
1968    SDLoc DL;
1969    ImmutableCallSite *CS;
1970    SmallVector<ISD::OutputArg, 32> Outs;
1971    SmallVector<SDValue, 32> OutVals;
1972    SmallVector<ISD::InputArg, 32> Ins;
1973
1975    /// Constructs a call lowering context based on the ImmutableCallSite \p cs.
1976    CallLoweringInfo(SDValue chain, Type *retTy,
1977                     FunctionType *FTy, bool isTailCall, SDValue callee,
1978                     ArgListTy &args, SelectionDAG &dag, SDLoc dl,
1979                     ImmutableCallSite &cs)
1980    : Chain(chain), RetTy(retTy), RetSExt(cs.paramHasAttr(0, Attribute::SExt)),
1981      RetZExt(cs.paramHasAttr(0, Attribute::ZExt)), IsVarArg(FTy->isVarArg()),
1982      IsInReg(cs.paramHasAttr(0, Attribute::InReg)),
1983      DoesNotReturn(cs.doesNotReturn()),
1984      IsReturnValueUsed(!cs.getInstruction()->use_empty()),
1985      IsTailCall(isTailCall), NumFixedArgs(FTy->getNumParams()),
1986      CallConv(cs.getCallingConv()), Callee(callee), Args(args), DAG(dag),
1987      DL(dl), CS(&cs) {}
1988
1989    /// Constructs a call lowering context based on the provided call
1990    /// information.
1991    CallLoweringInfo(SDValue chain, Type *retTy, bool retSExt, bool retZExt,
1992                     bool isVarArg, bool isInReg, unsigned numFixedArgs,
1993                     CallingConv::ID callConv, bool isTailCall,
1994                     bool doesNotReturn, bool isReturnValueUsed, SDValue callee,
1995                     ArgListTy &args, SelectionDAG &dag, SDLoc dl)
1996    : Chain(chain), RetTy(retTy), RetSExt(retSExt), RetZExt(retZExt),
1997      IsVarArg(isVarArg), IsInReg(isInReg), DoesNotReturn(doesNotReturn),
1998      IsReturnValueUsed(isReturnValueUsed), IsTailCall(isTailCall),
1999      NumFixedArgs(numFixedArgs), CallConv(callConv), Callee(callee),
2000      Args(args), DAG(dag), DL(dl), CS(NULL) {}
2001  };
2002
2003  /// This function lowers an abstract call to a function into an actual call.
2004  /// This returns a pair of operands.  The first element is the return value
2005  /// for the function (if RetTy is not VoidTy).  The second element is the
2006  /// outgoing token chain. It calls LowerCall to do the actual lowering.
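  ///
  /// A typical use from target lowering code might look roughly like this
  /// (sketch; Chain, RetTy, Callee, Args and dl depend on the call being
  /// lowered):
  /// \code
  ///   TargetLowering::CallLoweringInfo CLI(Chain, RetTy,
  ///       /*retSExt=*/false, /*retZExt=*/false, /*isVarArg=*/false,
  ///       /*isInReg=*/false, Args.size(), CallingConv::C, /*isTailCall=*/false,
  ///       /*doesNotReturn=*/false, /*isReturnValueUsed=*/true,
  ///       Callee, Args, DAG, dl);
  ///   std::pair<SDValue, SDValue> CallResult = LowerCallTo(CLI);
  ///   // CallResult.first  : the call's return value (if RetTy is not void)
  ///   // CallResult.second : the outgoing token chain
  /// \endcode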
2007  std::pair<SDValue, SDValue> LowerCallTo(CallLoweringInfo &CLI) const;
2008
2009  /// This hook must be implemented to lower calls into the specified
2010  /// DAG. The outgoing arguments to the call are described by the Outs array,
2011  /// and the values to be returned by the call are described by the Ins
2012  /// array. The implementation should fill in the InVals array with legal-type
2013  /// return values from the call, and return the resulting token chain value.
2014  virtual SDValue
2015    LowerCall(CallLoweringInfo &/*CLI*/,
2016              SmallVectorImpl<SDValue> &/*InVals*/) const {
2017    llvm_unreachable("Not Implemented");
2018  }
2019
2020  /// Target-specific cleanup for formal ByVal parameters.
2021  virtual void HandleByVal(CCState *, unsigned &, unsigned) const {}
2022
2023  /// This hook should be implemented to check whether the return values
2024  /// described by the Outs array can fit into the return registers.  If false
2025  /// is returned, an sret-demotion is performed.
2026  virtual bool CanLowerReturn(CallingConv::ID /*CallConv*/,
2027                              MachineFunction &/*MF*/, bool /*isVarArg*/,
2028               const SmallVectorImpl<ISD::OutputArg> &/*Outs*/,
2029               LLVMContext &/*Context*/) const
2030  {
2031    // Return true by default to get preexisting behavior.
2032    return true;
2033  }
2034
2035  /// This hook must be implemented to lower outgoing return values, described
2036  /// by the Outs array, into the specified DAG. The implementation should
2037  /// return the resulting token chain value.
2038  virtual SDValue
2039    LowerReturn(SDValue /*Chain*/, CallingConv::ID /*CallConv*/,
2040                bool /*isVarArg*/,
2041                const SmallVectorImpl<ISD::OutputArg> &/*Outs*/,
2042                const SmallVectorImpl<SDValue> &/*OutVals*/,
2043                SDLoc /*dl*/, SelectionDAG &/*DAG*/) const {
2044    llvm_unreachable("Not Implemented");
2045  }
2046
2047  /// Return true if the result of the specified node is used by a return node
2048  /// only. It also computes and returns the input chain for the tail call.
2049  ///
2050  /// This is used to determine whether it is possible to codegen a libcall as
2051  /// tail call at legalization time.
2052  virtual bool isUsedByReturnOnly(SDNode *, SDValue &/*Chain*/) const {
2053    return false;
2054  }
2055
2056  /// Return true if the target may be able to emit the call instruction as a tail
2057  /// call. This is used by optimization passes to determine if it's profitable
2058  /// to duplicate return instructions to enable tailcall optimization.
2059  virtual bool mayBeEmittedAsTailCall(CallInst *) const {
2060    return false;
2061  }
2062
2063  /// Return the type that should be used to zero or sign extend a
2064  /// zeroext/signext integer argument or return value.  FIXME: Most C calling
2065  /// conventions require the return type to be promoted, but this is not true
2066  /// all the time, e.g. i1 on x86-64. It is also not necessary for non-C
2067  /// calling conventions. The frontend should handle this and include all of
2068  /// the necessary information.
2069  virtual MVT getTypeForExtArgOrReturn(MVT VT,
2070                                       ISD::NodeType /*ExtendKind*/) const {
2071    MVT MinVT = getRegisterType(MVT::i32);
2072    return VT.bitsLT(MinVT) ? MinVT : VT;
2073  }
2074
2075  /// Returns a 0 terminated array of registers that can be safely used as
2076  /// scratch registers.
2077  virtual const uint16_t *getScratchRegisters(CallingConv::ID CC) const {
2078    return NULL;
2079  }
2080
2081  /// This callback is invoked by the type legalizer to legalize nodes with an
2082  /// illegal operand type but legal result types.  It replaces the
2083  /// LowerOperation callback in the type Legalizer.  The reason we can not do
2084  /// away with LowerOperation entirely is that LegalizeDAG isn't yet ready to
2085  /// use this callback.
2086  ///
2087  /// TODO: Consider merging with ReplaceNodeResults.
2088  ///
2089  /// The target places new result values for the node in Results (their number
2090  /// and types must exactly match those of the original return values of
2091  /// the node), or leaves Results empty, which indicates that the node is not
2092  /// to be custom lowered after all.
2093  /// The default implementation calls LowerOperation.
2094  virtual void LowerOperationWrapper(SDNode *N,
2095                                     SmallVectorImpl<SDValue> &Results,
2096                                     SelectionDAG &DAG) const;
2097
2098  /// This callback is invoked for operations that are unsupported by the
2099  /// target, which are registered to use 'custom' lowering, and whose defined
2100  /// values are all legal.  If the target has no operations that require custom
2101  /// lowering, it need not implement this.  The default implementation of this
2102  /// aborts.
2103  virtual SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const;
2104
2105  /// This callback is invoked when a node result type is illegal for the
2106  /// target, and the operation was registered to use 'custom' lowering for that
2107  /// result type.  The target places new result values for the node in Results
2108  /// (their number and types must exactly match those of the original return
2109  /// values of the node), or leaves Results empty, which indicates that the
2110  /// node is not to be custom lowered after all.
2111  ///
2112  /// If the target has no operations that require custom lowering, it need not
2113  /// implement this.  The default implementation aborts.
2114  virtual void ReplaceNodeResults(SDNode * /*N*/,
2115                                  SmallVectorImpl<SDValue> &/*Results*/,
2116                                  SelectionDAG &/*DAG*/) const {
2117    llvm_unreachable("ReplaceNodeResults not implemented for this target!");
2118  }
2119
2120  /// This method returns the name of a target specific DAG node.
2121  virtual const char *getTargetNodeName(unsigned Opcode) const;
2122
2123  /// This method returns a target specific FastISel object, or null if the
2124  /// target does not support "fast" ISel.
2125  virtual FastISel *createFastISel(FunctionLoweringInfo &,
2126                                   const TargetLibraryInfo *) const {
2127    return 0;
2128  }
2129
2130  //===--------------------------------------------------------------------===//
2131  // Inline Asm Support hooks
2132  //
2133
2134  /// This hook allows the target to expand an inline asm call to be explicit
2135  /// llvm code if it wants to.  This is useful for turning simple inline asms
2136  /// into LLVM intrinsics, which gives the compiler more information about the
2137  /// behavior of the code.
2138  virtual bool ExpandInlineAsm(CallInst *) const {
2139    return false;
2140  }
2141
2142  enum ConstraintType {
2143    C_Register,            // Constraint represents specific register(s).
2144    C_RegisterClass,       // Constraint represents any register in a class.
2145    C_Memory,              // Memory constraint.
2146    C_Other,               // Something else.
2147    C_Unknown              // Unsupported constraint.
2148  };
2149
2150  enum ConstraintWeight {
2151    // Generic weights.
2152    CW_Invalid  = -1,     // No match.
2153    CW_Okay     = 0,      // Acceptable.
2154    CW_Good     = 1,      // Good weight.
2155    CW_Better   = 2,      // Better weight.
2156    CW_Best     = 3,      // Best weight.
2157
2158    // Well-known weights.
2159    CW_SpecificReg  = CW_Okay,    // Specific register operands.
2160    CW_Register     = CW_Good,    // Register operands.
2161    CW_Memory       = CW_Better,  // Memory operands.
2162    CW_Constant     = CW_Best,    // Constant operand.
2163    CW_Default      = CW_Okay     // Default or don't know type.
2164  };
2165
2166  /// This contains information for each constraint that we are lowering.
2167  struct AsmOperandInfo : public InlineAsm::ConstraintInfo {
2168    /// This contains the actual string for the code, like "m".  TargetLowering
2169    /// picks the 'best' code from ConstraintInfo::Codes that most closely
2170    /// matches the operand.
2171    std::string ConstraintCode;
2172
2173    /// Information about the constraint code, e.g. Register, RegisterClass,
2174    /// Memory, Other, Unknown.
2175    TargetLowering::ConstraintType ConstraintType;
2176
2177    /// If this is the result output operand or a clobber, this is null,
2178    /// otherwise it is the incoming operand to the CallInst.  This gets
2179    /// modified as the asm is processed.
2180    Value *CallOperandVal;
2181
2182    /// The ValueType for the operand value.
2183    MVT ConstraintVT;
2184
2185    /// Return true if this is an input operand that is a matching constraint
2186    /// like "4".
2187    bool isMatchingInputConstraint() const;
2188
2189    /// If this is an input matching constraint, this method returns the output
2190    /// operand it matches.
2191    unsigned getMatchedOperand() const;
2192
2193    /// Copy constructor for copying from an AsmOperandInfo.
2194    AsmOperandInfo(const AsmOperandInfo &info)
2195      : InlineAsm::ConstraintInfo(info),
2196        ConstraintCode(info.ConstraintCode),
2197        ConstraintType(info.ConstraintType),
2198        CallOperandVal(info.CallOperandVal),
2199        ConstraintVT(info.ConstraintVT) {
2200    }
2201
2202    /// Copy constructor for copying from a ConstraintInfo.
2203    AsmOperandInfo(const InlineAsm::ConstraintInfo &info)
2204      : InlineAsm::ConstraintInfo(info),
2205        ConstraintType(TargetLowering::C_Unknown),
2206        CallOperandVal(0), ConstraintVT(MVT::Other) {
2207    }
2208  };
2209
2210  typedef std::vector<AsmOperandInfo> AsmOperandInfoVector;
2211
2212  /// Split up the constraint string from the inline assembly value into the
2213  /// specific constraints and their prefixes, and also tie in the associated
2214  /// operand values.  If this returns an empty vector, and if the constraint
2215  /// string itself isn't empty, there was an error parsing.
2216  virtual AsmOperandInfoVector ParseConstraints(ImmutableCallSite CS) const;
2217
2218  /// Examine constraint type and operand type and determine a weight value.
2219  /// The operand object must already have been set up with the operand type.
2220  virtual ConstraintWeight getMultipleConstraintMatchWeight(
2221      AsmOperandInfo &info, int maIndex) const;
2222
2223  /// Examine constraint string and operand type and determine a weight value.
2224  /// The operand object must already have been set up with the operand type.
2225  virtual ConstraintWeight getSingleConstraintMatchWeight(
2226      AsmOperandInfo &info, const char *constraint) const;
2227
2228  /// Determines the constraint code and constraint type to use for the specific
2229  /// AsmOperandInfo, setting OpInfo.ConstraintCode and OpInfo.ConstraintType.
2230  /// If the actual operand being passed in is available, it can be passed in as
2231  /// Op, otherwise an empty SDValue can be passed.
2232  virtual void ComputeConstraintToUse(AsmOperandInfo &OpInfo,
2233                                      SDValue Op,
2234                                      SelectionDAG *DAG = 0) const;
2235
2236  /// Given a constraint, return the type of constraint it is for this target.
2237  virtual ConstraintType getConstraintType(const std::string &Constraint) const;
2238
2239  /// Given a physical register constraint (e.g.  {edx}), return the register
2240  /// number and the register class for the register.
2241  ///
2242  /// Given a register class constraint, like 'r', if this corresponds directly
2243  /// to an LLVM register class, return a register of 0 and the register class
2244  /// pointer.
2245  ///
2246  /// This should only be used for C_Register constraints.  On error, this
2247  /// returns a register number of 0 and a null register class pointer.
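  ///
  /// For example, on a target whose 32-bit general purpose register class is
  /// GPR32, the query ("r", MVT::i32) would typically yield
  /// (0, &XXX::GPR32RegClass), while ("{r5}", MVT::i32) would yield
  /// (XXX::R5, &XXX::GPR32RegClass) (illustrative names only).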
2248  virtual std::pair<unsigned, const TargetRegisterClass*>
2249    getRegForInlineAsmConstraint(const std::string &Constraint,
2250                                 MVT VT) const;
2251
2252  /// Try to replace an X constraint, which matches anything, with another that
2253  /// has more specific requirements based on the type of the corresponding
2254  /// operand.  This returns null if there is no replacement to make.
2255  virtual const char *LowerXConstraint(EVT ConstraintVT) const;
2256
2257  /// Lower the specified operand into the Ops vector.  If it is invalid, don't
2258  /// add anything to Ops.
2259  virtual void LowerAsmOperandForConstraint(SDValue Op, std::string &Constraint,
2260                                            std::vector<SDValue> &Ops,
2261                                            SelectionDAG &DAG) const;
2262
2263  //===--------------------------------------------------------------------===//
2264  // Div utility functions
2265  //
2266  SDValue BuildExactSDIV(SDValue Op1, SDValue Op2, SDLoc dl,
2267                         SelectionDAG &DAG) const;
2268  SDValue BuildSDIV(SDNode *N, SelectionDAG &DAG, bool IsAfterLegalization,
2269                      std::vector<SDNode*> *Created) const;
2270  SDValue BuildUDIV(SDNode *N, SelectionDAG &DAG, bool IsAfterLegalization,
2271                      std::vector<SDNode*> *Created) const;
2272
2273  //===--------------------------------------------------------------------===//
2274  // Instruction Emitting Hooks
2275  //
2276
2277  /// This method should be implemented by targets that mark instructions with
2278  /// the 'usesCustomInserter' flag.  These instructions are special in various
2279  /// ways, which require special support to insert.  The specified MachineInstr
2280  /// is created but not inserted into any basic blocks, and this method is
2281  /// called to expand it into a sequence of instructions, potentially also
2282  /// creating new basic blocks and control flow.
2283  virtual MachineBasicBlock *
2284    EmitInstrWithCustomInserter(MachineInstr *MI, MachineBasicBlock *MBB) const;
2285
2286  /// This method should be implemented by targets that mark instructions with
2287  /// the 'hasPostISelHook' flag. These instructions must be adjusted after
2288  /// instruction selection by target hooks.  e.g. To fill in optional defs for
2289  /// ARM 's' setting instructions.
2290  virtual void
2291  AdjustInstrPostInstrSelection(MachineInstr *MI, SDNode *Node) const;
2292};
2293
2294/// Given an LLVM IR type and return type attributes, compute the return value
2295/// EVTs and flags, and optionally also the offsets, if the return value is
2296/// being lowered to memory.
2297void GetReturnInfo(Type* ReturnType, AttributeSet attr,
2298                   SmallVectorImpl<ISD::OutputArg> &Outs,
2299                   const TargetLowering &TLI);
2300
2301} // end llvm namespace
2302
2303#endif
2304