1251607Sdim//===-- SystemZISelLowering.cpp - SystemZ DAG lowering implementation -----===//
2251607Sdim//
3251607Sdim//                     The LLVM Compiler Infrastructure
4251607Sdim//
5251607Sdim// This file is distributed under the University of Illinois Open Source
6251607Sdim// License. See LICENSE.TXT for details.
7251607Sdim//
8251607Sdim//===----------------------------------------------------------------------===//
9251607Sdim//
10251607Sdim// This file implements the SystemZTargetLowering class.
11251607Sdim//
12251607Sdim//===----------------------------------------------------------------------===//
13251607Sdim
14251607Sdim#define DEBUG_TYPE "systemz-lower"
15251607Sdim
16251607Sdim#include "SystemZISelLowering.h"
17251607Sdim#include "SystemZCallingConv.h"
18251607Sdim#include "SystemZConstantPoolValue.h"
19251607Sdim#include "SystemZMachineFunctionInfo.h"
20251607Sdim#include "SystemZTargetMachine.h"
21251607Sdim#include "llvm/CodeGen/CallingConvLower.h"
22251607Sdim#include "llvm/CodeGen/MachineInstrBuilder.h"
23251607Sdim#include "llvm/CodeGen/MachineRegisterInfo.h"
24251607Sdim#include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"
25251607Sdim
26263509Sdim#include <cctype>
27263509Sdim
28251607Sdimusing namespace llvm;
29251607Sdim
namespace {
// Describes how to turn an IPM result X into a 0/1 value, using the
// expression (((X ^ XORValue) + AddValue) >> Bit).
struct IPMConversion {
  IPMConversion(unsigned XorVal, int64_t AddVal, unsigned BitPos)
    : XORValue(XorVal), AddValue(AddVal), Bit(BitPos) {}

  int64_t XORValue;   // value XORed into the IPM result first
  int64_t AddValue;   // value added after the XOR
  unsigned Bit;       // bit position that holds the final 0/1 result
};
} // end anonymous namespace
42263509Sdim
43251607Sdim// Classify VT as either 32 or 64 bit.
44251607Sdimstatic bool is32Bit(EVT VT) {
45251607Sdim  switch (VT.getSimpleVT().SimpleTy) {
46251607Sdim  case MVT::i32:
47251607Sdim    return true;
48251607Sdim  case MVT::i64:
49251607Sdim    return false;
50251607Sdim  default:
51251607Sdim    llvm_unreachable("Unsupported type");
52251607Sdim  }
53251607Sdim}
54251607Sdim
55251607Sdim// Return a version of MachineOperand that can be safely used before the
56251607Sdim// final use.
57251607Sdimstatic MachineOperand earlyUseOperand(MachineOperand Op) {
58251607Sdim  if (Op.isReg())
59251607Sdim    Op.setIsKill(false);
60251607Sdim  return Op;
61251607Sdim}
62251607Sdim
// Construct the SystemZ lowering object: register the legal register
// classes and describe, operation by operation, how each generic ISD
// node should be legalized (Legal / Expand / Promote / Custom).
SystemZTargetLowering::SystemZTargetLowering(SystemZTargetMachine &tm)
  : TargetLowering(tm, new TargetLoweringObjectFileELF()),
    Subtarget(*tm.getSubtargetImpl()), TM(tm) {
  MVT PtrVT = getPointerTy();

  // Set up the register classes.
  // GRX32 additionally contains the high-word registers, which are only
  // available with the high-word facility.
  if (Subtarget.hasHighWord())
    addRegisterClass(MVT::i32, &SystemZ::GRX32BitRegClass);
  else
    addRegisterClass(MVT::i32, &SystemZ::GR32BitRegClass);
  addRegisterClass(MVT::i64,  &SystemZ::GR64BitRegClass);
  addRegisterClass(MVT::f32,  &SystemZ::FP32BitRegClass);
  addRegisterClass(MVT::f64,  &SystemZ::FP64BitRegClass);
  addRegisterClass(MVT::f128, &SystemZ::FP128BitRegClass);

  // Compute derived properties from the register classes
  computeRegisterProperties();

  // Set up special registers.
  setExceptionPointerRegister(SystemZ::R6D);
  setExceptionSelectorRegister(SystemZ::R7D);
  setStackPointerRegisterToSaveRestore(SystemZ::R15D);

  // TODO: It may be better to default to latency-oriented scheduling, however
  // LLVM's current latency-oriented scheduler can't handle physreg definitions
  // such as SystemZ has with CC, so set this to the register-pressure
  // scheduler, because it can.
  setSchedulingPreference(Sched::RegPressure);

  setBooleanContents(ZeroOrOneBooleanContent);
  setBooleanVectorContents(ZeroOrOneBooleanContent); // FIXME: Is this correct?

  // Instructions are strings of 2-byte aligned 2-byte values.
  setMinFunctionAlignment(2);

  // Handle operations that are handled in a similar way for all types.
  // This loop covers both the integer and floating-point value types.
  for (unsigned I = MVT::FIRST_INTEGER_VALUETYPE;
       I <= MVT::LAST_FP_VALUETYPE;
       ++I) {
    MVT VT = MVT::SimpleValueType(I);
    if (isTypeLegal(VT)) {
      // Lower SET_CC into an IPM-based sequence.
      setOperationAction(ISD::SETCC, VT, Custom);

      // Expand SELECT(C, A, B) into SELECT_CC(X, 0, A, B, NE).
      setOperationAction(ISD::SELECT, VT, Expand);

      // Lower SELECT_CC and BR_CC into separate comparisons and branches.
      setOperationAction(ISD::SELECT_CC, VT, Custom);
      setOperationAction(ISD::BR_CC,     VT, Custom);
    }
  }

  // Expand jump table branches as address arithmetic followed by an
  // indirect jump.
  setOperationAction(ISD::BR_JT, MVT::Other, Expand);

  // Expand BRCOND into a BR_CC (see above).
  setOperationAction(ISD::BRCOND, MVT::Other, Expand);

  // Handle integer types.
  for (unsigned I = MVT::FIRST_INTEGER_VALUETYPE;
       I <= MVT::LAST_INTEGER_VALUETYPE;
       ++I) {
    MVT VT = MVT::SimpleValueType(I);
    if (isTypeLegal(VT)) {
      // Expand individual DIV and REMs into DIVREMs.
      setOperationAction(ISD::SDIV, VT, Expand);
      setOperationAction(ISD::UDIV, VT, Expand);
      setOperationAction(ISD::SREM, VT, Expand);
      setOperationAction(ISD::UREM, VT, Expand);
      setOperationAction(ISD::SDIVREM, VT, Custom);
      setOperationAction(ISD::UDIVREM, VT, Custom);

      // Expand ATOMIC_LOAD and ATOMIC_STORE using ATOMIC_CMP_SWAP.
      // FIXME: probably much too conservative.
      setOperationAction(ISD::ATOMIC_LOAD,  VT, Expand);
      setOperationAction(ISD::ATOMIC_STORE, VT, Expand);

      // No special instructions for these.
      setOperationAction(ISD::CTPOP,           VT, Expand);
      setOperationAction(ISD::CTTZ,            VT, Expand);
      setOperationAction(ISD::CTTZ_ZERO_UNDEF, VT, Expand);
      setOperationAction(ISD::CTLZ_ZERO_UNDEF, VT, Expand);
      setOperationAction(ISD::ROTR,            VT, Expand);

      // Use *MUL_LOHI where possible instead of MULH*.
      setOperationAction(ISD::MULHS, VT, Expand);
      setOperationAction(ISD::MULHU, VT, Expand);
      setOperationAction(ISD::SMUL_LOHI, VT, Custom);
      setOperationAction(ISD::UMUL_LOHI, VT, Custom);

      // We have instructions for signed but not unsigned FP conversion.
      setOperationAction(ISD::FP_TO_UINT, VT, Expand);
    }
  }

  // Type legalization will convert 8- and 16-bit atomic operations into
  // forms that operate on i32s (but still keeping the original memory VT).
  // Lower them into full i32 operations.
  setOperationAction(ISD::ATOMIC_SWAP,      MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_LOAD_ADD,  MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_LOAD_SUB,  MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_LOAD_AND,  MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_LOAD_OR,   MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_LOAD_XOR,  MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_LOAD_NAND, MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_LOAD_MIN,  MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_LOAD_MAX,  MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_LOAD_UMIN, MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_LOAD_UMAX, MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_CMP_SWAP,  MVT::i32, Custom);

  // We have instructions for signed but not unsigned FP conversion.
  // Handle unsigned 32-bit types as signed 64-bit types.
  setOperationAction(ISD::UINT_TO_FP, MVT::i32, Promote);
  setOperationAction(ISD::UINT_TO_FP, MVT::i64, Expand);

  // We have native support for a 64-bit CTLZ, via FLOGR.
  setOperationAction(ISD::CTLZ, MVT::i32, Promote);
  setOperationAction(ISD::CTLZ, MVT::i64, Legal);

  // Give LowerOperation the chance to replace 64-bit ORs with subregs.
  setOperationAction(ISD::OR, MVT::i64, Custom);

  // FIXME: Can we support these natively?
  setOperationAction(ISD::SRL_PARTS, MVT::i64, Expand);
  setOperationAction(ISD::SHL_PARTS, MVT::i64, Expand);
  setOperationAction(ISD::SRA_PARTS, MVT::i64, Expand);

  // We have native instructions for i8, i16 and i32 extensions, but not i1.
  setLoadExtAction(ISD::SEXTLOAD, MVT::i1, Promote);
  setLoadExtAction(ISD::ZEXTLOAD, MVT::i1, Promote);
  setLoadExtAction(ISD::EXTLOAD,  MVT::i1, Promote);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);

  // Handle the various types of symbolic address.
  setOperationAction(ISD::ConstantPool,     PtrVT, Custom);
  setOperationAction(ISD::GlobalAddress,    PtrVT, Custom);
  setOperationAction(ISD::GlobalTLSAddress, PtrVT, Custom);
  setOperationAction(ISD::BlockAddress,     PtrVT, Custom);
  setOperationAction(ISD::JumpTable,        PtrVT, Custom);

  // We need to handle dynamic allocations specially because of the
  // 160-byte area at the bottom of the stack.
  setOperationAction(ISD::DYNAMIC_STACKALLOC, PtrVT, Custom);

  // Use custom expanders so that we can force the function to use
  // a frame pointer.
  setOperationAction(ISD::STACKSAVE,    MVT::Other, Custom);
  setOperationAction(ISD::STACKRESTORE, MVT::Other, Custom);

  // Handle prefetches with PFD or PFDRL.
  setOperationAction(ISD::PREFETCH, MVT::Other, Custom);

  // Handle floating-point types.
  for (unsigned I = MVT::FIRST_FP_VALUETYPE;
       I <= MVT::LAST_FP_VALUETYPE;
       ++I) {
    MVT VT = MVT::SimpleValueType(I);
    if (isTypeLegal(VT)) {
      // We can use FI for FRINT.
      setOperationAction(ISD::FRINT, VT, Legal);

      // We can use the extended form of FI for other rounding operations.
      if (Subtarget.hasFPExtension()) {
        setOperationAction(ISD::FNEARBYINT, VT, Legal);
        setOperationAction(ISD::FFLOOR, VT, Legal);
        setOperationAction(ISD::FCEIL, VT, Legal);
        setOperationAction(ISD::FTRUNC, VT, Legal);
        setOperationAction(ISD::FROUND, VT, Legal);
      }

      // No special instructions for these.
      setOperationAction(ISD::FSIN, VT, Expand);
      setOperationAction(ISD::FCOS, VT, Expand);
      setOperationAction(ISD::FREM, VT, Expand);
    }
  }

  // We have fused multiply-addition for f32 and f64 but not f128.
  setOperationAction(ISD::FMA, MVT::f32,  Legal);
  setOperationAction(ISD::FMA, MVT::f64,  Legal);
  setOperationAction(ISD::FMA, MVT::f128, Expand);

  // Needed so that we don't try to implement f128 constant loads using
  // a load-and-extend of a f80 constant (in cases where the constant
  // would fit in an f80).
  setLoadExtAction(ISD::EXTLOAD, MVT::f80, Expand);

  // Floating-point truncation and stores need to be done separately.
  setTruncStoreAction(MVT::f64,  MVT::f32, Expand);
  setTruncStoreAction(MVT::f128, MVT::f32, Expand);
  setTruncStoreAction(MVT::f128, MVT::f64, Expand);

  // We have 64-bit FPR<->GPR moves, but need special handling for
  // 32-bit forms.
  setOperationAction(ISD::BITCAST, MVT::i32, Custom);
  setOperationAction(ISD::BITCAST, MVT::f32, Custom);

  // VASTART and VACOPY need to deal with the SystemZ-specific varargs
  // structure, but VAEND is a no-op.
  setOperationAction(ISD::VASTART, MVT::Other, Custom);
  setOperationAction(ISD::VACOPY,  MVT::Other, Custom);
  setOperationAction(ISD::VAEND,   MVT::Other, Expand);

  // We want to use MVC in preference to even a single load/store pair.
  MaxStoresPerMemcpy = 0;
  MaxStoresPerMemcpyOptSize = 0;

  // The main memset sequence is a byte store followed by an MVC.
  // Two STC or MV..I stores win over that, but the kind of fused stores
  // generated by target-independent code don't when the byte value is
  // variable.  E.g.  "STC <reg>;MHI <reg>,257;STH <reg>" is not better
  // than "STC;MVC".  Handle the choice in target-specific code instead.
  MaxStoresPerMemset = 0;
  MaxStoresPerMemsetOptSize = 0;
}
281251607Sdim
282263509SdimEVT SystemZTargetLowering::getSetCCResultType(LLVMContext &, EVT VT) const {
283263509Sdim  if (!VT.isVector())
284263509Sdim    return MVT::i32;
285263509Sdim  return VT.changeVectorElementTypeToInteger();
286263509Sdim}
287263509Sdim
288263509Sdimbool SystemZTargetLowering::isFMAFasterThanFMulAndFAdd(EVT VT) const {
289263509Sdim  VT = VT.getScalarType();
290263509Sdim
291263509Sdim  if (!VT.isSimple())
292263509Sdim    return false;
293263509Sdim
294263509Sdim  switch (VT.getSimpleVT().SimpleTy) {
295263509Sdim  case MVT::f32:
296263509Sdim  case MVT::f64:
297263509Sdim    return true;
298263509Sdim  case MVT::f128:
299263509Sdim    return false;
300263509Sdim  default:
301263509Sdim    break;
302263509Sdim  }
303263509Sdim
304263509Sdim  return false;
305263509Sdim}
306263509Sdim
307251607Sdimbool SystemZTargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT) const {
308251607Sdim  // We can load zero using LZ?R and negative zero using LZ?R;LC?BR.
309251607Sdim  return Imm.isZero() || Imm.isNegZero();
310251607Sdim}
311251607Sdim
312263509Sdimbool SystemZTargetLowering::allowsUnalignedMemoryAccesses(EVT VT,
313263509Sdim                                                          bool *Fast) const {
314263509Sdim  // Unaligned accesses should never be slower than the expanded version.
315263509Sdim  // We check specifically for aligned accesses in the few cases where
316263509Sdim  // they are required.
317263509Sdim  if (Fast)
318263509Sdim    *Fast = true;
319263509Sdim  return true;
320263509Sdim}
321263509Sdim
322263509Sdimbool SystemZTargetLowering::isLegalAddressingMode(const AddrMode &AM,
323263509Sdim                                                  Type *Ty) const {
324263509Sdim  // Punt on globals for now, although they can be used in limited
325263509Sdim  // RELATIVE LONG cases.
326263509Sdim  if (AM.BaseGV)
327263509Sdim    return false;
328263509Sdim
329263509Sdim  // Require a 20-bit signed offset.
330263509Sdim  if (!isInt<20>(AM.BaseOffs))
331263509Sdim    return false;
332263509Sdim
333263509Sdim  // Indexing is OK but no scale factor can be applied.
334263509Sdim  return AM.Scale == 0 || AM.Scale == 1;
335263509Sdim}
336263509Sdim
337263509Sdimbool SystemZTargetLowering::isTruncateFree(Type *FromType, Type *ToType) const {
338263509Sdim  if (!FromType->isIntegerTy() || !ToType->isIntegerTy())
339263509Sdim    return false;
340263509Sdim  unsigned FromBits = FromType->getPrimitiveSizeInBits();
341263509Sdim  unsigned ToBits = ToType->getPrimitiveSizeInBits();
342263509Sdim  return FromBits > ToBits;
343263509Sdim}
344263509Sdim
345263509Sdimbool SystemZTargetLowering::isTruncateFree(EVT FromVT, EVT ToVT) const {
346263509Sdim  if (!FromVT.isInteger() || !ToVT.isInteger())
347263509Sdim    return false;
348263509Sdim  unsigned FromBits = FromVT.getSizeInBits();
349263509Sdim  unsigned ToBits = ToVT.getSizeInBits();
350263509Sdim  return FromBits > ToBits;
351263509Sdim}
352263509Sdim
353251607Sdim//===----------------------------------------------------------------------===//
354251607Sdim// Inline asm support
355251607Sdim//===----------------------------------------------------------------------===//
356251607Sdim
357251607SdimTargetLowering::ConstraintType
358251607SdimSystemZTargetLowering::getConstraintType(const std::string &Constraint) const {
359251607Sdim  if (Constraint.size() == 1) {
360251607Sdim    switch (Constraint[0]) {
361251607Sdim    case 'a': // Address register
362251607Sdim    case 'd': // Data register (equivalent to 'r')
363251607Sdim    case 'f': // Floating-point register
364263509Sdim    case 'h': // High-part register
365251607Sdim    case 'r': // General-purpose register
366251607Sdim      return C_RegisterClass;
367251607Sdim
368251607Sdim    case 'Q': // Memory with base and unsigned 12-bit displacement
369251607Sdim    case 'R': // Likewise, plus an index
370251607Sdim    case 'S': // Memory with base and signed 20-bit displacement
371251607Sdim    case 'T': // Likewise, plus an index
372251607Sdim    case 'm': // Equivalent to 'T'.
373251607Sdim      return C_Memory;
374251607Sdim
375251607Sdim    case 'I': // Unsigned 8-bit constant
376251607Sdim    case 'J': // Unsigned 12-bit constant
377251607Sdim    case 'K': // Signed 16-bit constant
378251607Sdim    case 'L': // Signed 20-bit displacement (on all targets we support)
379251607Sdim    case 'M': // 0x7fffffff
380251607Sdim      return C_Other;
381251607Sdim
382251607Sdim    default:
383251607Sdim      break;
384251607Sdim    }
385251607Sdim  }
386251607Sdim  return TargetLowering::getConstraintType(Constraint);
387251607Sdim}
388251607Sdim
389251607SdimTargetLowering::ConstraintWeight SystemZTargetLowering::
390251607SdimgetSingleConstraintMatchWeight(AsmOperandInfo &info,
391251607Sdim                               const char *constraint) const {
392251607Sdim  ConstraintWeight weight = CW_Invalid;
393251607Sdim  Value *CallOperandVal = info.CallOperandVal;
394251607Sdim  // If we don't have a value, we can't do a match,
395251607Sdim  // but allow it at the lowest weight.
396251607Sdim  if (CallOperandVal == NULL)
397251607Sdim    return CW_Default;
398251607Sdim  Type *type = CallOperandVal->getType();
399251607Sdim  // Look at the constraint type.
400251607Sdim  switch (*constraint) {
401251607Sdim  default:
402251607Sdim    weight = TargetLowering::getSingleConstraintMatchWeight(info, constraint);
403251607Sdim    break;
404251607Sdim
405251607Sdim  case 'a': // Address register
406251607Sdim  case 'd': // Data register (equivalent to 'r')
407263509Sdim  case 'h': // High-part register
408251607Sdim  case 'r': // General-purpose register
409251607Sdim    if (CallOperandVal->getType()->isIntegerTy())
410251607Sdim      weight = CW_Register;
411251607Sdim    break;
412251607Sdim
413251607Sdim  case 'f': // Floating-point register
414251607Sdim    if (type->isFloatingPointTy())
415251607Sdim      weight = CW_Register;
416251607Sdim    break;
417251607Sdim
418251607Sdim  case 'I': // Unsigned 8-bit constant
419251607Sdim    if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal))
420251607Sdim      if (isUInt<8>(C->getZExtValue()))
421251607Sdim        weight = CW_Constant;
422251607Sdim    break;
423251607Sdim
424251607Sdim  case 'J': // Unsigned 12-bit constant
425251607Sdim    if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal))
426251607Sdim      if (isUInt<12>(C->getZExtValue()))
427251607Sdim        weight = CW_Constant;
428251607Sdim    break;
429251607Sdim
430251607Sdim  case 'K': // Signed 16-bit constant
431251607Sdim    if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal))
432251607Sdim      if (isInt<16>(C->getSExtValue()))
433251607Sdim        weight = CW_Constant;
434251607Sdim    break;
435251607Sdim
436251607Sdim  case 'L': // Signed 20-bit displacement (on all targets we support)
437251607Sdim    if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal))
438251607Sdim      if (isInt<20>(C->getSExtValue()))
439251607Sdim        weight = CW_Constant;
440251607Sdim    break;
441251607Sdim
442251607Sdim  case 'M': // 0x7fffffff
443251607Sdim    if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal))
444251607Sdim      if (C->getZExtValue() == 0x7fffffff)
445251607Sdim        weight = CW_Constant;
446251607Sdim    break;
447251607Sdim  }
448251607Sdim  return weight;
449251607Sdim}
450251607Sdim
451263509Sdim// Parse a "{tNNN}" register constraint for which the register type "t"
452263509Sdim// has already been verified.  MC is the class associated with "t" and
453263509Sdim// Map maps 0-based register numbers to LLVM register numbers.
454263509Sdimstatic std::pair<unsigned, const TargetRegisterClass *>
455263509SdimparseRegisterNumber(const std::string &Constraint,
456263509Sdim                    const TargetRegisterClass *RC, const unsigned *Map) {
457263509Sdim  assert(*(Constraint.end()-1) == '}' && "Missing '}'");
458263509Sdim  if (isdigit(Constraint[2])) {
459263509Sdim    std::string Suffix(Constraint.data() + 2, Constraint.size() - 2);
460263509Sdim    unsigned Index = atoi(Suffix.c_str());
461263509Sdim    if (Index < 16 && Map[Index])
462263509Sdim      return std::make_pair(Map[Index], RC);
463263509Sdim  }
464263509Sdim  return std::make_pair(0u, static_cast<TargetRegisterClass*>(0));
465263509Sdim}
466263509Sdim
467251607Sdimstd::pair<unsigned, const TargetRegisterClass *> SystemZTargetLowering::
468263509SdimgetRegForInlineAsmConstraint(const std::string &Constraint, MVT VT) const {
469251607Sdim  if (Constraint.size() == 1) {
470251607Sdim    // GCC Constraint Letters
471251607Sdim    switch (Constraint[0]) {
472251607Sdim    default: break;
473251607Sdim    case 'd': // Data register (equivalent to 'r')
474251607Sdim    case 'r': // General-purpose register
475251607Sdim      if (VT == MVT::i64)
476251607Sdim        return std::make_pair(0U, &SystemZ::GR64BitRegClass);
477251607Sdim      else if (VT == MVT::i128)
478251607Sdim        return std::make_pair(0U, &SystemZ::GR128BitRegClass);
479251607Sdim      return std::make_pair(0U, &SystemZ::GR32BitRegClass);
480251607Sdim
481251607Sdim    case 'a': // Address register
482251607Sdim      if (VT == MVT::i64)
483251607Sdim        return std::make_pair(0U, &SystemZ::ADDR64BitRegClass);
484251607Sdim      else if (VT == MVT::i128)
485251607Sdim        return std::make_pair(0U, &SystemZ::ADDR128BitRegClass);
486251607Sdim      return std::make_pair(0U, &SystemZ::ADDR32BitRegClass);
487251607Sdim
488263509Sdim    case 'h': // High-part register (an LLVM extension)
489263509Sdim      return std::make_pair(0U, &SystemZ::GRH32BitRegClass);
490263509Sdim
491251607Sdim    case 'f': // Floating-point register
492251607Sdim      if (VT == MVT::f64)
493251607Sdim        return std::make_pair(0U, &SystemZ::FP64BitRegClass);
494251607Sdim      else if (VT == MVT::f128)
495251607Sdim        return std::make_pair(0U, &SystemZ::FP128BitRegClass);
496251607Sdim      return std::make_pair(0U, &SystemZ::FP32BitRegClass);
497251607Sdim    }
498251607Sdim  }
499263509Sdim  if (Constraint[0] == '{') {
500263509Sdim    // We need to override the default register parsing for GPRs and FPRs
501263509Sdim    // because the interpretation depends on VT.  The internal names of
502263509Sdim    // the registers are also different from the external names
503263509Sdim    // (F0D and F0S instead of F0, etc.).
504263509Sdim    if (Constraint[1] == 'r') {
505263509Sdim      if (VT == MVT::i32)
506263509Sdim        return parseRegisterNumber(Constraint, &SystemZ::GR32BitRegClass,
507263509Sdim                                   SystemZMC::GR32Regs);
508263509Sdim      if (VT == MVT::i128)
509263509Sdim        return parseRegisterNumber(Constraint, &SystemZ::GR128BitRegClass,
510263509Sdim                                   SystemZMC::GR128Regs);
511263509Sdim      return parseRegisterNumber(Constraint, &SystemZ::GR64BitRegClass,
512263509Sdim                                 SystemZMC::GR64Regs);
513263509Sdim    }
514263509Sdim    if (Constraint[1] == 'f') {
515263509Sdim      if (VT == MVT::f32)
516263509Sdim        return parseRegisterNumber(Constraint, &SystemZ::FP32BitRegClass,
517263509Sdim                                   SystemZMC::FP32Regs);
518263509Sdim      if (VT == MVT::f128)
519263509Sdim        return parseRegisterNumber(Constraint, &SystemZ::FP128BitRegClass,
520263509Sdim                                   SystemZMC::FP128Regs);
521263509Sdim      return parseRegisterNumber(Constraint, &SystemZ::FP64BitRegClass,
522263509Sdim                                 SystemZMC::FP64Regs);
523263509Sdim    }
524263509Sdim  }
525251607Sdim  return TargetLowering::getRegForInlineAsmConstraint(Constraint, VT);
526251607Sdim}
527251607Sdim
528251607Sdimvoid SystemZTargetLowering::
529251607SdimLowerAsmOperandForConstraint(SDValue Op, std::string &Constraint,
530251607Sdim                             std::vector<SDValue> &Ops,
531251607Sdim                             SelectionDAG &DAG) const {
532251607Sdim  // Only support length 1 constraints for now.
533251607Sdim  if (Constraint.length() == 1) {
534251607Sdim    switch (Constraint[0]) {
535251607Sdim    case 'I': // Unsigned 8-bit constant
536251607Sdim      if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op))
537251607Sdim        if (isUInt<8>(C->getZExtValue()))
538251607Sdim          Ops.push_back(DAG.getTargetConstant(C->getZExtValue(),
539251607Sdim                                              Op.getValueType()));
540251607Sdim      return;
541251607Sdim
542251607Sdim    case 'J': // Unsigned 12-bit constant
543251607Sdim      if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op))
544251607Sdim        if (isUInt<12>(C->getZExtValue()))
545251607Sdim          Ops.push_back(DAG.getTargetConstant(C->getZExtValue(),
546251607Sdim                                              Op.getValueType()));
547251607Sdim      return;
548251607Sdim
549251607Sdim    case 'K': // Signed 16-bit constant
550251607Sdim      if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op))
551251607Sdim        if (isInt<16>(C->getSExtValue()))
552251607Sdim          Ops.push_back(DAG.getTargetConstant(C->getSExtValue(),
553251607Sdim                                              Op.getValueType()));
554251607Sdim      return;
555251607Sdim
556251607Sdim    case 'L': // Signed 20-bit displacement (on all targets we support)
557251607Sdim      if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op))
558251607Sdim        if (isInt<20>(C->getSExtValue()))
559251607Sdim          Ops.push_back(DAG.getTargetConstant(C->getSExtValue(),
560251607Sdim                                              Op.getValueType()));
561251607Sdim      return;
562251607Sdim
563251607Sdim    case 'M': // 0x7fffffff
564251607Sdim      if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op))
565251607Sdim        if (C->getZExtValue() == 0x7fffffff)
566251607Sdim          Ops.push_back(DAG.getTargetConstant(C->getZExtValue(),
567251607Sdim                                              Op.getValueType()));
568251607Sdim      return;
569251607Sdim    }
570251607Sdim  }
571251607Sdim  TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
572251607Sdim}
573251607Sdim
574251607Sdim//===----------------------------------------------------------------------===//
575251607Sdim// Calling conventions
576251607Sdim//===----------------------------------------------------------------------===//
577251607Sdim
578251607Sdim#include "SystemZGenCallingConv.inc"
579251607Sdim
580263509Sdimbool SystemZTargetLowering::allowTruncateForTailCall(Type *FromType,
581263509Sdim                                                     Type *ToType) const {
582263509Sdim  return isTruncateFree(FromType, ToType);
583263509Sdim}
584263509Sdim
585263509Sdimbool SystemZTargetLowering::mayBeEmittedAsTailCall(CallInst *CI) const {
586263509Sdim  if (!CI->isTailCall())
587263509Sdim    return false;
588263509Sdim  return true;
589263509Sdim}
590263509Sdim
591251607Sdim// Value is a value that has been passed to us in the location described by VA
592251607Sdim// (and so has type VA.getLocVT()).  Convert Value to VA.getValVT(), chaining
593251607Sdim// any loads onto Chain.
594263509Sdimstatic SDValue convertLocVTToValVT(SelectionDAG &DAG, SDLoc DL,
595251607Sdim                                   CCValAssign &VA, SDValue Chain,
596251607Sdim                                   SDValue Value) {
597251607Sdim  // If the argument has been promoted from a smaller type, insert an
598251607Sdim  // assertion to capture this.
599251607Sdim  if (VA.getLocInfo() == CCValAssign::SExt)
600251607Sdim    Value = DAG.getNode(ISD::AssertSext, DL, VA.getLocVT(), Value,
601251607Sdim                        DAG.getValueType(VA.getValVT()));
602251607Sdim  else if (VA.getLocInfo() == CCValAssign::ZExt)
603251607Sdim    Value = DAG.getNode(ISD::AssertZext, DL, VA.getLocVT(), Value,
604251607Sdim                        DAG.getValueType(VA.getValVT()));
605251607Sdim
606251607Sdim  if (VA.isExtInLoc())
607251607Sdim    Value = DAG.getNode(ISD::TRUNCATE, DL, VA.getValVT(), Value);
608251607Sdim  else if (VA.getLocInfo() == CCValAssign::Indirect)
609251607Sdim    Value = DAG.getLoad(VA.getValVT(), DL, Chain, Value,
610251607Sdim                        MachinePointerInfo(), false, false, false, 0);
611251607Sdim  else
612251607Sdim    assert(VA.getLocInfo() == CCValAssign::Full && "Unsupported getLocInfo");
613251607Sdim  return Value;
614251607Sdim}
615251607Sdim
616251607Sdim// Value is a value of type VA.getValVT() that we need to copy into
617251607Sdim// the location described by VA.  Return a copy of Value converted to
618251607Sdim// VA.getValVT().  The caller is responsible for handling indirect values.
619263509Sdimstatic SDValue convertValVTToLocVT(SelectionDAG &DAG, SDLoc DL,
620251607Sdim                                   CCValAssign &VA, SDValue Value) {
621251607Sdim  switch (VA.getLocInfo()) {
622251607Sdim  case CCValAssign::SExt:
623251607Sdim    return DAG.getNode(ISD::SIGN_EXTEND, DL, VA.getLocVT(), Value);
624251607Sdim  case CCValAssign::ZExt:
625251607Sdim    return DAG.getNode(ISD::ZERO_EXTEND, DL, VA.getLocVT(), Value);
626251607Sdim  case CCValAssign::AExt:
627251607Sdim    return DAG.getNode(ISD::ANY_EXTEND, DL, VA.getLocVT(), Value);
628251607Sdim  case CCValAssign::Full:
629251607Sdim    return Value;
630251607Sdim  default:
631251607Sdim    llvm_unreachable("Unhandled getLocInfo()");
632251607Sdim  }
633251607Sdim}
634251607Sdim
// Lower the incoming (formal) arguments of a function: materialize each
// argument either as a copy out of its designated register or as a load
// from its fixed stack slot, and push the resulting values onto InVals.
// For varargs functions, also record the register/stack split points and
// spill the unused argument FPRs into the caller-allocated save area.
// Returns the (possibly updated) chain.
SDValue SystemZTargetLowering::
LowerFormalArguments(SDValue Chain, CallingConv::ID CallConv, bool IsVarArg,
                     const SmallVectorImpl<ISD::InputArg> &Ins,
                     SDLoc DL, SelectionDAG &DAG,
                     SmallVectorImpl<SDValue> &InVals) const {
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo *MFI = MF.getFrameInfo();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  SystemZMachineFunctionInfo *FuncInfo =
    MF.getInfo<SystemZMachineFunctionInfo>();
  const SystemZFrameLowering *TFL =
    static_cast<const SystemZFrameLowering *>(TM.getFrameLowering());

  // Assign locations to all of the incoming arguments.
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CallConv, IsVarArg, MF, TM, ArgLocs, *DAG.getContext());
  CCInfo.AnalyzeFormalArguments(Ins, CC_SystemZ);

  // Counts of argument registers consumed by fixed (non-vararg) arguments;
  // needed below to know which registers the varargs area must cover.
  unsigned NumFixedGPRs = 0;
  unsigned NumFixedFPRs = 0;
  for (unsigned I = 0, E = ArgLocs.size(); I != E; ++I) {
    SDValue ArgValue;
    CCValAssign &VA = ArgLocs[I];
    EVT LocVT = VA.getLocVT();
    if (VA.isRegLoc()) {
      // Arguments passed in registers
      const TargetRegisterClass *RC;
      switch (LocVT.getSimpleVT().SimpleTy) {
      default:
        // Integers smaller than i64 should be promoted to i64.
        llvm_unreachable("Unexpected argument type");
      case MVT::i32:
        NumFixedGPRs += 1;
        RC = &SystemZ::GR32BitRegClass;
        break;
      case MVT::i64:
        NumFixedGPRs += 1;
        RC = &SystemZ::GR64BitRegClass;
        break;
      case MVT::f32:
        NumFixedFPRs += 1;
        RC = &SystemZ::FP32BitRegClass;
        break;
      case MVT::f64:
        NumFixedFPRs += 1;
        RC = &SystemZ::FP64BitRegClass;
        break;
      }

      // Make the physical argument register live-in via a fresh virtual
      // register and read it there.
      unsigned VReg = MRI.createVirtualRegister(RC);
      MRI.addLiveIn(VA.getLocReg(), VReg);
      ArgValue = DAG.getCopyFromReg(Chain, DL, VReg, LocVT);
    } else {
      assert(VA.isMemLoc() && "Argument not register or memory");

      // Create the frame index object for this incoming parameter.
      int FI = MFI->CreateFixedObject(LocVT.getSizeInBits() / 8,
                                      VA.getLocMemOffset(), true);

      // Create the SelectionDAG nodes corresponding to a load
      // from this parameter.  Unpromoted ints and floats are
      // passed as right-justified 8-byte values.
      EVT PtrVT = getPointerTy();
      SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
      // Right-justification: a 4-byte value lives in the upper half of
      // its 8-byte slot, so step past the 4 bytes of padding.
      if (VA.getLocVT() == MVT::i32 || VA.getLocVT() == MVT::f32)
        FIN = DAG.getNode(ISD::ADD, DL, PtrVT, FIN, DAG.getIntPtrConstant(4));
      ArgValue = DAG.getLoad(LocVT, DL, Chain, FIN,
                             MachinePointerInfo::getFixedStack(FI),
                             false, false, false, 0);
    }

    // Convert the value of the argument register into the value that's
    // being passed.
    InVals.push_back(convertLocVTToValVT(DAG, DL, VA, Chain, ArgValue));
  }

  if (IsVarArg) {
    // Save the number of non-varargs registers for later use by va_start, etc.
    FuncInfo->setVarArgsFirstGPR(NumFixedGPRs);
    FuncInfo->setVarArgsFirstFPR(NumFixedFPRs);

    // Likewise the address (in the form of a frame index) of where the
    // first stack vararg would be.  The 1-byte size here is arbitrary.
    int64_t StackSize = CCInfo.getNextStackOffset();
    FuncInfo->setVarArgsFrameIndex(MFI->CreateFixedObject(1, StackSize, true));

    // ...and a similar frame index for the caller-allocated save area
    // that will be used to store the incoming registers.
    int64_t RegSaveOffset = TFL->getOffsetOfLocalArea();
    unsigned RegSaveIndex = MFI->CreateFixedObject(1, RegSaveOffset, true);
    FuncInfo->setRegSaveFrameIndex(RegSaveIndex);

    // Store the FPR varargs in the reserved frame slots.  (We store the
    // GPRs as part of the prologue.)
    if (NumFixedFPRs < SystemZ::NumArgFPRs) {
      // Only entries [NumFixedFPRs, NumArgFPRs) are filled in; the
      // TokenFactor below reads exactly that subrange.
      SDValue MemOps[SystemZ::NumArgFPRs];
      for (unsigned I = NumFixedFPRs; I < SystemZ::NumArgFPRs; ++I) {
        unsigned Offset = TFL->getRegSpillOffset(SystemZ::ArgFPRs[I]);
        int FI = MFI->CreateFixedObject(8, RegSaveOffset + Offset, true);
        SDValue FIN = DAG.getFrameIndex(FI, getPointerTy());
        unsigned VReg = MF.addLiveIn(SystemZ::ArgFPRs[I],
                                     &SystemZ::FP64BitRegClass);
        SDValue ArgValue = DAG.getCopyFromReg(Chain, DL, VReg, MVT::f64);
        MemOps[I] = DAG.getStore(ArgValue.getValue(1), DL, ArgValue, FIN,
                                 MachinePointerInfo::getFixedStack(FI),
                                 false, false, 0);

      }
      // Join the stores, which are independent of one another.
      Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other,
                          &MemOps[NumFixedFPRs],
                          SystemZ::NumArgFPRs - NumFixedFPRs);
    }
  }

  return Chain;
}
752251607Sdim
753263509Sdimstatic bool canUseSiblingCall(CCState ArgCCInfo,
754263509Sdim                              SmallVectorImpl<CCValAssign> &ArgLocs) {
755263509Sdim  // Punt if there are any indirect or stack arguments, or if the call
756263509Sdim  // needs the call-saved argument register R6.
757263509Sdim  for (unsigned I = 0, E = ArgLocs.size(); I != E; ++I) {
758263509Sdim    CCValAssign &VA = ArgLocs[I];
759263509Sdim    if (VA.getLocInfo() == CCValAssign::Indirect)
760263509Sdim      return false;
761263509Sdim    if (!VA.isRegLoc())
762263509Sdim      return false;
763263509Sdim    unsigned Reg = VA.getLocReg();
764263509Sdim    if (Reg == SystemZ::R6H || Reg == SystemZ::R6L || Reg == SystemZ::R6D)
765263509Sdim      return false;
766263509Sdim  }
767263509Sdim  return true;
768263509Sdim}
769263509Sdim
// Lower an outgoing call described by CLI: analyze the operands, decide
// whether a sibling (tail) call is possible, store stack/indirect
// arguments, copy register arguments into place, emit the CALL or
// SIBCALL node, and copy any results back into InVals.  Returns the new
// chain.
SDValue
SystemZTargetLowering::LowerCall(CallLoweringInfo &CLI,
                                 SmallVectorImpl<SDValue> &InVals) const {
  SelectionDAG &DAG = CLI.DAG;
  SDLoc &DL = CLI.DL;
  SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
  SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
  SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
  SDValue Chain = CLI.Chain;
  SDValue Callee = CLI.Callee;
  // Note: IsTailCall is a reference so the caller sees when we demote a
  // requested tail call to a normal one below.
  bool &IsTailCall = CLI.IsTailCall;
  CallingConv::ID CallConv = CLI.CallConv;
  bool IsVarArg = CLI.IsVarArg;
  MachineFunction &MF = DAG.getMachineFunction();
  EVT PtrVT = getPointerTy();

  // Analyze the operands of the call, assigning locations to each operand.
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState ArgCCInfo(CallConv, IsVarArg, MF, TM, ArgLocs, *DAG.getContext());
  ArgCCInfo.AnalyzeCallOperands(Outs, CC_SystemZ);

  // We don't support GuaranteedTailCallOpt, only automatically-detected
  // sibling calls.
  if (IsTailCall && !canUseSiblingCall(ArgCCInfo, ArgLocs))
    IsTailCall = false;

  // Get a count of how many bytes are to be pushed on the stack.
  unsigned NumBytes = ArgCCInfo.getNextStackOffset();

  // Mark the start of the call.  Sibling calls reuse the caller's frame,
  // so they get no CALLSEQ_START/END bracket.
  if (!IsTailCall)
    Chain = DAG.getCALLSEQ_START(Chain, DAG.getConstant(NumBytes, PtrVT, true),
                                 DL);

  // Copy argument values to their designated locations.
  SmallVector<std::pair<unsigned, SDValue>, 9> RegsToPass;
  SmallVector<SDValue, 8> MemOpChains;
  SDValue StackPtr;
  for (unsigned I = 0, E = ArgLocs.size(); I != E; ++I) {
    CCValAssign &VA = ArgLocs[I];
    SDValue ArgValue = OutVals[I];

    if (VA.getLocInfo() == CCValAssign::Indirect) {
      // Store the argument in a stack slot and pass its address.
      SDValue SpillSlot = DAG.CreateStackTemporary(VA.getValVT());
      int FI = cast<FrameIndexSDNode>(SpillSlot)->getIndex();
      MemOpChains.push_back(DAG.getStore(Chain, DL, ArgValue, SpillSlot,
                                         MachinePointerInfo::getFixedStack(FI),
                                         false, false, 0));
      ArgValue = SpillSlot;
    } else
      ArgValue = convertValVTToLocVT(DAG, DL, VA, ArgValue);

    if (VA.isRegLoc())
      // Queue up the argument copies and emit them at the end.
      RegsToPass.push_back(std::make_pair(VA.getLocReg(), ArgValue));
    else {
      assert(VA.isMemLoc() && "Argument not register or memory");

      // Work out the address of the stack slot.  Unpromoted ints and
      // floats are passed as right-justified 8-byte values.
      if (!StackPtr.getNode())
        StackPtr = DAG.getCopyFromReg(Chain, DL, SystemZ::R15D, PtrVT);
      unsigned Offset = SystemZMC::CallFrameSize + VA.getLocMemOffset();
      // Right-justify 4-byte values within their 8-byte slot.
      if (VA.getLocVT() == MVT::i32 || VA.getLocVT() == MVT::f32)
        Offset += 4;
      SDValue Address = DAG.getNode(ISD::ADD, DL, PtrVT, StackPtr,
                                    DAG.getIntPtrConstant(Offset));

      // Emit the store.
      MemOpChains.push_back(DAG.getStore(Chain, DL, ArgValue, Address,
                                         MachinePointerInfo(),
                                         false, false, 0));
    }
  }

  // Join the stores, which are independent of one another.
  if (!MemOpChains.empty())
    Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other,
                        &MemOpChains[0], MemOpChains.size());

  // Accept direct calls by converting symbolic call addresses to the
  // associated Target* opcodes.  Force %r1 to be used for indirect
  // tail calls.
  SDValue Glue;
  if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
    Callee = DAG.getTargetGlobalAddress(G->getGlobal(), DL, PtrVT);
    Callee = DAG.getNode(SystemZISD::PCREL_WRAPPER, DL, PtrVT, Callee);
  } else if (ExternalSymbolSDNode *E = dyn_cast<ExternalSymbolSDNode>(Callee)) {
    Callee = DAG.getTargetExternalSymbol(E->getSymbol(), PtrVT);
    Callee = DAG.getNode(SystemZISD::PCREL_WRAPPER, DL, PtrVT, Callee);
  } else if (IsTailCall) {
    Chain = DAG.getCopyToReg(Chain, DL, SystemZ::R1D, Callee, Glue);
    Glue = Chain.getValue(1);
    Callee = DAG.getRegister(SystemZ::R1D, Callee.getValueType());
  }

  // Build a sequence of copy-to-reg nodes, chained and glued together.
  for (unsigned I = 0, E = RegsToPass.size(); I != E; ++I) {
    Chain = DAG.getCopyToReg(Chain, DL, RegsToPass[I].first,
                             RegsToPass[I].second, Glue);
    Glue = Chain.getValue(1);
  }

  // The first call operand is the chain and the second is the target address.
  SmallVector<SDValue, 8> Ops;
  Ops.push_back(Chain);
  Ops.push_back(Callee);

  // Add argument registers to the end of the list so that they are
  // known live into the call.
  for (unsigned I = 0, E = RegsToPass.size(); I != E; ++I)
    Ops.push_back(DAG.getRegister(RegsToPass[I].first,
                                  RegsToPass[I].second.getValueType()));

  // Glue the call to the argument copies, if any.
  if (Glue.getNode())
    Ops.push_back(Glue);

  // Emit the call.  A sibling call is a terminator, so there is nothing
  // more to lower afterwards.
  SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
  if (IsTailCall)
    return DAG.getNode(SystemZISD::SIBCALL, DL, NodeTys, &Ops[0], Ops.size());
  Chain = DAG.getNode(SystemZISD::CALL, DL, NodeTys, &Ops[0], Ops.size());
  Glue = Chain.getValue(1);

  // Mark the end of the call, which is glued to the call itself.
  Chain = DAG.getCALLSEQ_END(Chain,
                             DAG.getConstant(NumBytes, PtrVT, true),
                             DAG.getConstant(0, PtrVT, true),
                             Glue, DL);
  Glue = Chain.getValue(1);

  // Assign locations to each value returned by this call.
  SmallVector<CCValAssign, 16> RetLocs;
  CCState RetCCInfo(CallConv, IsVarArg, MF, TM, RetLocs, *DAG.getContext());
  RetCCInfo.AnalyzeCallResult(Ins, RetCC_SystemZ);

  // Copy all of the result registers out of their specified physreg.
  for (unsigned I = 0, E = RetLocs.size(); I != E; ++I) {
    CCValAssign &VA = RetLocs[I];

    // Copy the value out, gluing the copy to the end of the call sequence.
    SDValue RetValue = DAG.getCopyFromReg(Chain, DL, VA.getLocReg(),
                                          VA.getLocVT(), Glue);
    Chain = RetValue.getValue(1);
    Glue = RetValue.getValue(2);

    // Convert the value of the return register into the value that's
    // being returned.
    InVals.push_back(convertLocVTToValVT(DAG, DL, VA, Chain, RetValue));
  }

  return Chain;
}
925251607Sdim
926251607SdimSDValue
927251607SdimSystemZTargetLowering::LowerReturn(SDValue Chain,
928251607Sdim                                   CallingConv::ID CallConv, bool IsVarArg,
929251607Sdim                                   const SmallVectorImpl<ISD::OutputArg> &Outs,
930251607Sdim                                   const SmallVectorImpl<SDValue> &OutVals,
931263509Sdim                                   SDLoc DL, SelectionDAG &DAG) const {
932251607Sdim  MachineFunction &MF = DAG.getMachineFunction();
933251607Sdim
934251607Sdim  // Assign locations to each returned value.
935251607Sdim  SmallVector<CCValAssign, 16> RetLocs;
936251607Sdim  CCState RetCCInfo(CallConv, IsVarArg, MF, TM, RetLocs, *DAG.getContext());
937251607Sdim  RetCCInfo.AnalyzeReturn(Outs, RetCC_SystemZ);
938251607Sdim
939251607Sdim  // Quick exit for void returns
940251607Sdim  if (RetLocs.empty())
941251607Sdim    return DAG.getNode(SystemZISD::RET_FLAG, DL, MVT::Other, Chain);
942251607Sdim
943251607Sdim  // Copy the result values into the output registers.
944251607Sdim  SDValue Glue;
945251607Sdim  SmallVector<SDValue, 4> RetOps;
946251607Sdim  RetOps.push_back(Chain);
947251607Sdim  for (unsigned I = 0, E = RetLocs.size(); I != E; ++I) {
948251607Sdim    CCValAssign &VA = RetLocs[I];
949251607Sdim    SDValue RetValue = OutVals[I];
950251607Sdim
951251607Sdim    // Make the return register live on exit.
952251607Sdim    assert(VA.isRegLoc() && "Can only return in registers!");
953251607Sdim
954251607Sdim    // Promote the value as required.
955251607Sdim    RetValue = convertValVTToLocVT(DAG, DL, VA, RetValue);
956251607Sdim
957251607Sdim    // Chain and glue the copies together.
958251607Sdim    unsigned Reg = VA.getLocReg();
959251607Sdim    Chain = DAG.getCopyToReg(Chain, DL, Reg, RetValue, Glue);
960251607Sdim    Glue = Chain.getValue(1);
961251607Sdim    RetOps.push_back(DAG.getRegister(Reg, VA.getLocVT()));
962251607Sdim  }
963251607Sdim
964251607Sdim  // Update chain and glue.
965251607Sdim  RetOps[0] = Chain;
966251607Sdim  if (Glue.getNode())
967251607Sdim    RetOps.push_back(Glue);
968251607Sdim
969251607Sdim  return DAG.getNode(SystemZISD::RET_FLAG, DL, MVT::Other,
970251607Sdim                     RetOps.data(), RetOps.size());
971251607Sdim}
972251607Sdim
973251607Sdim// CC is a comparison that will be implemented using an integer or
974251607Sdim// floating-point comparison.  Return the condition code mask for
975251607Sdim// a branch on true.  In the integer case, CCMASK_CMP_UO is set for
976251607Sdim// unsigned comparisons and clear for signed ones.  In the floating-point
977251607Sdim// case, CCMASK_CMP_UO has its normal mask meaning (unordered).
978251607Sdimstatic unsigned CCMaskForCondCode(ISD::CondCode CC) {
979251607Sdim#define CONV(X) \
980251607Sdim  case ISD::SET##X: return SystemZ::CCMASK_CMP_##X; \
981251607Sdim  case ISD::SETO##X: return SystemZ::CCMASK_CMP_##X; \
982251607Sdim  case ISD::SETU##X: return SystemZ::CCMASK_CMP_UO | SystemZ::CCMASK_CMP_##X
983251607Sdim
984251607Sdim  switch (CC) {
985251607Sdim  default:
986251607Sdim    llvm_unreachable("Invalid integer condition!");
987251607Sdim
988251607Sdim  CONV(EQ);
989251607Sdim  CONV(NE);
990251607Sdim  CONV(GT);
991251607Sdim  CONV(GE);
992251607Sdim  CONV(LT);
993251607Sdim  CONV(LE);
994251607Sdim
995251607Sdim  case ISD::SETO:  return SystemZ::CCMASK_CMP_O;
996251607Sdim  case ISD::SETUO: return SystemZ::CCMASK_CMP_UO;
997251607Sdim  }
998251607Sdim#undef CONV
999251607Sdim}
1000251607Sdim
// Return a sequence for getting a 1 from an IPM result when CC has a
// value in CCMask and a 0 when CC has a value in CCValid & ~CCMask.
// The handling of CC values outside CCValid doesn't matter.
//
// Each returned IPMConversion describes (((X ^ XORValue) + AddValue) >> Bit)
// applied to the raw IPM result X, where the CC lives in bits
// [IPM_CC, IPM_CC + 1] and the bits above it are zero.
static IPMConversion getIPMConversion(unsigned CCValid, unsigned CCMask) {
  // Deal with cases where the result can be taken directly from a bit
  // of the IPM result.
  if (CCMask == (CCValid & (SystemZ::CCMASK_1 | SystemZ::CCMASK_3)))
    return IPMConversion(0, 0, SystemZ::IPM_CC);
  if (CCMask == (CCValid & (SystemZ::CCMASK_2 | SystemZ::CCMASK_3)))
    return IPMConversion(0, 0, SystemZ::IPM_CC + 1);

  // Deal with cases where we can add a value to force the sign bit
  // to contain the right value.  Putting the bit in 31 means we can
  // use SRL rather than RISBG(L), and also makes it easier to get a
  // 0/-1 value, so it has priority over the other tests below.
  //
  // These sequences rely on the fact that the upper two bits of the
  // IPM result are zero.
  uint64_t TopBit = uint64_t(1) << 31;
  if (CCMask == (CCValid & SystemZ::CCMASK_0))
    return IPMConversion(0, -(1 << SystemZ::IPM_CC), 31);
  if (CCMask == (CCValid & (SystemZ::CCMASK_0 | SystemZ::CCMASK_1)))
    return IPMConversion(0, -(2 << SystemZ::IPM_CC), 31);
  if (CCMask == (CCValid & (SystemZ::CCMASK_0
                            | SystemZ::CCMASK_1
                            | SystemZ::CCMASK_2)))
    return IPMConversion(0, -(3 << SystemZ::IPM_CC), 31);
  if (CCMask == (CCValid & SystemZ::CCMASK_3))
    return IPMConversion(0, TopBit - (3 << SystemZ::IPM_CC), 31);
  if (CCMask == (CCValid & (SystemZ::CCMASK_1
                            | SystemZ::CCMASK_2
                            | SystemZ::CCMASK_3)))
    return IPMConversion(0, TopBit - (1 << SystemZ::IPM_CC), 31);

  // Next try inverting the value and testing a bit.  0/1 could be
  // handled this way too, but we dealt with that case above.
  if (CCMask == (CCValid & (SystemZ::CCMASK_0 | SystemZ::CCMASK_2)))
    return IPMConversion(-1, 0, SystemZ::IPM_CC);

  // Handle cases where adding a value forces a non-sign bit to contain
  // the right value.
  if (CCMask == (CCValid & (SystemZ::CCMASK_1 | SystemZ::CCMASK_2)))
    return IPMConversion(0, 1 << SystemZ::IPM_CC, SystemZ::IPM_CC + 1);
  if (CCMask == (CCValid & (SystemZ::CCMASK_0 | SystemZ::CCMASK_3)))
    return IPMConversion(0, -(1 << SystemZ::IPM_CC), SystemZ::IPM_CC + 1);

  // The remaining cases are 1, 2, 0/1/3 and 0/2/3.  All of these can
  // be handled by inverting the low CC bit and applying one of the
  // sign-based extractions above.
  if (CCMask == (CCValid & SystemZ::CCMASK_1))
    return IPMConversion(1 << SystemZ::IPM_CC, -(1 << SystemZ::IPM_CC), 31);
  if (CCMask == (CCValid & SystemZ::CCMASK_2))
    return IPMConversion(1 << SystemZ::IPM_CC,
                         TopBit - (3 << SystemZ::IPM_CC), 31);
  if (CCMask == (CCValid & (SystemZ::CCMASK_0
                            | SystemZ::CCMASK_1
                            | SystemZ::CCMASK_3)))
    return IPMConversion(1 << SystemZ::IPM_CC, -(3 << SystemZ::IPM_CC), 31);
  if (CCMask == (CCValid & (SystemZ::CCMASK_0
                            | SystemZ::CCMASK_2
                            | SystemZ::CCMASK_3)))
    return IPMConversion(1 << SystemZ::IPM_CC,
                         TopBit - (1 << SystemZ::IPM_CC), 31);

  llvm_unreachable("Unexpected CC combination");
}
1067263509Sdim
1068251607Sdim// If a comparison described by IsUnsigned, CCMask, CmpOp0 and CmpOp1
1069263509Sdim// can be converted to a comparison against zero, adjust the operands
1070263509Sdim// as necessary.
1071263509Sdimstatic void adjustZeroCmp(SelectionDAG &DAG, bool &IsUnsigned,
1072263509Sdim                          SDValue &CmpOp0, SDValue &CmpOp1,
1073263509Sdim                          unsigned &CCMask) {
1074263509Sdim  if (IsUnsigned)
1075263509Sdim    return;
1076263509Sdim
1077263509Sdim  ConstantSDNode *ConstOp1 = dyn_cast<ConstantSDNode>(CmpOp1.getNode());
1078263509Sdim  if (!ConstOp1)
1079263509Sdim    return;
1080263509Sdim
1081263509Sdim  int64_t Value = ConstOp1->getSExtValue();
1082263509Sdim  if ((Value == -1 && CCMask == SystemZ::CCMASK_CMP_GT) ||
1083263509Sdim      (Value == -1 && CCMask == SystemZ::CCMASK_CMP_LE) ||
1084263509Sdim      (Value == 1 && CCMask == SystemZ::CCMASK_CMP_LT) ||
1085263509Sdim      (Value == 1 && CCMask == SystemZ::CCMASK_CMP_GE)) {
1086263509Sdim    CCMask ^= SystemZ::CCMASK_CMP_EQ;
1087263509Sdim    CmpOp1 = DAG.getConstant(0, CmpOp1.getValueType());
1088263509Sdim  }
1089263509Sdim}
1090263509Sdim
// If a comparison described by IsUnsigned, CCMask, CmpOp0 and CmpOp1
// is suitable for CLI(Y), CHHSI or CLHHSI, adjust the operands as necessary.
//
// On success this may rewrite any of the by-reference parameters: the
// load may be re-emitted with a different extension type, the constant
// may be rewritten, and the signedness/CC mask may change.
static void adjustSubwordCmp(SelectionDAG &DAG, bool &IsUnsigned,
                             SDValue &CmpOp0, SDValue &CmpOp1,
                             unsigned &CCMask) {
  // For us to make any changes, it must a comparison between a single-use
  // load and a constant.
  if (!CmpOp0.hasOneUse() ||
      CmpOp0.getOpcode() != ISD::LOAD ||
      CmpOp1.getOpcode() != ISD::Constant)
    return;

  // We must have an 8- or 16-bit load.
  LoadSDNode *Load = cast<LoadSDNode>(CmpOp0);
  unsigned NumBits = Load->getMemoryVT().getStoreSizeInBits();
  if (NumBits != 8 && NumBits != 16)
    return;

  // The load must be an extending one and the constant must be within the
  // range of the unextended value.
  ConstantSDNode *Constant = cast<ConstantSDNode>(CmpOp1);
  uint64_t Value = Constant->getZExtValue();
  // NumBits <= 16, so this shift cannot overflow.
  uint64_t Mask = (1 << NumBits) - 1;
  if (Load->getExtensionType() == ISD::SEXTLOAD) {
    int64_t SignedValue = Constant->getSExtValue();
    // Reject constants outside [-2^(NumBits-1), 2^(NumBits-1) - 1].
    if (uint64_t(SignedValue) + (1ULL << (NumBits - 1)) > Mask)
      return;
    // Unsigned comparison between two sign-extended values is equivalent
    // to unsigned comparison between two zero-extended values.
    if (IsUnsigned)
      Value &= Mask;
    else if (CCMask == SystemZ::CCMASK_CMP_EQ ||
             CCMask == SystemZ::CCMASK_CMP_NE)
      // Any choice of IsUnsigned is OK for equality comparisons.
      // We could use either CHHSI or CLHHSI for 16-bit comparisons,
      // but since we use CLHHSI for zero extensions, it seems better
      // to be consistent and do the same here.
      Value &= Mask, IsUnsigned = true;
    else if (NumBits == 8) {
      // Try to treat the comparison as unsigned, so that we can use CLI.
      // Adjust CCMask and Value as necessary.
      if (Value == 0 && CCMask == SystemZ::CCMASK_CMP_LT)
        // Test whether the high bit of the byte is set.
        Value = 127, CCMask = SystemZ::CCMASK_CMP_GT, IsUnsigned = true;
      else if (Value == 0 && CCMask == SystemZ::CCMASK_CMP_GE)
        // Test whether the high bit of the byte is clear.
        Value = 128, CCMask = SystemZ::CCMASK_CMP_LT, IsUnsigned = true;
      else
        // No instruction exists for this combination.
        return;
    }
  } else if (Load->getExtensionType() == ISD::ZEXTLOAD) {
    if (Value > Mask)
      return;
    // Signed comparison between two zero-extended values is equivalent
    // to unsigned comparison.
    IsUnsigned = true;
  } else
    return;

  // Make sure that the first operand is an i32 of the right extension type.
  ISD::LoadExtType ExtType = IsUnsigned ? ISD::ZEXTLOAD : ISD::SEXTLOAD;
  if (CmpOp0.getValueType() != MVT::i32 ||
      Load->getExtensionType() != ExtType)
    CmpOp0 = DAG.getExtLoad(ExtType, SDLoc(Load), MVT::i32,
                            Load->getChain(), Load->getBasePtr(),
                            Load->getPointerInfo(), Load->getMemoryVT(),
                            Load->isVolatile(), Load->isNonTemporal(),
                            Load->getAlignment());

  // Make sure that the second operand is an i32 with the right value.
  if (CmpOp1.getValueType() != MVT::i32 ||
      Value != Constant->getZExtValue())
    CmpOp1 = DAG.getConstant(Value, MVT::i32);
}
1166251607Sdim
1167263509Sdim// Return true if Op is either an unextended load, or a load suitable
1168263509Sdim// for integer register-memory comparisons of type ICmpType.
1169263509Sdimstatic bool isNaturalMemoryOperand(SDValue Op, unsigned ICmpType) {
1170263509Sdim  LoadSDNode *Load = dyn_cast<LoadSDNode>(Op.getNode());
1171263509Sdim  if (Load) {
1172263509Sdim    // There are no instructions to compare a register with a memory byte.
1173263509Sdim    if (Load->getMemoryVT() == MVT::i8)
1174263509Sdim      return false;
1175263509Sdim    // Otherwise decide on extension type.
1176263509Sdim    switch (Load->getExtensionType()) {
1177263509Sdim    case ISD::NON_EXTLOAD:
1178263509Sdim      return true;
1179263509Sdim    case ISD::SEXTLOAD:
1180263509Sdim      return ICmpType != SystemZICMP::UnsignedOnly;
1181263509Sdim    case ISD::ZEXTLOAD:
1182263509Sdim      return ICmpType != SystemZICMP::SignedOnly;
1183263509Sdim    default:
1184263509Sdim      break;
1185263509Sdim    }
1186263509Sdim  }
1187263509Sdim  return false;
1188263509Sdim}
1189263509Sdim
1190263509Sdim// Return true if it is better to swap comparison operands Op0 and Op1.
1191263509Sdim// ICmpType is the type of an integer comparison.
1192263509Sdimstatic bool shouldSwapCmpOperands(SDValue Op0, SDValue Op1,
1193263509Sdim                                  unsigned ICmpType) {
1194263509Sdim  // Leave f128 comparisons alone, since they have no memory forms.
1195263509Sdim  if (Op0.getValueType() == MVT::f128)
1196251607Sdim    return false;
1197251607Sdim
1198263509Sdim  // Always keep a floating-point constant second, since comparisons with
1199263509Sdim  // zero can use LOAD TEST and comparisons with other constants make a
1200263509Sdim  // natural memory operand.
1201263509Sdim  if (isa<ConstantFPSDNode>(Op1))
1202263509Sdim    return false;
1203251607Sdim
1204263509Sdim  // Never swap comparisons with zero since there are many ways to optimize
1205263509Sdim  // those later.
1206263509Sdim  ConstantSDNode *COp1 = dyn_cast<ConstantSDNode>(Op1);
1207263509Sdim  if (COp1 && COp1->getZExtValue() == 0)
1208263509Sdim    return false;
1209251607Sdim
1210263509Sdim  // Look for cases where Cmp0 is a single-use load and Cmp1 isn't.
1211263509Sdim  // In that case we generally prefer the memory to be second.
1212263509Sdim  if ((isNaturalMemoryOperand(Op0, ICmpType) && Op0.hasOneUse()) &&
1213263509Sdim      !(isNaturalMemoryOperand(Op1, ICmpType) && Op1.hasOneUse())) {
1214263509Sdim    // The only exceptions are when the second operand is a constant and
1215263509Sdim    // we can use things like CHHSI.
1216263509Sdim    if (!COp1)
1217251607Sdim      return true;
1218263509Sdim    // The unsigned memory-immediate instructions can handle 16-bit
1219263509Sdim    // unsigned integers.
1220263509Sdim    if (ICmpType != SystemZICMP::SignedOnly &&
1221263509Sdim        isUInt<16>(COp1->getZExtValue()))
1222263509Sdim      return false;
1223263509Sdim    // The signed memory-immediate instructions can handle 16-bit
1224263509Sdim    // signed integers.
1225263509Sdim    if (ICmpType != SystemZICMP::UnsignedOnly &&
1226263509Sdim        isInt<16>(COp1->getSExtValue()))
1227263509Sdim      return false;
1228263509Sdim    return true;
1229263509Sdim  }
1230263509Sdim  return false;
1231263509Sdim}
1232251607Sdim
1233263509Sdim// Return true if shift operation N has an in-range constant shift value.
1234263509Sdim// Store it in ShiftVal if so.
1235263509Sdimstatic bool isSimpleShift(SDValue N, unsigned &ShiftVal) {
1236263509Sdim  ConstantSDNode *Shift = dyn_cast<ConstantSDNode>(N.getOperand(1));
1237263509Sdim  if (!Shift)
1238251607Sdim    return false;
1239263509Sdim
1240263509Sdim  uint64_t Amount = Shift->getZExtValue();
1241263509Sdim  if (Amount >= N.getValueType().getSizeInBits())
1242263509Sdim    return false;
1243263509Sdim
1244263509Sdim  ShiftVal = Amount;
1245263509Sdim  return true;
1246263509Sdim}
1247263509Sdim
// Check whether an AND with Mask is suitable for a TEST UNDER MASK
// instruction and whether the CC value is descriptive enough to handle
// a comparison of type ICmpType between the AND result and CmpVal.
// CCMask says which comparison result is being tested and BitSize is
// the number of bits in the operands.  If TEST UNDER MASK can be used,
// return the corresponding CC mask, otherwise return 0.
//
// NOTE(review): BitSize is currently unused by this implementation; the
// checks below rely only on the TM immediate forms and on 64-bit bit
// positions — confirm whether it should constrain the mask.
static unsigned getTestUnderMaskCond(unsigned BitSize, unsigned CCMask,
                                     uint64_t Mask, uint64_t CmpVal,
                                     unsigned ICmpType) {
  assert(Mask != 0 && "ANDs with zero should have been removed by now");

  // Check whether the mask is suitable for TMHH, TMHL, TMLH or TMLL.
  if (!SystemZ::isImmLL(Mask) && !SystemZ::isImmLH(Mask) &&
      !SystemZ::isImmHL(Mask) && !SystemZ::isImmHH(Mask))
    return 0;

  // Work out the masks for the lowest and highest bits.
  unsigned HighShift = 63 - countLeadingZeros(Mask);
  uint64_t High = uint64_t(1) << HighShift;
  uint64_t Low = uint64_t(1) << countTrailingZeros(Mask);

  // Signed ordered comparisons are effectively unsigned if the sign
  // bit is dropped.
  bool EffectivelyUnsigned = (ICmpType != SystemZICMP::SignedOnly);

  // Check for equality comparisons with 0, or the equivalent.
  if (CmpVal == 0) {
    if (CCMask == SystemZ::CCMASK_CMP_EQ)
      return SystemZ::CCMASK_TM_ALL_0;
    if (CCMask == SystemZ::CCMASK_CMP_NE)
      return SystemZ::CCMASK_TM_SOME_1;
  }
  // Any nonzero AND result is >= Low, so an unsigned comparison against
  // a bound of at most Low only distinguishes "no mask bits set" from
  // "some mask bits set".
  if (EffectivelyUnsigned && CmpVal <= Low) {
    if (CCMask == SystemZ::CCMASK_CMP_LT)
      return SystemZ::CCMASK_TM_ALL_0;
    if (CCMask == SystemZ::CCMASK_CMP_GE)
      return SystemZ::CCMASK_TM_SOME_1;
  }
  if (EffectivelyUnsigned && CmpVal < Low) {
    if (CCMask == SystemZ::CCMASK_CMP_LE)
      return SystemZ::CCMASK_TM_ALL_0;
    if (CCMask == SystemZ::CCMASK_CMP_GT)
      return SystemZ::CCMASK_TM_SOME_1;
  }

  // Check for equality comparisons with the mask, or the equivalent.
  if (CmpVal == Mask) {
    if (CCMask == SystemZ::CCMASK_CMP_EQ)
      return SystemZ::CCMASK_TM_ALL_1;
    if (CCMask == SystemZ::CCMASK_CMP_NE)
      return SystemZ::CCMASK_TM_SOME_0;
  }
  // Any AND result other than Mask itself is <= Mask - Low, so comparing
  // against a bound in (Mask - Low, Mask] only distinguishes "all mask
  // bits set" from "some mask bit clear".
  if (EffectivelyUnsigned && CmpVal >= Mask - Low && CmpVal < Mask) {
    if (CCMask == SystemZ::CCMASK_CMP_GT)
      return SystemZ::CCMASK_TM_ALL_1;
    if (CCMask == SystemZ::CCMASK_CMP_LE)
      return SystemZ::CCMASK_TM_SOME_0;
  }
  if (EffectivelyUnsigned && CmpVal > Mask - Low && CmpVal <= Mask) {
    if (CCMask == SystemZ::CCMASK_CMP_GE)
      return SystemZ::CCMASK_TM_ALL_1;
    if (CCMask == SystemZ::CCMASK_CMP_LT)
      return SystemZ::CCMASK_TM_SOME_0;
  }

  // Check for ordered comparisons with the top bit.
  if (EffectivelyUnsigned && CmpVal >= Mask - High && CmpVal < High) {
    if (CCMask == SystemZ::CCMASK_CMP_LE)
      return SystemZ::CCMASK_TM_MSB_0;
    if (CCMask == SystemZ::CCMASK_CMP_GT)
      return SystemZ::CCMASK_TM_MSB_1;
  }
  if (EffectivelyUnsigned && CmpVal > Mask - High && CmpVal <= High) {
    if (CCMask == SystemZ::CCMASK_CMP_LT)
      return SystemZ::CCMASK_TM_MSB_0;
    if (CCMask == SystemZ::CCMASK_CMP_GE)
      return SystemZ::CCMASK_TM_MSB_1;
  }

  // If there are just two bits, we can do equality checks for Low and High
  // as well.
  if (Mask == Low + High) {
    if (CCMask == SystemZ::CCMASK_CMP_EQ && CmpVal == Low)
      return SystemZ::CCMASK_TM_MIXED_MSB_0;
    if (CCMask == SystemZ::CCMASK_CMP_NE && CmpVal == Low)
      return SystemZ::CCMASK_TM_MIXED_MSB_0 ^ SystemZ::CCMASK_ANY;
    if (CCMask == SystemZ::CCMASK_CMP_EQ && CmpVal == High)
      return SystemZ::CCMASK_TM_MIXED_MSB_1;
    if (CCMask == SystemZ::CCMASK_CMP_NE && CmpVal == High)
      return SystemZ::CCMASK_TM_MIXED_MSB_1 ^ SystemZ::CCMASK_ANY;
  }

  // Looks like we've exhausted our options.
  return 0;
}
1343251607Sdim
// See whether the comparison (Opcode CmpOp0, CmpOp1, ICmpType) can be
// implemented as a TEST UNDER MASK instruction when the condition being
// tested is as described by CCValid and CCMask.  Update the arguments
// with the TM version if so.
static void adjustForTestUnderMask(SelectionDAG &DAG, unsigned &Opcode,
                                   SDValue &CmpOp0, SDValue &CmpOp1,
                                   unsigned &CCValid, unsigned &CCMask,
                                   unsigned &ICmpType) {
  // Check that we have a comparison with a constant.
  ConstantSDNode *ConstCmpOp1 = dyn_cast<ConstantSDNode>(CmpOp1);
  if (!ConstCmpOp1)
    return;
  uint64_t CmpVal = ConstCmpOp1->getZExtValue();

  // Check whether the nonconstant input is an AND with a constant mask.
  if (CmpOp0.getOpcode() != ISD::AND)
    return;
  SDValue AndOp0 = CmpOp0.getOperand(0);
  SDValue AndOp1 = CmpOp0.getOperand(1);
  ConstantSDNode *Mask = dyn_cast<ConstantSDNode>(AndOp1.getNode());
  if (!Mask)
    return;
  uint64_t MaskVal = Mask->getZExtValue();

  // Check whether the combination of mask, comparison value and comparison
  // type are suitable.
  unsigned BitSize = CmpOp0.getValueType().getSizeInBits();
  unsigned NewCCMask, ShiftVal;
  // If the AND input is itself a simple constant shift, try folding the
  // shift into the mask and comparison value so that TM can test the
  // unshifted value directly:
  //   ((X << S) & M) cmp V  becomes  (X & (M >> S)) cmp (V >> S)
  //   ((X >> S) & M) cmp V  becomes  (X & (M << S)) cmp (V << S)
  if (ICmpType != SystemZICMP::SignedOnly &&
      AndOp0.getOpcode() == ISD::SHL &&
      isSimpleShift(AndOp0, ShiftVal) &&
      (NewCCMask = getTestUnderMaskCond(BitSize, CCMask, MaskVal >> ShiftVal,
                                        CmpVal >> ShiftVal,
                                        SystemZICMP::Any))) {
    AndOp0 = AndOp0.getOperand(0);
    AndOp1 = DAG.getConstant(MaskVal >> ShiftVal, AndOp0.getValueType());
  } else if (ICmpType != SystemZICMP::SignedOnly &&
             AndOp0.getOpcode() == ISD::SRL &&
             isSimpleShift(AndOp0, ShiftVal) &&
             (NewCCMask = getTestUnderMaskCond(BitSize, CCMask,
                                               MaskVal << ShiftVal,
                                               CmpVal << ShiftVal,
                                               SystemZICMP::UnsignedOnly))) {
    AndOp0 = AndOp0.getOperand(0);
    AndOp1 = DAG.getConstant(MaskVal << ShiftVal, AndOp0.getValueType());
  } else {
    // No shift to fold; test the AND operands as they are.
    NewCCMask = getTestUnderMaskCond(BitSize, CCMask, MaskVal, CmpVal,
                                     ICmpType);
    if (!NewCCMask)
      return;
  }

  // Go ahead and make the change.
  Opcode = SystemZISD::TM;
  CmpOp0 = AndOp0;
  CmpOp1 = AndOp1;
  // ICmpType is re-purposed here as a 0/1 flag for the TM node: it is set
  // exactly when one (but not both) of the mixed-MSB CC values is being
  // tested.  NOTE(review): the consumer-side meaning of this flag is
  // defined by the instruction selector — confirm there.
  ICmpType = (bool(NewCCMask & SystemZ::CCMASK_TM_MIXED_MSB_0) !=
              bool(NewCCMask & SystemZ::CCMASK_TM_MIXED_MSB_1));
  CCValid = SystemZ::CCMASK_TM;
  CCMask = NewCCMask;
}
1405263509Sdim
// Return a target node that compares CmpOp0 with CmpOp1 and stores a
// 2-bit result in CC.  Set CCValid to the CCMASK_* of all possible
// 2-bit results and CCMask to the subset of those results that are
// associated with Cond.
static SDValue emitCmp(const SystemZTargetMachine &TM, SelectionDAG &DAG,
                       SDLoc DL, SDValue CmpOp0, SDValue CmpOp1,
                       ISD::CondCode Cond, unsigned &CCValid,
                       unsigned &CCMask) {
  bool IsUnsigned = false;
  CCMask = CCMaskForCondCode(Cond);
  unsigned Opcode, ICmpType = 0;
  if (CmpOp0.getValueType().isFloatingPoint()) {
    // Floating-point comparisons use the full FCMP CC range unchanged.
    CCValid = SystemZ::CCMASK_FCMP;
    Opcode = SystemZISD::FCMP;
  } else {
    // For integers the "unordered" bit in the mask marks an unsigned
    // comparison; drop any bits outside the integer CC range.
    IsUnsigned = CCMask & SystemZ::CCMASK_CMP_UO;
    CCValid = SystemZ::CCMASK_ICMP;
    CCMask &= CCValid;
    // Try to simplify comparisons with zero and with sub-word values
    // before choosing the comparison type.
    adjustZeroCmp(DAG, IsUnsigned, CmpOp0, CmpOp1, CCMask);
    adjustSubwordCmp(DAG, IsUnsigned, CmpOp0, CmpOp1, CCMask);
    Opcode = SystemZISD::ICMP;
    // Choose the type of comparison.  Equality and inequality tests can
    // use either signed or unsigned comparisons.  The choice also doesn't
    // matter if both sign bits are known to be clear.  In those cases we
    // want to give the main isel code the freedom to choose whichever
    // form fits best.
    if (CCMask == SystemZ::CCMASK_CMP_EQ ||
        CCMask == SystemZ::CCMASK_CMP_NE ||
        (DAG.SignBitIsZero(CmpOp0) && DAG.SignBitIsZero(CmpOp1)))
      ICmpType = SystemZICMP::Any;
    else if (IsUnsigned)
      ICmpType = SystemZICMP::UnsignedOnly;
    else
      ICmpType = SystemZICMP::SignedOnly;
  }

  // If a swap is preferable, mirror the condition: LT and GT exchange
  // places while the EQ and UO components are unaffected.
  if (shouldSwapCmpOperands(CmpOp0, CmpOp1, ICmpType)) {
    std::swap(CmpOp0, CmpOp1);
    CCMask = ((CCMask & SystemZ::CCMASK_CMP_EQ) |
              (CCMask & SystemZ::CCMASK_CMP_GT ? SystemZ::CCMASK_CMP_LT : 0) |
              (CCMask & SystemZ::CCMASK_CMP_LT ? SystemZ::CCMASK_CMP_GT : 0) |
              (CCMask & SystemZ::CCMASK_CMP_UO));
  }

  // See if the comparison can be turned into a TEST UNDER MASK instead.
  adjustForTestUnderMask(DAG, Opcode, CmpOp0, CmpOp1, CCValid, CCMask,
                         ICmpType);
  // Integer comparisons and TM carry the comparison type as an extra
  // constant operand; FCMP does not.
  if (Opcode == SystemZISD::ICMP || Opcode == SystemZISD::TM)
    return DAG.getNode(Opcode, DL, MVT::Glue, CmpOp0, CmpOp1,
                       DAG.getConstant(ICmpType, MVT::i32));
  return DAG.getNode(Opcode, DL, MVT::Glue, CmpOp0, CmpOp1);
}
1457251607Sdim
1458263509Sdim// Implement a 32-bit *MUL_LOHI operation by extending both operands to
1459263509Sdim// 64 bits.  Extend is the extension type to use.  Store the high part
1460263509Sdim// in Hi and the low part in Lo.
1461263509Sdimstatic void lowerMUL_LOHI32(SelectionDAG &DAG, SDLoc DL,
1462263509Sdim                            unsigned Extend, SDValue Op0, SDValue Op1,
1463263509Sdim                            SDValue &Hi, SDValue &Lo) {
1464263509Sdim  Op0 = DAG.getNode(Extend, DL, MVT::i64, Op0);
1465263509Sdim  Op1 = DAG.getNode(Extend, DL, MVT::i64, Op1);
1466263509Sdim  SDValue Mul = DAG.getNode(ISD::MUL, DL, MVT::i64, Op0, Op1);
1467263509Sdim  Hi = DAG.getNode(ISD::SRL, DL, MVT::i64, Mul, DAG.getConstant(32, MVT::i64));
1468263509Sdim  Hi = DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Hi);
1469263509Sdim  Lo = DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Mul);
1470263509Sdim}
1471263509Sdim
1472251607Sdim// Lower a binary operation that produces two VT results, one in each
1473251607Sdim// half of a GR128 pair.  Op0 and Op1 are the VT operands to the operation,
1474251607Sdim// Extend extends Op0 to a GR128, and Opcode performs the GR128 operation
1475251607Sdim// on the extended Op0 and (unextended) Op1.  Store the even register result
1476251607Sdim// in Even and the odd register result in Odd.
1477263509Sdimstatic void lowerGR128Binary(SelectionDAG &DAG, SDLoc DL, EVT VT,
1478251607Sdim                             unsigned Extend, unsigned Opcode,
1479251607Sdim                             SDValue Op0, SDValue Op1,
1480251607Sdim                             SDValue &Even, SDValue &Odd) {
1481251607Sdim  SDNode *In128 = DAG.getMachineNode(Extend, DL, MVT::Untyped, Op0);
1482251607Sdim  SDValue Result = DAG.getNode(Opcode, DL, MVT::Untyped,
1483251607Sdim                               SDValue(In128, 0), Op1);
1484251607Sdim  bool Is32Bit = is32Bit(VT);
1485263509Sdim  Even = DAG.getTargetExtractSubreg(SystemZ::even128(Is32Bit), DL, VT, Result);
1486263509Sdim  Odd = DAG.getTargetExtractSubreg(SystemZ::odd128(Is32Bit), DL, VT, Result);
1487251607Sdim}
1488251607Sdim
// Lower a SETCC node by emitting the comparison, extracting the CC value
// with IPM, and converting it to the required 0/1 result.
SDValue SystemZTargetLowering::lowerSETCC(SDValue Op,
                                          SelectionDAG &DAG) const {
  SDValue CmpOp0   = Op.getOperand(0);
  SDValue CmpOp1   = Op.getOperand(1);
  ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get();
  SDLoc DL(Op);

  unsigned CCValid, CCMask;
  SDValue Glue = emitCmp(TM, DAG, DL, CmpOp0, CmpOp1, CC, CCValid, CCMask);

  // Work out the (((X ^ XORValue) + AddValue) >> Bit) sequence that turns
  // the IPM result into the 0/1 value we need.
  IPMConversion Conversion = getIPMConversion(CCValid, CCMask);
  SDValue Result = DAG.getNode(SystemZISD::IPM, DL, MVT::i32, Glue);

  if (Conversion.XORValue)
    Result = DAG.getNode(ISD::XOR, DL, MVT::i32, Result,
                         DAG.getConstant(Conversion.XORValue, MVT::i32));

  if (Conversion.AddValue)
    Result = DAG.getNode(ISD::ADD, DL, MVT::i32, Result,
                         DAG.getConstant(Conversion.AddValue, MVT::i32));

  // The SHR/AND sequence should get optimized to an RISBG.
  Result = DAG.getNode(ISD::SRL, DL, MVT::i32, Result,
                       DAG.getConstant(Conversion.Bit, MVT::i32));
  // Shifting right by 31 already leaves a single bit, so the AND is only
  // needed for smaller shift amounts.
  if (Conversion.Bit != 31)
    Result = DAG.getNode(ISD::AND, DL, MVT::i32, Result,
                         DAG.getConstant(1, MVT::i32));
  return Result;
}
1518263509Sdim
1519251607SdimSDValue SystemZTargetLowering::lowerBR_CC(SDValue Op, SelectionDAG &DAG) const {
1520251607Sdim  SDValue Chain    = Op.getOperand(0);
1521251607Sdim  ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(1))->get();
1522251607Sdim  SDValue CmpOp0   = Op.getOperand(2);
1523251607Sdim  SDValue CmpOp1   = Op.getOperand(3);
1524251607Sdim  SDValue Dest     = Op.getOperand(4);
1525263509Sdim  SDLoc DL(Op);
1526251607Sdim
1527263509Sdim  unsigned CCValid, CCMask;
1528263509Sdim  SDValue Flags = emitCmp(TM, DAG, DL, CmpOp0, CmpOp1, CC, CCValid, CCMask);
1529251607Sdim  return DAG.getNode(SystemZISD::BR_CCMASK, DL, Op.getValueType(),
1530263509Sdim                     Chain, DAG.getConstant(CCValid, MVT::i32),
1531263509Sdim                     DAG.getConstant(CCMask, MVT::i32), Dest, Flags);
1532251607Sdim}
1533251607Sdim
1534251607SdimSDValue SystemZTargetLowering::lowerSELECT_CC(SDValue Op,
1535251607Sdim                                              SelectionDAG &DAG) const {
1536251607Sdim  SDValue CmpOp0   = Op.getOperand(0);
1537251607Sdim  SDValue CmpOp1   = Op.getOperand(1);
1538251607Sdim  SDValue TrueOp   = Op.getOperand(2);
1539251607Sdim  SDValue FalseOp  = Op.getOperand(3);
1540251607Sdim  ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(4))->get();
1541263509Sdim  SDLoc DL(Op);
1542251607Sdim
1543263509Sdim  unsigned CCValid, CCMask;
1544263509Sdim  SDValue Flags = emitCmp(TM, DAG, DL, CmpOp0, CmpOp1, CC, CCValid, CCMask);
1545251607Sdim
1546263509Sdim  SmallVector<SDValue, 5> Ops;
1547251607Sdim  Ops.push_back(TrueOp);
1548251607Sdim  Ops.push_back(FalseOp);
1549263509Sdim  Ops.push_back(DAG.getConstant(CCValid, MVT::i32));
1550251607Sdim  Ops.push_back(DAG.getConstant(CCMask, MVT::i32));
1551251607Sdim  Ops.push_back(Flags);
1552251607Sdim
1553251607Sdim  SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::Glue);
1554251607Sdim  return DAG.getNode(SystemZISD::SELECT_CCMASK, DL, VTs, &Ops[0], Ops.size());
1555251607Sdim}
1556251607Sdim
// Lower a GlobalAddress node, using PC-relative addressing where the
// symbol is directly reachable and a GOT load otherwise.
SDValue SystemZTargetLowering::lowerGlobalAddress(GlobalAddressSDNode *Node,
                                                  SelectionDAG &DAG) const {
  SDLoc DL(Node);
  const GlobalValue *GV = Node->getGlobal();
  int64_t Offset = Node->getOffset();
  EVT PtrVT = getPointerTy();
  Reloc::Model RM = TM.getRelocationModel();
  CodeModel::Model CM = TM.getCodeModel();

  SDValue Result;
  if (Subtarget.isPC32DBLSymbol(GV, RM, CM)) {
    // Assign anchors at 1<<12 byte boundaries.
    uint64_t Anchor = Offset & ~uint64_t(0xfff);
    Result = DAG.getTargetGlobalAddress(GV, DL, PtrVT, Anchor);
    Result = DAG.getNode(SystemZISD::PCREL_WRAPPER, DL, PtrVT, Result);

    // The offset can be folded into the address if it is aligned to a halfword.
    Offset -= Anchor;
    if (Offset != 0 && (Offset & 1) == 0) {
      SDValue Full = DAG.getTargetGlobalAddress(GV, DL, PtrVT, Anchor + Offset);
      Result = DAG.getNode(SystemZISD::PCREL_OFFSET, DL, PtrVT, Full, Result);
      Offset = 0;
    }
  } else {
    // Not directly reachable; load the address from the GOT instead.
    Result = DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0, SystemZII::MO_GOT);
    Result = DAG.getNode(SystemZISD::PCREL_WRAPPER, DL, PtrVT, Result);
    Result = DAG.getLoad(PtrVT, DL, DAG.getEntryNode(), Result,
                         MachinePointerInfo::getGOT(), false, false, false, 0);
  }

  // If there was a non-zero offset that we didn't fold, create an explicit
  // addition for it.
  if (Offset != 0)
    Result = DAG.getNode(ISD::ADD, DL, PtrVT, Result,
                         DAG.getConstant(Offset, PtrVT));

  return Result;
}
1595251607Sdim
1596251607SdimSDValue SystemZTargetLowering::lowerGlobalTLSAddress(GlobalAddressSDNode *Node,
1597251607Sdim						     SelectionDAG &DAG) const {
1598263509Sdim  SDLoc DL(Node);
1599251607Sdim  const GlobalValue *GV = Node->getGlobal();
1600251607Sdim  EVT PtrVT = getPointerTy();
1601251607Sdim  TLSModel::Model model = TM.getTLSModel(GV);
1602251607Sdim
1603251607Sdim  if (model != TLSModel::LocalExec)
1604251607Sdim    llvm_unreachable("only local-exec TLS mode supported");
1605251607Sdim
1606251607Sdim  // The high part of the thread pointer is in access register 0.
1607251607Sdim  SDValue TPHi = DAG.getNode(SystemZISD::EXTRACT_ACCESS, DL, MVT::i32,
1608251607Sdim                             DAG.getConstant(0, MVT::i32));
1609251607Sdim  TPHi = DAG.getNode(ISD::ANY_EXTEND, DL, PtrVT, TPHi);
1610251607Sdim
1611251607Sdim  // The low part of the thread pointer is in access register 1.
1612251607Sdim  SDValue TPLo = DAG.getNode(SystemZISD::EXTRACT_ACCESS, DL, MVT::i32,
1613251607Sdim                             DAG.getConstant(1, MVT::i32));
1614251607Sdim  TPLo = DAG.getNode(ISD::ZERO_EXTEND, DL, PtrVT, TPLo);
1615251607Sdim
1616251607Sdim  // Merge them into a single 64-bit address.
1617251607Sdim  SDValue TPHiShifted = DAG.getNode(ISD::SHL, DL, PtrVT, TPHi,
1618251607Sdim				    DAG.getConstant(32, PtrVT));
1619251607Sdim  SDValue TP = DAG.getNode(ISD::OR, DL, PtrVT, TPHiShifted, TPLo);
1620251607Sdim
1621251607Sdim  // Get the offset of GA from the thread pointer.
1622251607Sdim  SystemZConstantPoolValue *CPV =
1623251607Sdim    SystemZConstantPoolValue::Create(GV, SystemZCP::NTPOFF);
1624251607Sdim
1625251607Sdim  // Force the offset into the constant pool and load it from there.
1626251607Sdim  SDValue CPAddr = DAG.getConstantPool(CPV, PtrVT, 8);
1627251607Sdim  SDValue Offset = DAG.getLoad(PtrVT, DL, DAG.getEntryNode(),
1628251607Sdim			       CPAddr, MachinePointerInfo::getConstantPool(),
1629251607Sdim			       false, false, false, 0);
1630251607Sdim
1631251607Sdim  // Add the base and offset together.
1632251607Sdim  return DAG.getNode(ISD::ADD, DL, PtrVT, TP, Offset);
1633251607Sdim}
1634251607Sdim
1635251607SdimSDValue SystemZTargetLowering::lowerBlockAddress(BlockAddressSDNode *Node,
1636251607Sdim                                                 SelectionDAG &DAG) const {
1637263509Sdim  SDLoc DL(Node);
1638251607Sdim  const BlockAddress *BA = Node->getBlockAddress();
1639251607Sdim  int64_t Offset = Node->getOffset();
1640251607Sdim  EVT PtrVT = getPointerTy();
1641251607Sdim
1642251607Sdim  SDValue Result = DAG.getTargetBlockAddress(BA, PtrVT, Offset);
1643251607Sdim  Result = DAG.getNode(SystemZISD::PCREL_WRAPPER, DL, PtrVT, Result);
1644251607Sdim  return Result;
1645251607Sdim}
1646251607Sdim
1647251607SdimSDValue SystemZTargetLowering::lowerJumpTable(JumpTableSDNode *JT,
1648251607Sdim                                              SelectionDAG &DAG) const {
1649263509Sdim  SDLoc DL(JT);
1650251607Sdim  EVT PtrVT = getPointerTy();
1651251607Sdim  SDValue Result = DAG.getTargetJumpTable(JT->getIndex(), PtrVT);
1652251607Sdim
1653251607Sdim  // Use LARL to load the address of the table.
1654251607Sdim  return DAG.getNode(SystemZISD::PCREL_WRAPPER, DL, PtrVT, Result);
1655251607Sdim}
1656251607Sdim
1657251607SdimSDValue SystemZTargetLowering::lowerConstantPool(ConstantPoolSDNode *CP,
1658251607Sdim                                                 SelectionDAG &DAG) const {
1659263509Sdim  SDLoc DL(CP);
1660251607Sdim  EVT PtrVT = getPointerTy();
1661251607Sdim
1662251607Sdim  SDValue Result;
1663251607Sdim  if (CP->isMachineConstantPoolEntry())
1664251607Sdim    Result = DAG.getTargetConstantPool(CP->getMachineCPVal(), PtrVT,
1665251607Sdim				       CP->getAlignment());
1666251607Sdim  else
1667251607Sdim    Result = DAG.getTargetConstantPool(CP->getConstVal(), PtrVT,
1668251607Sdim				       CP->getAlignment(), CP->getOffset());
1669251607Sdim
1670251607Sdim  // Use LARL to load the address of the constant pool entry.
1671251607Sdim  return DAG.getNode(SystemZISD::PCREL_WRAPPER, DL, PtrVT, Result);
1672251607Sdim}
1673251607Sdim
// Lower a bitcast between i32 and f32 by routing the value through the
// high 32 bits of a 64-bit register.
SDValue SystemZTargetLowering::lowerBITCAST(SDValue Op,
                                            SelectionDAG &DAG) const {
  SDLoc DL(Op);
  SDValue In = Op.getOperand(0);
  EVT InVT = In.getValueType();
  EVT ResVT = Op.getValueType();

  // i32 -> f32: place the i32 in the high word of an i64, bitcast that
  // to f64, then extract the high f32 subregister.
  if (InVT == MVT::i32 && ResVT == MVT::f32) {
    SDValue In64;
    if (Subtarget.hasHighWord()) {
      // With high-word support, insert the i32 directly into the high
      // 32-bit subregister of an undefined i64.
      SDNode *U64 = DAG.getMachineNode(TargetOpcode::IMPLICIT_DEF, DL,
                                       MVT::i64);
      In64 = DAG.getTargetInsertSubreg(SystemZ::subreg_h32, DL,
                                       MVT::i64, SDValue(U64, 0), In);
    } else {
      // Otherwise extend to 64 bits and shift the value into the high word.
      In64 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, In);
      In64 = DAG.getNode(ISD::SHL, DL, MVT::i64, In64,
                         DAG.getConstant(32, MVT::i64));
    }
    SDValue Out64 = DAG.getNode(ISD::BITCAST, DL, MVT::f64, In64);
    return DAG.getTargetExtractSubreg(SystemZ::subreg_h32,
                                      DL, MVT::f32, Out64);
  }
  // f32 -> i32: the reverse route — widen to f64, bitcast to i64, then
  // take the high 32 bits.
  if (InVT == MVT::f32 && ResVT == MVT::i32) {
    SDNode *U64 = DAG.getMachineNode(TargetOpcode::IMPLICIT_DEF, DL, MVT::f64);
    SDValue In64 = DAG.getTargetInsertSubreg(SystemZ::subreg_h32, DL,
                                             MVT::f64, SDValue(U64, 0), In);
    SDValue Out64 = DAG.getNode(ISD::BITCAST, DL, MVT::i64, In64);
    if (Subtarget.hasHighWord())
      return DAG.getTargetExtractSubreg(SystemZ::subreg_h32, DL,
                                        MVT::i32, Out64);
    // Without high-word support, shift the high word down and truncate.
    SDValue Shift = DAG.getNode(ISD::SRL, DL, MVT::i64, Out64,
                                DAG.getConstant(32, MVT::i64));
    return DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Shift);
  }
  llvm_unreachable("Unexpected bitcast combination");
}
1711251607Sdim
// Lower VASTART by storing the four fields of the va_list structure
// (first GPR, first FPR, overflow frame index, register-save frame index)
// at consecutive 8-byte offsets from the given address.
SDValue SystemZTargetLowering::lowerVASTART(SDValue Op,
                                            SelectionDAG &DAG) const {
  MachineFunction &MF = DAG.getMachineFunction();
  SystemZMachineFunctionInfo *FuncInfo =
    MF.getInfo<SystemZMachineFunctionInfo>();
  EVT PtrVT = getPointerTy();

  SDValue Chain   = Op.getOperand(0);
  SDValue Addr    = Op.getOperand(1);
  const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
  SDLoc DL(Op);

  // The initial values of each field.
  const unsigned NumFields = 4;
  SDValue Fields[NumFields] = {
    DAG.getConstant(FuncInfo->getVarArgsFirstGPR(), PtrVT),
    DAG.getConstant(FuncInfo->getVarArgsFirstFPR(), PtrVT),
    DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT),
    DAG.getFrameIndex(FuncInfo->getRegSaveFrameIndex(), PtrVT)
  };

  // Store each field into its respective slot, 8 bytes apart.
  SDValue MemOps[NumFields];
  unsigned Offset = 0;
  for (unsigned I = 0; I < NumFields; ++I) {
    SDValue FieldAddr = Addr;
    if (Offset != 0)
      FieldAddr = DAG.getNode(ISD::ADD, DL, PtrVT, FieldAddr,
                              DAG.getIntPtrConstant(Offset));
    MemOps[I] = DAG.getStore(Chain, DL, Fields[I], FieldAddr,
                             MachinePointerInfo(SV, Offset),
                             false, false, 0);
    Offset += 8;
  }
  // Tie all of the stores together.
  return DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOps, NumFields);
}
1748251607Sdim
1749251607SdimSDValue SystemZTargetLowering::lowerVACOPY(SDValue Op,
1750251607Sdim                                           SelectionDAG &DAG) const {
1751251607Sdim  SDValue Chain      = Op.getOperand(0);
1752251607Sdim  SDValue DstPtr     = Op.getOperand(1);
1753251607Sdim  SDValue SrcPtr     = Op.getOperand(2);
1754251607Sdim  const Value *DstSV = cast<SrcValueSDNode>(Op.getOperand(3))->getValue();
1755251607Sdim  const Value *SrcSV = cast<SrcValueSDNode>(Op.getOperand(4))->getValue();
1756263509Sdim  SDLoc DL(Op);
1757251607Sdim
1758251607Sdim  return DAG.getMemcpy(Chain, DL, DstPtr, SrcPtr, DAG.getIntPtrConstant(32),
1759251607Sdim                       /*Align*/8, /*isVolatile*/false, /*AlwaysInline*/false,
1760251607Sdim                       MachinePointerInfo(DstSV), MachinePointerInfo(SrcSV));
1761251607Sdim}
1762251607Sdim
1763251607SdimSDValue SystemZTargetLowering::
1764251607SdimlowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG) const {
1765251607Sdim  SDValue Chain = Op.getOperand(0);
1766251607Sdim  SDValue Size  = Op.getOperand(1);
1767263509Sdim  SDLoc DL(Op);
1768251607Sdim
1769251607Sdim  unsigned SPReg = getStackPointerRegisterToSaveRestore();
1770251607Sdim
1771251607Sdim  // Get a reference to the stack pointer.
1772251607Sdim  SDValue OldSP = DAG.getCopyFromReg(Chain, DL, SPReg, MVT::i64);
1773251607Sdim
1774251607Sdim  // Get the new stack pointer value.
1775251607Sdim  SDValue NewSP = DAG.getNode(ISD::SUB, DL, MVT::i64, OldSP, Size);
1776251607Sdim
1777251607Sdim  // Copy the new stack pointer back.
1778251607Sdim  Chain = DAG.getCopyToReg(Chain, DL, SPReg, NewSP);
1779251607Sdim
1780251607Sdim  // The allocated data lives above the 160 bytes allocated for the standard
1781251607Sdim  // frame, plus any outgoing stack arguments.  We don't know how much that
1782251607Sdim  // amounts to yet, so emit a special ADJDYNALLOC placeholder.
1783251607Sdim  SDValue ArgAdjust = DAG.getNode(SystemZISD::ADJDYNALLOC, DL, MVT::i64);
1784251607Sdim  SDValue Result = DAG.getNode(ISD::ADD, DL, MVT::i64, NewSP, ArgAdjust);
1785251607Sdim
1786251607Sdim  SDValue Ops[2] = { Result, Chain };
1787251607Sdim  return DAG.getMergeValues(Ops, 2, DL);
1788251607Sdim}
1789251607Sdim
// Lower SMUL_LOHI.  Ops[0] receives the low half of the result and
// Ops[1] the high half.
SDValue SystemZTargetLowering::lowerSMUL_LOHI(SDValue Op,
                                              SelectionDAG &DAG) const {
  EVT VT = Op.getValueType();
  SDLoc DL(Op);
  SDValue Ops[2];
  if (is32Bit(VT))
    // Just do a normal 64-bit multiplication and extract the results.
    // We define this so that it can be used for constant division.
    lowerMUL_LOHI32(DAG, DL, ISD::SIGN_EXTEND, Op.getOperand(0),
                    Op.getOperand(1), Ops[1], Ops[0]);
  else {
    // Do a full 128-bit multiplication based on UMUL_LOHI64:
    //
    //   (ll * rl) + ((lh * rl) << 64) + ((ll * rh) << 64)
    //
    // but using the fact that the upper halves are either all zeros
    // or all ones:
    //
    //   (ll * rl) - ((lh & rl) << 64) - ((ll & rh) << 64)
    //
    // and grouping the right terms together since they are quicker than the
    // multiplication:
    //
    //   (ll * rl) - (((lh & rl) + (ll & rh)) << 64)
    SDValue C63 = DAG.getConstant(63, MVT::i64);
    SDValue LL = Op.getOperand(0);
    SDValue RL = Op.getOperand(1);
    // LH and RH are the sign bits of LL and RL broadcast to all 64 bits
    // (all-zeros or all-ones), standing in for the upper halves above.
    SDValue LH = DAG.getNode(ISD::SRA, DL, VT, LL, C63);
    SDValue RH = DAG.getNode(ISD::SRA, DL, VT, RL, C63);
    // UMUL_LOHI64 returns the low result in the odd register and the high
    // result in the even register.  SMUL_LOHI is defined to return the
    // low half first, so the results are in reverse order.
    lowerGR128Binary(DAG, DL, VT, SystemZ::AEXT128_64, SystemZISD::UMUL_LOHI64,
                     LL, RL, Ops[1], Ops[0]);
    // Apply the signed correction derived above to the unsigned high part.
    SDValue NegLLTimesRH = DAG.getNode(ISD::AND, DL, VT, LL, RH);
    SDValue NegLHTimesRL = DAG.getNode(ISD::AND, DL, VT, LH, RL);
    SDValue NegSum = DAG.getNode(ISD::ADD, DL, VT, NegLLTimesRH, NegLHTimesRL);
    Ops[1] = DAG.getNode(ISD::SUB, DL, VT, Ops[1], NegSum);
  }
  return DAG.getMergeValues(Ops, 2, DL);
}
1831263509Sdim
1832251607SdimSDValue SystemZTargetLowering::lowerUMUL_LOHI(SDValue Op,
1833251607Sdim                                              SelectionDAG &DAG) const {
1834251607Sdim  EVT VT = Op.getValueType();
1835263509Sdim  SDLoc DL(Op);
1836251607Sdim  SDValue Ops[2];
1837263509Sdim  if (is32Bit(VT))
1838263509Sdim    // Just do a normal 64-bit multiplication and extract the results.
1839263509Sdim    // We define this so that it can be used for constant division.
1840263509Sdim    lowerMUL_LOHI32(DAG, DL, ISD::ZERO_EXTEND, Op.getOperand(0),
1841263509Sdim                    Op.getOperand(1), Ops[1], Ops[0]);
1842263509Sdim  else
1843263509Sdim    // UMUL_LOHI64 returns the low result in the odd register and the high
1844263509Sdim    // result in the even register.  UMUL_LOHI is defined to return the
1845263509Sdim    // low half first, so the results are in reverse order.
1846263509Sdim    lowerGR128Binary(DAG, DL, VT, SystemZ::AEXT128_64, SystemZISD::UMUL_LOHI64,
1847263509Sdim                     Op.getOperand(0), Op.getOperand(1), Ops[1], Ops[0]);
1848251607Sdim  return DAG.getMergeValues(Ops, 2, DL);
1849251607Sdim}
1850251607Sdim
1851251607SdimSDValue SystemZTargetLowering::lowerSDIVREM(SDValue Op,
1852251607Sdim                                            SelectionDAG &DAG) const {
1853251607Sdim  SDValue Op0 = Op.getOperand(0);
1854251607Sdim  SDValue Op1 = Op.getOperand(1);
1855251607Sdim  EVT VT = Op.getValueType();
1856263509Sdim  SDLoc DL(Op);
1857263509Sdim  unsigned Opcode;
1858251607Sdim
1859251607Sdim  // We use DSGF for 32-bit division.
1860251607Sdim  if (is32Bit(VT)) {
1861251607Sdim    Op0 = DAG.getNode(ISD::SIGN_EXTEND, DL, MVT::i64, Op0);
1862263509Sdim    Opcode = SystemZISD::SDIVREM32;
1863263509Sdim  } else if (DAG.ComputeNumSignBits(Op1) > 32) {
1864263509Sdim    Op1 = DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Op1);
1865263509Sdim    Opcode = SystemZISD::SDIVREM32;
1866263509Sdim  } else
1867263509Sdim    Opcode = SystemZISD::SDIVREM64;
1868251607Sdim
1869251607Sdim  // DSG(F) takes a 64-bit dividend, so the even register in the GR128
1870251607Sdim  // input is "don't care".  The instruction returns the remainder in
1871251607Sdim  // the even register and the quotient in the odd register.
1872251607Sdim  SDValue Ops[2];
1873263509Sdim  lowerGR128Binary(DAG, DL, VT, SystemZ::AEXT128_64, Opcode,
1874251607Sdim                   Op0, Op1, Ops[1], Ops[0]);
1875251607Sdim  return DAG.getMergeValues(Ops, 2, DL);
1876251607Sdim}
1877251607Sdim
1878251607SdimSDValue SystemZTargetLowering::lowerUDIVREM(SDValue Op,
1879251607Sdim                                            SelectionDAG &DAG) const {
1880251607Sdim  EVT VT = Op.getValueType();
1881263509Sdim  SDLoc DL(Op);
1882251607Sdim
1883251607Sdim  // DL(G) uses a double-width dividend, so we need to clear the even
1884251607Sdim  // register in the GR128 input.  The instruction returns the remainder
1885251607Sdim  // in the even register and the quotient in the odd register.
1886251607Sdim  SDValue Ops[2];
1887251607Sdim  if (is32Bit(VT))
1888251607Sdim    lowerGR128Binary(DAG, DL, VT, SystemZ::ZEXT128_32, SystemZISD::UDIVREM32,
1889251607Sdim                     Op.getOperand(0), Op.getOperand(1), Ops[1], Ops[0]);
1890251607Sdim  else
1891251607Sdim    lowerGR128Binary(DAG, DL, VT, SystemZ::ZEXT128_64, SystemZISD::UDIVREM64,
1892251607Sdim                     Op.getOperand(0), Op.getOperand(1), Ops[1], Ops[0]);
1893251607Sdim  return DAG.getMergeValues(Ops, 2, DL);
1894251607Sdim}
1895251607Sdim
1896251607SdimSDValue SystemZTargetLowering::lowerOR(SDValue Op, SelectionDAG &DAG) const {
1897251607Sdim  assert(Op.getValueType() == MVT::i64 && "Should be 64-bit operation");
1898251607Sdim
1899251607Sdim  // Get the known-zero masks for each operand.
1900251607Sdim  SDValue Ops[] = { Op.getOperand(0), Op.getOperand(1) };
1901251607Sdim  APInt KnownZero[2], KnownOne[2];
1902251607Sdim  DAG.ComputeMaskedBits(Ops[0], KnownZero[0], KnownOne[0]);
1903251607Sdim  DAG.ComputeMaskedBits(Ops[1], KnownZero[1], KnownOne[1]);
1904251607Sdim
1905251607Sdim  // See if the upper 32 bits of one operand and the lower 32 bits of the
1906251607Sdim  // other are known zero.  They are the low and high operands respectively.
1907251607Sdim  uint64_t Masks[] = { KnownZero[0].getZExtValue(),
1908251607Sdim                       KnownZero[1].getZExtValue() };
1909251607Sdim  unsigned High, Low;
1910251607Sdim  if ((Masks[0] >> 32) == 0xffffffff && uint32_t(Masks[1]) == 0xffffffff)
1911251607Sdim    High = 1, Low = 0;
1912251607Sdim  else if ((Masks[1] >> 32) == 0xffffffff && uint32_t(Masks[0]) == 0xffffffff)
1913251607Sdim    High = 0, Low = 1;
1914251607Sdim  else
1915251607Sdim    return Op;
1916251607Sdim
1917251607Sdim  SDValue LowOp = Ops[Low];
1918251607Sdim  SDValue HighOp = Ops[High];
1919251607Sdim
1920251607Sdim  // If the high part is a constant, we're better off using IILH.
1921251607Sdim  if (HighOp.getOpcode() == ISD::Constant)
1922251607Sdim    return Op;
1923251607Sdim
1924251607Sdim  // If the low part is a constant that is outside the range of LHI,
1925251607Sdim  // then we're better off using IILF.
1926251607Sdim  if (LowOp.getOpcode() == ISD::Constant) {
1927251607Sdim    int64_t Value = int32_t(cast<ConstantSDNode>(LowOp)->getZExtValue());
1928251607Sdim    if (!isInt<16>(Value))
1929251607Sdim      return Op;
1930251607Sdim  }
1931251607Sdim
1932251607Sdim  // Check whether the high part is an AND that doesn't change the
1933251607Sdim  // high 32 bits and just masks out low bits.  We can skip it if so.
1934251607Sdim  if (HighOp.getOpcode() == ISD::AND &&
1935251607Sdim      HighOp.getOperand(1).getOpcode() == ISD::Constant) {
1936263509Sdim    SDValue HighOp0 = HighOp.getOperand(0);
1937263509Sdim    uint64_t Mask = cast<ConstantSDNode>(HighOp.getOperand(1))->getZExtValue();
1938263509Sdim    if (DAG.MaskedValueIsZero(HighOp0, APInt(64, ~(Mask | 0xffffffff))))
1939263509Sdim      HighOp = HighOp0;
1940251607Sdim  }
1941251607Sdim
1942251607Sdim  // Take advantage of the fact that all GR32 operations only change the
1943251607Sdim  // low 32 bits by truncating Low to an i32 and inserting it directly
1944251607Sdim  // using a subreg.  The interesting cases are those where the truncation
1945251607Sdim  // can be folded.
1946263509Sdim  SDLoc DL(Op);
1947251607Sdim  SDValue Low32 = DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, LowOp);
1948263509Sdim  return DAG.getTargetInsertSubreg(SystemZ::subreg_l32, DL,
1949263509Sdim                                   MVT::i64, HighOp, Low32);
1950251607Sdim}
1951251607Sdim
1952251607Sdim// Op is an 8-, 16-bit or 32-bit ATOMIC_LOAD_* operation.  Lower the first
1953251607Sdim// two into the fullword ATOMIC_LOADW_* operation given by Opcode.
1954251607SdimSDValue SystemZTargetLowering::lowerATOMIC_LOAD(SDValue Op,
1955251607Sdim                                                SelectionDAG &DAG,
1956251607Sdim                                                unsigned Opcode) const {
1957251607Sdim  AtomicSDNode *Node = cast<AtomicSDNode>(Op.getNode());
1958251607Sdim
1959251607Sdim  // 32-bit operations need no code outside the main loop.
1960251607Sdim  EVT NarrowVT = Node->getMemoryVT();
1961251607Sdim  EVT WideVT = MVT::i32;
1962251607Sdim  if (NarrowVT == WideVT)
1963251607Sdim    return Op;
1964251607Sdim
1965251607Sdim  int64_t BitSize = NarrowVT.getSizeInBits();
1966251607Sdim  SDValue ChainIn = Node->getChain();
1967251607Sdim  SDValue Addr = Node->getBasePtr();
1968251607Sdim  SDValue Src2 = Node->getVal();
1969251607Sdim  MachineMemOperand *MMO = Node->getMemOperand();
1970263509Sdim  SDLoc DL(Node);
1971251607Sdim  EVT PtrVT = Addr.getValueType();
1972251607Sdim
1973251607Sdim  // Convert atomic subtracts of constants into additions.
1974251607Sdim  if (Opcode == SystemZISD::ATOMIC_LOADW_SUB)
1975251607Sdim    if (ConstantSDNode *Const = dyn_cast<ConstantSDNode>(Src2)) {
1976251607Sdim      Opcode = SystemZISD::ATOMIC_LOADW_ADD;
1977251607Sdim      Src2 = DAG.getConstant(-Const->getSExtValue(), Src2.getValueType());
1978251607Sdim    }
1979251607Sdim
1980251607Sdim  // Get the address of the containing word.
1981251607Sdim  SDValue AlignedAddr = DAG.getNode(ISD::AND, DL, PtrVT, Addr,
1982251607Sdim                                    DAG.getConstant(-4, PtrVT));
1983251607Sdim
1984251607Sdim  // Get the number of bits that the word must be rotated left in order
1985251607Sdim  // to bring the field to the top bits of a GR32.
1986251607Sdim  SDValue BitShift = DAG.getNode(ISD::SHL, DL, PtrVT, Addr,
1987251607Sdim                                 DAG.getConstant(3, PtrVT));
1988251607Sdim  BitShift = DAG.getNode(ISD::TRUNCATE, DL, WideVT, BitShift);
1989251607Sdim
1990251607Sdim  // Get the complementing shift amount, for rotating a field in the top
1991251607Sdim  // bits back to its proper position.
1992251607Sdim  SDValue NegBitShift = DAG.getNode(ISD::SUB, DL, WideVT,
1993251607Sdim                                    DAG.getConstant(0, WideVT), BitShift);
1994251607Sdim
1995251607Sdim  // Extend the source operand to 32 bits and prepare it for the inner loop.
1996251607Sdim  // ATOMIC_SWAPW uses RISBG to rotate the field left, but all other
1997251607Sdim  // operations require the source to be shifted in advance.  (This shift
1998251607Sdim  // can be folded if the source is constant.)  For AND and NAND, the lower
1999251607Sdim  // bits must be set, while for other opcodes they should be left clear.
2000251607Sdim  if (Opcode != SystemZISD::ATOMIC_SWAPW)
2001251607Sdim    Src2 = DAG.getNode(ISD::SHL, DL, WideVT, Src2,
2002251607Sdim                       DAG.getConstant(32 - BitSize, WideVT));
2003251607Sdim  if (Opcode == SystemZISD::ATOMIC_LOADW_AND ||
2004251607Sdim      Opcode == SystemZISD::ATOMIC_LOADW_NAND)
2005251607Sdim    Src2 = DAG.getNode(ISD::OR, DL, WideVT, Src2,
2006251607Sdim                       DAG.getConstant(uint32_t(-1) >> BitSize, WideVT));
2007251607Sdim
2008251607Sdim  // Construct the ATOMIC_LOADW_* node.
2009251607Sdim  SDVTList VTList = DAG.getVTList(WideVT, MVT::Other);
2010251607Sdim  SDValue Ops[] = { ChainIn, AlignedAddr, Src2, BitShift, NegBitShift,
2011251607Sdim                    DAG.getConstant(BitSize, WideVT) };
2012251607Sdim  SDValue AtomicOp = DAG.getMemIntrinsicNode(Opcode, DL, VTList, Ops,
2013251607Sdim                                             array_lengthof(Ops),
2014251607Sdim                                             NarrowVT, MMO);
2015251607Sdim
2016251607Sdim  // Rotate the result of the final CS so that the field is in the lower
2017251607Sdim  // bits of a GR32, then truncate it.
2018251607Sdim  SDValue ResultShift = DAG.getNode(ISD::ADD, DL, WideVT, BitShift,
2019251607Sdim                                    DAG.getConstant(BitSize, WideVT));
2020251607Sdim  SDValue Result = DAG.getNode(ISD::ROTL, DL, WideVT, AtomicOp, ResultShift);
2021251607Sdim
2022251607Sdim  SDValue RetOps[2] = { Result, AtomicOp.getValue(1) };
2023251607Sdim  return DAG.getMergeValues(RetOps, 2, DL);
2024251607Sdim}
2025251607Sdim
2026251607Sdim// Node is an 8- or 16-bit ATOMIC_CMP_SWAP operation.  Lower the first two
2027251607Sdim// into a fullword ATOMIC_CMP_SWAPW operation.
2028251607SdimSDValue SystemZTargetLowering::lowerATOMIC_CMP_SWAP(SDValue Op,
2029251607Sdim                                                    SelectionDAG &DAG) const {
2030251607Sdim  AtomicSDNode *Node = cast<AtomicSDNode>(Op.getNode());
2031251607Sdim
2032251607Sdim  // We have native support for 32-bit compare and swap.
2033251607Sdim  EVT NarrowVT = Node->getMemoryVT();
2034251607Sdim  EVT WideVT = MVT::i32;
2035251607Sdim  if (NarrowVT == WideVT)
2036251607Sdim    return Op;
2037251607Sdim
2038251607Sdim  int64_t BitSize = NarrowVT.getSizeInBits();
2039251607Sdim  SDValue ChainIn = Node->getOperand(0);
2040251607Sdim  SDValue Addr = Node->getOperand(1);
2041251607Sdim  SDValue CmpVal = Node->getOperand(2);
2042251607Sdim  SDValue SwapVal = Node->getOperand(3);
2043251607Sdim  MachineMemOperand *MMO = Node->getMemOperand();
2044263509Sdim  SDLoc DL(Node);
2045251607Sdim  EVT PtrVT = Addr.getValueType();
2046251607Sdim
2047251607Sdim  // Get the address of the containing word.
2048251607Sdim  SDValue AlignedAddr = DAG.getNode(ISD::AND, DL, PtrVT, Addr,
2049251607Sdim                                    DAG.getConstant(-4, PtrVT));
2050251607Sdim
2051251607Sdim  // Get the number of bits that the word must be rotated left in order
2052251607Sdim  // to bring the field to the top bits of a GR32.
2053251607Sdim  SDValue BitShift = DAG.getNode(ISD::SHL, DL, PtrVT, Addr,
2054251607Sdim                                 DAG.getConstant(3, PtrVT));
2055251607Sdim  BitShift = DAG.getNode(ISD::TRUNCATE, DL, WideVT, BitShift);
2056251607Sdim
2057251607Sdim  // Get the complementing shift amount, for rotating a field in the top
2058251607Sdim  // bits back to its proper position.
2059251607Sdim  SDValue NegBitShift = DAG.getNode(ISD::SUB, DL, WideVT,
2060251607Sdim                                    DAG.getConstant(0, WideVT), BitShift);
2061251607Sdim
2062251607Sdim  // Construct the ATOMIC_CMP_SWAPW node.
2063251607Sdim  SDVTList VTList = DAG.getVTList(WideVT, MVT::Other);
2064251607Sdim  SDValue Ops[] = { ChainIn, AlignedAddr, CmpVal, SwapVal, BitShift,
2065251607Sdim                    NegBitShift, DAG.getConstant(BitSize, WideVT) };
2066251607Sdim  SDValue AtomicOp = DAG.getMemIntrinsicNode(SystemZISD::ATOMIC_CMP_SWAPW, DL,
2067251607Sdim                                             VTList, Ops, array_lengthof(Ops),
2068251607Sdim                                             NarrowVT, MMO);
2069251607Sdim  return AtomicOp;
2070251607Sdim}
2071251607Sdim
2072251607SdimSDValue SystemZTargetLowering::lowerSTACKSAVE(SDValue Op,
2073251607Sdim                                              SelectionDAG &DAG) const {
2074251607Sdim  MachineFunction &MF = DAG.getMachineFunction();
2075251607Sdim  MF.getInfo<SystemZMachineFunctionInfo>()->setManipulatesSP(true);
2076263509Sdim  return DAG.getCopyFromReg(Op.getOperand(0), SDLoc(Op),
2077251607Sdim                            SystemZ::R15D, Op.getValueType());
2078251607Sdim}
2079251607Sdim
2080251607SdimSDValue SystemZTargetLowering::lowerSTACKRESTORE(SDValue Op,
2081251607Sdim                                                 SelectionDAG &DAG) const {
2082251607Sdim  MachineFunction &MF = DAG.getMachineFunction();
2083251607Sdim  MF.getInfo<SystemZMachineFunctionInfo>()->setManipulatesSP(true);
2084263509Sdim  return DAG.getCopyToReg(Op.getOperand(0), SDLoc(Op),
2085251607Sdim                          SystemZ::R15D, Op.getOperand(1));
2086251607Sdim}
2087251607Sdim
2088263509SdimSDValue SystemZTargetLowering::lowerPREFETCH(SDValue Op,
2089263509Sdim                                             SelectionDAG &DAG) const {
2090263509Sdim  bool IsData = cast<ConstantSDNode>(Op.getOperand(4))->getZExtValue();
2091263509Sdim  if (!IsData)
2092263509Sdim    // Just preserve the chain.
2093263509Sdim    return Op.getOperand(0);
2094263509Sdim
2095263509Sdim  bool IsWrite = cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue();
2096263509Sdim  unsigned Code = IsWrite ? SystemZ::PFD_WRITE : SystemZ::PFD_READ;
2097263509Sdim  MemIntrinsicSDNode *Node = cast<MemIntrinsicSDNode>(Op.getNode());
2098263509Sdim  SDValue Ops[] = {
2099263509Sdim    Op.getOperand(0),
2100263509Sdim    DAG.getConstant(Code, MVT::i32),
2101263509Sdim    Op.getOperand(1)
2102263509Sdim  };
2103263509Sdim  return DAG.getMemIntrinsicNode(SystemZISD::PREFETCH, SDLoc(Op),
2104263509Sdim                                 Node->getVTList(), Ops, array_lengthof(Ops),
2105263509Sdim                                 Node->getMemoryVT(), Node->getMemOperand());
2106263509Sdim}
2107263509Sdim
2108251607SdimSDValue SystemZTargetLowering::LowerOperation(SDValue Op,
2109251607Sdim                                              SelectionDAG &DAG) const {
2110251607Sdim  switch (Op.getOpcode()) {
2111251607Sdim  case ISD::BR_CC:
2112251607Sdim    return lowerBR_CC(Op, DAG);
2113251607Sdim  case ISD::SELECT_CC:
2114251607Sdim    return lowerSELECT_CC(Op, DAG);
2115263509Sdim  case ISD::SETCC:
2116263509Sdim    return lowerSETCC(Op, DAG);
2117251607Sdim  case ISD::GlobalAddress:
2118251607Sdim    return lowerGlobalAddress(cast<GlobalAddressSDNode>(Op), DAG);
2119251607Sdim  case ISD::GlobalTLSAddress:
2120251607Sdim    return lowerGlobalTLSAddress(cast<GlobalAddressSDNode>(Op), DAG);
2121251607Sdim  case ISD::BlockAddress:
2122251607Sdim    return lowerBlockAddress(cast<BlockAddressSDNode>(Op), DAG);
2123251607Sdim  case ISD::JumpTable:
2124251607Sdim    return lowerJumpTable(cast<JumpTableSDNode>(Op), DAG);
2125251607Sdim  case ISD::ConstantPool:
2126251607Sdim    return lowerConstantPool(cast<ConstantPoolSDNode>(Op), DAG);
2127251607Sdim  case ISD::BITCAST:
2128251607Sdim    return lowerBITCAST(Op, DAG);
2129251607Sdim  case ISD::VASTART:
2130251607Sdim    return lowerVASTART(Op, DAG);
2131251607Sdim  case ISD::VACOPY:
2132251607Sdim    return lowerVACOPY(Op, DAG);
2133251607Sdim  case ISD::DYNAMIC_STACKALLOC:
2134251607Sdim    return lowerDYNAMIC_STACKALLOC(Op, DAG);
2135263509Sdim  case ISD::SMUL_LOHI:
2136263509Sdim    return lowerSMUL_LOHI(Op, DAG);
2137251607Sdim  case ISD::UMUL_LOHI:
2138251607Sdim    return lowerUMUL_LOHI(Op, DAG);
2139251607Sdim  case ISD::SDIVREM:
2140251607Sdim    return lowerSDIVREM(Op, DAG);
2141251607Sdim  case ISD::UDIVREM:
2142251607Sdim    return lowerUDIVREM(Op, DAG);
2143251607Sdim  case ISD::OR:
2144251607Sdim    return lowerOR(Op, DAG);
2145251607Sdim  case ISD::ATOMIC_SWAP:
2146251607Sdim    return lowerATOMIC_LOAD(Op, DAG, SystemZISD::ATOMIC_SWAPW);
2147251607Sdim  case ISD::ATOMIC_LOAD_ADD:
2148251607Sdim    return lowerATOMIC_LOAD(Op, DAG, SystemZISD::ATOMIC_LOADW_ADD);
2149251607Sdim  case ISD::ATOMIC_LOAD_SUB:
2150251607Sdim    return lowerATOMIC_LOAD(Op, DAG, SystemZISD::ATOMIC_LOADW_SUB);
2151251607Sdim  case ISD::ATOMIC_LOAD_AND:
2152251607Sdim    return lowerATOMIC_LOAD(Op, DAG, SystemZISD::ATOMIC_LOADW_AND);
2153251607Sdim  case ISD::ATOMIC_LOAD_OR:
2154251607Sdim    return lowerATOMIC_LOAD(Op, DAG, SystemZISD::ATOMIC_LOADW_OR);
2155251607Sdim  case ISD::ATOMIC_LOAD_XOR:
2156251607Sdim    return lowerATOMIC_LOAD(Op, DAG, SystemZISD::ATOMIC_LOADW_XOR);
2157251607Sdim  case ISD::ATOMIC_LOAD_NAND:
2158251607Sdim    return lowerATOMIC_LOAD(Op, DAG, SystemZISD::ATOMIC_LOADW_NAND);
2159251607Sdim  case ISD::ATOMIC_LOAD_MIN:
2160251607Sdim    return lowerATOMIC_LOAD(Op, DAG, SystemZISD::ATOMIC_LOADW_MIN);
2161251607Sdim  case ISD::ATOMIC_LOAD_MAX:
2162251607Sdim    return lowerATOMIC_LOAD(Op, DAG, SystemZISD::ATOMIC_LOADW_MAX);
2163251607Sdim  case ISD::ATOMIC_LOAD_UMIN:
2164251607Sdim    return lowerATOMIC_LOAD(Op, DAG, SystemZISD::ATOMIC_LOADW_UMIN);
2165251607Sdim  case ISD::ATOMIC_LOAD_UMAX:
2166251607Sdim    return lowerATOMIC_LOAD(Op, DAG, SystemZISD::ATOMIC_LOADW_UMAX);
2167251607Sdim  case ISD::ATOMIC_CMP_SWAP:
2168251607Sdim    return lowerATOMIC_CMP_SWAP(Op, DAG);
2169251607Sdim  case ISD::STACKSAVE:
2170251607Sdim    return lowerSTACKSAVE(Op, DAG);
2171251607Sdim  case ISD::STACKRESTORE:
2172251607Sdim    return lowerSTACKRESTORE(Op, DAG);
2173263509Sdim  case ISD::PREFETCH:
2174263509Sdim    return lowerPREFETCH(Op, DAG);
2175251607Sdim  default:
2176251607Sdim    llvm_unreachable("Unexpected node to lower");
2177251607Sdim  }
2178251607Sdim}
2179251607Sdim
2180251607Sdimconst char *SystemZTargetLowering::getTargetNodeName(unsigned Opcode) const {
2181251607Sdim#define OPCODE(NAME) case SystemZISD::NAME: return "SystemZISD::" #NAME
2182251607Sdim  switch (Opcode) {
2183251607Sdim    OPCODE(RET_FLAG);
2184251607Sdim    OPCODE(CALL);
2185263509Sdim    OPCODE(SIBCALL);
2186251607Sdim    OPCODE(PCREL_WRAPPER);
2187263509Sdim    OPCODE(PCREL_OFFSET);
2188263509Sdim    OPCODE(ICMP);
2189263509Sdim    OPCODE(FCMP);
2190263509Sdim    OPCODE(TM);
2191251607Sdim    OPCODE(BR_CCMASK);
2192251607Sdim    OPCODE(SELECT_CCMASK);
2193251607Sdim    OPCODE(ADJDYNALLOC);
2194251607Sdim    OPCODE(EXTRACT_ACCESS);
2195251607Sdim    OPCODE(UMUL_LOHI64);
2196251607Sdim    OPCODE(SDIVREM64);
2197251607Sdim    OPCODE(UDIVREM32);
2198251607Sdim    OPCODE(UDIVREM64);
2199263509Sdim    OPCODE(MVC);
2200263509Sdim    OPCODE(MVC_LOOP);
2201263509Sdim    OPCODE(NC);
2202263509Sdim    OPCODE(NC_LOOP);
2203263509Sdim    OPCODE(OC);
2204263509Sdim    OPCODE(OC_LOOP);
2205263509Sdim    OPCODE(XC);
2206263509Sdim    OPCODE(XC_LOOP);
2207263509Sdim    OPCODE(CLC);
2208263509Sdim    OPCODE(CLC_LOOP);
2209263509Sdim    OPCODE(STRCMP);
2210263509Sdim    OPCODE(STPCPY);
2211263509Sdim    OPCODE(SEARCH_STRING);
2212263509Sdim    OPCODE(IPM);
2213251607Sdim    OPCODE(ATOMIC_SWAPW);
2214251607Sdim    OPCODE(ATOMIC_LOADW_ADD);
2215251607Sdim    OPCODE(ATOMIC_LOADW_SUB);
2216251607Sdim    OPCODE(ATOMIC_LOADW_AND);
2217251607Sdim    OPCODE(ATOMIC_LOADW_OR);
2218251607Sdim    OPCODE(ATOMIC_LOADW_XOR);
2219251607Sdim    OPCODE(ATOMIC_LOADW_NAND);
2220251607Sdim    OPCODE(ATOMIC_LOADW_MIN);
2221251607Sdim    OPCODE(ATOMIC_LOADW_MAX);
2222251607Sdim    OPCODE(ATOMIC_LOADW_UMIN);
2223251607Sdim    OPCODE(ATOMIC_LOADW_UMAX);
2224251607Sdim    OPCODE(ATOMIC_CMP_SWAPW);
2225263509Sdim    OPCODE(PREFETCH);
2226251607Sdim  }
2227251607Sdim  return NULL;
2228251607Sdim#undef OPCODE
2229251607Sdim}
2230251607Sdim
2231251607Sdim//===----------------------------------------------------------------------===//
2232251607Sdim// Custom insertion
2233251607Sdim//===----------------------------------------------------------------------===//
2234251607Sdim
2235251607Sdim// Create a new basic block after MBB.
2236251607Sdimstatic MachineBasicBlock *emitBlockAfter(MachineBasicBlock *MBB) {
2237251607Sdim  MachineFunction &MF = *MBB->getParent();
2238251607Sdim  MachineBasicBlock *NewMBB = MF.CreateMachineBasicBlock(MBB->getBasicBlock());
2239251607Sdim  MF.insert(llvm::next(MachineFunction::iterator(MBB)), NewMBB);
2240251607Sdim  return NewMBB;
2241251607Sdim}
2242251607Sdim
2243251607Sdim// Split MBB after MI and return the new block (the one that contains
2244251607Sdim// instructions after MI).
2245251607Sdimstatic MachineBasicBlock *splitBlockAfter(MachineInstr *MI,
2246251607Sdim                                          MachineBasicBlock *MBB) {
2247251607Sdim  MachineBasicBlock *NewMBB = emitBlockAfter(MBB);
2248251607Sdim  NewMBB->splice(NewMBB->begin(), MBB,
2249251607Sdim                 llvm::next(MachineBasicBlock::iterator(MI)),
2250251607Sdim                 MBB->end());
2251251607Sdim  NewMBB->transferSuccessorsAndUpdatePHIs(MBB);
2252251607Sdim  return NewMBB;
2253251607Sdim}
2254251607Sdim
2255263509Sdim// Split MBB before MI and return the new block (the one that contains MI).
2256263509Sdimstatic MachineBasicBlock *splitBlockBefore(MachineInstr *MI,
2257263509Sdim                                           MachineBasicBlock *MBB) {
2258263509Sdim  MachineBasicBlock *NewMBB = emitBlockAfter(MBB);
2259263509Sdim  NewMBB->splice(NewMBB->begin(), MBB, MI, MBB->end());
2260263509Sdim  NewMBB->transferSuccessorsAndUpdatePHIs(MBB);
2261263509Sdim  return NewMBB;
2262263509Sdim}
2263263509Sdim
2264263509Sdim// Force base value Base into a register before MI.  Return the register.
2265263509Sdimstatic unsigned forceReg(MachineInstr *MI, MachineOperand &Base,
2266263509Sdim                         const SystemZInstrInfo *TII) {
2267263509Sdim  if (Base.isReg())
2268263509Sdim    return Base.getReg();
2269263509Sdim
2270263509Sdim  MachineBasicBlock *MBB = MI->getParent();
2271263509Sdim  MachineFunction &MF = *MBB->getParent();
2272263509Sdim  MachineRegisterInfo &MRI = MF.getRegInfo();
2273263509Sdim
2274263509Sdim  unsigned Reg = MRI.createVirtualRegister(&SystemZ::ADDR64BitRegClass);
2275263509Sdim  BuildMI(*MBB, MI, MI->getDebugLoc(), TII->get(SystemZ::LA), Reg)
2276263509Sdim    .addOperand(Base).addImm(0).addReg(0);
2277263509Sdim  return Reg;
2278263509Sdim}
2279263509Sdim
2280251607Sdim// Implement EmitInstrWithCustomInserter for pseudo Select* instruction MI.
2281251607SdimMachineBasicBlock *
2282251607SdimSystemZTargetLowering::emitSelect(MachineInstr *MI,
2283251607Sdim                                  MachineBasicBlock *MBB) const {
2284251607Sdim  const SystemZInstrInfo *TII = TM.getInstrInfo();
2285251607Sdim
2286251607Sdim  unsigned DestReg  = MI->getOperand(0).getReg();
2287251607Sdim  unsigned TrueReg  = MI->getOperand(1).getReg();
2288251607Sdim  unsigned FalseReg = MI->getOperand(2).getReg();
2289263509Sdim  unsigned CCValid  = MI->getOperand(3).getImm();
2290263509Sdim  unsigned CCMask   = MI->getOperand(4).getImm();
2291251607Sdim  DebugLoc DL       = MI->getDebugLoc();
2292251607Sdim
2293251607Sdim  MachineBasicBlock *StartMBB = MBB;
2294263509Sdim  MachineBasicBlock *JoinMBB  = splitBlockBefore(MI, MBB);
2295251607Sdim  MachineBasicBlock *FalseMBB = emitBlockAfter(StartMBB);
2296251607Sdim
2297251607Sdim  //  StartMBB:
2298263509Sdim  //   BRC CCMask, JoinMBB
2299251607Sdim  //   # fallthrough to FalseMBB
2300251607Sdim  MBB = StartMBB;
2301263509Sdim  BuildMI(MBB, DL, TII->get(SystemZ::BRC))
2302263509Sdim    .addImm(CCValid).addImm(CCMask).addMBB(JoinMBB);
2303251607Sdim  MBB->addSuccessor(JoinMBB);
2304251607Sdim  MBB->addSuccessor(FalseMBB);
2305251607Sdim
2306251607Sdim  //  FalseMBB:
2307251607Sdim  //   # fallthrough to JoinMBB
2308251607Sdim  MBB = FalseMBB;
2309251607Sdim  MBB->addSuccessor(JoinMBB);
2310251607Sdim
2311251607Sdim  //  JoinMBB:
2312251607Sdim  //   %Result = phi [ %FalseReg, FalseMBB ], [ %TrueReg, StartMBB ]
2313251607Sdim  //  ...
2314251607Sdim  MBB = JoinMBB;
2315263509Sdim  BuildMI(*MBB, MI, DL, TII->get(SystemZ::PHI), DestReg)
2316251607Sdim    .addReg(TrueReg).addMBB(StartMBB)
2317251607Sdim    .addReg(FalseReg).addMBB(FalseMBB);
2318251607Sdim
2319251607Sdim  MI->eraseFromParent();
2320251607Sdim  return JoinMBB;
2321251607Sdim}
2322251607Sdim
2323263509Sdim// Implement EmitInstrWithCustomInserter for pseudo CondStore* instruction MI.
2324263509Sdim// StoreOpcode is the store to use and Invert says whether the store should
2325263509Sdim// happen when the condition is false rather than true.  If a STORE ON
2326263509Sdim// CONDITION is available, STOCOpcode is its opcode, otherwise it is 0.
2327263509SdimMachineBasicBlock *
2328263509SdimSystemZTargetLowering::emitCondStore(MachineInstr *MI,
2329263509Sdim                                     MachineBasicBlock *MBB,
2330263509Sdim                                     unsigned StoreOpcode, unsigned STOCOpcode,
2331263509Sdim                                     bool Invert) const {
2332263509Sdim  const SystemZInstrInfo *TII = TM.getInstrInfo();
2333263509Sdim
2334263509Sdim  unsigned SrcReg     = MI->getOperand(0).getReg();
2335263509Sdim  MachineOperand Base = MI->getOperand(1);
2336263509Sdim  int64_t Disp        = MI->getOperand(2).getImm();
2337263509Sdim  unsigned IndexReg   = MI->getOperand(3).getReg();
2338263509Sdim  unsigned CCValid    = MI->getOperand(4).getImm();
2339263509Sdim  unsigned CCMask     = MI->getOperand(5).getImm();
2340263509Sdim  DebugLoc DL         = MI->getDebugLoc();
2341263509Sdim
2342263509Sdim  StoreOpcode = TII->getOpcodeForOffset(StoreOpcode, Disp);
2343263509Sdim
2344263509Sdim  // Use STOCOpcode if possible.  We could use different store patterns in
2345263509Sdim  // order to avoid matching the index register, but the performance trade-offs
2346263509Sdim  // might be more complicated in that case.
2347263509Sdim  if (STOCOpcode && !IndexReg && TM.getSubtargetImpl()->hasLoadStoreOnCond()) {
2348263509Sdim    if (Invert)
2349263509Sdim      CCMask ^= CCValid;
2350263509Sdim    BuildMI(*MBB, MI, DL, TII->get(STOCOpcode))
2351263509Sdim      .addReg(SrcReg).addOperand(Base).addImm(Disp)
2352263509Sdim      .addImm(CCValid).addImm(CCMask);
2353263509Sdim    MI->eraseFromParent();
2354263509Sdim    return MBB;
2355263509Sdim  }
2356263509Sdim
2357263509Sdim  // Get the condition needed to branch around the store.
2358263509Sdim  if (!Invert)
2359263509Sdim    CCMask ^= CCValid;
2360263509Sdim
2361263509Sdim  MachineBasicBlock *StartMBB = MBB;
2362263509Sdim  MachineBasicBlock *JoinMBB  = splitBlockBefore(MI, MBB);
2363263509Sdim  MachineBasicBlock *FalseMBB = emitBlockAfter(StartMBB);
2364263509Sdim
2365263509Sdim  //  StartMBB:
2366263509Sdim  //   BRC CCMask, JoinMBB
2367263509Sdim  //   # fallthrough to FalseMBB
2368263509Sdim  MBB = StartMBB;
2369263509Sdim  BuildMI(MBB, DL, TII->get(SystemZ::BRC))
2370263509Sdim    .addImm(CCValid).addImm(CCMask).addMBB(JoinMBB);
2371263509Sdim  MBB->addSuccessor(JoinMBB);
2372263509Sdim  MBB->addSuccessor(FalseMBB);
2373263509Sdim
2374263509Sdim  //  FalseMBB:
2375263509Sdim  //   store %SrcReg, %Disp(%Index,%Base)
2376263509Sdim  //   # fallthrough to JoinMBB
2377263509Sdim  MBB = FalseMBB;
2378263509Sdim  BuildMI(MBB, DL, TII->get(StoreOpcode))
2379263509Sdim    .addReg(SrcReg).addOperand(Base).addImm(Disp).addReg(IndexReg);
2380263509Sdim  MBB->addSuccessor(JoinMBB);
2381263509Sdim
2382263509Sdim  MI->eraseFromParent();
2383263509Sdim  return JoinMBB;
2384263509Sdim}
2385263509Sdim
2386251607Sdim// Implement EmitInstrWithCustomInserter for pseudo ATOMIC_LOAD{,W}_*
2387251607Sdim// or ATOMIC_SWAP{,W} instruction MI.  BinOpcode is the instruction that
2388251607Sdim// performs the binary operation elided by "*", or 0 for ATOMIC_SWAP{,W}.
2389251607Sdim// BitSize is the width of the field in bits, or 0 if this is a partword
2390251607Sdim// ATOMIC_LOADW_* or ATOMIC_SWAPW instruction, in which case the bitsize
2391251607Sdim// is one of the operands.  Invert says whether the field should be
2392251607Sdim// inverted after performing BinOpcode (e.g. for NAND).
2393251607SdimMachineBasicBlock *
2394251607SdimSystemZTargetLowering::emitAtomicLoadBinary(MachineInstr *MI,
2395251607Sdim                                            MachineBasicBlock *MBB,
2396251607Sdim                                            unsigned BinOpcode,
2397251607Sdim                                            unsigned BitSize,
2398251607Sdim                                            bool Invert) const {
2399251607Sdim  const SystemZInstrInfo *TII = TM.getInstrInfo();
2400251607Sdim  MachineFunction &MF = *MBB->getParent();
2401251607Sdim  MachineRegisterInfo &MRI = MF.getRegInfo();
2402251607Sdim  bool IsSubWord = (BitSize < 32);
2403251607Sdim
2404251607Sdim  // Extract the operands.  Base can be a register or a frame index.
2405251607Sdim  // Src2 can be a register or immediate.
2406251607Sdim  unsigned Dest        = MI->getOperand(0).getReg();
2407251607Sdim  MachineOperand Base  = earlyUseOperand(MI->getOperand(1));
2408251607Sdim  int64_t Disp         = MI->getOperand(2).getImm();
2409251607Sdim  MachineOperand Src2  = earlyUseOperand(MI->getOperand(3));
2410251607Sdim  unsigned BitShift    = (IsSubWord ? MI->getOperand(4).getReg() : 0);
2411251607Sdim  unsigned NegBitShift = (IsSubWord ? MI->getOperand(5).getReg() : 0);
2412251607Sdim  DebugLoc DL          = MI->getDebugLoc();
2413251607Sdim  if (IsSubWord)
2414251607Sdim    BitSize = MI->getOperand(6).getImm();
2415251607Sdim
2416251607Sdim  // Subword operations use 32-bit registers.
2417251607Sdim  const TargetRegisterClass *RC = (BitSize <= 32 ?
2418251607Sdim                                   &SystemZ::GR32BitRegClass :
2419251607Sdim                                   &SystemZ::GR64BitRegClass);
2420251607Sdim  unsigned LOpcode  = BitSize <= 32 ? SystemZ::L  : SystemZ::LG;
2421251607Sdim  unsigned CSOpcode = BitSize <= 32 ? SystemZ::CS : SystemZ::CSG;
2422251607Sdim
2423251607Sdim  // Get the right opcodes for the displacement.
2424251607Sdim  LOpcode  = TII->getOpcodeForOffset(LOpcode,  Disp);
2425251607Sdim  CSOpcode = TII->getOpcodeForOffset(CSOpcode, Disp);
2426251607Sdim  assert(LOpcode && CSOpcode && "Displacement out of range");
2427251607Sdim
2428251607Sdim  // Create virtual registers for temporary results.
2429251607Sdim  unsigned OrigVal       = MRI.createVirtualRegister(RC);
2430251607Sdim  unsigned OldVal        = MRI.createVirtualRegister(RC);
2431251607Sdim  unsigned NewVal        = (BinOpcode || IsSubWord ?
2432251607Sdim                            MRI.createVirtualRegister(RC) : Src2.getReg());
2433251607Sdim  unsigned RotatedOldVal = (IsSubWord ? MRI.createVirtualRegister(RC) : OldVal);
2434251607Sdim  unsigned RotatedNewVal = (IsSubWord ? MRI.createVirtualRegister(RC) : NewVal);
2435251607Sdim
2436251607Sdim  // Insert a basic block for the main loop.
2437251607Sdim  MachineBasicBlock *StartMBB = MBB;
2438263509Sdim  MachineBasicBlock *DoneMBB  = splitBlockBefore(MI, MBB);
2439251607Sdim  MachineBasicBlock *LoopMBB  = emitBlockAfter(StartMBB);
2440251607Sdim
2441251607Sdim  //  StartMBB:
2442251607Sdim  //   ...
2443251607Sdim  //   %OrigVal = L Disp(%Base)
2444251607Sdim  //   # fall through to LoopMMB
2445251607Sdim  MBB = StartMBB;
2446251607Sdim  BuildMI(MBB, DL, TII->get(LOpcode), OrigVal)
2447251607Sdim    .addOperand(Base).addImm(Disp).addReg(0);
2448251607Sdim  MBB->addSuccessor(LoopMBB);
2449251607Sdim
2450251607Sdim  //  LoopMBB:
2451251607Sdim  //   %OldVal        = phi [ %OrigVal, StartMBB ], [ %Dest, LoopMBB ]
2452251607Sdim  //   %RotatedOldVal = RLL %OldVal, 0(%BitShift)
2453251607Sdim  //   %RotatedNewVal = OP %RotatedOldVal, %Src2
2454251607Sdim  //   %NewVal        = RLL %RotatedNewVal, 0(%NegBitShift)
2455251607Sdim  //   %Dest          = CS %OldVal, %NewVal, Disp(%Base)
2456251607Sdim  //   JNE LoopMBB
2457251607Sdim  //   # fall through to DoneMMB
2458251607Sdim  MBB = LoopMBB;
2459251607Sdim  BuildMI(MBB, DL, TII->get(SystemZ::PHI), OldVal)
2460251607Sdim    .addReg(OrigVal).addMBB(StartMBB)
2461251607Sdim    .addReg(Dest).addMBB(LoopMBB);
2462251607Sdim  if (IsSubWord)
2463251607Sdim    BuildMI(MBB, DL, TII->get(SystemZ::RLL), RotatedOldVal)
2464251607Sdim      .addReg(OldVal).addReg(BitShift).addImm(0);
2465251607Sdim  if (Invert) {
2466251607Sdim    // Perform the operation normally and then invert every bit of the field.
2467251607Sdim    unsigned Tmp = MRI.createVirtualRegister(RC);
2468251607Sdim    BuildMI(MBB, DL, TII->get(BinOpcode), Tmp)
2469251607Sdim      .addReg(RotatedOldVal).addOperand(Src2);
2470251607Sdim    if (BitSize < 32)
2471251607Sdim      // XILF with the upper BitSize bits set.
2472263509Sdim      BuildMI(MBB, DL, TII->get(SystemZ::XILF), RotatedNewVal)
2473251607Sdim        .addReg(Tmp).addImm(uint32_t(~0 << (32 - BitSize)));
2474251607Sdim    else if (BitSize == 32)
2475251607Sdim      // XILF with every bit set.
2476263509Sdim      BuildMI(MBB, DL, TII->get(SystemZ::XILF), RotatedNewVal)
2477251607Sdim        .addReg(Tmp).addImm(~uint32_t(0));
2478251607Sdim    else {
2479251607Sdim      // Use LCGR and add -1 to the result, which is more compact than
2480251607Sdim      // an XILF, XILH pair.
2481251607Sdim      unsigned Tmp2 = MRI.createVirtualRegister(RC);
2482251607Sdim      BuildMI(MBB, DL, TII->get(SystemZ::LCGR), Tmp2).addReg(Tmp);
2483251607Sdim      BuildMI(MBB, DL, TII->get(SystemZ::AGHI), RotatedNewVal)
2484251607Sdim        .addReg(Tmp2).addImm(-1);
2485251607Sdim    }
2486251607Sdim  } else if (BinOpcode)
2487251607Sdim    // A simply binary operation.
2488251607Sdim    BuildMI(MBB, DL, TII->get(BinOpcode), RotatedNewVal)
2489251607Sdim      .addReg(RotatedOldVal).addOperand(Src2);
2490251607Sdim  else if (IsSubWord)
2491251607Sdim    // Use RISBG to rotate Src2 into position and use it to replace the
2492251607Sdim    // field in RotatedOldVal.
2493251607Sdim    BuildMI(MBB, DL, TII->get(SystemZ::RISBG32), RotatedNewVal)
2494251607Sdim      .addReg(RotatedOldVal).addReg(Src2.getReg())
2495251607Sdim      .addImm(32).addImm(31 + BitSize).addImm(32 - BitSize);
2496251607Sdim  if (IsSubWord)
2497251607Sdim    BuildMI(MBB, DL, TII->get(SystemZ::RLL), NewVal)
2498251607Sdim      .addReg(RotatedNewVal).addReg(NegBitShift).addImm(0);
2499251607Sdim  BuildMI(MBB, DL, TII->get(CSOpcode), Dest)
2500251607Sdim    .addReg(OldVal).addReg(NewVal).addOperand(Base).addImm(Disp);
2501263509Sdim  BuildMI(MBB, DL, TII->get(SystemZ::BRC))
2502263509Sdim    .addImm(SystemZ::CCMASK_CS).addImm(SystemZ::CCMASK_CS_NE).addMBB(LoopMBB);
2503251607Sdim  MBB->addSuccessor(LoopMBB);
2504251607Sdim  MBB->addSuccessor(DoneMBB);
2505251607Sdim
2506251607Sdim  MI->eraseFromParent();
2507251607Sdim  return DoneMBB;
2508251607Sdim}
2509251607Sdim
2510251607Sdim// Implement EmitInstrWithCustomInserter for pseudo
2511251607Sdim// ATOMIC_LOAD{,W}_{,U}{MIN,MAX} instruction MI.  CompareOpcode is the
2512251607Sdim// instruction that should be used to compare the current field with the
2513251607Sdim// minimum or maximum value.  KeepOldMask is the BRC condition-code mask
2514251607Sdim// for when the current field should be kept.  BitSize is the width of
2515251607Sdim// the field in bits, or 0 if this is a partword ATOMIC_LOADW_* instruction.
2516251607SdimMachineBasicBlock *
2517251607SdimSystemZTargetLowering::emitAtomicLoadMinMax(MachineInstr *MI,
2518251607Sdim                                            MachineBasicBlock *MBB,
2519251607Sdim                                            unsigned CompareOpcode,
2520251607Sdim                                            unsigned KeepOldMask,
2521251607Sdim                                            unsigned BitSize) const {
2522251607Sdim  const SystemZInstrInfo *TII = TM.getInstrInfo();
2523251607Sdim  MachineFunction &MF = *MBB->getParent();
2524251607Sdim  MachineRegisterInfo &MRI = MF.getRegInfo();
2525251607Sdim  bool IsSubWord = (BitSize < 32);
2526251607Sdim
2527251607Sdim  // Extract the operands.  Base can be a register or a frame index.
2528251607Sdim  unsigned Dest        = MI->getOperand(0).getReg();
2529251607Sdim  MachineOperand Base  = earlyUseOperand(MI->getOperand(1));
2530251607Sdim  int64_t  Disp        = MI->getOperand(2).getImm();
2531251607Sdim  unsigned Src2        = MI->getOperand(3).getReg();
2532251607Sdim  unsigned BitShift    = (IsSubWord ? MI->getOperand(4).getReg() : 0);
2533251607Sdim  unsigned NegBitShift = (IsSubWord ? MI->getOperand(5).getReg() : 0);
2534251607Sdim  DebugLoc DL          = MI->getDebugLoc();
2535251607Sdim  if (IsSubWord)
2536251607Sdim    BitSize = MI->getOperand(6).getImm();
2537251607Sdim
2538251607Sdim  // Subword operations use 32-bit registers.
2539251607Sdim  const TargetRegisterClass *RC = (BitSize <= 32 ?
2540251607Sdim                                   &SystemZ::GR32BitRegClass :
2541251607Sdim                                   &SystemZ::GR64BitRegClass);
2542251607Sdim  unsigned LOpcode  = BitSize <= 32 ? SystemZ::L  : SystemZ::LG;
2543251607Sdim  unsigned CSOpcode = BitSize <= 32 ? SystemZ::CS : SystemZ::CSG;
2544251607Sdim
2545251607Sdim  // Get the right opcodes for the displacement.
2546251607Sdim  LOpcode  = TII->getOpcodeForOffset(LOpcode,  Disp);
2547251607Sdim  CSOpcode = TII->getOpcodeForOffset(CSOpcode, Disp);
2548251607Sdim  assert(LOpcode && CSOpcode && "Displacement out of range");
2549251607Sdim
2550251607Sdim  // Create virtual registers for temporary results.
2551251607Sdim  unsigned OrigVal       = MRI.createVirtualRegister(RC);
2552251607Sdim  unsigned OldVal        = MRI.createVirtualRegister(RC);
2553251607Sdim  unsigned NewVal        = MRI.createVirtualRegister(RC);
2554251607Sdim  unsigned RotatedOldVal = (IsSubWord ? MRI.createVirtualRegister(RC) : OldVal);
2555251607Sdim  unsigned RotatedAltVal = (IsSubWord ? MRI.createVirtualRegister(RC) : Src2);
2556251607Sdim  unsigned RotatedNewVal = (IsSubWord ? MRI.createVirtualRegister(RC) : NewVal);
2557251607Sdim
2558251607Sdim  // Insert 3 basic blocks for the loop.
2559251607Sdim  MachineBasicBlock *StartMBB  = MBB;
2560263509Sdim  MachineBasicBlock *DoneMBB   = splitBlockBefore(MI, MBB);
2561251607Sdim  MachineBasicBlock *LoopMBB   = emitBlockAfter(StartMBB);
2562251607Sdim  MachineBasicBlock *UseAltMBB = emitBlockAfter(LoopMBB);
2563251607Sdim  MachineBasicBlock *UpdateMBB = emitBlockAfter(UseAltMBB);
2564251607Sdim
2565251607Sdim  //  StartMBB:
2566251607Sdim  //   ...
2567251607Sdim  //   %OrigVal     = L Disp(%Base)
2568251607Sdim  //   # fall through to LoopMMB
2569251607Sdim  MBB = StartMBB;
2570251607Sdim  BuildMI(MBB, DL, TII->get(LOpcode), OrigVal)
2571251607Sdim    .addOperand(Base).addImm(Disp).addReg(0);
2572251607Sdim  MBB->addSuccessor(LoopMBB);
2573251607Sdim
2574251607Sdim  //  LoopMBB:
2575251607Sdim  //   %OldVal        = phi [ %OrigVal, StartMBB ], [ %Dest, UpdateMBB ]
2576251607Sdim  //   %RotatedOldVal = RLL %OldVal, 0(%BitShift)
2577251607Sdim  //   CompareOpcode %RotatedOldVal, %Src2
2578263509Sdim  //   BRC KeepOldMask, UpdateMBB
2579251607Sdim  MBB = LoopMBB;
2580251607Sdim  BuildMI(MBB, DL, TII->get(SystemZ::PHI), OldVal)
2581251607Sdim    .addReg(OrigVal).addMBB(StartMBB)
2582251607Sdim    .addReg(Dest).addMBB(UpdateMBB);
2583251607Sdim  if (IsSubWord)
2584251607Sdim    BuildMI(MBB, DL, TII->get(SystemZ::RLL), RotatedOldVal)
2585251607Sdim      .addReg(OldVal).addReg(BitShift).addImm(0);
2586251607Sdim  BuildMI(MBB, DL, TII->get(CompareOpcode))
2587251607Sdim    .addReg(RotatedOldVal).addReg(Src2);
2588263509Sdim  BuildMI(MBB, DL, TII->get(SystemZ::BRC))
2589263509Sdim    .addImm(SystemZ::CCMASK_ICMP).addImm(KeepOldMask).addMBB(UpdateMBB);
2590251607Sdim  MBB->addSuccessor(UpdateMBB);
2591251607Sdim  MBB->addSuccessor(UseAltMBB);
2592251607Sdim
2593251607Sdim  //  UseAltMBB:
2594251607Sdim  //   %RotatedAltVal = RISBG %RotatedOldVal, %Src2, 32, 31 + BitSize, 0
2595251607Sdim  //   # fall through to UpdateMMB
2596251607Sdim  MBB = UseAltMBB;
2597251607Sdim  if (IsSubWord)
2598251607Sdim    BuildMI(MBB, DL, TII->get(SystemZ::RISBG32), RotatedAltVal)
2599251607Sdim      .addReg(RotatedOldVal).addReg(Src2)
2600251607Sdim      .addImm(32).addImm(31 + BitSize).addImm(0);
2601251607Sdim  MBB->addSuccessor(UpdateMBB);
2602251607Sdim
2603251607Sdim  //  UpdateMBB:
2604251607Sdim  //   %RotatedNewVal = PHI [ %RotatedOldVal, LoopMBB ],
2605251607Sdim  //                        [ %RotatedAltVal, UseAltMBB ]
2606251607Sdim  //   %NewVal        = RLL %RotatedNewVal, 0(%NegBitShift)
2607251607Sdim  //   %Dest          = CS %OldVal, %NewVal, Disp(%Base)
2608251607Sdim  //   JNE LoopMBB
2609251607Sdim  //   # fall through to DoneMMB
2610251607Sdim  MBB = UpdateMBB;
2611251607Sdim  BuildMI(MBB, DL, TII->get(SystemZ::PHI), RotatedNewVal)
2612251607Sdim    .addReg(RotatedOldVal).addMBB(LoopMBB)
2613251607Sdim    .addReg(RotatedAltVal).addMBB(UseAltMBB);
2614251607Sdim  if (IsSubWord)
2615251607Sdim    BuildMI(MBB, DL, TII->get(SystemZ::RLL), NewVal)
2616251607Sdim      .addReg(RotatedNewVal).addReg(NegBitShift).addImm(0);
2617251607Sdim  BuildMI(MBB, DL, TII->get(CSOpcode), Dest)
2618251607Sdim    .addReg(OldVal).addReg(NewVal).addOperand(Base).addImm(Disp);
2619263509Sdim  BuildMI(MBB, DL, TII->get(SystemZ::BRC))
2620263509Sdim    .addImm(SystemZ::CCMASK_CS).addImm(SystemZ::CCMASK_CS_NE).addMBB(LoopMBB);
2621251607Sdim  MBB->addSuccessor(LoopMBB);
2622251607Sdim  MBB->addSuccessor(DoneMBB);
2623251607Sdim
2624251607Sdim  MI->eraseFromParent();
2625251607Sdim  return DoneMBB;
2626251607Sdim}
2627251607Sdim
2628251607Sdim// Implement EmitInstrWithCustomInserter for pseudo ATOMIC_CMP_SWAPW
2629251607Sdim// instruction MI.
2630251607SdimMachineBasicBlock *
2631251607SdimSystemZTargetLowering::emitAtomicCmpSwapW(MachineInstr *MI,
2632251607Sdim                                          MachineBasicBlock *MBB) const {
2633251607Sdim  const SystemZInstrInfo *TII = TM.getInstrInfo();
2634251607Sdim  MachineFunction &MF = *MBB->getParent();
2635251607Sdim  MachineRegisterInfo &MRI = MF.getRegInfo();
2636251607Sdim
2637251607Sdim  // Extract the operands.  Base can be a register or a frame index.
2638251607Sdim  unsigned Dest        = MI->getOperand(0).getReg();
2639251607Sdim  MachineOperand Base  = earlyUseOperand(MI->getOperand(1));
2640251607Sdim  int64_t  Disp        = MI->getOperand(2).getImm();
2641251607Sdim  unsigned OrigCmpVal  = MI->getOperand(3).getReg();
2642251607Sdim  unsigned OrigSwapVal = MI->getOperand(4).getReg();
2643251607Sdim  unsigned BitShift    = MI->getOperand(5).getReg();
2644251607Sdim  unsigned NegBitShift = MI->getOperand(6).getReg();
2645251607Sdim  int64_t  BitSize     = MI->getOperand(7).getImm();
2646251607Sdim  DebugLoc DL          = MI->getDebugLoc();
2647251607Sdim
2648251607Sdim  const TargetRegisterClass *RC = &SystemZ::GR32BitRegClass;
2649251607Sdim
2650251607Sdim  // Get the right opcodes for the displacement.
2651251607Sdim  unsigned LOpcode  = TII->getOpcodeForOffset(SystemZ::L,  Disp);
2652251607Sdim  unsigned CSOpcode = TII->getOpcodeForOffset(SystemZ::CS, Disp);
2653251607Sdim  assert(LOpcode && CSOpcode && "Displacement out of range");
2654251607Sdim
2655251607Sdim  // Create virtual registers for temporary results.
2656251607Sdim  unsigned OrigOldVal   = MRI.createVirtualRegister(RC);
2657251607Sdim  unsigned OldVal       = MRI.createVirtualRegister(RC);
2658251607Sdim  unsigned CmpVal       = MRI.createVirtualRegister(RC);
2659251607Sdim  unsigned SwapVal      = MRI.createVirtualRegister(RC);
2660251607Sdim  unsigned StoreVal     = MRI.createVirtualRegister(RC);
2661251607Sdim  unsigned RetryOldVal  = MRI.createVirtualRegister(RC);
2662251607Sdim  unsigned RetryCmpVal  = MRI.createVirtualRegister(RC);
2663251607Sdim  unsigned RetrySwapVal = MRI.createVirtualRegister(RC);
2664251607Sdim
2665251607Sdim  // Insert 2 basic blocks for the loop.
2666251607Sdim  MachineBasicBlock *StartMBB = MBB;
2667263509Sdim  MachineBasicBlock *DoneMBB  = splitBlockBefore(MI, MBB);
2668251607Sdim  MachineBasicBlock *LoopMBB  = emitBlockAfter(StartMBB);
2669251607Sdim  MachineBasicBlock *SetMBB   = emitBlockAfter(LoopMBB);
2670251607Sdim
2671251607Sdim  //  StartMBB:
2672251607Sdim  //   ...
2673251607Sdim  //   %OrigOldVal     = L Disp(%Base)
2674251607Sdim  //   # fall through to LoopMMB
2675251607Sdim  MBB = StartMBB;
2676251607Sdim  BuildMI(MBB, DL, TII->get(LOpcode), OrigOldVal)
2677251607Sdim    .addOperand(Base).addImm(Disp).addReg(0);
2678251607Sdim  MBB->addSuccessor(LoopMBB);
2679251607Sdim
2680251607Sdim  //  LoopMBB:
2681251607Sdim  //   %OldVal        = phi [ %OrigOldVal, EntryBB ], [ %RetryOldVal, SetMBB ]
2682251607Sdim  //   %CmpVal        = phi [ %OrigCmpVal, EntryBB ], [ %RetryCmpVal, SetMBB ]
2683251607Sdim  //   %SwapVal       = phi [ %OrigSwapVal, EntryBB ], [ %RetrySwapVal, SetMBB ]
2684251607Sdim  //   %Dest          = RLL %OldVal, BitSize(%BitShift)
2685251607Sdim  //                      ^^ The low BitSize bits contain the field
2686251607Sdim  //                         of interest.
2687251607Sdim  //   %RetryCmpVal   = RISBG32 %CmpVal, %Dest, 32, 63-BitSize, 0
2688251607Sdim  //                      ^^ Replace the upper 32-BitSize bits of the
2689251607Sdim  //                         comparison value with those that we loaded,
2690251607Sdim  //                         so that we can use a full word comparison.
2691251607Sdim  //   CR %Dest, %RetryCmpVal
2692251607Sdim  //   JNE DoneMBB
2693251607Sdim  //   # Fall through to SetMBB
2694251607Sdim  MBB = LoopMBB;
2695251607Sdim  BuildMI(MBB, DL, TII->get(SystemZ::PHI), OldVal)
2696251607Sdim    .addReg(OrigOldVal).addMBB(StartMBB)
2697251607Sdim    .addReg(RetryOldVal).addMBB(SetMBB);
2698251607Sdim  BuildMI(MBB, DL, TII->get(SystemZ::PHI), CmpVal)
2699251607Sdim    .addReg(OrigCmpVal).addMBB(StartMBB)
2700251607Sdim    .addReg(RetryCmpVal).addMBB(SetMBB);
2701251607Sdim  BuildMI(MBB, DL, TII->get(SystemZ::PHI), SwapVal)
2702251607Sdim    .addReg(OrigSwapVal).addMBB(StartMBB)
2703251607Sdim    .addReg(RetrySwapVal).addMBB(SetMBB);
2704251607Sdim  BuildMI(MBB, DL, TII->get(SystemZ::RLL), Dest)
2705251607Sdim    .addReg(OldVal).addReg(BitShift).addImm(BitSize);
2706251607Sdim  BuildMI(MBB, DL, TII->get(SystemZ::RISBG32), RetryCmpVal)
2707251607Sdim    .addReg(CmpVal).addReg(Dest).addImm(32).addImm(63 - BitSize).addImm(0);
2708251607Sdim  BuildMI(MBB, DL, TII->get(SystemZ::CR))
2709251607Sdim    .addReg(Dest).addReg(RetryCmpVal);
2710263509Sdim  BuildMI(MBB, DL, TII->get(SystemZ::BRC))
2711263509Sdim    .addImm(SystemZ::CCMASK_ICMP)
2712263509Sdim    .addImm(SystemZ::CCMASK_CMP_NE).addMBB(DoneMBB);
2713251607Sdim  MBB->addSuccessor(DoneMBB);
2714251607Sdim  MBB->addSuccessor(SetMBB);
2715251607Sdim
2716251607Sdim  //  SetMBB:
2717251607Sdim  //   %RetrySwapVal = RISBG32 %SwapVal, %Dest, 32, 63-BitSize, 0
2718251607Sdim  //                      ^^ Replace the upper 32-BitSize bits of the new
2719251607Sdim  //                         value with those that we loaded.
2720251607Sdim  //   %StoreVal    = RLL %RetrySwapVal, -BitSize(%NegBitShift)
2721251607Sdim  //                      ^^ Rotate the new field to its proper position.
2722251607Sdim  //   %RetryOldVal = CS %Dest, %StoreVal, Disp(%Base)
2723251607Sdim  //   JNE LoopMBB
2724251607Sdim  //   # fall through to ExitMMB
2725251607Sdim  MBB = SetMBB;
2726251607Sdim  BuildMI(MBB, DL, TII->get(SystemZ::RISBG32), RetrySwapVal)
2727251607Sdim    .addReg(SwapVal).addReg(Dest).addImm(32).addImm(63 - BitSize).addImm(0);
2728251607Sdim  BuildMI(MBB, DL, TII->get(SystemZ::RLL), StoreVal)
2729251607Sdim    .addReg(RetrySwapVal).addReg(NegBitShift).addImm(-BitSize);
2730251607Sdim  BuildMI(MBB, DL, TII->get(CSOpcode), RetryOldVal)
2731251607Sdim    .addReg(OldVal).addReg(StoreVal).addOperand(Base).addImm(Disp);
2732263509Sdim  BuildMI(MBB, DL, TII->get(SystemZ::BRC))
2733263509Sdim    .addImm(SystemZ::CCMASK_CS).addImm(SystemZ::CCMASK_CS_NE).addMBB(LoopMBB);
2734251607Sdim  MBB->addSuccessor(LoopMBB);
2735251607Sdim  MBB->addSuccessor(DoneMBB);
2736251607Sdim
2737251607Sdim  MI->eraseFromParent();
2738251607Sdim  return DoneMBB;
2739251607Sdim}
2740251607Sdim
2741251607Sdim// Emit an extension from a GR32 or GR64 to a GR128.  ClearEven is true
2742251607Sdim// if the high register of the GR128 value must be cleared or false if
2743263509Sdim// it's "don't care".  SubReg is subreg_l32 when extending a GR32
2744263509Sdim// and subreg_l64 when extending a GR64.
2745251607SdimMachineBasicBlock *
2746251607SdimSystemZTargetLowering::emitExt128(MachineInstr *MI,
2747251607Sdim                                  MachineBasicBlock *MBB,
2748251607Sdim                                  bool ClearEven, unsigned SubReg) const {
2749251607Sdim  const SystemZInstrInfo *TII = TM.getInstrInfo();
2750251607Sdim  MachineFunction &MF = *MBB->getParent();
2751251607Sdim  MachineRegisterInfo &MRI = MF.getRegInfo();
2752251607Sdim  DebugLoc DL = MI->getDebugLoc();
2753251607Sdim
2754251607Sdim  unsigned Dest  = MI->getOperand(0).getReg();
2755251607Sdim  unsigned Src   = MI->getOperand(1).getReg();
2756251607Sdim  unsigned In128 = MRI.createVirtualRegister(&SystemZ::GR128BitRegClass);
2757251607Sdim
2758251607Sdim  BuildMI(*MBB, MI, DL, TII->get(TargetOpcode::IMPLICIT_DEF), In128);
2759251607Sdim  if (ClearEven) {
2760251607Sdim    unsigned NewIn128 = MRI.createVirtualRegister(&SystemZ::GR128BitRegClass);
2761251607Sdim    unsigned Zero64   = MRI.createVirtualRegister(&SystemZ::GR64BitRegClass);
2762251607Sdim
2763251607Sdim    BuildMI(*MBB, MI, DL, TII->get(SystemZ::LLILL), Zero64)
2764251607Sdim      .addImm(0);
2765251607Sdim    BuildMI(*MBB, MI, DL, TII->get(TargetOpcode::INSERT_SUBREG), NewIn128)
2766263509Sdim      .addReg(In128).addReg(Zero64).addImm(SystemZ::subreg_h64);
2767251607Sdim    In128 = NewIn128;
2768251607Sdim  }
2769251607Sdim  BuildMI(*MBB, MI, DL, TII->get(TargetOpcode::INSERT_SUBREG), Dest)
2770251607Sdim    .addReg(In128).addReg(Src).addImm(SubReg);
2771251607Sdim
2772251607Sdim  MI->eraseFromParent();
2773251607Sdim  return MBB;
2774251607Sdim}
2775251607Sdim
2776263509SdimMachineBasicBlock *
2777263509SdimSystemZTargetLowering::emitMemMemWrapper(MachineInstr *MI,
2778263509Sdim                                         MachineBasicBlock *MBB,
2779263509Sdim                                         unsigned Opcode) const {
2780263509Sdim  const SystemZInstrInfo *TII = TM.getInstrInfo();
2781263509Sdim  MachineFunction &MF = *MBB->getParent();
2782263509Sdim  MachineRegisterInfo &MRI = MF.getRegInfo();
2783263509Sdim  DebugLoc DL = MI->getDebugLoc();
2784263509Sdim
2785263509Sdim  MachineOperand DestBase = earlyUseOperand(MI->getOperand(0));
2786263509Sdim  uint64_t       DestDisp = MI->getOperand(1).getImm();
2787263509Sdim  MachineOperand SrcBase  = earlyUseOperand(MI->getOperand(2));
2788263509Sdim  uint64_t       SrcDisp  = MI->getOperand(3).getImm();
2789263509Sdim  uint64_t       Length   = MI->getOperand(4).getImm();
2790263509Sdim
2791263509Sdim  // When generating more than one CLC, all but the last will need to
2792263509Sdim  // branch to the end when a difference is found.
2793263509Sdim  MachineBasicBlock *EndMBB = (Length > 256 && Opcode == SystemZ::CLC ?
2794263509Sdim                               splitBlockAfter(MI, MBB) : 0);
2795263509Sdim
2796263509Sdim  // Check for the loop form, in which operand 5 is the trip count.
2797263509Sdim  if (MI->getNumExplicitOperands() > 5) {
2798263509Sdim    bool HaveSingleBase = DestBase.isIdenticalTo(SrcBase);
2799263509Sdim
2800263509Sdim    uint64_t StartCountReg = MI->getOperand(5).getReg();
2801263509Sdim    uint64_t StartSrcReg   = forceReg(MI, SrcBase, TII);
2802263509Sdim    uint64_t StartDestReg  = (HaveSingleBase ? StartSrcReg :
2803263509Sdim                              forceReg(MI, DestBase, TII));
2804263509Sdim
2805263509Sdim    const TargetRegisterClass *RC = &SystemZ::ADDR64BitRegClass;
2806263509Sdim    uint64_t ThisSrcReg  = MRI.createVirtualRegister(RC);
2807263509Sdim    uint64_t ThisDestReg = (HaveSingleBase ? ThisSrcReg :
2808263509Sdim                            MRI.createVirtualRegister(RC));
2809263509Sdim    uint64_t NextSrcReg  = MRI.createVirtualRegister(RC);
2810263509Sdim    uint64_t NextDestReg = (HaveSingleBase ? NextSrcReg :
2811263509Sdim                            MRI.createVirtualRegister(RC));
2812263509Sdim
2813263509Sdim    RC = &SystemZ::GR64BitRegClass;
2814263509Sdim    uint64_t ThisCountReg = MRI.createVirtualRegister(RC);
2815263509Sdim    uint64_t NextCountReg = MRI.createVirtualRegister(RC);
2816263509Sdim
2817263509Sdim    MachineBasicBlock *StartMBB = MBB;
2818263509Sdim    MachineBasicBlock *DoneMBB = splitBlockBefore(MI, MBB);
2819263509Sdim    MachineBasicBlock *LoopMBB = emitBlockAfter(StartMBB);
2820263509Sdim    MachineBasicBlock *NextMBB = (EndMBB ? emitBlockAfter(LoopMBB) : LoopMBB);
2821263509Sdim
2822263509Sdim    //  StartMBB:
2823263509Sdim    //   # fall through to LoopMMB
2824263509Sdim    MBB->addSuccessor(LoopMBB);
2825263509Sdim
2826263509Sdim    //  LoopMBB:
2827263509Sdim    //   %ThisDestReg = phi [ %StartDestReg, StartMBB ],
2828263509Sdim    //                      [ %NextDestReg, NextMBB ]
2829263509Sdim    //   %ThisSrcReg = phi [ %StartSrcReg, StartMBB ],
2830263509Sdim    //                     [ %NextSrcReg, NextMBB ]
2831263509Sdim    //   %ThisCountReg = phi [ %StartCountReg, StartMBB ],
2832263509Sdim    //                       [ %NextCountReg, NextMBB ]
2833263509Sdim    //   ( PFD 2, 768+DestDisp(%ThisDestReg) )
2834263509Sdim    //   Opcode DestDisp(256,%ThisDestReg), SrcDisp(%ThisSrcReg)
2835263509Sdim    //   ( JLH EndMBB )
2836263509Sdim    //
2837263509Sdim    // The prefetch is used only for MVC.  The JLH is used only for CLC.
2838263509Sdim    MBB = LoopMBB;
2839263509Sdim
2840263509Sdim    BuildMI(MBB, DL, TII->get(SystemZ::PHI), ThisDestReg)
2841263509Sdim      .addReg(StartDestReg).addMBB(StartMBB)
2842263509Sdim      .addReg(NextDestReg).addMBB(NextMBB);
2843263509Sdim    if (!HaveSingleBase)
2844263509Sdim      BuildMI(MBB, DL, TII->get(SystemZ::PHI), ThisSrcReg)
2845263509Sdim        .addReg(StartSrcReg).addMBB(StartMBB)
2846263509Sdim        .addReg(NextSrcReg).addMBB(NextMBB);
2847263509Sdim    BuildMI(MBB, DL, TII->get(SystemZ::PHI), ThisCountReg)
2848263509Sdim      .addReg(StartCountReg).addMBB(StartMBB)
2849263509Sdim      .addReg(NextCountReg).addMBB(NextMBB);
2850263509Sdim    if (Opcode == SystemZ::MVC)
2851263509Sdim      BuildMI(MBB, DL, TII->get(SystemZ::PFD))
2852263509Sdim        .addImm(SystemZ::PFD_WRITE)
2853263509Sdim        .addReg(ThisDestReg).addImm(DestDisp + 768).addReg(0);
2854263509Sdim    BuildMI(MBB, DL, TII->get(Opcode))
2855263509Sdim      .addReg(ThisDestReg).addImm(DestDisp).addImm(256)
2856263509Sdim      .addReg(ThisSrcReg).addImm(SrcDisp);
2857263509Sdim    if (EndMBB) {
2858263509Sdim      BuildMI(MBB, DL, TII->get(SystemZ::BRC))
2859263509Sdim        .addImm(SystemZ::CCMASK_ICMP).addImm(SystemZ::CCMASK_CMP_NE)
2860263509Sdim        .addMBB(EndMBB);
2861263509Sdim      MBB->addSuccessor(EndMBB);
2862263509Sdim      MBB->addSuccessor(NextMBB);
2863263509Sdim    }
2864263509Sdim
2865263509Sdim    // NextMBB:
2866263509Sdim    //   %NextDestReg = LA 256(%ThisDestReg)
2867263509Sdim    //   %NextSrcReg = LA 256(%ThisSrcReg)
2868263509Sdim    //   %NextCountReg = AGHI %ThisCountReg, -1
2869263509Sdim    //   CGHI %NextCountReg, 0
2870263509Sdim    //   JLH LoopMBB
2871263509Sdim    //   # fall through to DoneMMB
2872263509Sdim    //
2873263509Sdim    // The AGHI, CGHI and JLH should be converted to BRCTG by later passes.
2874263509Sdim    MBB = NextMBB;
2875263509Sdim
2876263509Sdim    BuildMI(MBB, DL, TII->get(SystemZ::LA), NextDestReg)
2877263509Sdim      .addReg(ThisDestReg).addImm(256).addReg(0);
2878263509Sdim    if (!HaveSingleBase)
2879263509Sdim      BuildMI(MBB, DL, TII->get(SystemZ::LA), NextSrcReg)
2880263509Sdim        .addReg(ThisSrcReg).addImm(256).addReg(0);
2881263509Sdim    BuildMI(MBB, DL, TII->get(SystemZ::AGHI), NextCountReg)
2882263509Sdim      .addReg(ThisCountReg).addImm(-1);
2883263509Sdim    BuildMI(MBB, DL, TII->get(SystemZ::CGHI))
2884263509Sdim      .addReg(NextCountReg).addImm(0);
2885263509Sdim    BuildMI(MBB, DL, TII->get(SystemZ::BRC))
2886263509Sdim      .addImm(SystemZ::CCMASK_ICMP).addImm(SystemZ::CCMASK_CMP_NE)
2887263509Sdim      .addMBB(LoopMBB);
2888263509Sdim    MBB->addSuccessor(LoopMBB);
2889263509Sdim    MBB->addSuccessor(DoneMBB);
2890263509Sdim
2891263509Sdim    DestBase = MachineOperand::CreateReg(NextDestReg, false);
2892263509Sdim    SrcBase = MachineOperand::CreateReg(NextSrcReg, false);
2893263509Sdim    Length &= 255;
2894263509Sdim    MBB = DoneMBB;
2895263509Sdim  }
2896263509Sdim  // Handle any remaining bytes with straight-line code.
2897263509Sdim  while (Length > 0) {
2898263509Sdim    uint64_t ThisLength = std::min(Length, uint64_t(256));
2899263509Sdim    // The previous iteration might have created out-of-range displacements.
2900263509Sdim    // Apply them using LAY if so.
2901263509Sdim    if (!isUInt<12>(DestDisp)) {
2902263509Sdim      unsigned Reg = MRI.createVirtualRegister(&SystemZ::ADDR64BitRegClass);
2903263509Sdim      BuildMI(*MBB, MI, MI->getDebugLoc(), TII->get(SystemZ::LAY), Reg)
2904263509Sdim        .addOperand(DestBase).addImm(DestDisp).addReg(0);
2905263509Sdim      DestBase = MachineOperand::CreateReg(Reg, false);
2906263509Sdim      DestDisp = 0;
2907263509Sdim    }
2908263509Sdim    if (!isUInt<12>(SrcDisp)) {
2909263509Sdim      unsigned Reg = MRI.createVirtualRegister(&SystemZ::ADDR64BitRegClass);
2910263509Sdim      BuildMI(*MBB, MI, MI->getDebugLoc(), TII->get(SystemZ::LAY), Reg)
2911263509Sdim        .addOperand(SrcBase).addImm(SrcDisp).addReg(0);
2912263509Sdim      SrcBase = MachineOperand::CreateReg(Reg, false);
2913263509Sdim      SrcDisp = 0;
2914263509Sdim    }
2915263509Sdim    BuildMI(*MBB, MI, DL, TII->get(Opcode))
2916263509Sdim      .addOperand(DestBase).addImm(DestDisp).addImm(ThisLength)
2917263509Sdim      .addOperand(SrcBase).addImm(SrcDisp);
2918263509Sdim    DestDisp += ThisLength;
2919263509Sdim    SrcDisp += ThisLength;
2920263509Sdim    Length -= ThisLength;
2921263509Sdim    // If there's another CLC to go, branch to the end if a difference
2922263509Sdim    // was found.
2923263509Sdim    if (EndMBB && Length > 0) {
2924263509Sdim      MachineBasicBlock *NextMBB = splitBlockBefore(MI, MBB);
2925263509Sdim      BuildMI(MBB, DL, TII->get(SystemZ::BRC))
2926263509Sdim        .addImm(SystemZ::CCMASK_ICMP).addImm(SystemZ::CCMASK_CMP_NE)
2927263509Sdim        .addMBB(EndMBB);
2928263509Sdim      MBB->addSuccessor(EndMBB);
2929263509Sdim      MBB->addSuccessor(NextMBB);
2930263509Sdim      MBB = NextMBB;
2931263509Sdim    }
2932263509Sdim  }
2933263509Sdim  if (EndMBB) {
2934263509Sdim    MBB->addSuccessor(EndMBB);
2935263509Sdim    MBB = EndMBB;
2936263509Sdim    MBB->addLiveIn(SystemZ::CC);
2937263509Sdim  }
2938263509Sdim
2939263509Sdim  MI->eraseFromParent();
2940263509Sdim  return MBB;
2941263509Sdim}
2942263509Sdim
2943263509Sdim// Decompose string pseudo-instruction MI into a loop that continually performs
2944263509Sdim// Opcode until CC != 3.
2945263509SdimMachineBasicBlock *
2946263509SdimSystemZTargetLowering::emitStringWrapper(MachineInstr *MI,
2947263509Sdim                                         MachineBasicBlock *MBB,
2948263509Sdim                                         unsigned Opcode) const {
2949263509Sdim  const SystemZInstrInfo *TII = TM.getInstrInfo();
2950263509Sdim  MachineFunction &MF = *MBB->getParent();
2951263509Sdim  MachineRegisterInfo &MRI = MF.getRegInfo();
2952263509Sdim  DebugLoc DL = MI->getDebugLoc();
2953263509Sdim
2954263509Sdim  uint64_t End1Reg   = MI->getOperand(0).getReg();
2955263509Sdim  uint64_t Start1Reg = MI->getOperand(1).getReg();
2956263509Sdim  uint64_t Start2Reg = MI->getOperand(2).getReg();
2957263509Sdim  uint64_t CharReg   = MI->getOperand(3).getReg();
2958263509Sdim
2959263509Sdim  const TargetRegisterClass *RC = &SystemZ::GR64BitRegClass;
2960263509Sdim  uint64_t This1Reg = MRI.createVirtualRegister(RC);
2961263509Sdim  uint64_t This2Reg = MRI.createVirtualRegister(RC);
2962263509Sdim  uint64_t End2Reg  = MRI.createVirtualRegister(RC);
2963263509Sdim
2964263509Sdim  MachineBasicBlock *StartMBB = MBB;
2965263509Sdim  MachineBasicBlock *DoneMBB = splitBlockBefore(MI, MBB);
2966263509Sdim  MachineBasicBlock *LoopMBB = emitBlockAfter(StartMBB);
2967263509Sdim
2968263509Sdim  //  StartMBB:
2969263509Sdim  //   # fall through to LoopMMB
2970263509Sdim  MBB->addSuccessor(LoopMBB);
2971263509Sdim
2972263509Sdim  //  LoopMBB:
2973263509Sdim  //   %This1Reg = phi [ %Start1Reg, StartMBB ], [ %End1Reg, LoopMBB ]
2974263509Sdim  //   %This2Reg = phi [ %Start2Reg, StartMBB ], [ %End2Reg, LoopMBB ]
2975263509Sdim  //   R0L = %CharReg
2976263509Sdim  //   %End1Reg, %End2Reg = CLST %This1Reg, %This2Reg -- uses R0L
2977263509Sdim  //   JO LoopMBB
2978263509Sdim  //   # fall through to DoneMMB
2979263509Sdim  //
2980263509Sdim  // The load of R0L can be hoisted by post-RA LICM.
2981263509Sdim  MBB = LoopMBB;
2982263509Sdim
2983263509Sdim  BuildMI(MBB, DL, TII->get(SystemZ::PHI), This1Reg)
2984263509Sdim    .addReg(Start1Reg).addMBB(StartMBB)
2985263509Sdim    .addReg(End1Reg).addMBB(LoopMBB);
2986263509Sdim  BuildMI(MBB, DL, TII->get(SystemZ::PHI), This2Reg)
2987263509Sdim    .addReg(Start2Reg).addMBB(StartMBB)
2988263509Sdim    .addReg(End2Reg).addMBB(LoopMBB);
2989263509Sdim  BuildMI(MBB, DL, TII->get(TargetOpcode::COPY), SystemZ::R0L).addReg(CharReg);
2990263509Sdim  BuildMI(MBB, DL, TII->get(Opcode))
2991263509Sdim    .addReg(End1Reg, RegState::Define).addReg(End2Reg, RegState::Define)
2992263509Sdim    .addReg(This1Reg).addReg(This2Reg);
2993263509Sdim  BuildMI(MBB, DL, TII->get(SystemZ::BRC))
2994263509Sdim    .addImm(SystemZ::CCMASK_ANY).addImm(SystemZ::CCMASK_3).addMBB(LoopMBB);
2995263509Sdim  MBB->addSuccessor(LoopMBB);
2996263509Sdim  MBB->addSuccessor(DoneMBB);
2997263509Sdim
2998263509Sdim  DoneMBB->addLiveIn(SystemZ::CC);
2999263509Sdim
3000263509Sdim  MI->eraseFromParent();
3001263509Sdim  return DoneMBB;
3002263509Sdim}
3003263509Sdim
// Expand a pseudo instruction that was selected with the custom-inserter
// flag into real machine code, dispatching on the pseudo's opcode to the
// appropriate emit* helper.  Each helper may split MBB and create new
// blocks; the returned block is where subsequent instructions belong.
MachineBasicBlock *SystemZTargetLowering::
EmitInstrWithCustomInserter(MachineInstr *MI, MachineBasicBlock *MBB) const {
  switch (MI->getOpcode()) {
  // Conditional-select pseudos for the various register classes.
  case SystemZ::Select32Mux:
  case SystemZ::Select32:
  case SystemZ::SelectF32:
  case SystemZ::Select64:
  case SystemZ::SelectF64:
  case SystemZ::SelectF128:
    return emitSelect(MI, MBB);

  // Conditional stores.  Arguments are (store opcode, STOC-style opcode or
  // 0 when none is passed, invert-condition flag) -- see emitCondStore for
  // the exact contract.
  case SystemZ::CondStore8Mux:
    return emitCondStore(MI, MBB, SystemZ::STCMux, 0, false);
  case SystemZ::CondStore8MuxInv:
    return emitCondStore(MI, MBB, SystemZ::STCMux, 0, true);
  case SystemZ::CondStore16Mux:
    return emitCondStore(MI, MBB, SystemZ::STHMux, 0, false);
  case SystemZ::CondStore16MuxInv:
    return emitCondStore(MI, MBB, SystemZ::STHMux, 0, true);
  case SystemZ::CondStore8:
    return emitCondStore(MI, MBB, SystemZ::STC, 0, false);
  case SystemZ::CondStore8Inv:
    return emitCondStore(MI, MBB, SystemZ::STC, 0, true);
  case SystemZ::CondStore16:
    return emitCondStore(MI, MBB, SystemZ::STH, 0, false);
  case SystemZ::CondStore16Inv:
    return emitCondStore(MI, MBB, SystemZ::STH, 0, true);
  case SystemZ::CondStore32:
    return emitCondStore(MI, MBB, SystemZ::ST, SystemZ::STOC, false);
  case SystemZ::CondStore32Inv:
    return emitCondStore(MI, MBB, SystemZ::ST, SystemZ::STOC, true);
  case SystemZ::CondStore64:
    return emitCondStore(MI, MBB, SystemZ::STG, SystemZ::STOCG, false);
  case SystemZ::CondStore64Inv:
    return emitCondStore(MI, MBB, SystemZ::STG, SystemZ::STOCG, true);
  case SystemZ::CondStoreF32:
    return emitCondStore(MI, MBB, SystemZ::STE, 0, false);
  case SystemZ::CondStoreF32Inv:
    return emitCondStore(MI, MBB, SystemZ::STE, 0, true);
  case SystemZ::CondStoreF64:
    return emitCondStore(MI, MBB, SystemZ::STD, 0, false);
  case SystemZ::CondStoreF64Inv:
    return emitCondStore(MI, MBB, SystemZ::STD, 0, true);

  // 32/64-bit to 128-bit extensions; the bool selects zero-extension and
  // the subreg index names the low part being inserted.
  case SystemZ::AEXT128_64:
    return emitExt128(MI, MBB, false, SystemZ::subreg_l64);
  case SystemZ::ZEXT128_32:
    return emitExt128(MI, MBB, true, SystemZ::subreg_l32);
  case SystemZ::ZEXT128_64:
    return emitExt128(MI, MBB, true, SystemZ::subreg_l64);

  // Atomic exchange: binary opcode 0 means "no operation, just swap".
  // The second number is the operand bit size (0 = sub-word form).
  case SystemZ::ATOMIC_SWAPW:
    return emitAtomicLoadBinary(MI, MBB, 0, 0);
  case SystemZ::ATOMIC_SWAP_32:
    return emitAtomicLoadBinary(MI, MBB, 0, 32);
  case SystemZ::ATOMIC_SWAP_64:
    return emitAtomicLoadBinary(MI, MBB, 0, 64);

  // Atomic add, with register and immediate variants per width.
  case SystemZ::ATOMIC_LOADW_AR:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::AR, 0);
  case SystemZ::ATOMIC_LOADW_AFI:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::AFI, 0);
  case SystemZ::ATOMIC_LOAD_AR:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::AR, 32);
  case SystemZ::ATOMIC_LOAD_AHI:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::AHI, 32);
  case SystemZ::ATOMIC_LOAD_AFI:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::AFI, 32);
  case SystemZ::ATOMIC_LOAD_AGR:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::AGR, 64);
  case SystemZ::ATOMIC_LOAD_AGHI:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::AGHI, 64);
  case SystemZ::ATOMIC_LOAD_AGFI:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::AGFI, 64);

  // Atomic subtract.
  case SystemZ::ATOMIC_LOADW_SR:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::SR, 0);
  case SystemZ::ATOMIC_LOAD_SR:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::SR, 32);
  case SystemZ::ATOMIC_LOAD_SGR:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::SGR, 64);

  // Atomic AND.
  case SystemZ::ATOMIC_LOADW_NR:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NR, 0);
  case SystemZ::ATOMIC_LOADW_NILH:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NILH, 0);
  case SystemZ::ATOMIC_LOAD_NR:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NR, 32);
  case SystemZ::ATOMIC_LOAD_NILL:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NILL, 32);
  case SystemZ::ATOMIC_LOAD_NILH:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NILH, 32);
  case SystemZ::ATOMIC_LOAD_NILF:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NILF, 32);
  case SystemZ::ATOMIC_LOAD_NGR:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NGR, 64);
  case SystemZ::ATOMIC_LOAD_NILL64:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NILL64, 64);
  case SystemZ::ATOMIC_LOAD_NILH64:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NILH64, 64);
  case SystemZ::ATOMIC_LOAD_NIHL64:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NIHL64, 64);
  case SystemZ::ATOMIC_LOAD_NIHH64:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NIHH64, 64);
  case SystemZ::ATOMIC_LOAD_NILF64:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NILF64, 64);
  case SystemZ::ATOMIC_LOAD_NIHF64:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NIHF64, 64);

  // Atomic OR.
  case SystemZ::ATOMIC_LOADW_OR:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::OR, 0);
  case SystemZ::ATOMIC_LOADW_OILH:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::OILH, 0);
  case SystemZ::ATOMIC_LOAD_OR:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::OR, 32);
  case SystemZ::ATOMIC_LOAD_OILL:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::OILL, 32);
  case SystemZ::ATOMIC_LOAD_OILH:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::OILH, 32);
  case SystemZ::ATOMIC_LOAD_OILF:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::OILF, 32);
  case SystemZ::ATOMIC_LOAD_OGR:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::OGR, 64);
  case SystemZ::ATOMIC_LOAD_OILL64:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::OILL64, 64);
  case SystemZ::ATOMIC_LOAD_OILH64:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::OILH64, 64);
  case SystemZ::ATOMIC_LOAD_OIHL64:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::OIHL64, 64);
  case SystemZ::ATOMIC_LOAD_OIHH64:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::OIHH64, 64);
  case SystemZ::ATOMIC_LOAD_OILF64:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::OILF64, 64);
  case SystemZ::ATOMIC_LOAD_OIHF64:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::OIHF64, 64);

  // Atomic XOR.
  case SystemZ::ATOMIC_LOADW_XR:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::XR, 0);
  case SystemZ::ATOMIC_LOADW_XILF:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::XILF, 0);
  case SystemZ::ATOMIC_LOAD_XR:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::XR, 32);
  case SystemZ::ATOMIC_LOAD_XILF:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::XILF, 32);
  case SystemZ::ATOMIC_LOAD_XGR:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::XGR, 64);
  case SystemZ::ATOMIC_LOAD_XILF64:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::XILF64, 64);
  case SystemZ::ATOMIC_LOAD_XIHF64:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::XIHF64, 64);

  // "i"-suffixed pseudos pass true as emitAtomicLoadBinary's extra flag
  // (the non-"i" cases above use its default) -- presumably selecting an
  // inverted/NAND-style result; confirm against emitAtomicLoadBinary.
  case SystemZ::ATOMIC_LOADW_NRi:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NR, 0, true);
  case SystemZ::ATOMIC_LOADW_NILHi:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NILH, 0, true);
  case SystemZ::ATOMIC_LOAD_NRi:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NR, 32, true);
  case SystemZ::ATOMIC_LOAD_NILLi:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NILL, 32, true);
  case SystemZ::ATOMIC_LOAD_NILHi:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NILH, 32, true);
  case SystemZ::ATOMIC_LOAD_NILFi:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NILF, 32, true);
  case SystemZ::ATOMIC_LOAD_NGRi:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NGR, 64, true);
  case SystemZ::ATOMIC_LOAD_NILL64i:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NILL64, 64, true);
  case SystemZ::ATOMIC_LOAD_NILH64i:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NILH64, 64, true);
  case SystemZ::ATOMIC_LOAD_NIHL64i:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NIHL64, 64, true);
  case SystemZ::ATOMIC_LOAD_NIHH64i:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NIHH64, 64, true);
  case SystemZ::ATOMIC_LOAD_NILF64i:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NILF64, 64, true);
  case SystemZ::ATOMIC_LOAD_NIHF64i:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NIHF64, 64, true);

  // Atomic min/max: a compare opcode plus the CC mask that keeps the
  // current value; signed forms use CR/CGR, unsigned use CLR/CLGR.
  case SystemZ::ATOMIC_LOADW_MIN:
    return emitAtomicLoadMinMax(MI, MBB, SystemZ::CR,
                                SystemZ::CCMASK_CMP_LE, 0);
  case SystemZ::ATOMIC_LOAD_MIN_32:
    return emitAtomicLoadMinMax(MI, MBB, SystemZ::CR,
                                SystemZ::CCMASK_CMP_LE, 32);
  case SystemZ::ATOMIC_LOAD_MIN_64:
    return emitAtomicLoadMinMax(MI, MBB, SystemZ::CGR,
                                SystemZ::CCMASK_CMP_LE, 64);

  case SystemZ::ATOMIC_LOADW_MAX:
    return emitAtomicLoadMinMax(MI, MBB, SystemZ::CR,
                                SystemZ::CCMASK_CMP_GE, 0);
  case SystemZ::ATOMIC_LOAD_MAX_32:
    return emitAtomicLoadMinMax(MI, MBB, SystemZ::CR,
                                SystemZ::CCMASK_CMP_GE, 32);
  case SystemZ::ATOMIC_LOAD_MAX_64:
    return emitAtomicLoadMinMax(MI, MBB, SystemZ::CGR,
                                SystemZ::CCMASK_CMP_GE, 64);

  case SystemZ::ATOMIC_LOADW_UMIN:
    return emitAtomicLoadMinMax(MI, MBB, SystemZ::CLR,
                                SystemZ::CCMASK_CMP_LE, 0);
  case SystemZ::ATOMIC_LOAD_UMIN_32:
    return emitAtomicLoadMinMax(MI, MBB, SystemZ::CLR,
                                SystemZ::CCMASK_CMP_LE, 32);
  case SystemZ::ATOMIC_LOAD_UMIN_64:
    return emitAtomicLoadMinMax(MI, MBB, SystemZ::CLGR,
                                SystemZ::CCMASK_CMP_LE, 64);

  case SystemZ::ATOMIC_LOADW_UMAX:
    return emitAtomicLoadMinMax(MI, MBB, SystemZ::CLR,
                                SystemZ::CCMASK_CMP_GE, 0);
  case SystemZ::ATOMIC_LOAD_UMAX_32:
    return emitAtomicLoadMinMax(MI, MBB, SystemZ::CLR,
                                SystemZ::CCMASK_CMP_GE, 32);
  case SystemZ::ATOMIC_LOAD_UMAX_64:
    return emitAtomicLoadMinMax(MI, MBB, SystemZ::CLGR,
                                SystemZ::CCMASK_CMP_GE, 64);

  // Sub-word atomic compare-and-swap.
  case SystemZ::ATOMIC_CMP_SWAPW:
    return emitAtomicCmpSwapW(MI, MBB);

  // Memory-to-memory block operations (MVC/NC/OC/XC/CLC), in both the
  // straight-line "Sequence" and looping forms.
  case SystemZ::MVCSequence:
  case SystemZ::MVCLoop:
    return emitMemMemWrapper(MI, MBB, SystemZ::MVC);
  case SystemZ::NCSequence:
  case SystemZ::NCLoop:
    return emitMemMemWrapper(MI, MBB, SystemZ::NC);
  case SystemZ::OCSequence:
  case SystemZ::OCLoop:
    return emitMemMemWrapper(MI, MBB, SystemZ::OC);
  case SystemZ::XCSequence:
  case SystemZ::XCLoop:
    return emitMemMemWrapper(MI, MBB, SystemZ::XC);
  case SystemZ::CLCSequence:
  case SystemZ::CLCLoop:
    return emitMemMemWrapper(MI, MBB, SystemZ::CLC);

  // String operations, wrapped in a retry loop (see emitStringWrapper).
  case SystemZ::CLSTLoop:
    return emitStringWrapper(MI, MBB, SystemZ::CLST);
  case SystemZ::MVSTLoop:
    return emitStringWrapper(MI, MBB, SystemZ::MVST);
  case SystemZ::SRSTLoop:
    return emitStringWrapper(MI, MBB, SystemZ::SRST);
  default:
    llvm_unreachable("Unexpected instr type to insert");
  }
}
3249