//===--- SwiftCallingConv.cpp - Lowering for the Swift calling convention -===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Implementation of the abstract lowering for the Swift calling convention.
//
//===----------------------------------------------------------------------===//

#include "clang/CodeGen/SwiftCallingConv.h"
#include "clang/Basic/TargetInfo.h"
#include "CodeGenModule.h"
#include "TargetInfo.h"

using namespace clang;
using namespace CodeGen;
using namespace swiftcall;

static const SwiftABIInfo &getSwiftABIInfo(CodeGenModule &CGM) {
  return cast<SwiftABIInfo>(CGM.getTargetCodeGenInfo().getABIInfo());
}

static bool isPowerOf2(unsigned n) {
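  // (n & -n) isolates the lowest set bit of n, so n == (n & -n) exactly when
  // at most one bit is set; e.g. 8 & -8 == 8, but 12 & -12 == 4 != 12.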
  return n == (n & -n);
}

/// Given two types with the same size, try to find a common type.
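///
/// For example (illustrative of the rules below): merging i64 with i8* yields
/// i64, and merging <2 x i64> with <2 x i8*> yields <2 x i64>; two vectors
/// whose element types have no common type, such as <4 x float> and
/// <4 x i32>, do not merge and nullptr is returned.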
static llvm::Type *getCommonType(llvm::Type *first, llvm::Type *second) {
  assert(first != second);

  // Allow pointers to merge with integers, but prefer the integer type.
  if (first->isIntegerTy()) {
    if (second->isPointerTy()) return first;
  } else if (first->isPointerTy()) {
    if (second->isIntegerTy()) return second;
    if (second->isPointerTy()) return first;

  // Allow two vectors to be merged (given that they have the same size).
  // This assumes that we never have two different vector register sets.
  } else if (auto firstVecTy = dyn_cast<llvm::VectorType>(first)) {
    if (auto secondVecTy = dyn_cast<llvm::VectorType>(second)) {
      if (auto commonTy = getCommonType(firstVecTy->getElementType(),
                                        secondVecTy->getElementType())) {
        return (commonTy == firstVecTy->getElementType() ? first : second);
      }
    }
  }

  return nullptr;
}

static CharUnits getTypeStoreSize(CodeGenModule &CGM, llvm::Type *type) {
  return CharUnits::fromQuantity(CGM.getDataLayout().getTypeStoreSize(type));
}

static CharUnits getTypeAllocSize(CodeGenModule &CGM, llvm::Type *type) {
  return CharUnits::fromQuantity(CGM.getDataLayout().getTypeAllocSize(type));
}

void SwiftAggLowering::addTypedData(QualType type, CharUnits begin) {
  // Deal with various aggregate types as special cases:

  // Record types.
  if (auto recType = type->getAs<RecordType>()) {
    addTypedData(recType->getDecl(), begin);

  // Array types.
  } else if (type->isArrayType()) {
    // Incomplete array types (flexible array members?) don't provide
    // data to lay out, and the other cases shouldn't be possible.
    auto arrayType = CGM.getContext().getAsConstantArrayType(type);
    if (!arrayType) return;

    QualType eltType = arrayType->getElementType();
    auto eltSize = CGM.getContext().getTypeSizeInChars(eltType);
    for (uint64_t i = 0, e = arrayType->getSize().getZExtValue(); i != e; ++i) {
      addTypedData(eltType, begin + i * eltSize);
    }

  // Complex types.
  } else if (auto complexType = type->getAs<ComplexType>()) {
    auto eltType = complexType->getElementType();
    auto eltSize = CGM.getContext().getTypeSizeInChars(eltType);
    auto eltLLVMType = CGM.getTypes().ConvertType(eltType);
    addTypedData(eltLLVMType, begin, begin + eltSize);
    addTypedData(eltLLVMType, begin + eltSize, begin + 2 * eltSize);

  // Member pointer types.
  } else if (type->getAs<MemberPointerType>()) {
    // Just add it all as opaque.
    addOpaqueData(begin, begin + CGM.getContext().getTypeSizeInChars(type));

  // Atomic types.
  } else if (const auto *atomicType = type->getAs<AtomicType>()) {
    auto valueType = atomicType->getValueType();
    auto atomicSize = CGM.getContext().getTypeSizeInChars(atomicType);
    auto valueSize = CGM.getContext().getTypeSizeInChars(valueType);

    addTypedData(atomicType->getValueType(), begin);

    // Add atomic padding.
    auto atomicPadding = atomicSize - valueSize;
    if (atomicPadding > CharUnits::Zero())
      addOpaqueData(begin + valueSize, begin + atomicSize);

  // Everything else is scalar and should not convert as an LLVM aggregate.
  } else {
    // We intentionally convert as !ForMem because we want to preserve
    // that a type was an i1.
    auto *llvmType = CGM.getTypes().ConvertType(type);
    addTypedData(llvmType, begin);
  }
}

void SwiftAggLowering::addTypedData(const RecordDecl *record, CharUnits begin) {
  addTypedData(record, begin, CGM.getContext().getASTRecordLayout(record));
}

void SwiftAggLowering::addTypedData(const RecordDecl *record, CharUnits begin,
                                    const ASTRecordLayout &layout) {
  // Unions are a special case.
  if (record->isUnion()) {
    for (auto field : record->fields()) {
      if (field->isBitField()) {
        addBitFieldData(field, begin, 0);
      } else {
        addTypedData(field->getType(), begin);
      }
    }
    return;
  }

  // Note that correctness does not rely on us adding things in
  // their actual order of layout; it's just somewhat more efficient
  // for the builder.

  // With that in mind, add "early" C++ data.
  auto cxxRecord = dyn_cast<CXXRecordDecl>(record);
  if (cxxRecord) {
    //   - a v-table pointer, if the class adds its own
    if (layout.hasOwnVFPtr()) {
      addTypedData(CGM.Int8PtrTy, begin);
    }

    //   - non-virtual bases
    for (auto &baseSpecifier : cxxRecord->bases()) {
      if (baseSpecifier.isVirtual()) continue;

      auto baseRecord = baseSpecifier.getType()->getAsCXXRecordDecl();
      addTypedData(baseRecord, begin + layout.getBaseClassOffset(baseRecord));
    }

    //   - a vbptr if the class adds its own
    if (layout.hasOwnVBPtr()) {
      addTypedData(CGM.Int8PtrTy, begin + layout.getVBPtrOffset());
    }
  }

  // Add fields.
  for (auto field : record->fields()) {
    auto fieldOffsetInBits = layout.getFieldOffset(field->getFieldIndex());
    if (field->isBitField()) {
      addBitFieldData(field, begin, fieldOffsetInBits);
    } else {
      addTypedData(field->getType(),
              begin + CGM.getContext().toCharUnitsFromBits(fieldOffsetInBits));
    }
  }

  // Add "late" C++ data:
  if (cxxRecord) {
    //   - virtual bases
    for (auto &vbaseSpecifier : cxxRecord->vbases()) {
      auto baseRecord = vbaseSpecifier.getType()->getAsCXXRecordDecl();
      addTypedData(baseRecord, begin + layout.getVBaseClassOffset(baseRecord));
    }
  }
}

void SwiftAggLowering::addBitFieldData(const FieldDecl *bitfield,
                                       CharUnits recordBegin,
                                       uint64_t bitfieldBitBegin) {
  assert(bitfield->isBitField());
  auto &ctx = CGM.getContext();
  auto width = bitfield->getBitWidthValue(ctx);

  // We can ignore zero-width bit-fields.
  if (width == 0) return;

  // toCharUnitsFromBits rounds down.
  CharUnits bitfieldByteBegin = ctx.toCharUnitsFromBits(bitfieldBitBegin);

  // Find the offset of the last byte that is partially occupied by the
  // bit-field; since we otherwise expect exclusive ends, the end is the
  // next byte.
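  //
  // For example (illustrative, assuming 8-bit chars): a 6-bit field starting
  // at bit 13 occupies bits 13..18, so bitfieldByteBegin is 1 and
  // bitfieldByteEnd is 3, covering bytes 1 and 2 of the record.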
  uint64_t bitfieldBitLast = bitfieldBitBegin + width - 1;
  CharUnits bitfieldByteEnd =
    ctx.toCharUnitsFromBits(bitfieldBitLast) + CharUnits::One();
  addOpaqueData(recordBegin + bitfieldByteBegin,
                recordBegin + bitfieldByteEnd);
}

void SwiftAggLowering::addTypedData(llvm::Type *type, CharUnits begin) {
  assert(type && "didn't provide type for typed data");
  addTypedData(type, begin, begin + getTypeStoreSize(CGM, type));
}

void SwiftAggLowering::addTypedData(llvm::Type *type,
                                    CharUnits begin, CharUnits end) {
  assert(type && "didn't provide type for typed data");
  assert(getTypeStoreSize(CGM, type) == end - begin);

  // Legalize vector types.
  if (auto vecTy = dyn_cast<llvm::VectorType>(type)) {
    SmallVector<llvm::Type*, 4> componentTys;
    legalizeVectorType(CGM, end - begin, vecTy, componentTys);
    assert(componentTys.size() >= 1);

    // Walk the initial components.
    for (size_t i = 0, e = componentTys.size(); i != e - 1; ++i) {
      llvm::Type *componentTy = componentTys[i];
      auto componentSize = getTypeStoreSize(CGM, componentTy);
      assert(componentSize < end - begin);
      addLegalTypedData(componentTy, begin, begin + componentSize);
      begin += componentSize;
    }

    return addLegalTypedData(componentTys.back(), begin, end);
  }

  // Legalize integer types.
  if (auto intTy = dyn_cast<llvm::IntegerType>(type)) {
    if (!isLegalIntegerType(CGM, intTy))
      return addOpaqueData(begin, end);
  }

  // All other types should be legal.
  return addLegalTypedData(type, begin, end);
}

void SwiftAggLowering::addLegalTypedData(llvm::Type *type,
                                         CharUnits begin, CharUnits end) {
  // Require the type to be naturally aligned.
  if (!begin.isZero() && !begin.isMultipleOf(getNaturalAlignment(CGM, type))) {

    // Try splitting vector types.
    if (auto vecTy = dyn_cast<llvm::VectorType>(type)) {
      auto split = splitLegalVectorType(CGM, end - begin, vecTy);
      auto eltTy = split.first;
      auto numElts = split.second;

      auto eltSize = (end - begin) / numElts;
      assert(eltSize == getTypeStoreSize(CGM, eltTy));
      for (size_t i = 0, e = numElts; i != e; ++i) {
        addLegalTypedData(eltTy, begin, begin + eltSize);
        begin += eltSize;
      }
      assert(begin == end);
      return;
    }

    return addOpaqueData(begin, end);
  }

  addEntry(type, begin, end);
}

void SwiftAggLowering::addEntry(llvm::Type *type,
                                CharUnits begin, CharUnits end) {
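  // Overlaps with existing entries are resolved below by preferring a common
  // type where one exists, splitting vector types into their elements, and
  // otherwise falling back to opaque (untyped) storage.
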
  assert((!type ||
          (!isa<llvm::StructType>(type) && !isa<llvm::ArrayType>(type))) &&
         "cannot add aggregate-typed data");
  assert(!type || begin.isMultipleOf(getNaturalAlignment(CGM, type)));

  // Fast path: we can just add entries to the end.
  if (Entries.empty() || Entries.back().End <= begin) {
    Entries.push_back({begin, end, type});
    return;
  }

  // Find the first existing entry that ends after the start of the new data.
  // TODO: do a binary search if Entries is big enough for it to matter.
  size_t index = Entries.size() - 1;
  while (index != 0) {
    if (Entries[index - 1].End <= begin) break;
    --index;
  }

  // The entry ends after the start of the new data.
  // If the entry starts after the end of the new data, there's no conflict.
  if (Entries[index].Begin >= end) {
    // This insertion is potentially O(n), but the way we generally build
    // these layouts makes that unlikely to matter: we'd need a union of
    // several very large types.
    Entries.insert(Entries.begin() + index, {begin, end, type});
    return;
  }

  // Otherwise, the ranges overlap.  The new range might also overlap
  // with later ranges.
restartAfterSplit:

  // Simplest case: an exact overlap.
  if (Entries[index].Begin == begin && Entries[index].End == end) {
    // If the types match exactly, great.
    if (Entries[index].Type == type) return;

    // If either type is opaque, make the entry opaque and return.
    if (Entries[index].Type == nullptr) {
      return;
    } else if (type == nullptr) {
      Entries[index].Type = nullptr;
      return;
    }

    // If they disagree in an ABI-agnostic way, just resolve the conflict
    // arbitrarily.
    if (auto entryType = getCommonType(Entries[index].Type, type)) {
      Entries[index].Type = entryType;
      return;
    }

    // Otherwise, make the entry opaque.
    Entries[index].Type = nullptr;
    return;
  }

  // Okay, we have an overlapping conflict of some sort.

  // If we have a vector type, split it.
  if (auto vecTy = dyn_cast_or_null<llvm::VectorType>(type)) {
    auto eltTy = vecTy->getElementType();
    CharUnits eltSize =
        (end - begin) / cast<llvm::FixedVectorType>(vecTy)->getNumElements();
    assert(eltSize == getTypeStoreSize(CGM, eltTy));
    for (unsigned i = 0,
                  e = cast<llvm::FixedVectorType>(vecTy)->getNumElements();
         i != e; ++i) {
      addEntry(eltTy, begin, begin + eltSize);
      begin += eltSize;
    }
    assert(begin == end);
    return;
  }

  // If the entry is a vector type, split it and try again.
  if (Entries[index].Type && Entries[index].Type->isVectorTy()) {
    splitVectorEntry(index);
    goto restartAfterSplit;
  }

  // Okay, we have no choice but to make the existing entry opaque.

  Entries[index].Type = nullptr;

  // Stretch the start of the entry to the beginning of the range.
  if (begin < Entries[index].Begin) {
    Entries[index].Begin = begin;
    assert(index == 0 || begin >= Entries[index - 1].End);
  }

  // Stretch the end of the entry to the end of the range; but if we run
  // into the start of the next entry, just leave the range there and repeat.
  while (end > Entries[index].End) {
    assert(Entries[index].Type == nullptr);

    // If the range doesn't overlap the next entry, we're done.
    if (index == Entries.size() - 1 || end <= Entries[index + 1].Begin) {
      Entries[index].End = end;
      break;
    }

    // Otherwise, stretch to the start of the next entry.
    Entries[index].End = Entries[index + 1].Begin;

    // Continue with the next entry.
    index++;

    // This entry needs to be made opaque if it is not already.
    if (Entries[index].Type == nullptr)
      continue;

    // Split vector entries unless we completely subsume them.
    if (Entries[index].Type->isVectorTy() &&
        end < Entries[index].End) {
      splitVectorEntry(index);
    }

    // Make the entry opaque.
    Entries[index].Type = nullptr;
  }
}

/// Replace the entry of vector type at offset 'index' with a sequence
/// of its component vectors.
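///
/// For example (illustrative): a <4 x float> entry becomes two <2 x float>
/// entries if the target accepts <2 x float> as a legal vector, and four
/// float entries otherwise.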
void SwiftAggLowering::splitVectorEntry(unsigned index) {
  auto vecTy = cast<llvm::VectorType>(Entries[index].Type);
  auto split = splitLegalVectorType(CGM, Entries[index].getWidth(), vecTy);

  auto eltTy = split.first;
  CharUnits eltSize = getTypeStoreSize(CGM, eltTy);
  auto numElts = split.second;
  Entries.insert(Entries.begin() + index + 1, numElts - 1, StorageEntry());

  CharUnits begin = Entries[index].Begin;
  for (unsigned i = 0; i != numElts; ++i) {
    unsigned idx = index + i;
    Entries[idx].Type = eltTy;
    Entries[idx].Begin = begin;
    Entries[idx].End = begin + eltSize;
    begin += eltSize;
  }
}

/// Given a power-of-two unit size, return the offset of the aligned unit
/// of that size which contains the given offset.
///
/// In other words, round down to the nearest multiple of the unit size.
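///
/// For example, with an 8-byte unit, offsets 0..7 map to 0 and offsets
/// 8..15 map to 8.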
static CharUnits getOffsetAtStartOfUnit(CharUnits offset, CharUnits unitSize) {
  assert(isPowerOf2(unitSize.getQuantity()));
  auto unitMask = ~(unitSize.getQuantity() - 1);
  return CharUnits::fromQuantity(offset.getQuantity() & unitMask);
}

static bool areBytesInSameUnit(CharUnits first, CharUnits second,
                               CharUnits chunkSize) {
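  // For example, with 8-byte units, offsets 3 and 5 fall in the same unit,
  // while offsets 7 and 8 do not.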
  return getOffsetAtStartOfUnit(first, chunkSize)
      == getOffsetAtStartOfUnit(second, chunkSize);
}

static bool isMergeableEntryType(llvm::Type *type) {
  // Opaquely-typed memory is always mergeable.
  if (type == nullptr) return true;

  // Pointers and integers are always mergeable.  In theory we should not
  // merge pointers, but (1) it doesn't currently matter in practice because
  // the chunk size is never greater than the size of a pointer and (2)
  // Swift IRGen uses integer types for a lot of things that are "really"
  // just storing pointers (like Optional<SomePointer>).  If we ever have a
  // target that would otherwise combine pointers, we should put some effort
  // into fixing those cases in Swift IRGen and then call out pointer types
  // here.

  // Floating-point and vector types should never be merged.
  // Most such types are too large and highly-aligned to ever trigger merging
  // in practice, but it's important for the rule to cover at least 'half'
  // and 'float', as well as things like small vectors of 'i1' or 'i8'.
  return (!type->isFloatingPointTy() && !type->isVectorTy());
}

bool SwiftAggLowering::shouldMergeEntries(const StorageEntry &first,
                                          const StorageEntry &second,
                                          CharUnits chunkSize) {
  // Only merge entries that overlap the same chunk.  We test this first,
  // even though it's a bit more expensive, because it's the condition that
  // tends to prevent merging.
  if (!areBytesInSameUnit(first.End - CharUnits::One(), second.Begin,
                          chunkSize))
    return false;

  return (isMergeableEntryType(first.Type) &&
          isMergeableEntryType(second.Type));
}

void SwiftAggLowering::finish() {
  if (Entries.empty()) {
    Finished = true;
    return;
  }

  // We logically split the layout down into a series of chunks of this size,
  // which is generally the size of a pointer.
  const CharUnits chunkSize = getMaximumVoluntaryIntegerSize(CGM);

  // First pass: if two entries should be merged, make them both opaque
  // and stretch one to meet the next.
  // Also, remember if there are any opaque entries.
  bool hasOpaqueEntries = (Entries[0].Type == nullptr);
  for (size_t i = 1, e = Entries.size(); i != e; ++i) {
    if (shouldMergeEntries(Entries[i - 1], Entries[i], chunkSize)) {
      Entries[i - 1].Type = nullptr;
      Entries[i].Type = nullptr;
      Entries[i - 1].End = Entries[i].Begin;
      hasOpaqueEntries = true;

    } else if (Entries[i].Type == nullptr) {
      hasOpaqueEntries = true;
    }
  }

  // The rest of the algorithm leaves non-opaque entries alone, so if we
  // have no opaque entries, we're done.
  if (!hasOpaqueEntries) {
    Finished = true;
    return;
  }

  // Okay, move the entries to a temporary and rebuild Entries.
  auto orig = std::move(Entries);
  assert(Entries.empty());

  for (size_t i = 0, e = orig.size(); i != e; ++i) {
    // Just copy over non-opaque entries.
    if (orig[i].Type != nullptr) {
      Entries.push_back(orig[i]);
      continue;
    }

    // Scan forward to determine the full extent of the next opaque range.
    // We know from the first pass that only contiguous ranges will overlap
    // the same aligned chunk.
    auto begin = orig[i].Begin;
    auto end = orig[i].End;
    while (i + 1 != e &&
           orig[i + 1].Type == nullptr &&
           end == orig[i + 1].Begin) {
      end = orig[i + 1].End;
      i++;
    }

    // Add an entry per intersected chunk.
    do {
      // Find the smallest aligned storage unit in the maximal aligned
      // storage unit containing 'begin' that contains all the bytes in
      // the intersection between the range and this chunk.
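      //
      // For example (illustrative, with 8-byte chunks): an opaque range
      // covering bytes [2, 10) yields an i64 entry at [0, 8) on the first
      // iteration and an i16 entry at [8, 10) on the second.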
      CharUnits localBegin = begin;
      CharUnits chunkBegin = getOffsetAtStartOfUnit(localBegin, chunkSize);
      CharUnits chunkEnd = chunkBegin + chunkSize;
      CharUnits localEnd = std::min(end, chunkEnd);

      // Just do a simple loop over ever-increasing unit sizes.
      CharUnits unitSize = CharUnits::One();
      CharUnits unitBegin, unitEnd;
      for (; ; unitSize *= 2) {
        assert(unitSize <= chunkSize);
        unitBegin = getOffsetAtStartOfUnit(localBegin, unitSize);
        unitEnd = unitBegin + unitSize;
        if (unitEnd >= localEnd) break;
      }

      // Add an entry for this unit.
      auto entryTy =
        llvm::IntegerType::get(CGM.getLLVMContext(),
                               CGM.getContext().toBits(unitSize));
      Entries.push_back({unitBegin, unitEnd, entryTy});

      // The next chunk starts where this chunk left off.
      begin = localEnd;
    } while (begin != end);
  }

  // Okay, finally finished.
  Finished = true;
}

void SwiftAggLowering::enumerateComponents(EnumerationCallback callback) const {
  assert(Finished && "haven't yet finished lowering");

  for (auto &entry : Entries) {
    callback(entry.Begin, entry.End, entry.Type);
  }
}

std::pair<llvm::StructType*, llvm::Type*>
SwiftAggLowering::getCoerceAndExpandTypes() const {
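  // The coercion type built below is a struct whose fields line up with the
  // computed layout, using explicit [N x i8] arrays for padding between
  // entries; the unpadded type carries just the entry types themselves.
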
  assert(Finished && "haven't yet finished lowering");

  auto &ctx = CGM.getLLVMContext();

  if (Entries.empty()) {
    auto type = llvm::StructType::get(ctx);
    return { type, type };
  }

  SmallVector<llvm::Type*, 8> elts;
  CharUnits lastEnd = CharUnits::Zero();
  bool hasPadding = false;
  bool packed = false;
  for (auto &entry : Entries) {
    if (entry.Begin != lastEnd) {
      auto paddingSize = entry.Begin - lastEnd;
      assert(!paddingSize.isNegative());

      auto padding = llvm::ArrayType::get(llvm::Type::getInt8Ty(ctx),
                                          paddingSize.getQuantity());
      elts.push_back(padding);
      hasPadding = true;
    }

    if (!packed && !entry.Begin.isMultipleOf(
          CharUnits::fromQuantity(
            CGM.getDataLayout().getABITypeAlignment(entry.Type))))
      packed = true;

    elts.push_back(entry.Type);

    lastEnd = entry.Begin + getTypeAllocSize(CGM, entry.Type);
    assert(entry.End <= lastEnd);
  }

  // We don't need to adjust 'packed' to deal with possible tail padding
  // because we never do that kind of access through the coercion type.
  auto coercionType = llvm::StructType::get(ctx, elts, packed);

  llvm::Type *unpaddedType = coercionType;
  if (hasPadding) {
    elts.clear();
    for (auto &entry : Entries) {
      elts.push_back(entry.Type);
    }
    if (elts.size() == 1) {
      unpaddedType = elts[0];
    } else {
      unpaddedType = llvm::StructType::get(ctx, elts, /*packed*/ false);
    }
  } else if (Entries.size() == 1) {
    unpaddedType = Entries[0].Type;
  }

  return { coercionType, unpaddedType };
}

bool SwiftAggLowering::shouldPassIndirectly(bool asReturnValue) const {
  assert(Finished && "haven't yet finished lowering");

  // Empty types don't need to be passed indirectly.
  if (Entries.empty()) return false;

  // Avoid copying the array of types when there's just a single element.
  if (Entries.size() == 1) {
    return getSwiftABIInfo(CGM).shouldPassIndirectlyForSwift(
        Entries.back().Type, asReturnValue);
  }

  SmallVector<llvm::Type*, 8> componentTys;
  componentTys.reserve(Entries.size());
  for (auto &entry : Entries) {
    componentTys.push_back(entry.Type);
  }
  return getSwiftABIInfo(CGM).shouldPassIndirectlyForSwift(componentTys,
                                                           asReturnValue);
}

bool swiftcall::shouldPassIndirectly(CodeGenModule &CGM,
                                     ArrayRef<llvm::Type*> componentTys,
                                     bool asReturnValue) {
  return getSwiftABIInfo(CGM).shouldPassIndirectlyForSwift(componentTys,
                                                           asReturnValue);
}

CharUnits swiftcall::getMaximumVoluntaryIntegerSize(CodeGenModule &CGM) {
  // Currently always the size of an ordinary pointer.
  return CGM.getContext().toCharUnitsFromBits(
           CGM.getContext().getTargetInfo().getPointerWidth(0));
}

CharUnits swiftcall::getNaturalAlignment(CodeGenModule &CGM, llvm::Type *type) {
  // For Swift's purposes, this is always just the store size of the type
  // rounded up to a power of 2.
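  //
  // For example, a type with a 12-byte store size gets a natural alignment
  // of 16 here.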
  auto size = (unsigned long long) getTypeStoreSize(CGM, type).getQuantity();
  if (!isPowerOf2(size)) {
    size = 1ULL << (llvm::findLastSet(size, llvm::ZB_Undefined) + 1);
  }
  assert(size >= CGM.getDataLayout().getABITypeAlignment(type));
  return CharUnits::fromQuantity(size);
}

bool swiftcall::isLegalIntegerType(CodeGenModule &CGM,
                                   llvm::IntegerType *intTy) {
  auto size = intTy->getBitWidth();
  switch (size) {
  case 1:
  case 8:
  case 16:
  case 32:
  case 64:
    // Just assume that the above are always legal.
    return true;

  case 128:
    return CGM.getContext().getTargetInfo().hasInt128Type();

  default:
    return false;
  }
}

bool swiftcall::isLegalVectorType(CodeGenModule &CGM, CharUnits vectorSize,
                                  llvm::VectorType *vectorTy) {
  return isLegalVectorType(
      CGM, vectorSize, vectorTy->getElementType(),
      cast<llvm::FixedVectorType>(vectorTy)->getNumElements());
}

bool swiftcall::isLegalVectorType(CodeGenModule &CGM, CharUnits vectorSize,
                                  llvm::Type *eltTy, unsigned numElts) {
  assert(numElts > 1 && "illegal vector length");
  return getSwiftABIInfo(CGM)
           .isLegalVectorTypeForSwift(vectorSize, eltTy, numElts);
}

std::pair<llvm::Type*, unsigned>
swiftcall::splitLegalVectorType(CodeGenModule &CGM, CharUnits vectorSize,
                                llvm::VectorType *vectorTy) {
  auto numElts = cast<llvm::FixedVectorType>(vectorTy)->getNumElements();
  auto eltTy = vectorTy->getElementType();

  // Try to split the vector type in half.
  if (numElts >= 4 && isPowerOf2(numElts)) {
    if (isLegalVectorType(CGM, vectorSize / 2, eltTy, numElts / 2))
      return {llvm::FixedVectorType::get(eltTy, numElts / 2), 2};
  }

  return {eltTy, numElts};
}

void swiftcall::legalizeVectorType(CodeGenModule &CGM, CharUnits origVectorSize,
                                   llvm::VectorType *origVectorTy,
                             llvm::SmallVectorImpl<llvm::Type*> &components) {
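  // The overall effect, by example (a sketch assuming the target accepts
  // <4 x float> and <2 x float> but not <3 x float> or <7 x float>):
  // a <7 x float> is legalized to { <4 x float>, <2 x float>, float }.
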
  // If it's already a legal vector type, use it.
  if (isLegalVectorType(CGM, origVectorSize, origVectorTy)) {
    components.push_back(origVectorTy);
    return;
  }

  // Try to split the vector into legal subvectors.
  auto numElts = cast<llvm::FixedVectorType>(origVectorTy)->getNumElements();
  auto eltTy = origVectorTy->getElementType();
  assert(numElts != 1);

  // The largest size that we're still considering making subvectors of.
  // Always a power of 2.
  unsigned logCandidateNumElts = llvm::findLastSet(numElts, llvm::ZB_Undefined);
  unsigned candidateNumElts = 1U << logCandidateNumElts;
  assert(candidateNumElts <= numElts && candidateNumElts * 2 > numElts);

  // Minor optimization: don't check the legality of this exact size twice.
  if (candidateNumElts == numElts) {
    logCandidateNumElts--;
    candidateNumElts >>= 1;
  }

  CharUnits eltSize = (origVectorSize / numElts);
  CharUnits candidateSize = eltSize * candidateNumElts;

  // The sensibility of this algorithm relies on the fact that we never
  // have a legal non-power-of-2 vector size without having the power of 2
  // also be legal.
  while (logCandidateNumElts > 0) {
    assert(candidateNumElts == 1U << logCandidateNumElts);
    assert(candidateNumElts <= numElts);
    assert(candidateSize == eltSize * candidateNumElts);

    // Skip illegal vector sizes.
    if (!isLegalVectorType(CGM, candidateSize, eltTy, candidateNumElts)) {
      logCandidateNumElts--;
      candidateNumElts /= 2;
      candidateSize /= 2;
      continue;
    }

    // Add the right number of vectors of this size.
    auto numVecs = numElts >> logCandidateNumElts;
    components.append(numVecs,
                      llvm::FixedVectorType::get(eltTy, candidateNumElts));
    numElts -= (numVecs << logCandidateNumElts);

    if (numElts == 0) return;

    // It's possible that the number of elements remaining will be legal.
    // This can happen with e.g. <7 x float> when <3 x float> is legal.
    // This only needs to be separately checked if it's not a power of 2.
    if (numElts > 2 && !isPowerOf2(numElts) &&
        isLegalVectorType(CGM, eltSize * numElts, eltTy, numElts)) {
      components.push_back(llvm::FixedVectorType::get(eltTy, numElts));
      return;
    }

    // Bring vecSize down to something no larger than numElts.
    do {
      logCandidateNumElts--;
      candidateNumElts /= 2;
      candidateSize /= 2;
    } while (candidateNumElts > numElts);
  }

  // Otherwise, just append a bunch of individual elements.
  components.append(numElts, eltTy);
}

bool swiftcall::mustPassRecordIndirectly(CodeGenModule &CGM,
                                         const RecordDecl *record) {
  // FIXME: should we not rely on the standard computation in Sema, just in
  // case we want to diverge from the platform ABI (e.g. on targets where
  // that uses the MSVC rule)?
  return !record->canPassInRegisters();
}

static ABIArgInfo classifyExpandedType(SwiftAggLowering &lowering,
                                       bool forReturn,
                                       CharUnits alignmentForIndirect) {
  if (lowering.empty()) {
    return ABIArgInfo::getIgnore();
  } else if (lowering.shouldPassIndirectly(forReturn)) {
    return ABIArgInfo::getIndirect(alignmentForIndirect, /*byval*/ false);
  } else {
    auto types = lowering.getCoerceAndExpandTypes();
    return ABIArgInfo::getCoerceAndExpand(types.first, types.second);
  }
}

static ABIArgInfo classifyType(CodeGenModule &CGM, CanQualType type,
                               bool forReturn) {
  if (auto recordType = dyn_cast<RecordType>(type)) {
    auto record = recordType->getDecl();
    auto &layout = CGM.getContext().getASTRecordLayout(record);

    if (mustPassRecordIndirectly(CGM, record))
      return ABIArgInfo::getIndirect(layout.getAlignment(), /*byval*/ false);

    SwiftAggLowering lowering(CGM);
    lowering.addTypedData(recordType->getDecl(), CharUnits::Zero(), layout);
    lowering.finish();

    return classifyExpandedType(lowering, forReturn, layout.getAlignment());
  }

  // Just assume that all of our target ABIs can support returning at least
  // two integer or floating-point values.
  if (isa<ComplexType>(type)) {
    return (forReturn ? ABIArgInfo::getDirect() : ABIArgInfo::getExpand());
  }

  // Vector types may need to be legalized.
  if (isa<VectorType>(type)) {
    SwiftAggLowering lowering(CGM);
    lowering.addTypedData(type, CharUnits::Zero());
    lowering.finish();

    CharUnits alignment = CGM.getContext().getTypeAlignInChars(type);
    return classifyExpandedType(lowering, forReturn, alignment);
  }

  // Member pointer types need to be expanded, but it's a simple form of
  // expansion that 'Direct' can handle.  Note that CanBeFlattened should be
  // true for this to work.

  // 'void' needs to be ignored.
  if (type->isVoidType()) {
    return ABIArgInfo::getIgnore();
  }

  // Everything else can be passed directly.
  return ABIArgInfo::getDirect();
}

ABIArgInfo swiftcall::classifyReturnType(CodeGenModule &CGM, CanQualType type) {
  return classifyType(CGM, type, /*forReturn*/ true);
}

ABIArgInfo swiftcall::classifyArgumentType(CodeGenModule &CGM,
                                           CanQualType type) {
  return classifyType(CGM, type, /*forReturn*/ false);
}

void swiftcall::computeABIInfo(CodeGenModule &CGM, CGFunctionInfo &FI) {
  auto &retInfo = FI.getReturnInfo();
  retInfo = classifyReturnType(CGM, FI.getReturnType());

  for (unsigned i = 0, e = FI.arg_size(); i != e; ++i) {
    auto &argInfo = FI.arg_begin()[i];
    argInfo.info = classifyArgumentType(CGM, argInfo.type);
  }
}

/// Is swifterror lowered to a register by the target ABI?
bool swiftcall::isSwiftErrorLoweredInRegister(CodeGenModule &CGM) {
  return getSwiftABIInfo(CGM).isSwiftErrorInRegister();
}