1//===- UnwindInfoSection.cpp ----------------------------------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8
9#include "UnwindInfoSection.h"
10#include "InputSection.h"
11#include "Layout.h"
12#include "OutputSection.h"
13#include "OutputSegment.h"
14#include "SymbolTable.h"
15#include "Symbols.h"
16#include "SyntheticSections.h"
17#include "Target.h"
18
19#include "lld/Common/ErrorHandler.h"
20#include "lld/Common/Memory.h"
21#include "llvm/ADT/DenseMap.h"
22#include "llvm/ADT/STLExtras.h"
23#include "llvm/BinaryFormat/MachO.h"
24#include "llvm/Support/Parallel.h"
25
26#include "mach-o/compact_unwind_encoding.h"
27
28#include <numeric>
29
30using namespace llvm;
31using namespace llvm::MachO;
32using namespace llvm::support::endian;
33using namespace lld;
34using namespace lld::macho;
35
36#define COMMON_ENCODINGS_MAX 127
37#define COMPACT_ENCODINGS_MAX 256
38
39#define SECOND_LEVEL_PAGE_BYTES 4096
40#define SECOND_LEVEL_PAGE_WORDS (SECOND_LEVEL_PAGE_BYTES / sizeof(uint32_t))
41#define REGULAR_SECOND_LEVEL_ENTRIES_MAX                                       \
42  ((SECOND_LEVEL_PAGE_BYTES -                                                  \
43    sizeof(unwind_info_regular_second_level_page_header)) /                    \
44   sizeof(unwind_info_regular_second_level_entry))
45#define COMPRESSED_SECOND_LEVEL_ENTRIES_MAX                                    \
46  ((SECOND_LEVEL_PAGE_BYTES -                                                  \
47    sizeof(unwind_info_compressed_second_level_page_header)) /                 \
48   sizeof(uint32_t))
49
50#define COMPRESSED_ENTRY_FUNC_OFFSET_BITS 24
51#define COMPRESSED_ENTRY_FUNC_OFFSET_MASK                                      \
52  UNWIND_INFO_COMPRESSED_ENTRY_FUNC_OFFSET(~0)
53
54static_assert(static_cast<uint32_t>(UNWIND_X86_64_DWARF_SECTION_OFFSET) ==
55                  static_cast<uint32_t>(UNWIND_ARM64_DWARF_SECTION_OFFSET) &&
56              static_cast<uint32_t>(UNWIND_X86_64_DWARF_SECTION_OFFSET) ==
57                  static_cast<uint32_t>(UNWIND_X86_DWARF_SECTION_OFFSET));
58
59constexpr uint64_t DWARF_SECTION_OFFSET = UNWIND_X86_64_DWARF_SECTION_OFFSET;
60
61// Compact Unwind format is a Mach-O evolution of DWARF Unwind that
62// optimizes space and exception-time lookup.  Most DWARF unwind
63// entries can be replaced with Compact Unwind entries, but the ones
64// that cannot are retained in DWARF form.
65//
66// This comment will address macro-level organization of the pre-link
67// and post-link compact unwind tables. For micro-level organization
68// pertaining to the bitfield layout of the 32-bit compact unwind
69// entries, see libunwind/include/mach-o/compact_unwind_encoding.h
70//
71// Important clarifying factoids:
72//
73// * __LD,__compact_unwind is the compact unwind format for compiler
74// output and linker input. It is never a final output. It could be
75// an intermediate output with the `-r` option which retains relocs.
76//
77// * __TEXT,__unwind_info is the compact unwind format for final
78// linker output. It is never an input.
79//
80// * __TEXT,__eh_frame is the DWARF format for both linker input and output.
81//
82// * __TEXT,__unwind_info entries are divided into 4 KiB pages (2nd
83// level) by ascending address, and the pages are referenced by an
84// index (1st level) in the section header.
85//
86// * Following the headers in __TEXT,__unwind_info, the bulk of the
87// section contains a vector of compact unwind entries
88// `{functionOffset, encoding}` sorted by ascending `functionOffset`.
89// Adjacent entries with the same encoding can be folded to great
90// advantage, achieving a 3-order-of-magnitude reduction in the
91// number of entries.
92//
93// Refer to the definition of unwind_info_section_header in
94// compact_unwind_encoding.h for an overview of the format we are encoding
95// here.
96
97// TODO(gkm): how do we align the 2nd-level pages?
98
99// The various fields in the on-disk representation of each compact unwind
100// entry.
101#define FOR_EACH_CU_FIELD(DO)                                                  \
102  DO(Ptr, functionAddress)                                                     \
103  DO(uint32_t, functionLength)                                                 \
104  DO(compact_unwind_encoding_t, encoding)                                      \
105  DO(Ptr, personality)                                                         \
106  DO(Ptr, lsda)
107
108CREATE_LAYOUT_CLASS(CompactUnwind, FOR_EACH_CU_FIELD);
109
110#undef FOR_EACH_CU_FIELD
111
// LLD's internal representation of a compact unwind entry.
struct CompactUnwindEntry {
  // Final output VA of the function this entry covers.
  uint64_t functionAddress;
  // Length in bytes of the covered function.
  uint32_t functionLength;
  compact_unwind_encoding_t encoding;
  // Canonicalized personality symbol, or null if the entry has none.
  Symbol *personality;
  // LSDA section for this entry, or null. Entries with an LSDA cannot be
  // folded with their neighbors.
  InputSection *lsda;
};
120
// Maps an encoding to its index in an encodings table.
using EncodingMap = DenseMap<compact_unwind_encoding_t, size_t>;

// Bookkeeping for one 4 KiB second-level page of the output __unwind_info.
struct SecondLevelPage {
  // UNWIND_SECOND_LEVEL_REGULAR or UNWIND_SECOND_LEVEL_COMPRESSED.
  uint32_t kind;
  // Index into cuIndices of the first entry on this page.
  size_t entryIndex;
  // Number of entries on this page.
  size_t entryCount;
  size_t byteCount;
  // Page-local encodings (indexes 127..255) used by compressed pages only.
  std::vector<compact_unwind_encoding_t> localEncodings;
  EncodingMap localEncodingIndexes;
};
131
// UnwindInfoSectionImpl allows us to avoid cluttering our header file with a
// lengthy definition of UnwindInfoSection.
class UnwindInfoSectionImpl final : public UnwindInfoSection {
public:
  UnwindInfoSectionImpl() : cuLayout(target->wordSize) {}
  uint64_t getSize() const override { return unwindInfoSize; }
  void prepare() override;
  void finalize() override;
  void writeTo(uint8_t *buf) const override;

private:
  void prepareRelocations(ConcatInputSection *);
  void relocateCompactUnwind(std::vector<CompactUnwindEntry> &);
  void encodePersonalities();
  Symbol *canonicalizePersonality(Symbol *);

  // Total byte size of the output section; computed by finalize().
  uint64_t unwindInfoSize = 0;
  // symbols, moved into a plain vector (indexed access) by finalize().
  SmallVector<decltype(symbols)::value_type, 0> symbolsVec;
  // Field offsets of the pre-link __compact_unwind entry for this target.
  CompactUnwindLayout cuLayout;
  // Encodings shared across pages, sorted by descending frequency.
  std::vector<std::pair<compact_unwind_encoding_t, size_t>> commonEncodings;
  EncodingMap commonEncodingIndexes;
  // The entries here will be in the same order as their originating symbols
  // in symbolsVec.
  std::vector<CompactUnwindEntry> cuEntries;
  // Indices into the cuEntries vector.
  std::vector<size_t> cuIndices;
  // At most three personality pointers, referenced by 2-bit indices.
  std::vector<Symbol *> personalities;
  // Canonical personality symbol for each (section, offset) address.
  SmallDenseMap<std::pair<InputSection *, uint64_t /* addend */>, Symbol *>
      personalityTable;
  // Indices into cuEntries for CUEs with a non-null LSDA.
  std::vector<size_t> entriesWithLsda;
  // Map of cuEntries index to an index within the LSDA array.
  DenseMap<size_t, uint32_t> lsdaIndex;
  std::vector<SecondLevelPage> secondLevelPages;
  // Byte offset within the section where the second-level pages begin.
  uint64_t level2PagesOffset = 0;
  // The highest-address function plus its size. The unwinder needs this to
  // determine the address range that is covered by unwind info.
  uint64_t cueEndBoundary = 0;
};
171
// __unwind_info lives in the __TEXT segment and only needs 4-byte alignment:
// its contents are arrays of 32-bit words.
UnwindInfoSection::UnwindInfoSection()
    : SyntheticSection(segment_names::text, section_names::unwindInfo) {
  align = 4;
}
176
177// Record function symbols that may need entries emitted in __unwind_info, which
178// stores unwind data for address ranges.
179//
180// Note that if several adjacent functions have the same unwind encoding and
181// personality function and no LSDA, they share one unwind entry. For this to
182// work, functions without unwind info need explicit "no unwind info" unwind
183// entries -- else the unwinder would think they have the unwind info of the
184// closest function with unwind info right before in the image. Thus, we add
185// function symbols for each unique address regardless of whether they have
186// associated unwind info.
187void UnwindInfoSection::addSymbol(const Defined *d) {
188  if (d->unwindEntry)
189    allEntriesAreOmitted = false;
190  // We don't yet know the final output address of this symbol, but we know that
191  // they are uniquely determined by a combination of the isec and value, so
192  // we use that as the key here.
193  auto p = symbols.insert({{d->isec, d->value}, d});
194  // If we have multiple symbols at the same address, only one of them can have
195  // an associated unwind entry.
196  if (!p.second && d->unwindEntry) {
197    assert(p.first->second == d || !p.first->second->unwindEntry);
198    p.first->second = d;
199  }
200}
201
202void UnwindInfoSectionImpl::prepare() {
203  // This iteration needs to be deterministic, since prepareRelocations may add
204  // entries to the GOT. Hence the use of a MapVector for
205  // UnwindInfoSection::symbols.
206  for (const Defined *d : make_second_range(symbols))
207    if (d->unwindEntry) {
208      if (d->unwindEntry->getName() == section_names::compactUnwind) {
209        prepareRelocations(d->unwindEntry);
210      } else {
211        // We don't have to add entries to the GOT here because FDEs have
212        // explicit GOT relocations, so Writer::scanRelocations() will add those
213        // GOT entries. However, we still need to canonicalize the personality
214        // pointers (like prepareRelocations() does for CU entries) in order
215        // to avoid overflowing the 3-personality limit.
216        FDE &fde = cast<ObjFile>(d->getFile())->fdes[d->unwindEntry];
217        fde.personality = canonicalizePersonality(fde.personality);
218      }
219    }
220}
221
// Compact unwind relocations have different semantics, so we handle them in a
// separate code path from regular relocations. First, we do not wish to add
// rebase opcodes for __LD,__compact_unwind, because that section doesn't
// actually end up in the final binary. Second, personality pointers always
// reside in the GOT and must be treated specially.
void UnwindInfoSectionImpl::prepareRelocations(ConcatInputSection *isec) {
  assert(!isec->shouldOmitFromOutput() &&
         "__compact_unwind section should not be omitted");

  // FIXME: Make this skip relocations for CompactUnwindEntries that
  // point to dead-stripped functions. That might save some amount of
  // work. But since there are usually just few personality functions
  // that are referenced from many places, at least some of them likely
  // live, it wouldn't reduce number of got entries.
  for (size_t i = 0; i < isec->relocs.size(); ++i) {
    Reloc &r = isec->relocs[i];
    assert(target->hasAttr(r.type, RelocAttrBits::UNSIGNED));
    // Since compact unwind sections aren't part of the inputSections vector,
    // they don't get canonicalized by scanRelocations(), so we have to do the
    // canonicalization here.
    if (auto *referentIsec = r.referent.dyn_cast<InputSection *>())
      r.referent = referentIsec->canonical();

    // Functions and LSDA entries always reside in the same object file as the
    // compact unwind entries that references them, and thus appear as section
    // relocs. There is no need to prepare them. We only prepare relocs for
    // personality functions.
    if (r.offset != cuLayout.personalityOffset)
      continue;

    if (auto *s = r.referent.dyn_cast<Symbol *>()) {
      // Personality functions are nearly always system-defined (e.g.,
      // ___gxx_personality_v0 for C++) and relocated as dylib symbols.  When an
      // application provides its own personality function, it might be
      // referenced by an extern Defined symbol reloc, or a local section reloc.
      if (auto *defined = dyn_cast<Defined>(s)) {
        // XXX(vyng) This is a special case for handling duplicate personality
        // symbols. Note that LD64's behavior is a bit different and it is
        // inconsistent with how symbol resolution usually work
        //
        // So we've decided not to follow it. Instead, simply pick the symbol
        // with the same name from the symbol table to replace the local one.
        //
        // (See discussions/alternatives already considered on D107533)
        if (!defined->isExternal())
          if (Symbol *sym = symtab->find(defined->getName()))
            if (!sym->isLazy())
              r.referent = s = sym;
      }
      if (auto *undefined = dyn_cast<Undefined>(s)) {
        treatUndefinedSymbol(*undefined, isec, r.offset);
        // treatUndefinedSymbol() can replace s with a DylibSymbol; re-check.
        if (isa<Undefined>(s))
          continue;
      }

      // Similar to canonicalizePersonality(), but we also register a GOT entry.
      if (auto *defined = dyn_cast<Defined>(s)) {
        // Check if we have created a synthetic symbol at the same address.
        Symbol *&personality =
            personalityTable[{defined->isec, defined->value}];
        if (personality == nullptr) {
          // First personality seen at this address: it becomes canonical.
          personality = defined;
          in.got->addEntry(defined);
        } else if (personality != defined) {
          // Redirect the reloc to the canonical symbol so all entries at this
          // address share one personality slot.
          r.referent = personality;
        }
        continue;
      }

      assert(isa<DylibSymbol>(s));
      in.got->addEntry(s);
      continue;
    }

    if (auto *referentIsec = r.referent.dyn_cast<InputSection *>()) {
      assert(!isCoalescedWeak(referentIsec));
      // Personality functions can be referenced via section relocations
      // if they live in the same object file. Create placeholder synthetic
      // symbols for them in the GOT.
      Symbol *&s = personalityTable[{referentIsec, r.addend}];
      if (s == nullptr) {
        // This runs after dead stripping, so the noDeadStrip argument does not
        // matter.
        s = make<Defined>("<internal>", /*file=*/nullptr, referentIsec,
                          r.addend, /*size=*/0, /*isWeakDef=*/false,
                          /*isExternal=*/false, /*isPrivateExtern=*/false,
                          /*includeInSymtab=*/true,
                          /*isReferencedDynamically=*/false,
                          /*noDeadStrip=*/false);
        s->used = true;
        in.got->addEntry(s);
      }
      // The addend has been folded into the synthetic symbol's value, so
      // clear it on the reloc.
      r.referent = s;
      r.addend = 0;
    }
  }
}
320
321Symbol *UnwindInfoSectionImpl::canonicalizePersonality(Symbol *personality) {
322  if (auto *defined = dyn_cast_or_null<Defined>(personality)) {
323    // Check if we have created a synthetic symbol at the same address.
324    Symbol *&synth = personalityTable[{defined->isec, defined->value}];
325    if (synth == nullptr)
326      synth = defined;
327    else if (synth != defined)
328      return synth;
329  }
330  return personality;
331}
332
// We need to apply the relocations to the pre-link compact unwind section
// before converting it to post-link form. There should only be absolute
// relocations here: since we are not emitting the pre-link CU section, there
// is no source address to make a relative location meaningful.
void UnwindInfoSectionImpl::relocateCompactUnwind(
    std::vector<CompactUnwindEntry> &cuEntries) {
  // Entries are independent, so this can run in parallel.
  parallelFor(0, symbolsVec.size(), [&](size_t i) {
    CompactUnwindEntry &cu = cuEntries[i];
    const Defined *d = symbolsVec[i].second;
    cu.functionAddress = d->getVA();
    if (!d->unwindEntry)
      return;

    // If we have DWARF unwind info, create a slimmed-down CU entry that points
    // to it.
    if (d->unwindEntry->getName() == section_names::ehFrame) {
      // The unwinder will look for the DWARF entry starting at the hint,
      // assuming the hint points to a valid CFI record start. If it
      // fails to find the record, it proceeds in a linear search through the
      // contiguous CFI records from the hint until the end of the section.
      // Ideally, in the case where the offset is too large to be encoded, we
      // would instead encode the largest possible offset to a valid CFI record,
      // but since we don't keep track of that, just encode zero -- the start of
      // the section is always the start of a CFI record.
      uint64_t dwarfOffsetHint =
          d->unwindEntry->outSecOff <= DWARF_SECTION_OFFSET
              ? d->unwindEntry->outSecOff
              : 0;
      cu.encoding = target->modeDwarfEncoding | dwarfOffsetHint;
      const FDE &fde = cast<ObjFile>(d->getFile())->fdes[d->unwindEntry];
      cu.functionLength = fde.funcLength;
      // Omit the DWARF personality from compact-unwind entry so that we
      // don't need to encode it.
      cu.personality = nullptr;
      cu.lsda = fde.lsda;
      return;
    }

    assert(d->unwindEntry->getName() == section_names::compactUnwind);

    // NOTE(review): the entry's data appears to exclude the leading
    // functionAddress word (functionLengthOffset == wordSize lands on
    // data.data() after the bias), so bias the pointer back by one word so
    // the cuLayout offsets -- which include that field -- line up. Confirm
    // against how __compact_unwind subsections are split.
    auto buf = reinterpret_cast<const uint8_t *>(d->unwindEntry->data.data()) -
               target->wordSize;
    cu.functionLength =
        support::endian::read32le(buf + cuLayout.functionLengthOffset);
    cu.encoding = support::endian::read32le(buf + cuLayout.encodingOffset);
    // Personality and LSDA fields are relocated, not stored inline.
    for (const Reloc &r : d->unwindEntry->relocs) {
      if (r.offset == cuLayout.personalityOffset)
        cu.personality = r.referent.get<Symbol *>();
      else if (r.offset == cuLayout.lsdaOffset)
        cu.lsda = r.getReferentInputSection();
    }
  });
}
386
387// There should only be a handful of unique personality pointers, so we can
388// encode them as 2-bit indices into a small array.
389void UnwindInfoSectionImpl::encodePersonalities() {
390  for (size_t idx : cuIndices) {
391    CompactUnwindEntry &cu = cuEntries[idx];
392    if (cu.personality == nullptr)
393      continue;
394    // Linear search is fast enough for a small array.
395    auto it = find(personalities, cu.personality);
396    uint32_t personalityIndex; // 1-based index
397    if (it != personalities.end()) {
398      personalityIndex = std::distance(personalities.begin(), it) + 1;
399    } else {
400      personalities.push_back(cu.personality);
401      personalityIndex = personalities.size();
402    }
403    cu.encoding |=
404        personalityIndex << llvm::countr_zero(
405            static_cast<compact_unwind_encoding_t>(UNWIND_PERSONALITY_MASK));
406  }
407  if (personalities.size() > 3)
408    error("too many personalities (" + Twine(personalities.size()) +
409          ") for compact unwind to encode");
410}
411
412static bool canFoldEncoding(compact_unwind_encoding_t encoding) {
413  // From compact_unwind_encoding.h:
414  //  UNWIND_X86_64_MODE_STACK_IND:
415  //  A "frameless" (RBP not used as frame pointer) function large constant
416  //  stack size.  This case is like the previous, except the stack size is too
417  //  large to encode in the compact unwind encoding.  Instead it requires that
418  //  the function contains "subq $nnnnnnnn,RSP" in its prolog.  The compact
419  //  encoding contains the offset to the nnnnnnnn value in the function in
420  //  UNWIND_X86_64_FRAMELESS_STACK_SIZE.
421  // Since this means the unwinder has to look at the `subq` in the function
422  // of the unwind info's unwind address, two functions that have identical
423  // unwind info can't be folded if it's using this encoding since both
424  // entries need unique addresses.
425  static_assert(static_cast<uint32_t>(UNWIND_X86_64_MODE_STACK_IND) ==
426                static_cast<uint32_t>(UNWIND_X86_MODE_STACK_IND));
427  if ((target->cpuType == CPU_TYPE_X86_64 || target->cpuType == CPU_TYPE_X86) &&
428      (encoding & UNWIND_MODE_MASK) == UNWIND_X86_64_MODE_STACK_IND) {
429    // FIXME: Consider passing in the two function addresses and getting
430    // their two stack sizes off the `subq` and only returning false if they're
431    // actually different.
432    return false;
433  }
434  return true;
435}
436
// Scan the __LD,__compact_unwind entries and compute the space needs of
// __TEXT,__unwind_info and __TEXT,__eh_frame.
void UnwindInfoSectionImpl::finalize() {
  if (symbols.empty())
    return;

  // At this point, the address space for __TEXT,__text has been
  // assigned, so we can relocate the __LD,__compact_unwind entries
  // into a temporary buffer. Relocation is necessary in order to sort
  // the CU entries by function address. Sorting is necessary so that
  // we can fold adjacent CU entries with identical encoding+personality
  // and without any LSDA. Folding is necessary because it reduces the
  // number of CU entries by as much as 3 orders of magnitude!
  cuEntries.resize(symbols.size());
  // The "map" part of the symbols MapVector was only needed for deduplication
  // in addSymbol(). Now that we are done adding, move the contents to a plain
  // std::vector for indexed access.
  symbolsVec = symbols.takeVector();
  relocateCompactUnwind(cuEntries);

  // Rather than sort & fold the 32-byte entries directly, we create a
  // vector of indices to entries and sort & fold that instead.
  cuIndices.resize(cuEntries.size());
  std::iota(cuIndices.begin(), cuIndices.end(), 0);
  llvm::sort(cuIndices, [&](size_t a, size_t b) {
    return cuEntries[a].functionAddress < cuEntries[b].functionAddress;
  });

  // Record the ending boundary before we fold the entries.
  cueEndBoundary = cuEntries[cuIndices.back()].functionAddress +
                   cuEntries[cuIndices.back()].functionLength;

  // Fold adjacent entries with matching encoding+personality and without LSDA
  // We use three iterators on the same cuIndices to fold in-situ:
  // (1) `foldBegin` is the first of a potential sequence of matching entries
  // (2) `foldEnd` is the first non-matching entry after `foldBegin`.
  // The semi-open interval [ foldBegin .. foldEnd ) contains a range
  // entries that can be folded into a single entry and written to ...
  // (3) `foldWrite`
  auto foldWrite = cuIndices.begin();
  for (auto foldBegin = cuIndices.begin(); foldBegin < cuIndices.end();) {
    auto foldEnd = foldBegin;
    // Common LSDA encodings (e.g. for C++ and Objective-C) contain offsets from
    // a base address. The base address is normally not contained directly in
    // the LSDA, and in that case, the personality function treats the starting
    // address of the function (which is computed by the unwinder) as the base
    // address and interprets the LSDA accordingly. The unwinder computes the
    // starting address of a function as the address associated with its CU
    // entry. For this reason, we cannot fold adjacent entries if they have an
    // LSDA, because folding would make the unwinder compute the wrong starting
    // address for the functions with the folded entries, which in turn would
    // cause the personality function to misinterpret the LSDA for those
    // functions. In the very rare case where the base address is encoded
    // directly in the LSDA, two functions at different addresses would
    // necessarily have different LSDAs, so their CU entries would not have been
    // folded anyway.
    while (++foldEnd < cuIndices.end() &&
           cuEntries[*foldBegin].encoding == cuEntries[*foldEnd].encoding &&
           !cuEntries[*foldBegin].lsda && !cuEntries[*foldEnd].lsda &&
           // If we've gotten to this point, we don't have an LSDA, which should
           // also imply that we don't have a personality function, since in all
           // likelihood a personality function needs the LSDA to do anything
           // useful. It can be technically valid to have a personality function
           // and no LSDA though (e.g. the C++ personality __gxx_personality_v0
           // is just a no-op without LSDA), so we still check for personality
           // function equivalence to handle that case.
           cuEntries[*foldBegin].personality ==
               cuEntries[*foldEnd].personality &&
           canFoldEncoding(cuEntries[*foldEnd].encoding))
      ;
    *foldWrite++ = *foldBegin;
    foldBegin = foldEnd;
  }
  cuIndices.erase(foldWrite, cuIndices.end());

  encodePersonalities();

  // Count frequencies of the folded encodings
  EncodingMap encodingFrequencies;
  for (size_t idx : cuIndices)
    encodingFrequencies[cuEntries[idx].encoding]++;

  // Make a vector of encodings, sorted by descending frequency
  for (const auto &frequency : encodingFrequencies)
    commonEncodings.emplace_back(frequency);
  llvm::sort(commonEncodings,
             [](const std::pair<compact_unwind_encoding_t, size_t> &a,
                const std::pair<compact_unwind_encoding_t, size_t> &b) {
               if (a.second == b.second)
                 // When frequencies match, secondarily sort on encoding
                 // to maintain parity with validate-unwind-info.py
                 return a.first > b.first;
               return a.second > b.second;
             });

  // Truncate the vector to 127 elements.
  // Common encoding indexes are limited to 0..126, while encoding
  // indexes 127..255 are local to each second-level page
  if (commonEncodings.size() > COMMON_ENCODINGS_MAX)
    commonEncodings.resize(COMMON_ENCODINGS_MAX);

  // Create a map from encoding to common-encoding-table index
  for (size_t i = 0; i < commonEncodings.size(); i++)
    commonEncodingIndexes[commonEncodings[i].first] = i;

  // Split folded encodings into pages, where each page is limited by ...
  // (a) 4 KiB capacity
  // (b) 24-bit difference between first & final function address
  // (c) 8-bit compact-encoding-table index,
  //     for which 0..126 references the global common-encodings table,
  //     and 127..255 references a local per-second-level-page table.
  // First we try the compact format and determine how many entries fit.
  // If more entries fit in the regular format, we use that.
  for (size_t i = 0; i < cuIndices.size();) {
    size_t idx = cuIndices[i];
    secondLevelPages.emplace_back();
    SecondLevelPage &page = secondLevelPages.back();
    page.entryIndex = i;
    // Limit (b): compressed entries only have 24 bits for the offset from the
    // page's base function address.
    uint64_t functionAddressMax =
        cuEntries[idx].functionAddress + COMPRESSED_ENTRY_FUNC_OFFSET_MASK;
    size_t n = commonEncodings.size();
    size_t wordsRemaining =
        SECOND_LEVEL_PAGE_WORDS -
        sizeof(unwind_info_compressed_second_level_page_header) /
            sizeof(uint32_t);
    while (wordsRemaining >= 1 && i < cuIndices.size()) {
      idx = cuIndices[i];
      const CompactUnwindEntry *cuPtr = &cuEntries[idx];
      if (cuPtr->functionAddress >= functionAddressMax)
        break;
      if (commonEncodingIndexes.count(cuPtr->encoding) ||
          page.localEncodingIndexes.count(cuPtr->encoding)) {
        // Known encoding: the entry costs one word.
        i++;
        wordsRemaining--;
      } else if (wordsRemaining >= 2 && n < COMPACT_ENCODINGS_MAX) {
        // New local encoding: one word for the entry plus one for the
        // page-local encodings table.
        page.localEncodings.emplace_back(cuPtr->encoding);
        page.localEncodingIndexes[cuPtr->encoding] = n++;
        i++;
        wordsRemaining -= 2;
      } else {
        break;
      }
    }
    page.entryCount = i - page.entryIndex;

    // If this is not the final page, see if it's possible to fit more entries
    // by using the regular format. This can happen when there are many unique
    // encodings, and we saturated the local encoding table early.
    if (i < cuIndices.size() &&
        page.entryCount < REGULAR_SECOND_LEVEL_ENTRIES_MAX) {
      page.kind = UNWIND_SECOND_LEVEL_REGULAR;
      page.entryCount = std::min(REGULAR_SECOND_LEVEL_ENTRIES_MAX,
                                 cuIndices.size() - page.entryIndex);
      i = page.entryIndex + page.entryCount;
    } else {
      page.kind = UNWIND_SECOND_LEVEL_COMPRESSED;
    }
  }

  // Assign each entry its slot in the LSDA index array; entries without an
  // LSDA share the slot of the next entry that has one.
  for (size_t idx : cuIndices) {
    lsdaIndex[idx] = entriesWithLsda.size();
    if (cuEntries[idx].lsda)
      entriesWithLsda.push_back(idx);
  }

  // compute size of __TEXT,__unwind_info section
  level2PagesOffset = sizeof(unwind_info_section_header) +
                      commonEncodings.size() * sizeof(uint32_t) +
                      personalities.size() * sizeof(uint32_t) +
                      // The extra second-level-page entry is for the sentinel
                      (secondLevelPages.size() + 1) *
                          sizeof(unwind_info_section_header_index_entry) +
                      entriesWithLsda.size() *
                          sizeof(unwind_info_section_header_lsda_index_entry);
  unwindInfoSize =
      level2PagesOffset + secondLevelPages.size() * SECOND_LEVEL_PAGE_BYTES;
}
614
// All inputs are relocated and output addresses are known, so write!

void UnwindInfoSectionImpl::writeTo(uint8_t *buf) const {
  assert(!cuIndices.empty() && "call only if there is unwind info");

  // section header
  auto *uip = reinterpret_cast<unwind_info_section_header *>(buf);
  uip->version = 1;
  uip->commonEncodingsArraySectionOffset = sizeof(unwind_info_section_header);
  uip->commonEncodingsArrayCount = commonEncodings.size();
  uip->personalityArraySectionOffset =
      uip->commonEncodingsArraySectionOffset +
      (uip->commonEncodingsArrayCount * sizeof(uint32_t));
  uip->personalityArrayCount = personalities.size();
  uip->indexSectionOffset = uip->personalityArraySectionOffset +
                            (uip->personalityArrayCount * sizeof(uint32_t));
  // +1 for the sentinel entry that marks the end of the covered range.
  uip->indexCount = secondLevelPages.size() + 1;

  // Common encodings
  auto *i32p = reinterpret_cast<uint32_t *>(&uip[1]);
  for (const auto &encoding : commonEncodings)
    *i32p++ = encoding.first;

  // Personalities, written as image-relative offsets of their GOT slots.
  for (const Symbol *personality : personalities)
    *i32p++ = personality->getGotVA() - in.header->addr;

  // FIXME: LD64 checks and warns about gaps or overlaps in cuEntries address
  // ranges. We should do the same too

  // Level-1 index
  uint32_t lsdaOffset =
      uip->indexSectionOffset +
      uip->indexCount * sizeof(unwind_info_section_header_index_entry);
  uint64_t l2PagesOffset = level2PagesOffset;
  auto *iep = reinterpret_cast<unwind_info_section_header_index_entry *>(i32p);
  for (const SecondLevelPage &page : secondLevelPages) {
    size_t idx = cuIndices[page.entryIndex];
    iep->functionOffset = cuEntries[idx].functionAddress - in.header->addr;
    iep->secondLevelPagesSectionOffset = l2PagesOffset;
    iep->lsdaIndexArraySectionOffset =
        lsdaOffset + lsdaIndex.lookup(idx) *
                         sizeof(unwind_info_section_header_lsda_index_entry);
    iep++;
    l2PagesOffset += SECOND_LEVEL_PAGE_BYTES;
  }
  // Level-1 sentinel
  // XXX(vyng): Note that LD64 adds +1 here.
  // Unsure whether it's a bug or it's their workaround for something else.
  // See comments from https://reviews.llvm.org/D138320.
  iep->functionOffset = cueEndBoundary - in.header->addr;
  iep->secondLevelPagesSectionOffset = 0;
  iep->lsdaIndexArraySectionOffset =
      lsdaOffset + entriesWithLsda.size() *
                       sizeof(unwind_info_section_header_lsda_index_entry);
  iep++;

  // LSDAs
  auto *lep =
      reinterpret_cast<unwind_info_section_header_lsda_index_entry *>(iep);
  for (size_t idx : entriesWithLsda) {
    const CompactUnwindEntry &cu = cuEntries[idx];
    lep->lsdaOffset = cu.lsda->getVA(/*off=*/0) - in.header->addr;
    lep->functionOffset = cu.functionAddress - in.header->addr;
    lep++;
  }

  // Level-2 pages
  auto *pp = reinterpret_cast<uint32_t *>(lep);
  for (const SecondLevelPage &page : secondLevelPages) {
    if (page.kind == UNWIND_SECOND_LEVEL_COMPRESSED) {
      // Compressed entries pack an 8-bit encoding index and a 24-bit offset
      // from the page's base function address into one word.
      uintptr_t functionAddressBase =
          cuEntries[cuIndices[page.entryIndex]].functionAddress;
      auto *p2p =
          reinterpret_cast<unwind_info_compressed_second_level_page_header *>(
              pp);
      p2p->kind = page.kind;
      p2p->entryPageOffset =
          sizeof(unwind_info_compressed_second_level_page_header);
      p2p->entryCount = page.entryCount;
      p2p->encodingsPageOffset =
          p2p->entryPageOffset + p2p->entryCount * sizeof(uint32_t);
      p2p->encodingsCount = page.localEncodings.size();
      auto *ep = reinterpret_cast<uint32_t *>(&p2p[1]);
      for (size_t i = 0; i < page.entryCount; i++) {
        const CompactUnwindEntry &cue =
            cuEntries[cuIndices[page.entryIndex + i]];
        // Prefer the common-encodings table; fall back to the page-local one.
        auto it = commonEncodingIndexes.find(cue.encoding);
        if (it == commonEncodingIndexes.end())
          it = page.localEncodingIndexes.find(cue.encoding);
        *ep++ = (it->second << COMPRESSED_ENTRY_FUNC_OFFSET_BITS) |
                (cue.functionAddress - functionAddressBase);
      }
      if (!page.localEncodings.empty())
        memcpy(ep, page.localEncodings.data(),
               page.localEncodings.size() * sizeof(uint32_t));
    } else {
      // Regular pages store full {functionOffset, encoding} pairs.
      auto *p2p =
          reinterpret_cast<unwind_info_regular_second_level_page_header *>(pp);
      p2p->kind = page.kind;
      p2p->entryPageOffset =
          sizeof(unwind_info_regular_second_level_page_header);
      p2p->entryCount = page.entryCount;
      auto *ep = reinterpret_cast<uint32_t *>(&p2p[1]);
      for (size_t i = 0; i < page.entryCount; i++) {
        const CompactUnwindEntry &cue =
            cuEntries[cuIndices[page.entryIndex + i]];
        *ep++ = cue.functionAddress;
        *ep++ = cue.encoding;
      }
    }
    pp += SECOND_LEVEL_PAGE_WORDS;
  }
}
729
// Factory function; keeps UnwindInfoSectionImpl's definition private to this
// file.
UnwindInfoSection *macho::makeUnwindInfoSection() {
  return make<UnwindInfoSectionImpl>();
}
733