LowerTypeTests.h revision 360784
179697Snon//===- LowerTypeTests.h - type metadata lowering pass -----------*- C++ -*-===// 267468Snon// 367468Snon// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. 467468Snon// See https://llvm.org/LICENSE.txt for license information. 567468Snon// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception 679697Snon// 767468Snon//===----------------------------------------------------------------------===// 8139749Simp// 967468Snon// This file defines parts of the type test lowering pass implementation that 1079697Snon// may be usefully unit tested. 1167468Snon// 1279697Snon//===----------------------------------------------------------------------===// 1367468Snon 1467468Snon#ifndef LLVM_TRANSFORMS_IPO_LOWERTYPETESTS_H 1567468Snon#define LLVM_TRANSFORMS_IPO_LOWERTYPETESTS_H 1667468Snon 1767468Snon#include "llvm/ADT/SmallVector.h" 1867468Snon#include "llvm/IR/PassManager.h" 1967468Snon#include <cstdint> 2067468Snon#include <cstring> 2167468Snon#include <limits> 2267468Snon#include <set> 2367468Snon#include <vector> 2467468Snon 2567468Snonnamespace llvm { 2667468Snon 2767468Snonclass Module; 2867468Snonclass ModuleSummaryIndex; 2967468Snonclass raw_ostream; 3067468Snon 3167468Snonnamespace lowertypetests { 3267468Snon 3367468Snonstruct BitSetInfo { 3467468Snon // The indices of the set bits in the bitset. 3567468Snon std::set<uint64_t> Bits; 3667468Snon 3767468Snon // The byte offset into the combined global represented by the bitset. 3867468Snon uint64_t ByteOffset; 3967468Snon 40119420Sobrien // The size of the bitset in bits. 41119420Sobrien uint64_t BitSize; 42119420Sobrien 4367468Snon // Log2 alignment of the bit set relative to the combined global. 4467468Snon // For example, a log2 alignment of 3 means that bits in the bitset 4567468Snon // represent addresses 8 bytes apart. 
4667468Snon unsigned AlignLog2; 4767468Snon 4867468Snon bool isSingleOffset() const { 4967468Snon return Bits.size() == 1; 5067468Snon } 5167468Snon 52274760Sjhb bool isAllOnes() const { 5367468Snon return Bits.size() == BitSize; 5467468Snon } 5567468Snon 5667468Snon bool containsGlobalOffset(uint64_t Offset) const; 5767468Snon 5867468Snon void print(raw_ostream &OS) const; 5967468Snon}; 6067468Snon 6167468Snonstruct BitSetBuilder { 6267468Snon SmallVector<uint64_t, 16> Offsets; 6367468Snon uint64_t Min = std::numeric_limits<uint64_t>::max(); 6467468Snon uint64_t Max = 0; 6567468Snon 66250460Seadler BitSetBuilder() = default; 6767468Snon 6867468Snon void addOffset(uint64_t Offset) { 6967468Snon if (Min > Offset) 7067468Snon Min = Offset; 7167468Snon if (Max < Offset) 7267468Snon Max = Offset; 7367468Snon 7467468Snon Offsets.push_back(Offset); 7579697Snon } 7667468Snon 7779697Snon BitSetInfo build(); 7867468Snon}; 7979697Snon 8079697Snon/// This class implements a layout algorithm for globals referenced by bit sets 8179697Snon/// that tries to keep members of small bit sets together. This can 8279697Snon/// significantly reduce bit set sizes in many cases. 8379697Snon/// 8467468Snon/// It works by assembling fragments of layout from sets of referenced globals. 8567468Snon/// Each set of referenced globals causes the algorithm to create a new 8667468Snon/// fragment, which is assembled by appending each referenced global in the set 8767468Snon/// into the fragment. If a referenced global has already been referenced by an 8867468Snon/// fragment created earlier, we instead delete that fragment and append its 8967468Snon/// contents into the fragment we are assembling. 9067468Snon/// 9167468Snon/// By starting with the smallest fragments, we minimize the size of the 9267468Snon/// fragments that are copied into larger fragments. 
/// This is most intuitively
/// thought about when considering the case where the globals are virtual tables
/// and the bit sets represent their derived classes: in a single inheritance
/// hierarchy, the optimum layout would involve a depth-first search of the
/// class hierarchy (and in fact the computed layout ends up looking a lot like
/// a DFS), but a naive DFS would not work well in the presence of multiple
/// inheritance. This aspect of the algorithm ends up fitting smaller
/// hierarchies inside larger ones where that would be beneficial.
///
/// For example, consider this class hierarchy:
///
///  A       B
///    \   / | \
///      C   D   E
///
/// We have five bit sets: bsA (A, C), bsB (B, C, D, E), bsC (C), bsD (D) and
/// bsE (E). If we laid out our objects by DFS traversing B followed by A, our
/// layout would be {B, C, D, E, A}. This is optimal for bsB as it needs to
/// cover the only 4 objects in its hierarchy, but not for bsA as it needs to
/// cover 5 objects, i.e. the entire layout. Our algorithm proceeds as follows:
///
/// Add bsC, fragments {{C}}
/// Add bsD, fragments {{C}, {D}}
/// Add bsE, fragments {{C}, {D}, {E}}
/// Add bsA, fragments {{A, C}, {D}, {E}}
/// Add bsB, fragments {{B, A, C, D, E}}
///
/// This layout is optimal for bsA, as it now only needs to cover two (i.e. 3
/// fewer) objects, at the cost of bsB needing to cover 1 more object.
///
/// The bit set lowering pass assigns an object index to each object that needs
/// to be laid out, and calls addFragment for each bit set passing the object
/// indices of its referenced globals. It then assembles a layout from the
/// computed layout in the Fragments field.
struct GlobalLayoutBuilder {
  /// The computed layout. Each element of this vector contains a fragment of
  /// layout (which may be empty) consisting of object indices.
  std::vector<std::vector<uint64_t>> Fragments;

  /// Mapping from object index to fragment index.
  std::vector<uint64_t> FragmentMap;

  /// Start with a single empty fragment and one FragmentMap slot per object.
  GlobalLayoutBuilder(uint64_t NumObjects)
      : Fragments(1), FragmentMap(NumObjects) {}

  /// Add F to the layout while trying to keep its indices contiguous.
  /// If a previously seen fragment uses any of F's indices, that
  /// fragment will be laid out inside F.
  void addFragment(const std::set<uint64_t> &F);
};

/// This class is used to build a byte array containing overlapping bit sets. By
/// loading from indexed offsets into the byte array and applying a mask, a
/// program can test bits from the bit set with a relatively short instruction
/// sequence.
For example, suppose we have 15 bit sets to lay out: 14767468Snon/// 14867468Snon/// A (16 bits), B (15 bits), C (14 bits), D (13 bits), E (12 bits), 14967468Snon/// F (11 bits), G (10 bits), H (9 bits), I (7 bits), J (6 bits), K (5 bits), 15073025Snon/// L (4 bits), M (3 bits), N (2 bits), O (1 bit) 15179697Snon/// 15267468Snon/// These bits can be laid out in a 16-byte array like this: 15367468Snon/// 15479697Snon/// Byte Offset 15579697Snon/// 0123456789ABCDEF 15667468Snon/// Bit 15767468Snon/// 7 HHHHHHHHHIIIIIII 15867468Snon/// 6 GGGGGGGGGGJJJJJJ 15967468Snon/// 5 FFFFFFFFFFFKKKKK 16079697Snon/// 4 EEEEEEEEEEEELLLL 16167468Snon/// 3 DDDDDDDDDDDDDMMM 16267468Snon/// 2 CCCCCCCCCCCCCCNN 16367468Snon/// 1 BBBBBBBBBBBBBBBO 16467468Snon/// 0 AAAAAAAAAAAAAAAA 16567468Snon/// 16667468Snon/// For example, to test bit X of A, we evaluate ((bits[X] & 1) != 0), or to 16779697Snon/// test bit X of I, we evaluate ((bits[9 + X] & 0x80) != 0). This can be done 16867468Snon/// in 1-2 machine instructions on x86, or 4-6 instructions on ARM. 16967468Snon/// 170189004Srdivacky/// This is a byte array, rather than (say) a 2-byte array or a 4-byte array, 17167468Snon/// because for one thing it gives us better packing (the more bins there are, 17267468Snon/// the less evenly they will be filled), and for another, the instruction 173274760Sjhb/// sequences can be slightly shorter, both on x86 and ARM. 17467468Snonstruct ByteArrayBuilder { 17567468Snon /// The byte array built so far. 17667468Snon std::vector<uint8_t> Bytes; 17779697Snon 17879697Snon enum { BitsPerByte = 8 }; 17979697Snon 18079697Snon /// The number of bytes allocated so far for each of the bits. 18179697Snon uint64_t BitAllocs[BitsPerByte]; 18279697Snon 18379697Snon ByteArrayBuilder() { 18479697Snon memset(BitAllocs, 0, sizeof(BitAllocs)); 185274760Sjhb } 186274760Sjhb 18779697Snon /// Allocate BitSize bits in the byte array where Bits contains the bits to 18879697Snon /// set. 
AllocByteOffset is set to the offset within the byte array and 18979697Snon /// AllocMask is set to the bitmask for those bits. This uses the LPT (Longest 19079697Snon /// Processing Time) multiprocessor scheduling algorithm to lay out the bits 19179697Snon /// efficiently; the pass allocates bit sets in decreasing size order. 19279697Snon void allocate(const std::set<uint64_t> &Bits, uint64_t BitSize, 19379697Snon uint64_t &AllocByteOffset, uint8_t &AllocMask); 19479697Snon}; 195274760Sjhb 19679697Snonbool isJumpTableCanonical(Function *F); 19779697Snon 19879697Snon} // end namespace lowertypetests 19979697Snon 20079697Snonclass LowerTypeTestsPass : public PassInfoMixin<LowerTypeTestsPass> { 20179697Snonpublic: 20279697Snon ModuleSummaryIndex *ExportSummary; 20379697Snon const ModuleSummaryIndex *ImportSummary; 20479697Snon LowerTypeTestsPass(ModuleSummaryIndex *ExportSummary, 20579697Snon const ModuleSummaryIndex *ImportSummary) 20679697Snon : ExportSummary(ExportSummary), ImportSummary(ImportSummary) {} 20779697Snon PreservedAnalyses run(Module &M, ModuleAnalysisManager &AM); 20879697Snon}; 20979697Snon 21079697Snon} // end namespace llvm 21179697Snon 21279697Snon#endif // LLVM_TRANSFORMS_IPO_LOWERTYPETESTS_H 21379697Snon