//===- InstrProfWriter.cpp - Instrumented profiling writer ----------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains support for writing profiling data for clang's
// instrumentation-based PGO and coverage.
//
//===----------------------------------------------------------------------===//

#include "llvm/ProfileData/InstrProfWriter.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/IR/ProfileSummary.h"
#include "llvm/ProfileData/InstrProf.h"
#include "llvm/ProfileData/MemProf.h"
#include "llvm/ProfileData/ProfileCommon.h"
#include "llvm/Support/Endian.h"
#include "llvm/Support/EndianStream.h"
#include "llvm/Support/Error.h"
#include "llvm/Support/MemoryBuffer.h"
#include "llvm/Support/OnDiskHashTable.h"
#include "llvm/Support/raw_ostream.h"
#include <cstdint>
#include <memory>
#include <string>
#include <tuple>
#include <utility>
#include <vector>

using namespace llvm;

// A struct to define how the data stream should be patched. For Indexed
// profiling, only the uint64_t data type is needed.
struct PatchItem {
  uint64_t Pos; // Where to patch.
  uint64_t *D;  // Pointer to an array of source data.
  int N;        // Number of elements in \c D array.
};

namespace llvm {

// A wrapper class that abstracts the output stream and supports
// byte-level back-patching.
class ProfOStream {
public:
  ProfOStream(raw_fd_ostream &FD)
      : IsFDOStream(true), OS(FD), LE(FD, support::little) {}
  ProfOStream(raw_string_ostream &STR)
      : IsFDOStream(false), OS(STR), LE(STR, support::little) {}

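  // \c tell reports the current stream offset; the write helpers emit
  // values in little-endian order through \c LE.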
  uint64_t tell() { return OS.tell(); }
  void write(uint64_t V) { LE.write<uint64_t>(V); }
  void writeByte(uint8_t V) { LE.write<uint8_t>(V); }

  // \c patch can only be called when all data is written and flushed.
  // For raw_string_ostream, the patch is done on the target string
  // directly and it won't be reflected in the stream's internal buffer.
  void patch(PatchItem *P, int NItems) {
    using namespace support;

    if (IsFDOStream) {
      raw_fd_ostream &FDOStream = static_cast<raw_fd_ostream &>(OS);
      const uint64_t LastPos = FDOStream.tell();
      for (int K = 0; K < NItems; K++) {
        FDOStream.seek(P[K].Pos);
        for (int I = 0; I < P[K].N; I++)
          write(P[K].D[I]);
      }
      // Reset the stream to the last position after patching so that users
      // don't accidentally overwrite data. This makes it consistent with
      // the string stream below which replaces the data directly.
      FDOStream.seek(LastPos);
    } else {
      raw_string_ostream &SOStream = static_cast<raw_string_ostream &>(OS);
      std::string &Data = SOStream.str(); // with flush
      for (int K = 0; K < NItems; K++) {
        for (int I = 0; I < P[K].N; I++) {
          uint64_t Bytes = endian::byte_swap<uint64_t, little>(P[K].D[I]);
          Data.replace(P[K].Pos + I * sizeof(uint64_t), sizeof(uint64_t),
                       (const char *)&Bytes, sizeof(uint64_t));
        }
      }
    }
  }

  // If \c OS is an instance of \c raw_fd_ostream, this field will be
  // true. Otherwise, \c OS will be a raw_string_ostream.
  bool IsFDOStream;
  raw_ostream &OS;
  support::endian::Writer LE;
};

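// A trait for OnDiskChainedHashTableGenerator that hashes function names
// and emits each function's records (hash, counters, and value data).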
class InstrProfRecordWriterTrait {
public:
  using key_type = StringRef;
  using key_type_ref = StringRef;

  using data_type = const InstrProfWriter::ProfilingData *const;
  using data_type_ref = const InstrProfWriter::ProfilingData *const;

  using hash_value_type = uint64_t;
  using offset_type = uint64_t;

  support::endianness ValueProfDataEndianness = support::little;
  InstrProfSummaryBuilder *SummaryBuilder;
  InstrProfSummaryBuilder *CSSummaryBuilder;

  InstrProfRecordWriterTrait() = default;

  static hash_value_type ComputeHash(key_type_ref K) {
    return IndexedInstrProf::ComputeHash(K);
  }

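  // Emit the key length and the data length (both in bytes) so the
  // on-disk hash table generator can lay out the table. The data length
  // computed here must match the number of bytes \c EmitData writes.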
  static std::pair<offset_type, offset_type>
  EmitKeyDataLength(raw_ostream &Out, key_type_ref K, data_type_ref V) {
    using namespace support;

    endian::Writer LE(Out, little);

    offset_type N = K.size();
    LE.write<offset_type>(N);

    offset_type M = 0;
    for (const auto &ProfileData : *V) {
      const InstrProfRecord &ProfRecord = ProfileData.second;
      M += sizeof(uint64_t); // The function hash
      M += sizeof(uint64_t); // The size of the Counts vector
      M += ProfRecord.Counts.size() * sizeof(uint64_t);

      // Value data
      M += ValueProfData::getSize(ProfileData.second);
    }
    LE.write<offset_type>(M);

    return std::make_pair(N, M);
  }

  void EmitKey(raw_ostream &Out, key_type_ref K, offset_type N) {
    Out.write(K.data(), N);
  }

  void EmitData(raw_ostream &Out, key_type_ref, data_type_ref V, offset_type) {
    using namespace support;

    endian::Writer LE(Out, little);
    for (const auto &ProfileData : *V) {
      const InstrProfRecord &ProfRecord = ProfileData.second;
      if (NamedInstrProfRecord::hasCSFlagInHash(ProfileData.first))
        CSSummaryBuilder->addRecord(ProfRecord);
      else
        SummaryBuilder->addRecord(ProfRecord);

      LE.write<uint64_t>(ProfileData.first); // Function hash
      LE.write<uint64_t>(ProfRecord.Counts.size());
      for (uint64_t I : ProfRecord.Counts)
        LE.write<uint64_t>(I);

      // Write value data
      std::unique_ptr<ValueProfData> VDataPtr =
          ValueProfData::serializeFrom(ProfileData.second);
      uint32_t S = VDataPtr->getSize();
      VDataPtr->swapBytesFromHost(ValueProfDataEndianness);
      Out.write((const char *)VDataPtr.get(), S);
    }
  }
};

} // end namespace llvm

InstrProfWriter::InstrProfWriter(bool Sparse)
    : Sparse(Sparse), InfoObj(new InstrProfRecordWriterTrait()) {}

InstrProfWriter::~InstrProfWriter() { delete InfoObj; }

// Internal interface for testing purposes only.
void InstrProfWriter::setValueProfDataEndianness(
    support::endianness Endianness) {
  InfoObj->ValueProfDataEndianness = Endianness;
}

void InstrProfWriter::setOutputSparse(bool Sparse) {
  this->Sparse = Sparse;
}

void InstrProfWriter::addRecord(NamedInstrProfRecord &&I, uint64_t Weight,
                                function_ref<void(Error)> Warn) {
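  // Copy the name and hash out of I before it is moved from.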
  auto Name = I.Name;
  auto Hash = I.Hash;
  addRecord(Name, Hash, std::move(I), Weight, Warn);
}

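// Compute overlap statistics between \c Other and the record with the same
// name and hash already held by this writer, accumulating the results into
// \c Overlap and \c FuncLevelOverlap.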
void InstrProfWriter::overlapRecord(NamedInstrProfRecord &&Other,
                                    OverlapStats &Overlap,
                                    OverlapStats &FuncLevelOverlap,
                                    const OverlapFuncFilters &FuncFilter) {
  auto Name = Other.Name;
  auto Hash = Other.Hash;
  Other.accumulateCounts(FuncLevelOverlap.Test);
  if (FunctionData.find(Name) == FunctionData.end()) {
    Overlap.addOneUnique(FuncLevelOverlap.Test);
    return;
  }
  if (FuncLevelOverlap.Test.CountSum < 1.0f) {
    Overlap.Overlap.NumEntries += 1;
    return;
  }
  auto &ProfileDataMap = FunctionData[Name];
  bool NewFunc;
  ProfilingData::iterator Where;
  std::tie(Where, NewFunc) =
      ProfileDataMap.insert(std::make_pair(Hash, InstrProfRecord()));
  if (NewFunc) {
    Overlap.addOneMismatch(FuncLevelOverlap.Test);
    return;
  }
  InstrProfRecord &Dest = Where->second;

  uint64_t ValueCutoff = FuncFilter.ValueCutoff;
  if (!FuncFilter.NameFilter.empty() && Name.contains(FuncFilter.NameFilter))
    ValueCutoff = 0;

  Dest.overlap(Other, Overlap, FuncLevelOverlap, ValueCutoff);
}

void InstrProfWriter::addRecord(StringRef Name, uint64_t Hash,
                                InstrProfRecord &&I, uint64_t Weight,
                                function_ref<void(Error)> Warn) {
  auto &ProfileDataMap = FunctionData[Name];

  bool NewFunc;
  ProfilingData::iterator Where;
  std::tie(Where, NewFunc) =
      ProfileDataMap.insert(std::make_pair(Hash, InstrProfRecord()));
  InstrProfRecord &Dest = Where->second;

  auto MapWarn = [&](instrprof_error E) {
    Warn(make_error<InstrProfError>(E));
  };

  if (NewFunc) {
    // We've never seen a function with this name and hash, add it.
    Dest = std::move(I);
    if (Weight > 1)
      Dest.scale(Weight, 1, MapWarn);
  } else {
    // We're updating a function we've seen before.
    Dest.merge(I, Weight, MapWarn);
  }

  Dest.sortValueData();
}

void InstrProfWriter::addMemProfRecord(
    const Function::GUID Id, const memprof::IndexedMemProfRecord &Record) {
  auto Result = MemProfRecordData.insert({Id, Record});
  // If we inserted a new record then we are done.
  if (Result.second) {
    return;
  }
  memprof::IndexedMemProfRecord &Existing = Result.first->second;
  Existing.merge(Record);
}

bool InstrProfWriter::addMemProfFrame(const memprof::FrameId Id,
                                      const memprof::Frame &Frame,
                                      function_ref<void(Error)> Warn) {
  auto Result = MemProfFrameData.insert({Id, Frame});
  // If a mapping already exists for the current frame id and it does not
  // match the new mapping provided then reset the existing contents and bail
  // out. We don't support merging memprof data whose Frame -> Id mappings
  // are inconsistent across profiles.
  if (!Result.second && Result.first->second != Frame) {
    Warn(make_error<InstrProfError>(instrprof_error::malformed,
                                    "frame to id mapping mismatch"));
    return false;
  }
  return true;
}

void InstrProfWriter::addBinaryIds(ArrayRef<llvm::object::BuildID> BIs) {
  llvm::append_range(BinaryIds, BIs);
}

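// Fold the function records, binary ids, and memprof data of \c IPW into
// this writer, reporting merge problems through \c Warn.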
void InstrProfWriter::mergeRecordsFromWriter(InstrProfWriter &&IPW,
                                             function_ref<void(Error)> Warn) {
  for (auto &I : IPW.FunctionData)
    for (auto &Func : I.getValue())
      addRecord(I.getKey(), Func.first, std::move(Func.second), 1, Warn);

  BinaryIds.reserve(BinaryIds.size() + IPW.BinaryIds.size());
  for (auto &I : IPW.BinaryIds)
    addBinaryIds(I);

  MemProfFrameData.reserve(IPW.MemProfFrameData.size());
  for (auto &I : IPW.MemProfFrameData) {
    // If we weren't able to add the frame mappings then it doesn't make sense
    // to try to merge the records from this profile.
    if (!addMemProfFrame(I.first, I.second, Warn))
      return;
  }

  MemProfRecordData.reserve(IPW.MemProfRecordData.size());
  for (auto &I : IPW.MemProfRecordData) {
    addMemProfRecord(I.first, I.second);
  }
}

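// With sparse output, a function is encoded only if at least one of its
// counters is non-zero; otherwise all functions are encoded.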
bool InstrProfWriter::shouldEncodeData(const ProfilingData &PD) {
  if (!Sparse)
    return true;
  for (const auto &Func : PD) {
    const InstrProfRecord &IPR = Func.second;
    if (llvm::any_of(IPR.Counts, [](uint64_t Count) { return Count > 0; }))
      return true;
  }
  return false;
}

static void setSummary(IndexedInstrProf::Summary *TheSummary,
                       ProfileSummary &PS) {
  using namespace IndexedInstrProf;

  const std::vector<ProfileSummaryEntry> &Res = PS.getDetailedSummary();
  TheSummary->NumSummaryFields = Summary::NumKinds;
  TheSummary->NumCutoffEntries = Res.size();
  TheSummary->set(Summary::MaxFunctionCount, PS.getMaxFunctionCount());
  TheSummary->set(Summary::MaxBlockCount, PS.getMaxCount());
  TheSummary->set(Summary::MaxInternalBlockCount, PS.getMaxInternalCount());
  TheSummary->set(Summary::TotalBlockCount, PS.getTotalCount());
  TheSummary->set(Summary::TotalNumBlocks, PS.getNumCounts());
  TheSummary->set(Summary::TotalNumFunctions, PS.getNumFunctions());
  for (unsigned I = 0; I < Res.size(); I++)
    TheSummary->setEntry(I, Res[I]);
}

Error InstrProfWriter::writeImpl(ProfOStream &OS) {
  using namespace IndexedInstrProf;
  using namespace support;

  OnDiskChainedHashTableGenerator<InstrProfRecordWriterTrait> Generator;

  InstrProfSummaryBuilder ISB(ProfileSummaryBuilder::DefaultCutoffs);
  InfoObj->SummaryBuilder = &ISB;
  InstrProfSummaryBuilder CSISB(ProfileSummaryBuilder::DefaultCutoffs);
  InfoObj->CSSummaryBuilder = &CSISB;

  // Populate the hash table generator.
  for (const auto &I : FunctionData)
    if (shouldEncodeData(I.getValue()))
      Generator.insert(I.getKey(), &I.getValue());

  // Write the header.
  IndexedInstrProf::Header Header;
  Header.Magic = IndexedInstrProf::Magic;
  Header.Version = IndexedInstrProf::ProfVersion::CurrentVersion;
  if (static_cast<bool>(ProfileKind & InstrProfKind::IRInstrumentation))
    Header.Version |= VARIANT_MASK_IR_PROF;
  if (static_cast<bool>(ProfileKind & InstrProfKind::ContextSensitive))
    Header.Version |= VARIANT_MASK_CSIR_PROF;
  if (static_cast<bool>(ProfileKind &
                        InstrProfKind::FunctionEntryInstrumentation))
    Header.Version |= VARIANT_MASK_INSTR_ENTRY;
  if (static_cast<bool>(ProfileKind & InstrProfKind::SingleByteCoverage))
    Header.Version |= VARIANT_MASK_BYTE_COVERAGE;
  if (static_cast<bool>(ProfileKind & InstrProfKind::FunctionEntryOnly))
    Header.Version |= VARIANT_MASK_FUNCTION_ENTRY_ONLY;
  if (static_cast<bool>(ProfileKind & InstrProfKind::MemProf))
    Header.Version |= VARIANT_MASK_MEMPROF;

  Header.Unused = 0;
  Header.HashType = static_cast<uint64_t>(IndexedInstrProf::HashType);
  Header.HashOffset = 0;
  Header.MemProfOffset = 0;
  Header.BinaryIdOffset = 0;
  int N = sizeof(IndexedInstrProf::Header) / sizeof(uint64_t);

  // Write out all the fields except 'HashOffset', 'MemProfOffset' and
  // 'BinaryIdOffset'; remember where those fields live so they can be
  // back-patched later.
  for (int I = 0; I < N - 3; I++)
    OS.write(reinterpret_cast<uint64_t *>(&Header)[I]);

  // Save the location of Header.HashOffset field in \c OS.
  uint64_t HashTableStartFieldOffset = OS.tell();
  // Reserve the space for HashOffset field.
  OS.write(0);

  // Save the location of MemProf profile data. This is stored in two parts as
  // the schema and as a separate on-disk chained hashtable.
  uint64_t MemProfSectionOffset = OS.tell();
  // Reserve space for the MemProf table field to be patched later if this
  // profile contains memory profile information.
  OS.write(0);

  // Save the location of binary ids section.
  uint64_t BinaryIdSectionOffset = OS.tell();
  // Reserve space for the BinaryIdOffset field to be patched later if this
  // profile contains binary ids.
  OS.write(0);

  // Reserve space to write profile summary data.
  uint32_t NumEntries = ProfileSummaryBuilder::DefaultCutoffs.size();
  uint32_t SummarySize = Summary::getSize(Summary::NumKinds, NumEntries);
  // Remember the summary offset.
  uint64_t SummaryOffset = OS.tell();
  for (unsigned I = 0; I < SummarySize / sizeof(uint64_t); I++)
    OS.write(0);
  uint64_t CSSummaryOffset = 0;
  uint64_t CSSummarySize = 0;
  if (static_cast<bool>(ProfileKind & InstrProfKind::ContextSensitive)) {
    CSSummaryOffset = OS.tell();
    CSSummarySize = SummarySize / sizeof(uint64_t);
    for (unsigned I = 0; I < CSSummarySize; I++)
      OS.write(0);
  }

  // Write the hash table.
  uint64_t HashTableStart = Generator.Emit(OS.OS, *InfoObj);

  // Write the MemProf profile data if we have it. This includes a simple schema
  // with the format described below followed by the hashtable:
  // uint64_t RecordTableOffset = RecordTableGenerator.Emit
  // uint64_t FramePayloadOffset = Stream offset before emitting the frame table
  // uint64_t FrameTableOffset = FrameTableGenerator.Emit
  // uint64_t Num schema entries
  // uint64_t Schema entry 0
  // uint64_t Schema entry 1
  // ....
  // uint64_t Schema entry N - 1
  // OnDiskChainedHashTable MemProfRecordData
  // OnDiskChainedHashTable MemProfFrameData
  uint64_t MemProfSectionStart = 0;
  if (static_cast<bool>(ProfileKind & InstrProfKind::MemProf)) {
    MemProfSectionStart = OS.tell();
    OS.write(0ULL); // Reserve space for the memprof record table offset.
    OS.write(0ULL); // Reserve space for the memprof frame payload offset.
    OS.write(0ULL); // Reserve space for the memprof frame table offset.

    auto Schema = memprof::PortableMemInfoBlock::getSchema();
    OS.write(static_cast<uint64_t>(Schema.size()));
    for (const auto Id : Schema) {
      OS.write(static_cast<uint64_t>(Id));
    }

    auto RecordWriter = std::make_unique<memprof::RecordWriterTrait>();
    RecordWriter->Schema = &Schema;
    OnDiskChainedHashTableGenerator<memprof::RecordWriterTrait>
        RecordTableGenerator;
    for (auto &I : MemProfRecordData) {
      // Insert the key (func hash) and value (memprof record).
      RecordTableGenerator.insert(I.first, I.second);
    }

    uint64_t RecordTableOffset =
        RecordTableGenerator.Emit(OS.OS, *RecordWriter);

    uint64_t FramePayloadOffset = OS.tell();

    auto FrameWriter = std::make_unique<memprof::FrameWriterTrait>();
    OnDiskChainedHashTableGenerator<memprof::FrameWriterTrait>
        FrameTableGenerator;
    for (auto &I : MemProfFrameData) {
      // Insert the key (frame id) and value (frame contents).
      FrameTableGenerator.insert(I.first, I.second);
    }

    uint64_t FrameTableOffset = FrameTableGenerator.Emit(OS.OS, *FrameWriter);

    PatchItem PatchItems[] = {
        {MemProfSectionStart, &RecordTableOffset, 1},
        {MemProfSectionStart + sizeof(uint64_t), &FramePayloadOffset, 1},
        {MemProfSectionStart + 2 * sizeof(uint64_t), &FrameTableOffset, 1},
    };
    OS.patch(PatchItems, 3);
  }

  // BinaryIdSection has two parts:
  // 1. uint64_t BinaryIdsSectionSize
  // 2. list of binary ids that consist of:
  //    a. uint64_t BinaryIdLength
  //    b. uint8_t  BinaryIdData
  //    c. uint8_t  Padding (if necessary)
  uint64_t BinaryIdSectionStart = OS.tell();
  // Calculate the size of the binary ids section.
  uint64_t BinaryIdsSectionSize = 0;

  // Remove duplicate binary ids.
  llvm::sort(BinaryIds);
  BinaryIds.erase(std::unique(BinaryIds.begin(), BinaryIds.end()),
                  BinaryIds.end());

  for (auto BI : BinaryIds) {
    // Increment by binary id length data type size.
    BinaryIdsSectionSize += sizeof(uint64_t);
    // Increment by binary id data length, aligned to 8 bytes.
    BinaryIdsSectionSize += alignToPowerOf2(BI.size(), sizeof(uint64_t));
  }
  // Write binary ids section size.
  OS.write(BinaryIdsSectionSize);

  for (auto BI : BinaryIds) {
    uint64_t BILen = BI.size();
    // Write binary id length.
    OS.write(BILen);
    // Write binary id data.
    for (unsigned K = 0; K < BILen; K++)
      OS.writeByte(BI[K]);
    // Write padding if necessary.
    uint64_t PaddingSize = alignToPowerOf2(BILen, sizeof(uint64_t)) - BILen;
    for (unsigned K = 0; K < PaddingSize; K++)
      OS.writeByte(0);
  }

  // Allocate space for data to be serialized out.
  std::unique_ptr<IndexedInstrProf::Summary> TheSummary =
      IndexedInstrProf::allocSummary(SummarySize);
  // Compute the summary and copy it into the data structure to be
  // serialized out (to disk or buffer).
  std::unique_ptr<ProfileSummary> PS = ISB.getSummary();
  setSummary(TheSummary.get(), *PS);
  InfoObj->SummaryBuilder = nullptr;

  // For the context-sensitive summary.
  std::unique_ptr<IndexedInstrProf::Summary> TheCSSummary = nullptr;
  if (static_cast<bool>(ProfileKind & InstrProfKind::ContextSensitive)) {
    TheCSSummary = IndexedInstrProf::allocSummary(SummarySize);
    std::unique_ptr<ProfileSummary> CSPS = CSISB.getSummary();
    setSummary(TheCSSummary.get(), *CSPS);
  }
  InfoObj->CSSummaryBuilder = nullptr;

  // Now do the final patch:
  PatchItem PatchItems[] = {
      // Patch the Header.HashOffset field.
      {HashTableStartFieldOffset, &HashTableStart, 1},
      // Patch the Header.MemProfOffset (=0 for profiles without MemProf
      // data).
      {MemProfSectionOffset, &MemProfSectionStart, 1},
      // Patch the Header.BinaryIdSectionOffset.
      {BinaryIdSectionOffset, &BinaryIdSectionStart, 1},
      // Patch the summary data.
      {SummaryOffset, reinterpret_cast<uint64_t *>(TheSummary.get()),
       (int)(SummarySize / sizeof(uint64_t))},
      {CSSummaryOffset, reinterpret_cast<uint64_t *>(TheCSSummary.get()),
       (int)CSSummarySize}};

  OS.patch(PatchItems, std::size(PatchItems));

  for (const auto &I : FunctionData)
    for (const auto &F : I.getValue())
      if (Error E = validateRecord(F.second))
        return E;

  return Error::success();
}

Error InstrProfWriter::write(raw_fd_ostream &OS) {
  // Write the profile to the file stream.
  ProfOStream POS(OS);
  return writeImpl(POS);
}

std::unique_ptr<MemoryBuffer> InstrProfWriter::writeBuffer() {
  std::string Data;
  raw_string_ostream OS(Data);
  ProfOStream POS(OS);
  // Write the profile to the string stream.
  if (Error E = writeImpl(POS)) {
    // Consume the error so an unchecked Error does not assert in debug
    // builds; callers only see the null return.
    consumeError(std::move(E));
    return nullptr;
  }
  // Return this in an aligned memory buffer.
  return MemoryBuffer::getMemBufferCopy(Data);
}


static const char *ValueProfKindStr[] = {
#define VALUE_PROF_KIND(Enumerator, Value, Descr) #Enumerator,
#include "llvm/ProfileData/InstrProfData.inc"
};

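// A value site must not contain duplicate values; indirect-call-target
// sites are exempt from this check.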
Error InstrProfWriter::validateRecord(const InstrProfRecord &Func) {
  for (uint32_t VK = 0; VK <= IPVK_Last; VK++) {
    uint32_t NS = Func.getNumValueSites(VK);
    if (!NS)
      continue;
    for (uint32_t S = 0; S < NS; S++) {
      uint32_t ND = Func.getNumValueDataForSite(VK, S);
      std::unique_ptr<InstrProfValueData[]> VD = Func.getValueForSite(VK, S);
      DenseSet<uint64_t> SeenValues;
      for (uint32_t I = 0; I < ND; I++)
        if ((VK != IPVK_IndirectCallTarget) &&
            !SeenValues.insert(VD[I].Value).second)
          return make_error<InstrProfError>(instrprof_error::invalid_prof);
    }
  }

  return Error::success();
}

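// Write a single record in the text format, which looks like:
//   <function name>
//   # Func Hash:
//   <hash>
//   # Num Counters:
//   <number of counters>
//   # Counter Values:
//   <one counter value per line>
// followed by the value profile data, if any.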
void InstrProfWriter::writeRecordInText(StringRef Name, uint64_t Hash,
                                        const InstrProfRecord &Func,
                                        InstrProfSymtab &Symtab,
                                        raw_fd_ostream &OS) {
  OS << Name << "\n";
  OS << "# Func Hash:\n" << Hash << "\n";
  OS << "# Num Counters:\n" << Func.Counts.size() << "\n";
  OS << "# Counter Values:\n";
  for (uint64_t Count : Func.Counts)
    OS << Count << "\n";

  uint32_t NumValueKinds = Func.getNumValueKinds();
  if (!NumValueKinds) {
    OS << "\n";
    return;
  }

  OS << "# Num Value Kinds:\n" << NumValueKinds << "\n";
  for (uint32_t VK = 0; VK <= IPVK_Last; VK++) {
    uint32_t NS = Func.getNumValueSites(VK);
    if (!NS)
      continue;
    OS << "# ValueKind = " << ValueProfKindStr[VK] << ":\n" << VK << "\n";
    OS << "# NumValueSites:\n" << NS << "\n";
    for (uint32_t S = 0; S < NS; S++) {
      uint32_t ND = Func.getNumValueDataForSite(VK, S);
      OS << ND << "\n";
      std::unique_ptr<InstrProfValueData[]> VD = Func.getValueForSite(VK, S);
      for (uint32_t I = 0; I < ND; I++) {
        if (VK == IPVK_IndirectCallTarget)
          OS << Symtab.getFuncNameOrExternalSymbol(VD[I].Value) << ":"
             << VD[I].Count << "\n";
        else
          OS << VD[I].Value << ":" << VD[I].Count << "\n";
      }
    }
  }

  OS << "\n";
}

Error InstrProfWriter::writeText(raw_fd_ostream &OS) {
  // Check CS first since it implies an IR-level profile.
  if (static_cast<bool>(ProfileKind & InstrProfKind::ContextSensitive))
    OS << "# CSIR level Instrumentation Flag\n:csir\n";
  else if (static_cast<bool>(ProfileKind & InstrProfKind::IRInstrumentation))
    OS << "# IR level Instrumentation Flag\n:ir\n";

  if (static_cast<bool>(ProfileKind &
                        InstrProfKind::FunctionEntryInstrumentation))
    OS << "# Always instrument the function entry block\n:entry_first\n";
  InstrProfSymtab Symtab;

  using FuncPair = detail::DenseMapPair<uint64_t, InstrProfRecord>;
  using RecordType = std::pair<StringRef, FuncPair>;
  SmallVector<RecordType, 4> OrderedFuncData;

  for (const auto &I : FunctionData) {
    if (shouldEncodeData(I.getValue())) {
      if (Error E = Symtab.addFuncName(I.getKey()))
        return E;
      for (const auto &Func : I.getValue())
        OrderedFuncData.push_back(std::make_pair(I.getKey(), Func));
    }
  }

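  // Sort by name, then by function hash, so the text output is
  // deterministic.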
  llvm::sort(OrderedFuncData, [](const RecordType &A, const RecordType &B) {
    return std::tie(A.first, A.second.first) <
           std::tie(B.first, B.second.first);
  });

  for (const auto &Record : OrderedFuncData) {
    const StringRef &Name = Record.first;
    const FuncPair &Func = Record.second;
    writeRecordInText(Name, Func.first, Func.second, Symtab, OS);
  }

  for (const auto &Record : OrderedFuncData) {
    const FuncPair &Func = Record.second;
    if (Error E = validateRecord(Func.second))
      return E;
  }

  return Error::success();
}