//===-- common.cpp ----------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "gwp_asan/common.h"
#include "gwp_asan/stack_trace_compressor.h"

#include <assert.h>

using AllocationMetadata = gwp_asan::AllocationMetadata;
using Error = gwp_asan::Error;

namespace gwp_asan {

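// Returns a human-readable name for the given error type.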
const char *ErrorToString(const Error &E) {
  switch (E) {
  case Error::UNKNOWN:
    return "Unknown";
  case Error::USE_AFTER_FREE:
    return "Use After Free";
  case Error::DOUBLE_FREE:
    return "Double Free";
  case Error::INVALID_FREE:
    return "Invalid (Wild) Free";
  case Error::BUFFER_OVERFLOW:
    return "Buffer Overflow";
  case Error::BUFFER_UNDERFLOW:
    return "Buffer Underflow";
  }
  __builtin_trap();
}

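// Records the address, size, and allocating thread for a new allocation, and
// clears any deallocation state left over from a previous use of this
// metadata slot.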
void AllocationMetadata::RecordAllocation(uintptr_t AllocAddr,
                                          size_t AllocSize) {
  Addr = AllocAddr;
  Size = AllocSize;
  IsDeallocated = false;

  AllocationTrace.ThreadID = getThreadID();
  DeallocationTrace.TraceSize = 0;
  DeallocationTrace.ThreadID = kInvalidThreadID;
}

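// Marks this allocation as deallocated and records the deallocating thread.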
void AllocationMetadata::RecordDeallocation() {
  IsDeallocated = true;
  DeallocationTrace.ThreadID = getThreadID();
}

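// Captures a backtrace through the user-provided callback and stores it in
// compressed form. If no callback is available, TraceSize is left at zero.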
void AllocationMetadata::CallSiteInfo::RecordBacktrace(
    options::Backtrace_t Backtrace) {
  TraceSize = 0;
  if (!Backtrace)
    return;

  uintptr_t UncompressedBuffer[kMaxTraceLengthToCollect];
  size_t BacktraceLength =
      Backtrace(UncompressedBuffer, kMaxTraceLengthToCollect);
  // Backtrace() returns the number of available frames, which may be greater
  // than the number of frames that fit in the buffer. In that case, only pack
  // the frames that were actually written to the buffer.
  if (BacktraceLength > kMaxTraceLengthToCollect)
    BacktraceLength = kMaxTraceLengthToCollect;
  TraceSize =
      compression::pack(UncompressedBuffer, BacktraceLength, CompressedTrace,
                        AllocationMetadata::kStackFrameStorageBytes);
}

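// Each slot is a single page, so the largest allocation that can be serviced
// is one page.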
size_t AllocatorState::maximumAllocationSize() const { return PageSize; }

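// The guarded pool interleaves guard pages and allocation slots, with each
// slot preceded by a guard page. Slot N therefore begins (N + 1) guard pages
// plus N slot-sized regions past the start of the pool.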
uintptr_t AllocatorState::slotToAddr(size_t N) const {
  return GuardedPagePool + (PageSize * (1 + N)) + (maximumAllocationSize() * N);
}

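// A page in the pool is a guard page exactly when its page index from the
// pool start is a multiple of (pages-per-slot + 1), since every slot is
// preceded by a single guard page.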
bool AllocatorState::isGuardPage(uintptr_t Ptr) const {
  assert(pointerIsMine(reinterpret_cast<void *>(Ptr)));
  size_t PageOffsetFromPoolStart = (Ptr - GuardedPagePool) / PageSize;
  size_t PagesPerSlot = maximumAllocationSize() / PageSize;
  return (PageOffsetFromPoolStart % (PagesPerSlot + 1)) == 0;
}

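// Maps an address inside the pool to the index of the guard-page-plus-slot
// region that contains it.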
static size_t addrToSlot(const AllocatorState *State, uintptr_t Ptr) {
  size_t ByteOffsetFromPoolStart = Ptr - State->GuardedPagePool;
  return ByteOffsetFromPoolStart /
         (State->maximumAllocationSize() + State->PageSize);
}

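// Returns the slot a faulting pointer most likely belongs to. Pointers in the
// leading or trailing guard regions clamp to the first or last slot; pointers
// in an interior guard page round to whichever neighbouring slot is closer.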
size_t AllocatorState::getNearestSlot(uintptr_t Ptr) const {
  if (Ptr <= GuardedPagePool + PageSize)
    return 0;
  if (Ptr > GuardedPagePoolEnd - PageSize)
    return MaxSimultaneousAllocations - 1;

  if (!isGuardPage(Ptr))
    return addrToSlot(this, Ptr);

  if (Ptr % PageSize <= PageSize / 2)
    return addrToSlot(this, Ptr - PageSize); // Round down.
  return addrToSlot(this, Ptr + PageSize);   // Round up.
}

} // namespace gwp_asan