//===-- xray_buffer_queue.h ------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of XRay, a dynamic runtime instrumentation system.
//
// Defines the interface for a buffer queue implementation.
//
//===----------------------------------------------------------------------===//
#ifndef XRAY_BUFFER_QUEUE_H
#define XRAY_BUFFER_QUEUE_H

#include "sanitizer_common/sanitizer_atomic.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_mutex.h"
#include "xray_defs.h"
#include <cstddef>
#include <cstdint>

namespace __xray {

/// BufferQueue implements a circular queue of fixed-size buffers (much like a
/// freelist), optimised for quick initialisation and finalisation, and for
/// getting buffers from, and returning buffers to, the queue. This is one key
/// component of the "flight data recorder" (FDR) mode to support ongoing XRay
/// function call trace collection.
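///
/// A rough lifecycle sketch (illustrative only; the buffer size, buffer count,
/// and the record-writing step below are placeholders, not values the runtime
/// actually uses):
///
/// \code
///   bool Success = false;
///   BufferQueue BQ(/*BufferSize=*/4096, /*BufferCount=*/16, Success);
///   if (!Success)
///     return;
///
///   BufferQueue::Buffer B;
///   if (BQ.getBuffer(B) == BufferQueue::ErrorCode::Ok) {
///     // ... write records into B.Data (at most B.Size bytes), publishing the
///     // number of bytes written through *B.Extents ...
///     BQ.releaseBuffer(B);
///   }
///
///   BQ.finalize();
///   BQ.apply([](const BufferQueue::Buffer &Buf) {
///     // ... flush the used buffer, e.g. write it out to a log ...
///   });
/// \endcode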
class BufferQueue {
public:
  /// ControlBlock represents the memory layout of how we interpret the backing
  /// store for all buffers and extents managed by a BufferQueue instance. The
  /// ControlBlock has the reference count as the first member, sized according
  /// to platform-specific cache-line size. We never use the Buffer member of
  /// the union, which is only there for compiler-supported alignment and
  /// sizing.
  ///
  /// This ensures that the `Data` member will be placed at least kCacheLineSize
  /// bytes from the beginning of the structure.
  struct ControlBlock {
    union {
      atomic_uint64_t RefCount;
      char Buffer[kCacheLineSize];
    };

    /// This must have size 1 to conform to the C++ rules for array data
    /// members. Sizing computations typically subtract this 1 byte.
    char Data[1];
  };
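
  // A sketch of how the co-located layout above might be sized (illustrative
  // only; the exact arithmetic lives in the implementation, and `Size` and
  // `Count` here are hypothetical parameters):
  //
  //   // Bytes needed for one ControlBlock header followed by `Count` buffers
  //   // of `Size` bytes each; the trailing 1-byte Data member is subtracted
  //   // because it overlaps the start of the usable storage.
  //   size_t AllocSize = (sizeof(ControlBlock) - 1) + (Size * Count);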

  struct Buffer {
    atomic_uint64_t *Extents = nullptr;
    uint64_t Generation{0};
    void *Data = nullptr;
    size_t Size = 0;

  private:
    friend class BufferQueue;
    ControlBlock *BackingStore = nullptr;
    ControlBlock *ExtentsBackingStore = nullptr;
    size_t Count = 0;
  };

  struct BufferRep {
    // The managed buffer.
    Buffer Buff;

    // This is true if the buffer has been returned to the available queue, and
    // is considered "used" by another thread.
    bool Used = false;
  };

private:
  // This models a ForwardIterator. |T| must be either a `Buffer` or a `const
  // Buffer`. Note that when incrementing we only advance to "used" buffers, so
  // that dereferencing always lands on a valid element.
  template <class T> class Iterator {
  public:
    BufferRep *Buffers = nullptr;
    size_t Offset = 0;
    size_t Max = 0;

    Iterator &operator++() {
      DCHECK_NE(Offset, Max);
      do {
        ++Offset;
        // Check the bound before reading Buffers[Offset] so we never touch the
        // one-past-the-end element.
      } while (Offset != Max && !Buffers[Offset].Used);
      return *this;
    }

    Iterator operator++(int) {
      Iterator C = *this;
      ++(*this);
      return C;
    }

    T &operator*() const { return Buffers[Offset].Buff; }

    T *operator->() const { return &(Buffers[Offset].Buff); }

    Iterator(BufferRep *Root, size_t O, size_t M) XRAY_NEVER_INSTRUMENT
        : Buffers(Root),
          Offset(O),
          Max(M) {
      // We want to advance to the first Offset where the 'Used' property is
      // true, or to the end of the list/queue.
      while (Offset != Max && !Buffers[Offset].Used) {
        ++Offset;
      }
    }

    Iterator() = default;
    Iterator(const Iterator &) = default;
    Iterator(Iterator &&) = default;
    Iterator &operator=(const Iterator &) = default;
    Iterator &operator=(Iterator &&) = default;
    ~Iterator() = default;

    template <class V>
    friend bool operator==(const Iterator &L, const Iterator<V> &R) {
      DCHECK_EQ(L.Max, R.Max);
      return L.Buffers == R.Buffers && L.Offset == R.Offset;
    }

    template <class V>
    friend bool operator!=(const Iterator &L, const Iterator<V> &R) {
      return !(L == R);
    }
  };

  // Size of each individual Buffer.
  size_t BufferSize;

  // Number of pre-allocated buffers.
  size_t BufferCount;

  SpinMutex Mutex;
  atomic_uint8_t Finalizing;

  // The collocated ControlBlock and buffer storage.
  ControlBlock *BackingStore;

  // The collocated ControlBlock and extents storage.
  ControlBlock *ExtentsBackingStore;

  // A dynamically allocated array of BufferRep instances.
  BufferRep *Buffers;

  // Pointer to the next buffer to be handed out.
  BufferRep *Next;

  // Pointer to the entry in the array where the next released buffer will be
  // placed.
  BufferRep *First;

  // Count of buffers that have been handed out through 'getBuffer'.
  size_t LiveBuffers;

  // A generation number, used to identify which generation of the queue a
  // given buffer is associated with.
  atomic_uint64_t Generation;
165
166  /// Releases references to the buffers backed by the current buffer queue.
167  void cleanupBuffers();
168
169public:
170  enum class ErrorCode : unsigned {
171    Ok,
172    NotEnoughMemory,
173    QueueFinalizing,
174    UnrecognizedBuffer,
175    AlreadyFinalized,
176    AlreadyInitialized,
177  };
178
179  static const char *getErrorString(ErrorCode E) {
180    switch (E) {
181    case ErrorCode::Ok:
182      return "(none)";
183    case ErrorCode::NotEnoughMemory:
184      return "no available buffers in the queue";
185    case ErrorCode::QueueFinalizing:
186      return "queue already finalizing";
187    case ErrorCode::UnrecognizedBuffer:
188      return "buffer being returned not owned by buffer queue";
189    case ErrorCode::AlreadyFinalized:
190      return "queue already finalized";
191    case ErrorCode::AlreadyInitialized:
192      return "queue already initialized";
193    }
194    return "unknown error";
195  }
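
  // Example of surfacing an ErrorCode to the user (illustrative; `BQ` and `B`
  // are hypothetical, and using Report() from sanitizer_common as the sink is
  // an assumption, not something this header mandates):
  //
  //   auto EC = BQ.getBuffer(B);
  //   if (EC != BufferQueue::ErrorCode::Ok)
  //     Report("XRay: failed to get a buffer: %s\n",
  //            BufferQueue::getErrorString(EC));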

  /// Initialise a queue of size |N| with buffers of size |B|. We report success
  /// through |Success|.
  BufferQueue(size_t B, size_t N, bool &Success);

  /// Updates |Buf| to contain the pointer to an appropriate buffer. Returns an
  /// error if there are no available buffers, i.e. when handing out another
  /// buffer would exceed the upper bound on the total number of buffers.
  ///
  /// Requirements:
  ///   - BufferQueue is not finalising.
  ///
  /// Returns:
  ///   - ErrorCode::NotEnoughMemory on exceeding MaxSize.
  ///   - ErrorCode::Ok when we find a Buffer.
  ///   - ErrorCode::QueueFinalizing or ErrorCode::AlreadyFinalized on
  ///     a finalizing/finalized BufferQueue.
  ErrorCode getBuffer(Buffer &Buf);

  /// Updates |Buf| to point to nullptr, with size 0.
  ///
  /// Returns:
  ///   - ErrorCode::Ok when we successfully release the buffer.
  ///   - ErrorCode::UnrecognizedBuffer when this BufferQueue does not own the
  ///     buffer being released.
  ErrorCode releaseBuffer(Buffer &Buf);
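
  // A sketch of the acquire/write/release pattern (illustrative; `BQ`,
  // `NumBytesWritten`, and the payload-writing step are hypothetical):
  //
  //   BufferQueue::Buffer B;
  //   if (BQ.getBuffer(B) != BufferQueue::ErrorCode::Ok)
  //     return;
  //   // ... write at most B.Size bytes into B.Data ...
  //   atomic_store(B.Extents, NumBytesWritten, memory_order_release);
  //   BQ.releaseBuffer(B);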

  /// Initializes the buffer queue, starting a new generation. The buffer size
  /// can be changed with |BS| and the buffer count with |BC|.
  ///
  /// Returns:
  ///   - ErrorCode::Ok when we successfully initialize the buffer queue. This
  ///     requires that the buffer queue was previously finalized.
  ///   - ErrorCode::AlreadyInitialized when the buffer queue is not finalized.
  ErrorCode init(size_t BS, size_t BC);
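
  // A sketch of re-initialising for a new generation (illustrative; the sizes
  // and the flush step are placeholders):
  //
  //   BQ.finalize();
  //   // ... flush the buffers from the finalized generation ...
  //   if (BQ.init(/*BS=*/8192, /*BC=*/32) == BufferQueue::ErrorCode::Ok) {
  //     // The queue now hands out buffers tagged with a new generation().
  //   }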

  bool finalizing() const {
    return atomic_load(&Finalizing, memory_order_acquire);
  }

  uint64_t generation() const {
    return atomic_load(&Generation, memory_order_acquire);
  }

  /// Returns the configured size of the buffers in the buffer queue.
  size_t ConfiguredBufferSize() const { return BufferSize; }

  /// Sets the state of the BufferQueue to finalizing, which ensures that:
  ///
  ///   - All subsequent attempts to retrieve a Buffer will fail.
  ///   - All releaseBuffer operations will still succeed.
  ///
  /// After a call to finalize succeeds, all subsequent calls to finalize will
  /// fail with ErrorCode::QueueFinalizing.
  ErrorCode finalize();

  /// Applies the provided function F to each Buffer in the queue, only if the
  /// Buffer is marked 'used' (i.e. has been the result of getBuffer(...) and a
  /// releaseBuffer(...) operation).
  template <class F> void apply(F Fn) XRAY_NEVER_INSTRUMENT {
    SpinMutexLock G(&Mutex);
    for (auto I = begin(), E = end(); I != E; ++I)
      Fn(*I);
  }
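
  // Example use of apply() to walk the used buffers after finalization
  // (illustrative; the flushing step in the callback body is a placeholder):
  //
  //   BQ.apply([](const BufferQueue::Buffer &Buf) {
  //     uint64_t Extents = atomic_load(Buf.Extents, memory_order_acquire);
  //     // ... write out the first `Extents` bytes of Buf.Data ...
  //   });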

  using const_iterator = Iterator<const Buffer>;
  using iterator = Iterator<Buffer>;

  /// Provides iterator access to the raw Buffer instances.
  iterator begin() const { return iterator(Buffers, 0, BufferCount); }
  const_iterator cbegin() const {
    return const_iterator(Buffers, 0, BufferCount);
  }
  iterator end() const { return iterator(Buffers, BufferCount, BufferCount); }
  const_iterator cend() const {
    return const_iterator(Buffers, BufferCount, BufferCount);
  }

  // Cleans up allocated buffers.
  ~BufferQueue();
};

} // namespace __xray

#endif // XRAY_BUFFER_QUEUE_H