//===-- stats.h -------------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef SCUDO_STATS_H_
#define SCUDO_STATS_H_

#include "atomic_helpers.h"
#include "list.h"
#include "mutex.h"

#include <string.h>

namespace scudo {

// Memory allocator statistics
enum StatType { StatAllocated, StatFree, StatMapped, StatCount };

typedef uptr StatCounters[StatCount];

// Per-thread stats, live in the per-thread cache. We use atomics so that the
// numbers themselves are consistent. But we don't use atomic_{add|sub} or a
// lock, because those are expensive operations, and we only need the stats to
// be "somewhat" correct: e.g., if GlobalStats::get is called while a thread is
// in the middle of LocalStats::add, that is fine, we will still get a
// meaningful number.
class LocalStats {
public:
  void init() {
    for (uptr I = 0; I < StatCount; I++)
      DCHECK_EQ(get(static_cast<StatType>(I)), 0U);
  }

  void add(StatType I, uptr V) {
    V += atomic_load_relaxed(&StatsArray[I]);
    atomic_store_relaxed(&StatsArray[I], V);
  }

  void sub(StatType I, uptr V) {
    V = atomic_load_relaxed(&StatsArray[I]) - V;
    atomic_store_relaxed(&StatsArray[I], V);
  }

  void set(StatType I, uptr V) { atomic_store_relaxed(&StatsArray[I], V); }

  uptr get(StatType I) const { return atomic_load_relaxed(&StatsArray[I]); }

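  // Intrusive list hooks used by the DoublyLinkedList in GlobalStats.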
  LocalStats *Next = nullptr;
  LocalStats *Prev = nullptr;

private:
  atomic_uptr StatsArray[StatCount] = {};
};
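
// Usage sketch (illustrative only, not part of this header): a per-thread
// cache would typically own a LocalStats instance and update it on its fast
// path. The names `Stats` and `Size` below are hypothetical.
//
//   LocalStats Stats;
//   Stats.init();
//   Stats.add(StatAllocated, Size); // On allocation of Size bytes.
//   Stats.sub(StatAllocated, Size); // On the matching deallocation.
//   const uptr Allocated = Stats.get(StatAllocated);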

// Global stats, used for aggregation and querying.
class GlobalStats : public LocalStats {
public:
  void init() { LocalStats::init(); }

  void link(LocalStats *S) {
    ScopedLock L(Mutex);
    StatsList.push_back(S);
  }

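  // Remove S from the list and fold its counters into the global ones, so the
  // totals are preserved after S goes away.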
  void unlink(LocalStats *S) {
    ScopedLock L(Mutex);
    StatsList.remove(S);
    for (uptr I = 0; I < StatCount; I++)
      add(static_cast<StatType>(I), S->get(static_cast<StatType>(I)));
  }

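  // Copies the aggregated counters (the global ones plus those of every linked
  // LocalStats) into S, which must be able to hold StatCount entries.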
  void get(uptr *S) const {
    ScopedLock L(Mutex);
    for (uptr I = 0; I < StatCount; I++)
      S[I] = LocalStats::get(static_cast<StatType>(I));
    for (const auto &Stats : StatsList) {
      for (uptr I = 0; I < StatCount; I++)
        S[I] += Stats.get(static_cast<StatType>(I));
    }
    // A counter can transiently appear negative because of the racy relaxed
    // updates; clamp such values to 0.
    for (uptr I = 0; I < StatCount; I++)
      S[I] = static_cast<sptr>(S[I]) >= 0 ? S[I] : 0;
  }

  void lock() { Mutex.lock(); }
  void unlock() { Mutex.unlock(); }

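  // While disabled, the mutex is held, so no linking, unlinking or aggregation
  // can take place until enable() is called.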
  void disable() { lock(); }
  void enable() { unlock(); }

private:
  mutable HybridMutex Mutex;
  DoublyLinkedList<LocalStats> StatsList;
};
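
// Usage sketch (illustrative only): callers link a LocalStats instance when
// its owner is set up, unlink it on teardown, and read aggregated totals with
// get(). `ThreadStats` and `Totals` below are hypothetical names.
//
//   GlobalStats Stats;
//   Stats.init();
//   Stats.link(&ThreadStats);   // When the per-thread stats come to life.
//   Stats.unlink(&ThreadStats); // When they are torn down.
//   uptr Totals[StatCount];
//   Stats.get(Totals);          // Totals[StatAllocated], Totals[StatMapped], ...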

} // namespace scudo

#endif // SCUDO_STATS_H_
