//===-- stats.h -------------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef SCUDO_STATS_H_
#define SCUDO_STATS_H_

#include "atomic_helpers.h"
#include "list.h"
#include "mutex.h"

#include <string.h>

namespace scudo {

// Memory allocator statistics: the amount of memory currently allocated to
// the user (StatAllocated), held in free blocks (StatFree), and mapped from
// the system (StatMapped). StatCount is the number of statistic types.
enum StatType { StatAllocated, StatFree, StatMapped, StatCount };

typedef uptr StatCounters[StatCount];

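// A minimal indexing sketch (values made up for illustration): a StatCounters
// buffer holds one uptr per StatType.
//
//   StatCounters Counters = {};
//   Counters[StatAllocated] += 64;  // 64 more bytes handed out to the user.
//   Counters[StatMapped] += 4096;   // one more (hypothetical) page mapped.
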
// Per-thread stats, live in the per-thread cache. We use atomics so that the
// numbers themselves are consistent. But we don't use atomic_{add|sub} or a
// lock, because those are expensive operations, and we only care for the
// stats to be "somewhat" correct: e.g., if we call GlobalStats::get while a
// thread is LocalStats::add'ing, this is OK, we will still get a meaningful
// number.
class LocalStats {
public:
  void initLinkerInitialized() {}
  void init() { memset(this, 0, sizeof(*this)); }

  // A relaxed load followed by a relaxed store, rather than an atomic
  // read-modify-write: as explained above, a racy update is acceptable here
  // and cheaper.
  void add(StatType I, uptr V) {
    V += atomic_load_relaxed(&StatsArray[I]);
    atomic_store_relaxed(&StatsArray[I], V);
  }

  void sub(StatType I, uptr V) {
    V = atomic_load_relaxed(&StatsArray[I]) - V;
    atomic_store_relaxed(&StatsArray[I], V);
  }

  void set(StatType I, uptr V) { atomic_store_relaxed(&StatsArray[I], V); }

  uptr get(StatType I) const { return atomic_load_relaxed(&StatsArray[I]); }

  // Intrusive links used by GlobalStats' DoublyLinkedList.
  LocalStats *Next;
  LocalStats *Prev;

private:
  atomic_uptr StatsArray[StatCount];
};

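// A minimal usage sketch (not part of this header): a per-thread cache would
// typically embed a LocalStats, register it with the global stats, and update
// it on (de)allocation. `GStats` and `Size` are hypothetical names.
//
//   LocalStats Stats;
//   Stats.init();
//   GStats.link(&Stats);            // GStats is a GlobalStats, see below.
//   Stats.add(StatAllocated, Size); // on allocation
//   Stats.sub(StatAllocated, Size); // on deallocation
//   GStats.unlink(&Stats);          // on thread teardown
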
// Global stats, used for aggregation and querying.
class GlobalStats : public LocalStats {
public:
  void initLinkerInitialized() {}
  void init() {
    memset(this, 0, sizeof(*this));
    initLinkerInitialized();
  }

  void link(LocalStats *S) {
    ScopedLock L(Mutex);
    StatsList.push_back(S);
  }

  // Unlinking a LocalStats folds its counters into the global ones, so that
  // the departing thread's contribution is not lost.
  void unlink(LocalStats *S) {
    ScopedLock L(Mutex);
    StatsList.remove(S);
    for (uptr I = 0; I < StatCount; I++)
      add(static_cast<StatType>(I), S->get(static_cast<StatType>(I)));
  }

  // Copies the aggregated statistics into S, which must have room for
  // StatCount entries: the global counters plus those of every linked
  // LocalStats.
  void get(uptr *S) const {
    ScopedLock L(Mutex);
    for (uptr I = 0; I < StatCount; I++)
      S[I] = LocalStats::get(static_cast<StatType>(I));
    for (const auto &Stats : StatsList) {
      for (uptr I = 0; I < StatCount; I++)
        S[I] += Stats.get(static_cast<StatType>(I));
    }
    // All stats must be non-negative; racy updates can make a sum transiently
    // appear negative, in which case it is clamped to 0.
    for (uptr I = 0; I < StatCount; I++)
      S[I] = static_cast<sptr>(S[I]) >= 0 ? S[I] : 0;
  }

  // Hold the lock while the allocator is disabled, so that the list cannot
  // change underneath it.
  void disable() { Mutex.lock(); }
  void enable() { Mutex.unlock(); }

private:
  mutable HybridMutex Mutex;
  DoublyLinkedList<LocalStats> StatsList;
};

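// A minimal aggregation sketch (hypothetical driver code, not part of this
// header): GlobalStats::get fills a StatCount-sized buffer with the summed
// counters.
//
//   GlobalStats GStats;
//   GStats.init();
//   // ... threads link and update their LocalStats ...
//   StatCounters Totals;
//   GStats.get(Totals);
//   uptr InUse = Totals[StatAllocated];
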
} // namespace scudo

#endif // SCUDO_STATS_H_