// stats.h — FreeBSD vendor-branch import of LLVM scudo (SVN revision 351282).
1351282Sdim//===-- stats.h -------------------------------------------------*- C++ -*-===// 2351282Sdim// 3351282Sdim// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. 4351282Sdim// See https://llvm.org/LICENSE.txt for license information. 5351282Sdim// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception 6351282Sdim// 7351282Sdim//===----------------------------------------------------------------------===// 8351282Sdim 9351282Sdim#ifndef SCUDO_STATS_H_ 10351282Sdim#define SCUDO_STATS_H_ 11351282Sdim 12351282Sdim#include "atomic_helpers.h" 13351282Sdim#include "mutex.h" 14351282Sdim 15351282Sdim#include <string.h> 16351282Sdim 17351282Sdimnamespace scudo { 18351282Sdim 19351282Sdim// Memory allocator statistics 20351282Sdimenum StatType { StatAllocated, StatMapped, StatCount }; 21351282Sdim 22351282Sdimtypedef uptr StatCounters[StatCount]; 23351282Sdim 24351282Sdim// Per-thread stats, live in per-thread cache. We use atomics so that the 25351282Sdim// numbers themselves are consistent. But we don't use atomic_{add|sub} or a 26351282Sdim// lock, because those are expensive operations , and we only care for the stats 27351282Sdim// to be "somewhat" correct: eg. if we call GlobalStats::get while a thread is 28351282Sdim// LocalStats::add'ing, this is OK, we will still get a meaningful number. 
29351282Sdimclass LocalStats { 30351282Sdimpublic: 31351282Sdim void initLinkerInitialized() {} 32351282Sdim void init() { memset(this, 0, sizeof(*this)); } 33351282Sdim 34351282Sdim void add(StatType I, uptr V) { 35351282Sdim V += atomic_load_relaxed(&StatsArray[I]); 36351282Sdim atomic_store_relaxed(&StatsArray[I], V); 37351282Sdim } 38351282Sdim 39351282Sdim void sub(StatType I, uptr V) { 40351282Sdim V = atomic_load_relaxed(&StatsArray[I]) - V; 41351282Sdim atomic_store_relaxed(&StatsArray[I], V); 42351282Sdim } 43351282Sdim 44351282Sdim void set(StatType I, uptr V) { atomic_store_relaxed(&StatsArray[I], V); } 45351282Sdim 46351282Sdim uptr get(StatType I) const { return atomic_load_relaxed(&StatsArray[I]); } 47351282Sdim 48351282Sdimprivate: 49351282Sdim friend class GlobalStats; 50351282Sdim atomic_uptr StatsArray[StatCount]; 51351282Sdim LocalStats *Next; 52351282Sdim LocalStats *Prev; 53351282Sdim}; 54351282Sdim 55351282Sdim// Global stats, used for aggregation and querying. 56351282Sdimclass GlobalStats : public LocalStats { 57351282Sdimpublic: 58351282Sdim void initLinkerInitialized() { 59351282Sdim Next = this; 60351282Sdim Prev = this; 61351282Sdim } 62351282Sdim void init() { 63351282Sdim memset(this, 0, sizeof(*this)); 64351282Sdim initLinkerInitialized(); 65351282Sdim } 66351282Sdim 67351282Sdim void link(LocalStats *S) { 68351282Sdim ScopedLock L(Mutex); 69351282Sdim S->Next = Next; 70351282Sdim S->Prev = this; 71351282Sdim Next->Prev = S; 72351282Sdim Next = S; 73351282Sdim } 74351282Sdim 75351282Sdim void unlink(LocalStats *S) { 76351282Sdim ScopedLock L(Mutex); 77351282Sdim S->Prev->Next = S->Next; 78351282Sdim S->Next->Prev = S->Prev; 79351282Sdim for (uptr I = 0; I < StatCount; I++) 80351282Sdim add(static_cast<StatType>(I), S->get(static_cast<StatType>(I))); 81351282Sdim } 82351282Sdim 83351282Sdim void get(uptr *S) const { 84351282Sdim memset(S, 0, StatCount * sizeof(uptr)); 85351282Sdim ScopedLock L(Mutex); 86351282Sdim const LocalStats 
*Stats = this; 87351282Sdim for (;;) { 88351282Sdim for (uptr I = 0; I < StatCount; I++) 89351282Sdim S[I] += Stats->get(static_cast<StatType>(I)); 90351282Sdim Stats = Stats->Next; 91351282Sdim if (Stats == this) 92351282Sdim break; 93351282Sdim } 94351282Sdim // All stats must be non-negative. 95351282Sdim for (uptr I = 0; I < StatCount; I++) 96351282Sdim S[I] = static_cast<sptr>(S[I]) >= 0 ? S[I] : 0; 97351282Sdim } 98351282Sdim 99351282Sdimprivate: 100351282Sdim mutable HybridMutex Mutex; 101351282Sdim}; 102351282Sdim 103351282Sdim} // namespace scudo 104351282Sdim 105351282Sdim#endif // SCUDO_STATS_H_ 106