#ifndef JEMALLOC_INTERNAL_PROF_EXTERNS_H
#define JEMALLOC_INTERNAL_PROF_EXTERNS_H

#include "jemalloc/internal/mutex.h"

/*
 * Externs for the heap-profiling subsystem.  Definitions live in the
 * corresponding prof .c file; this header only declares them.
 */

/* Protects the backtrace-to-gctx mapping (bt2gctx). */
extern malloc_mutex_t bt2gctx_mtx;

/* Option values, set from the opt.prof* mallctl/environment options. */
extern bool opt_prof;
extern bool opt_prof_active;
extern bool opt_prof_thread_active_init;
extern size_t opt_lg_prof_sample; /* Mean bytes between samples. */
extern ssize_t opt_lg_prof_interval; /* lg(prof_interval). */
extern bool opt_prof_gdump; /* High-water memory dumping. */
extern bool opt_prof_final; /* Final profile dumping. */
extern bool opt_prof_leak; /* Dump leak summary at exit. */
extern bool opt_prof_accum; /* Report cumulative bytes. */
extern char opt_prof_prefix[
    /* Minimize memory bloat for non-prof builds. */
#ifdef JEMALLOC_PROF
    PATH_MAX +
#endif
    1];

/* Accessed via prof_active_[gs]et{_unlocked,}(). */
extern bool prof_active;

/* Accessed via prof_gdump_[gs]et{_unlocked,}(). */
extern bool prof_gdump_val;

/*
 * Profile dump interval, measured in bytes allocated.  Each arena triggers a
 * profile dump when it reaches this threshold.  The effect is that the
 * interval between profile dumps averages prof_interval, though the actual
 * interval between dumps will tend to be sporadic, and the interval will be a
 * maximum of approximately (prof_interval * narenas).
 */
extern uint64_t prof_interval;

/*
 * Initialized as opt_lg_prof_sample, and potentially modified during profiling
 * resets.
 */
extern size_t lg_prof_sample;

/* Per-allocation sampling hooks and backtrace/context lookup. */
void prof_alloc_rollback(tsd_t *tsd, prof_tctx_t *tctx, bool updated);
void prof_malloc_sample_object(tsdn_t *tsdn, const void *ptr, size_t usize,
    prof_tctx_t *tctx);
void prof_free_sampled_object(tsd_t *tsd, size_t usize, prof_tctx_t *tctx);
void bt_init(prof_bt_t *bt, void **vec);
void prof_backtrace(prof_bt_t *bt);
prof_tctx_t *prof_lookup(tsd_t *tsd, prof_bt_t *bt);
/* Testing-only introspection helpers, compiled under JEMALLOC_JET. */
#ifdef JEMALLOC_JET
size_t prof_tdata_count(void);
size_t prof_bt_count(void);
#endif
/*
 * JET_MUTABLE function pointers: replaceable in JET (testing) builds so tests
 * can intercept dump-file opening and dump-header emission.
 */
typedef int (prof_dump_open_t)(bool, const char *);
extern prof_dump_open_t *JET_MUTABLE prof_dump_open;

typedef bool (prof_dump_header_t)(tsdn_t *, bool, const prof_cnt_t *);
extern prof_dump_header_t *JET_MUTABLE prof_dump_header;
#ifdef JEMALLOC_JET
void prof_cnt_all(uint64_t *curobjs, uint64_t *curbytes, uint64_t *accumobjs,
    uint64_t *accumbytes);
#endif
/* Dump triggers: interval (idump), manual (mdump), high-water (gdump). */
bool prof_accum_init(tsdn_t *tsdn, prof_accum_t *prof_accum);
void prof_idump(tsdn_t *tsdn);
bool prof_mdump(tsd_t *tsd, const char *filename);
void prof_gdump(tsdn_t *tsdn);
/* Per-thread profiling data (tdata) lifecycle. */
prof_tdata_t *prof_tdata_init(tsd_t *tsd);
prof_tdata_t *prof_tdata_reinit(tsd_t *tsd, prof_tdata_t *tdata);
void prof_reset(tsd_t *tsd, size_t lg_sample);
void prof_tdata_cleanup(tsd_t *tsd);
/* Getters/setters for the prof_active / thread-active / gdump flags above. */
bool prof_active_get(tsdn_t *tsdn);
bool prof_active_set(tsdn_t *tsdn, bool active);
const char *prof_thread_name_get(tsd_t *tsd);
int prof_thread_name_set(tsd_t *tsd, const char *thread_name);
bool prof_thread_active_get(tsd_t *tsd);
bool prof_thread_active_set(tsd_t *tsd, bool active);
bool prof_thread_active_init_get(tsdn_t *tsdn);
bool prof_thread_active_init_set(tsdn_t *tsdn, bool active_init);
bool prof_gdump_get(tsdn_t *tsdn);
bool prof_gdump_set(tsdn_t *tsdn, bool active);
/* Staged bootstrap and fork hooks (NOTE(review): ordering/locking contracts
 * are defined in the implementation file — confirm there before relying on
 * them). */
void prof_boot0(void);
void prof_boot1(void);
bool prof_boot2(tsd_t *tsd);
void prof_prefork0(tsdn_t *tsdn);
void prof_prefork1(tsdn_t *tsdn);
void prof_postfork_parent(tsdn_t *tsdn);
void prof_postfork_child(tsdn_t *tsdn);
void prof_sample_threshold_update(prof_tdata_t *tdata);

#endif /* JEMALLOC_INTERNAL_PROF_EXTERNS_H */