/* prof.h — jemalloc heap-profiling internals (revision 286866). */
/******************************************************************************/
#ifdef JEMALLOC_H_TYPES

typedef struct prof_bt_s prof_bt_t;
typedef struct prof_cnt_s prof_cnt_t;
typedef struct prof_tctx_s prof_tctx_t;
typedef struct prof_gctx_s prof_gctx_t;
typedef struct prof_tdata_s prof_tdata_t;

/* Option defaults. */
#ifdef JEMALLOC_PROF
#  define PROF_PREFIX_DEFAULT "jeprof"
#else
#  define PROF_PREFIX_DEFAULT ""
#endif
#define LG_PROF_SAMPLE_DEFAULT 19
#define LG_PROF_INTERVAL_DEFAULT -1

/*
 * Hard limit on stack backtrace depth.  The version of prof_backtrace() that
 * is based on __builtin_return_address() necessarily has a hard-coded number
 * of backtrace frame handlers, and should be kept in sync with this setting.
 */
#define PROF_BT_MAX 128

/* Initial hash table size. */
#define PROF_CKH_MINITEMS 64

/* Size of memory buffer to use when writing dump files. */
#define PROF_DUMP_BUFSIZE 65536

/* Size of stack-allocated buffer used by prof_printf(). */
#define PROF_PRINTF_BUFSIZE 128

/*
 * Number of mutexes shared among all gctx's.  No space is allocated for these
 * unless profiling is enabled, so it's okay to over-provision.
 */
#define PROF_NCTX_LOCKS 1024

/*
 * Number of mutexes shared among all tdata's.  No space is allocated for these
 * unless profiling is enabled, so it's okay to over-provision.
 */
#define PROF_NTDATA_LOCKS 256

/*
 * prof_tdata pointers close to NULL are used to encode state information that
 * is used for cleaning up during thread shutdown.
 */
#define PROF_TDATA_STATE_REINCARNATED ((prof_tdata_t *)(uintptr_t)1)
#define PROF_TDATA_STATE_PURGATORY ((prof_tdata_t *)(uintptr_t)2)
#define PROF_TDATA_STATE_MAX PROF_TDATA_STATE_PURGATORY

#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS

struct prof_bt_s {
	/* Backtrace, stored as len program counters. */
	void		**vec;
	unsigned	len;
};

#ifdef JEMALLOC_PROF_LIBGCC
/* Data structure passed to libgcc _Unwind_Backtrace() callback functions. */
typedef struct {
	prof_bt_t	*bt;
	unsigned	max;
} prof_unwind_data_t;
#endif

struct prof_cnt_s {
	/* Profiling counters. */
	uint64_t	curobjs;
	uint64_t	curbytes;
	uint64_t	accumobjs;
	uint64_t	accumbytes;
};

typedef enum {
	prof_tctx_state_initializing,
	prof_tctx_state_nominal,
	prof_tctx_state_dumping,
	prof_tctx_state_purgatory /* Dumper must finish destroying. */
} prof_tctx_state_t;

struct prof_tctx_s {
	/* Thread data for thread that performed the allocation. */
	prof_tdata_t		*tdata;

	/*
	 * Copy of tdata->thr_uid, necessary because tdata may be defunct during
	 * teardown.
	 */
	uint64_t		thr_uid;

	/* Profiling counters, protected by tdata->lock. */
	prof_cnt_t		cnts;

	/* Associated global context. */
	prof_gctx_t		*gctx;

	/*
	 * UID that distinguishes multiple tctx's created by the same thread,
	 * but coexisting in gctx->tctxs.  There are two ways that such
	 * coexistence can occur:
	 * - A dumper thread can cause a tctx to be retained in the purgatory
	 *   state.
	 * - Although a single "producer" thread must create all tctx's which
	 *   share the same thr_uid, multiple "consumers" can each concurrently
	 *   execute portions of prof_tctx_destroy().  prof_tctx_destroy() only
	 *   gets called once each time cnts.cur{objs,bytes} drop to 0, but this
	 *   threshold can be hit again before the first consumer finishes
	 *   executing prof_tctx_destroy().
	 */
	uint64_t		tctx_uid;

	/* Linkage into gctx's tctxs. */
	rb_node(prof_tctx_t)	tctx_link;

	/*
	 * True during prof_alloc_prep()..prof_malloc_sample_object(), prevents
	 * sample vs destroy race.
	 */
	bool			prepared;

	/* Current dump-related state, protected by gctx->lock. */
	prof_tctx_state_t	state;

	/*
	 * Copy of cnts snapshotted during early dump phase, protected by
	 * dump_mtx.
	 */
	prof_cnt_t		dump_cnts;
};
typedef rb_tree(prof_tctx_t) prof_tctx_tree_t;

struct prof_gctx_s {
	/* Protects nlimbo, cnt_summed, and tctxs. */
	malloc_mutex_t		*lock;

	/*
	 * Number of threads that currently cause this gctx to be in a state of
	 * limbo due to one of:
	 *   - Initializing this gctx.
	 *   - Initializing per thread counters associated with this gctx.
	 *   - Preparing to destroy this gctx.
	 *   - Dumping a heap profile that includes this gctx.
	 * nlimbo must be 1 (single destroyer) in order to safely destroy the
	 * gctx.
	 */
	unsigned		nlimbo;

	/*
	 * Tree of profile counters, one for each thread that has allocated in
	 * this context.
	 */
	prof_tctx_tree_t	tctxs;

	/* Linkage for tree of contexts to be dumped. */
	rb_node(prof_gctx_t)	dump_link;

	/* Temporary storage for summation during dump. */
	prof_cnt_t		cnt_summed;

	/* Associated backtrace. */
	prof_bt_t		bt;

	/* Backtrace vector, variable size, referred to by bt. */
	void			*vec[1];
};
typedef rb_tree(prof_gctx_t) prof_gctx_tree_t;

struct prof_tdata_s {
	malloc_mutex_t		*lock;

	/* Monotonically increasing unique thread identifier. */
	uint64_t		thr_uid;

	/*
	 * Monotonically increasing discriminator among tdata structures
	 * associated with the same thr_uid.
	 */
	uint64_t		thr_discrim;

	/* Included in heap profile dumps if non-NULL. */
	char			*thread_name;

	bool			attached;
	bool			expired;

	rb_node(prof_tdata_t)	tdata_link;

	/*
	 * Counter used to initialize prof_tctx_t's tctx_uid.  No locking is
	 * necessary when incrementing this field, because only one thread ever
	 * does so.
	 */
	uint64_t		tctx_uid_next;

	/*
	 * Hash of (prof_bt_t *)-->(prof_tctx_t *).  Each thread tracks
	 * backtraces for which it has non-zero allocation/deallocation counters
	 * associated with thread-specific prof_tctx_t objects.  Other threads
	 * may write to prof_tctx_t contents when freeing associated objects.
	 */
	ckh_t			bt2tctx;

	/* Sampling state. */
	uint64_t		prng_state;
	uint64_t		bytes_until_sample;

	/* State used to avoid dumping while operating on prof internals. */
	bool			enq;
	bool			enq_idump;
	bool			enq_gdump;

	/*
	 * Set to true during an early dump phase for tdata's which are
	 * currently being dumped.  New threads' tdata's have this initialized
	 * to false so that they aren't accidentally included in later dump
	 * phases.
	 */
	bool			dumping;

	/*
	 * True if profiling is active for this tdata's thread
	 * (thread.prof.active mallctl).
	 */
	bool			active;

	/* Temporary storage for summation during dump. */
	prof_cnt_t		cnt_summed;

	/* Backtrace vector, used for calls to prof_backtrace(). */
	void			*vec[PROF_BT_MAX];
};
typedef rb_tree(prof_tdata_t) prof_tdata_tree_t;

#endif /* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS

extern bool	opt_prof;
extern bool	opt_prof_active;
extern bool	opt_prof_thread_active_init;
extern size_t	opt_lg_prof_sample;   /* Mean bytes between samples. */
extern ssize_t	opt_lg_prof_interval; /* lg(prof_interval). */
extern bool	opt_prof_gdump;       /* High-water memory dumping. */
extern bool	opt_prof_final;       /* Final profile dumping. */
extern bool	opt_prof_leak;        /* Dump leak summary at exit. */
extern bool	opt_prof_accum;       /* Report cumulative bytes. */
extern char	opt_prof_prefix[
    /* Minimize memory bloat for non-prof builds. */
#ifdef JEMALLOC_PROF
    PATH_MAX +
#endif
    1];

/* Accessed via prof_active_[gs]et{_unlocked,}(). */
extern bool	prof_active;

/* Accessed via prof_gdump_[gs]et{_unlocked,}(). */
extern bool	prof_gdump_val;

/*
 * Profile dump interval, measured in bytes allocated.  Each arena triggers a
 * profile dump when it reaches this threshold.  The effect is that the
 * interval between profile dumps averages prof_interval, though the actual
 * interval between dumps will tend to be sporadic, and the interval will be a
 * maximum of approximately (prof_interval * narenas).
 */
extern uint64_t	prof_interval;

/*
 * Initialized as opt_lg_prof_sample, and potentially modified during profiling
 * resets.
 */
extern size_t	lg_prof_sample;

void	prof_alloc_rollback(tsd_t *tsd, prof_tctx_t *tctx, bool updated);
void	prof_malloc_sample_object(const void *ptr, size_t usize,
    prof_tctx_t *tctx);
void	prof_free_sampled_object(tsd_t *tsd, size_t usize, prof_tctx_t *tctx);
void	bt_init(prof_bt_t *bt, void **vec);
void	prof_backtrace(prof_bt_t *bt);
prof_tctx_t	*prof_lookup(tsd_t *tsd, prof_bt_t *bt);
#ifdef JEMALLOC_JET
size_t	prof_tdata_count(void);
size_t	prof_bt_count(void);
const prof_cnt_t *prof_cnt_all(void);
typedef int (prof_dump_open_t)(bool, const char *);
extern prof_dump_open_t *prof_dump_open;
typedef bool (prof_dump_header_t)(bool, const prof_cnt_t *);
extern prof_dump_header_t *prof_dump_header;
#endif
void	prof_idump(void);
bool	prof_mdump(const char *filename);
void	prof_gdump(void);
prof_tdata_t	*prof_tdata_init(tsd_t *tsd);
prof_tdata_t	*prof_tdata_reinit(tsd_t *tsd, prof_tdata_t *tdata);
void	prof_reset(tsd_t *tsd, size_t lg_sample);
void	prof_tdata_cleanup(tsd_t *tsd);
const char	*prof_thread_name_get(void);
bool	prof_active_get(void);
bool	prof_active_set(bool active);
int	prof_thread_name_set(tsd_t *tsd, const char *thread_name);
bool	prof_thread_active_get(void);
bool	prof_thread_active_set(bool active);
bool	prof_thread_active_init_get(void);
bool	prof_thread_active_init_set(bool active_init);
bool	prof_gdump_get(void);
bool	prof_gdump_set(bool active);
void	prof_boot0(void);
void	prof_boot1(void);
bool	prof_boot2(void);
void	prof_prefork(void);
void	prof_postfork_parent(void);
void	prof_postfork_child(void);
void	prof_sample_threshold_update(prof_tdata_t *tdata);

#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES

#ifndef JEMALLOC_ENABLE_INLINE
bool	prof_active_get_unlocked(void);
bool	prof_gdump_get_unlocked(void);
prof_tdata_t	*prof_tdata_get(tsd_t *tsd, bool create);
/* NB: parameter renamed from "commit" to "update" to match the definition. */
bool	prof_sample_accum_update(tsd_t *tsd, size_t usize, bool update,
    prof_tdata_t **tdata_out);
prof_tctx_t	*prof_alloc_prep(tsd_t *tsd, size_t usize, bool update);
prof_tctx_t	*prof_tctx_get(const void *ptr);
void	prof_tctx_set(const void *ptr, prof_tctx_t *tctx);
void	prof_malloc_sample_object(const void *ptr, size_t usize,
    prof_tctx_t *tctx);
void	prof_malloc(const void *ptr, size_t usize, prof_tctx_t *tctx);
void	prof_realloc(tsd_t *tsd, const void *ptr, size_t usize,
    prof_tctx_t *tctx, bool updated, size_t old_usize, prof_tctx_t *old_tctx);
void	prof_free(tsd_t *tsd, const void *ptr, size_t usize);
#endif

#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_PROF_C_))
JEMALLOC_ALWAYS_INLINE bool
prof_active_get_unlocked(void)
{

	/*
	 * Even if opt_prof is true, sampling can be temporarily disabled by
	 * setting prof_active to false.  No locking is used when reading
	 * prof_active in the fast path, so there are no guarantees regarding
	 * how long it will take for all threads to notice state changes.
	 */
	return (prof_active);
}

JEMALLOC_ALWAYS_INLINE bool
prof_gdump_get_unlocked(void)
{

	/*
	 * No locking is used when reading prof_gdump_val in the fast path, so
	 * there are no guarantees regarding how long it will take for all
	 * threads to notice state changes.
	 */
	return (prof_gdump_val);
}

/* Fetch (and, if create, lazily initialize or reinitialize) this thread's
 * profiling data. */
JEMALLOC_ALWAYS_INLINE prof_tdata_t *
prof_tdata_get(tsd_t *tsd, bool create)
{
	prof_tdata_t *tdata;

	cassert(config_prof);

	tdata = tsd_prof_tdata_get(tsd);
	if (create) {
		if (unlikely(tdata == NULL)) {
			if (tsd_nominal(tsd)) {
				tdata = prof_tdata_init(tsd);
				tsd_prof_tdata_set(tsd, tdata);
			}
		} else if (unlikely(tdata->expired)) {
			tdata = prof_tdata_reinit(tsd, tdata);
			tsd_prof_tdata_set(tsd, tdata);
		}
		assert(tdata == NULL || tdata->attached);
	}

	return (tdata);
}

JEMALLOC_ALWAYS_INLINE prof_tctx_t *
prof_tctx_get(const void *ptr)
{

	cassert(config_prof);
	assert(ptr != NULL);

	return (arena_prof_tctx_get(ptr));
}

JEMALLOC_ALWAYS_INLINE void
prof_tctx_set(const void *ptr, prof_tctx_t *tctx)
{

	cassert(config_prof);
	assert(ptr != NULL);

	arena_prof_tctx_set(ptr, tctx);
}

/*
 * Update the per-thread sampling countdown.  Returns true if this allocation
 * should NOT be sampled (countdown not yet exhausted, tdata unavailable, or
 * per-thread profiling inactive).
 */
JEMALLOC_ALWAYS_INLINE bool
prof_sample_accum_update(tsd_t *tsd, size_t usize, bool update,
    prof_tdata_t **tdata_out)
{
	prof_tdata_t *tdata;

	cassert(config_prof);

	tdata = prof_tdata_get(tsd, true);
	/* Near-NULL values encode thread-shutdown state, not a real tdata. */
	if ((uintptr_t)tdata <= (uintptr_t)PROF_TDATA_STATE_MAX)
		tdata = NULL;

	if (tdata_out != NULL)
		*tdata_out = tdata;

	if (tdata == NULL)
		return (true);

	if (tdata->bytes_until_sample >= usize) {
		if (update)
			tdata->bytes_until_sample -= usize;
		return (true);
	} else {
		/* Compute new sample threshold. */
		if (update)
			prof_sample_threshold_update(tdata);
		return (!tdata->active);
	}
}

/*
 * Decide whether an allocation of usize bytes should be sampled; returns a
 * real tctx for sampled allocations, or (prof_tctx_t *)1U as the
 * "not sampled" sentinel.
 */
JEMALLOC_ALWAYS_INLINE prof_tctx_t *
prof_alloc_prep(tsd_t *tsd, size_t usize, bool update)
{
	prof_tctx_t *ret;
	prof_tdata_t *tdata;
	prof_bt_t bt;

	assert(usize == s2u(usize));

	if (!prof_active_get_unlocked() || likely(prof_sample_accum_update(tsd,
	    usize, update, &tdata)))
		ret = (prof_tctx_t *)(uintptr_t)1U;
	else {
		bt_init(&bt, tdata->vec);
		prof_backtrace(&bt);
		ret = prof_lookup(tsd, &bt);
	}

	return (ret);
}

JEMALLOC_ALWAYS_INLINE void
prof_malloc(const void *ptr, size_t usize, prof_tctx_t *tctx)
{

	cassert(config_prof);
	assert(ptr != NULL);
	assert(usize == isalloc(ptr, true));

	if (unlikely((uintptr_t)tctx > (uintptr_t)1U))
		prof_malloc_sample_object(ptr, usize, tctx);
	else
		prof_tctx_set(ptr, (prof_tctx_t *)(uintptr_t)1U);
}

JEMALLOC_ALWAYS_INLINE void
prof_realloc(tsd_t *tsd, const void *ptr, size_t usize, prof_tctx_t *tctx,
    bool updated, size_t old_usize, prof_tctx_t *old_tctx)
{

	cassert(config_prof);
	assert(ptr != NULL || (uintptr_t)tctx <= (uintptr_t)1U);

	if (!updated && ptr != NULL) {
		assert(usize == isalloc(ptr, true));
		if (prof_sample_accum_update(tsd, usize, true, NULL)) {
			/*
			 * Don't sample.  The usize passed to PROF_ALLOC_PREP()
			 * was larger than what actually got allocated, so a
			 * backtrace was captured for this allocation, even
			 * though its actual usize was insufficient to cross the
			 * sample threshold.
			 */
			tctx = (prof_tctx_t *)(uintptr_t)1U;
		}
	}

	if (unlikely((uintptr_t)old_tctx > (uintptr_t)1U))
		prof_free_sampled_object(tsd, old_usize, old_tctx);
	if (unlikely((uintptr_t)tctx > (uintptr_t)1U))
		prof_malloc_sample_object(ptr, usize, tctx);
	else
		prof_tctx_set(ptr, (prof_tctx_t *)(uintptr_t)1U);
}

JEMALLOC_ALWAYS_INLINE void
prof_free(tsd_t *tsd, const void *ptr, size_t usize)
{
	prof_tctx_t *tctx = prof_tctx_get(ptr);

	cassert(config_prof);
	assert(usize == isalloc(ptr, true));

	if (unlikely((uintptr_t)tctx > (uintptr_t)1U))
		prof_free_sampled_object(tsd, usize, tctx);
}
#endif

#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/