prof.c (296221) -> prof.c (299587)
1#define JEMALLOC_PROF_C_
2#include "jemalloc/internal/jemalloc_internal.h"
3/******************************************************************************/
4
5#ifdef JEMALLOC_PROF_LIBUNWIND
6#define UNW_LOCAL_ONLY
7#include <libunwind.h>
8#endif

--- 107 unchanged lines hidden (view full) ---

116static bool prof_booted = false;
117
118/******************************************************************************/
119/*
120 * Function prototypes for static functions that are referenced prior to
121 * definition.
122 */
123
124static bool prof_tctx_should_destroy(prof_tctx_t *tctx);
124static bool prof_tctx_should_destroy(tsdn_t *tsdn, prof_tctx_t *tctx);
125static void prof_tctx_destroy(tsd_t *tsd, prof_tctx_t *tctx);
125static void prof_tctx_destroy(tsd_t *tsd, prof_tctx_t *tctx);
126static bool prof_tdata_should_destroy(prof_tdata_t *tdata,
126static bool prof_tdata_should_destroy(tsdn_t *tsdn, prof_tdata_t *tdata,
127 bool even_if_attached);
127 bool even_if_attached);
128static void prof_tdata_destroy(tsd_t *tsd, prof_tdata_t *tdata,
128static void prof_tdata_destroy(tsdn_t *tsdn, prof_tdata_t *tdata,
129 bool even_if_attached);
129 bool even_if_attached);
130static char *prof_thread_name_alloc(tsd_t *tsd, const char *thread_name);
130static char *prof_thread_name_alloc(tsdn_t *tsdn, const char *thread_name);
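
The new-revision prototypes above capture the theme of this diff: helpers that only need thread state for mutex bookkeeping now take a tsdn_t * ("tsd or NULL") handle instead of a full tsd_t *. The following is a minimal, self-contained sketch of that handle relationship; the helper names mirror tsd_tsdn(), tsdn_tsd(), and tsdn_null() as used later in the diff, but the definitions here are simplified stand-ins, not jemalloc's actual ones.

#include <stdbool.h>
#include <stddef.h>

typedef struct tsd_s { int dummy; } tsd_t;   /* full thread-specific data */
typedef struct tsdn_s tsdn_t;                /* "tsd or NULL"; only cast to/from tsd_t */

static inline tsdn_t *tsd_tsdn(tsd_t *tsd) { return ((tsdn_t *)tsd); }      /* always valid */
static inline bool tsdn_null(const tsdn_t *tsdn) { return (tsdn == NULL); }
static inline tsd_t *tsdn_tsd(tsdn_t *tsdn) { return ((tsd_t *)tsdn); }     /* caller checked non-NULL */
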
131
132/******************************************************************************/
133/* Red-black trees. */
134
135JEMALLOC_INLINE_C int
136prof_tctx_comp(const prof_tctx_t *a, const prof_tctx_t *b)
137{
138 uint64_t a_thr_uid = a->thr_uid;

--- 69 unchanged lines hidden (view full) ---

208 * programs.
209 */
210 tdata = prof_tdata_get(tsd, true);
211 if (tdata != NULL)
212 prof_sample_threshold_update(tdata);
213 }
214
215 if ((uintptr_t)tctx > (uintptr_t)1U) {
216 malloc_mutex_lock(tctx->tdata->lock);
216 malloc_mutex_lock(tsd_tsdn(tsd), tctx->tdata->lock);
217 tctx->prepared = false;
217 tctx->prepared = false;
218 if (prof_tctx_should_destroy(tctx))
218 if (prof_tctx_should_destroy(tsd_tsdn(tsd), tctx))
219 prof_tctx_destroy(tsd, tctx);
220 else
219 prof_tctx_destroy(tsd, tctx);
220 else
221 malloc_mutex_unlock(tctx->tdata->lock);
221 malloc_mutex_unlock(tsd_tsdn(tsd), tctx->tdata->lock);
222 }
223}
224
225void
222 }
223}
224
225void
226prof_malloc_sample_object(const void *ptr, size_t usize, prof_tctx_t *tctx)
226prof_malloc_sample_object(tsdn_t *tsdn, const void *ptr, size_t usize,
227 prof_tctx_t *tctx)
227{
228
228{
229
229 prof_tctx_set(ptr, usize, tctx);
230 prof_tctx_set(tsdn, ptr, usize, tctx);
230
231
231 malloc_mutex_lock(tctx->tdata->lock);
232 malloc_mutex_lock(tsdn, tctx->tdata->lock);
232 tctx->cnts.curobjs++;
233 tctx->cnts.curbytes += usize;
234 if (opt_prof_accum) {
235 tctx->cnts.accumobjs++;
236 tctx->cnts.accumbytes += usize;
237 }
238 tctx->prepared = false;
233 tctx->cnts.curobjs++;
234 tctx->cnts.curbytes += usize;
235 if (opt_prof_accum) {
236 tctx->cnts.accumobjs++;
237 tctx->cnts.accumbytes += usize;
238 }
239 tctx->prepared = false;
239 malloc_mutex_unlock(tctx->tdata->lock);
240 malloc_mutex_unlock(tsdn, tctx->tdata->lock);
240}
241
242void
243prof_free_sampled_object(tsd_t *tsd, size_t usize, prof_tctx_t *tctx)
244{
245
246 malloc_mutex_lock(tctx->tdata->lock);
247 malloc_mutex_lock(tsd_tsdn(tsd), tctx->tdata->lock);
247 assert(tctx->cnts.curobjs > 0);
248 assert(tctx->cnts.curbytes >= usize);
249 tctx->cnts.curobjs--;
250 tctx->cnts.curbytes -= usize;
251
248 assert(tctx->cnts.curobjs > 0);
249 assert(tctx->cnts.curbytes >= usize);
250 tctx->cnts.curobjs--;
251 tctx->cnts.curbytes -= usize;
252
252 if (prof_tctx_should_destroy(tctx))
253 if (prof_tctx_should_destroy(tsd_tsdn(tsd), tctx))
253 prof_tctx_destroy(tsd, tctx);
254 else
254 prof_tctx_destroy(tsd, tctx);
255 else
255 malloc_mutex_unlock(tctx->tdata->lock);
256 malloc_mutex_unlock(tsd_tsdn(tsd), tctx->tdata->lock);
256}
257
258void
259bt_init(prof_bt_t *bt, void **vec)
260{
261
262 cassert(config_prof);
263

--- 8 unchanged lines hidden (view full) ---

272 cassert(config_prof);
273 assert(tdata == prof_tdata_get(tsd, false));
274
275 if (tdata != NULL) {
276 assert(!tdata->enq);
277 tdata->enq = true;
278 }
279
280 malloc_mutex_lock(&bt2gctx_mtx);
281 malloc_mutex_lock(tsd_tsdn(tsd), &bt2gctx_mtx);
281}
282
283JEMALLOC_INLINE_C void
284prof_leave(tsd_t *tsd, prof_tdata_t *tdata)
285{
286
287 cassert(config_prof);
288 assert(tdata == prof_tdata_get(tsd, false));
289
290 malloc_mutex_unlock(&bt2gctx_mtx);
291 malloc_mutex_unlock(tsd_tsdn(tsd), &bt2gctx_mtx);
291
292 if (tdata != NULL) {
293 bool idump, gdump;
294
295 assert(tdata->enq);
296 tdata->enq = false;
297 idump = tdata->enq_idump;
298 tdata->enq_idump = false;
299 gdump = tdata->enq_gdump;
300 tdata->enq_gdump = false;
301
302 if (idump)
303 prof_idump();
304 prof_idump(tsd_tsdn(tsd));
304 if (gdump)
305 if (gdump)
305 prof_gdump();
306 prof_gdump(tsd_tsdn(tsd));
306 }
307}
308
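
prof_enter()/prof_leave() above defer prof_idump()/prof_gdump() requests that arrive while bt2gctx_mtx is held and replay them after the unlock, now passing the caller's tsdn through to the mutex and dump calls. Below is a stripped-down sketch of that defer-and-replay shape, with hypothetical types and a plain pthread mutex standing in for malloc_mutex_t.

#include <pthread.h>
#include <stdbool.h>

typedef struct {
	bool enq;        /* inside an enter/leave critical section */
	bool enq_idump;  /* an interval dump was requested meanwhile */
} defer_state_t;

static pthread_mutex_t big_lock = PTHREAD_MUTEX_INITIALIZER;

static void enter(defer_state_t *st) {
	st->enq = true;
	pthread_mutex_lock(&big_lock);
}

static void leave(defer_state_t *st) {
	bool idump;

	pthread_mutex_unlock(&big_lock);
	st->enq = false;
	idump = st->enq_idump;
	st->enq_idump = false;
	if (idump) {
		/* Perform the deferred dump here, outside the lock. */
	}
}
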
309#ifdef JEMALLOC_PROF_LIBUNWIND
310void
311prof_backtrace(prof_bt_t *bt)
312{
313 int nframes;

--- 227 unchanged lines hidden (view full) ---

541static malloc_mutex_t *
542prof_tdata_mutex_choose(uint64_t thr_uid)
543{
544
545 return (&tdata_locks[thr_uid % PROF_NTDATA_LOCKS]);
546}
547
548static prof_gctx_t *
549prof_gctx_create(tsd_t *tsd, prof_bt_t *bt)
550prof_gctx_create(tsdn_t *tsdn, prof_bt_t *bt)
550{
551 /*
552 * Create a single allocation that has space for vec of length bt->len.
553 */
554 size_t size = offsetof(prof_gctx_t, vec) + (bt->len * sizeof(void *));
555 prof_gctx_t *gctx = (prof_gctx_t *)iallocztm(tsd, size,
556 size2index(size), false, tcache_get(tsd, true), true, NULL, true);
556 prof_gctx_t *gctx = (prof_gctx_t *)iallocztm(tsdn, size,
557 size2index(size), false, NULL, true, arena_get(TSDN_NULL, 0, true),
558 true);
557 if (gctx == NULL)
558 return (NULL);
559 gctx->lock = prof_gctx_mutex_choose();
560 /*
561 * Set nlimbo to 1, in order to avoid a race condition with
562 * prof_tctx_destroy()/prof_gctx_try_destroy().
563 */
564 gctx->nlimbo = 1;

--- 15 unchanged lines hidden (view full) ---

580 /*
581 * Check that gctx is still unused by any thread cache before destroying
582 * it. prof_lookup() increments gctx->nlimbo in order to avoid a race
583 * condition with this function, as does prof_tctx_destroy() in order to
584 * avoid a race between the main body of prof_tctx_destroy() and entry
585 * into this function.
586 */
587 prof_enter(tsd, tdata_self);
588 malloc_mutex_lock(gctx->lock);
590 malloc_mutex_lock(tsd_tsdn(tsd), gctx->lock);
589 assert(gctx->nlimbo != 0);
590 if (tctx_tree_empty(&gctx->tctxs) && gctx->nlimbo == 1) {
591 /* Remove gctx from bt2gctx. */
591 assert(gctx->nlimbo != 0);
592 if (tctx_tree_empty(&gctx->tctxs) && gctx->nlimbo == 1) {
593 /* Remove gctx from bt2gctx. */
592 if (ckh_remove(tsd, &bt2gctx, &gctx->bt, NULL, NULL))
594 if (ckh_remove(tsd_tsdn(tsd), &bt2gctx, &gctx->bt, NULL, NULL))
593 not_reached();
594 prof_leave(tsd, tdata_self);
595 /* Destroy gctx. */
595 not_reached();
596 prof_leave(tsd, tdata_self);
597 /* Destroy gctx. */
596 malloc_mutex_unlock(gctx->lock);
597 idalloctm(tsd, gctx, tcache_get(tsd, false), true, true);
598 malloc_mutex_unlock(tsd_tsdn(tsd), gctx->lock);
599 idalloctm(tsd_tsdn(tsd), gctx, NULL, true, true);
598 } else {
599 /*
600 * Compensate for increment in prof_tctx_destroy() or
601 * prof_lookup().
602 */
603 gctx->nlimbo--;
604 malloc_mutex_unlock(gctx->lock);
606 malloc_mutex_unlock(tsd_tsdn(tsd), gctx->lock);
605 prof_leave(tsd, tdata_self);
606 }
607}
608
607 prof_leave(tsd, tdata_self);
608 }
609}
610
609/* tctx->tdata->lock must be held. */
610static bool
611static bool
611prof_tctx_should_destroy(prof_tctx_t *tctx)
612prof_tctx_should_destroy(tsdn_t *tsdn, prof_tctx_t *tctx)
612{
613
613{
614
615 malloc_mutex_assert_owner(tsdn, tctx->tdata->lock);
616
614 if (opt_prof_accum)
615 return (false);
616 if (tctx->cnts.curobjs != 0)
617 return (false);
618 if (tctx->prepared)
619 return (false);
620 return (true);
621}

--- 6 unchanged lines hidden (view full) ---

628 return (false);
629 if (!tctx_tree_empty(&gctx->tctxs))
630 return (false);
631 if (gctx->nlimbo != 0)
632 return (false);
633 return (true);
634}
635
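
Throughout this revision, comments of the form "lock must be held" (for example old line 609 above) are replaced by malloc_mutex_assert_owner()/malloc_mutex_assert_not_owner() calls that take the tsdn handle. A rough standalone equivalent of that idea follows, using a hypothetical wrapper around a pthread mutex; jemalloc's malloc_mutex_t does its own ownership tracking, so this is only an illustration.

#include <assert.h>
#include <pthread.h>
#include <stdbool.h>

typedef struct {
	pthread_mutex_t mtx;
	pthread_t owner;
	bool locked;
} dbg_mutex_t;

static void dbg_mutex_lock(dbg_mutex_t *m) {
	pthread_mutex_lock(&m->mtx);
	m->owner = pthread_self();
	m->locked = true;
}

static void dbg_mutex_unlock(dbg_mutex_t *m) {
	m->locked = false;
	pthread_mutex_unlock(&m->mtx);
}

/* Debug-only check: turns a "lock must be held" comment into an enforced precondition. */
static void dbg_mutex_assert_owner(dbg_mutex_t *m) {
	assert(m->locked && pthread_equal(m->owner, pthread_self()));
}
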
636/* tctx->tdata->lock is held upon entry, and released before return. */
637static void
638prof_tctx_destroy(tsd_t *tsd, prof_tctx_t *tctx)
639{
640 prof_tdata_t *tdata = tctx->tdata;
641 prof_gctx_t *gctx = tctx->gctx;
642 bool destroy_tdata, destroy_tctx, destroy_gctx;
643
646 malloc_mutex_assert_owner(tsd_tsdn(tsd), tctx->tdata->lock);
647
644 assert(tctx->cnts.curobjs == 0);
645 assert(tctx->cnts.curbytes == 0);
646 assert(!opt_prof_accum);
647 assert(tctx->cnts.accumobjs == 0);
648 assert(tctx->cnts.accumbytes == 0);
649
650 ckh_remove(tsd, &tdata->bt2tctx, &gctx->bt, NULL, NULL);
651 destroy_tdata = prof_tdata_should_destroy(tdata, false);
652 malloc_mutex_unlock(tdata->lock);
654 ckh_remove(tsd_tsdn(tsd), &tdata->bt2tctx, &gctx->bt, NULL, NULL);
655 destroy_tdata = prof_tdata_should_destroy(tsd_tsdn(tsd), tdata, false);
656 malloc_mutex_unlock(tsd_tsdn(tsd), tdata->lock);
653
657
654 malloc_mutex_lock(gctx->lock);
658 malloc_mutex_lock(tsd_tsdn(tsd), gctx->lock);
655 switch (tctx->state) {
656 case prof_tctx_state_nominal:
657 tctx_tree_remove(&gctx->tctxs, tctx);
658 destroy_tctx = true;
659 if (prof_gctx_should_destroy(gctx)) {
660 /*
661 * Increment gctx->nlimbo in order to keep another
662 * thread from winning the race to destroy gctx while

--- 23 unchanged lines hidden (view full) ---

686 destroy_tctx = false;
687 destroy_gctx = false;
688 break;
689 default:
690 not_reached();
691 destroy_tctx = false;
692 destroy_gctx = false;
693 }
694 malloc_mutex_unlock(gctx->lock);
698 malloc_mutex_unlock(tsd_tsdn(tsd), gctx->lock);
695 if (destroy_gctx) {
696 prof_gctx_try_destroy(tsd, prof_tdata_get(tsd, false), gctx,
697 tdata);
698 }
699
704 malloc_mutex_assert_not_owner(tsd_tsdn(tsd), tctx->tdata->lock);
705
700 if (destroy_tdata)
706 if (destroy_tdata)
701 prof_tdata_destroy(tsd, tdata, false);
707 prof_tdata_destroy(tsd_tsdn(tsd), tdata, false);
702
703 if (destroy_tctx)
708
709 if (destroy_tctx)
704 idalloctm(tsd, tctx, tcache_get(tsd, false), true, true);
710 idalloctm(tsd_tsdn(tsd), tctx, NULL, true, true);
705}
706
707static bool
708prof_lookup_global(tsd_t *tsd, prof_bt_t *bt, prof_tdata_t *tdata,
709 void **p_btkey, prof_gctx_t **p_gctx, bool *p_new_gctx)
710{
711 union {
712 prof_gctx_t *p;
713 void *v;
714 } gctx;
715 union {
716 prof_bt_t *p;
717 void *v;
718 } btkey;
719 bool new_gctx;
720
721 prof_enter(tsd, tdata);
722 if (ckh_search(&bt2gctx, bt, &btkey.v, &gctx.v)) {
723 /* bt has never been seen before. Insert it. */
724 gctx.p = prof_gctx_create(tsd, bt);
730 gctx.p = prof_gctx_create(tsd_tsdn(tsd), bt);
725 if (gctx.v == NULL) {
726 prof_leave(tsd, tdata);
727 return (true);
728 }
729 btkey.p = &gctx.p->bt;
731 if (gctx.v == NULL) {
732 prof_leave(tsd, tdata);
733 return (true);
734 }
735 btkey.p = &gctx.p->bt;
730 if (ckh_insert(tsd, &bt2gctx, btkey.v, gctx.v)) {
736 if (ckh_insert(tsd_tsdn(tsd), &bt2gctx, btkey.v, gctx.v)) {
731 /* OOM. */
732 prof_leave(tsd, tdata);
737 /* OOM. */
738 prof_leave(tsd, tdata);
733 idalloctm(tsd, gctx.v, tcache_get(tsd, false), true,
734 true);
739 idalloctm(tsd_tsdn(tsd), gctx.v, NULL, true, true);
735 return (true);
736 }
737 new_gctx = true;
738 } else {
739 /*
740 * Increment nlimbo, in order to avoid a race condition with
741 * prof_tctx_destroy()/prof_gctx_try_destroy().
742 */
740 return (true);
741 }
742 new_gctx = true;
743 } else {
744 /*
745 * Increment nlimbo, in order to avoid a race condition with
746 * prof_tctx_destroy()/prof_gctx_try_destroy().
747 */
743 malloc_mutex_lock(gctx.p->lock);
748 malloc_mutex_lock(tsd_tsdn(tsd), gctx.p->lock);
744 gctx.p->nlimbo++;
749 gctx.p->nlimbo++;
745 malloc_mutex_unlock(gctx.p->lock);
750 malloc_mutex_unlock(tsd_tsdn(tsd), gctx.p->lock);
746 new_gctx = false;
747 }
748 prof_leave(tsd, tdata);
749
750 *p_btkey = btkey.v;
751 *p_gctx = gctx.p;
752 *p_new_gctx = new_gctx;
753 return (false);

--- 10 unchanged lines hidden (view full) ---

764 bool not_found;
765
766 cassert(config_prof);
767
768 tdata = prof_tdata_get(tsd, false);
769 if (tdata == NULL)
770 return (NULL);
771
772 malloc_mutex_lock(tdata->lock);
777 malloc_mutex_lock(tsd_tsdn(tsd), tdata->lock);
773 not_found = ckh_search(&tdata->bt2tctx, bt, NULL, &ret.v);
774 if (!not_found) /* Note double negative! */
775 ret.p->prepared = true;
778 not_found = ckh_search(&tdata->bt2tctx, bt, NULL, &ret.v);
779 if (!not_found) /* Note double negative! */
780 ret.p->prepared = true;
776 malloc_mutex_unlock(tdata->lock);
781 malloc_mutex_unlock(tsd_tsdn(tsd), tdata->lock);
777 if (not_found) {
782 if (not_found) {
778 tcache_t *tcache;
779 void *btkey;
780 prof_gctx_t *gctx;
781 bool new_gctx, error;
782
783 /*
784 * This thread's cache lacks bt. Look for it in the global
785 * cache.
786 */
787 if (prof_lookup_global(tsd, bt, tdata, &btkey, &gctx,
788 &new_gctx))
789 return (NULL);
790
791 /* Link a prof_tctx_t into gctx for this thread. */
792 tcache = tcache_get(tsd, true);
793 ret.v = iallocztm(tsd, sizeof(prof_tctx_t),
794 size2index(sizeof(prof_tctx_t)), false, tcache, true, NULL,
795 true);
796 ret.v = iallocztm(tsd_tsdn(tsd), sizeof(prof_tctx_t),
797 size2index(sizeof(prof_tctx_t)), false, NULL, true,
798 arena_ichoose(tsd_tsdn(tsd), NULL), true);
796 if (ret.p == NULL) {
797 if (new_gctx)
798 prof_gctx_try_destroy(tsd, tdata, gctx, tdata);
799 return (NULL);
800 }
801 ret.p->tdata = tdata;
802 ret.p->thr_uid = tdata->thr_uid;
803 ret.p->thr_discrim = tdata->thr_discrim;
804 memset(&ret.p->cnts, 0, sizeof(prof_cnt_t));
805 ret.p->gctx = gctx;
806 ret.p->tctx_uid = tdata->tctx_uid_next++;
807 ret.p->prepared = true;
808 ret.p->state = prof_tctx_state_initializing;
809 malloc_mutex_lock(tdata->lock);
810 error = ckh_insert(tsd, &tdata->bt2tctx, btkey, ret.v);
811 malloc_mutex_unlock(tdata->lock);
812 malloc_mutex_lock(tsd_tsdn(tsd), tdata->lock);
813 error = ckh_insert(tsd_tsdn(tsd), &tdata->bt2tctx, btkey,
814 ret.v);
815 malloc_mutex_unlock(tsd_tsdn(tsd), tdata->lock);
812 if (error) {
813 if (new_gctx)
814 prof_gctx_try_destroy(tsd, tdata, gctx, tdata);
816 if (error) {
817 if (new_gctx)
818 prof_gctx_try_destroy(tsd, tdata, gctx, tdata);
815 idalloctm(tsd, ret.v, tcache, true, true);
819 idalloctm(tsd_tsdn(tsd), ret.v, NULL, true, true);
816 return (NULL);
817 }
820 return (NULL);
821 }
818 malloc_mutex_lock(gctx->lock);
822 malloc_mutex_lock(tsd_tsdn(tsd), gctx->lock);
819 ret.p->state = prof_tctx_state_nominal;
820 tctx_tree_insert(&gctx->tctxs, ret.p);
821 gctx->nlimbo--;
823 ret.p->state = prof_tctx_state_nominal;
824 tctx_tree_insert(&gctx->tctxs, ret.p);
825 gctx->nlimbo--;
822 malloc_mutex_unlock(gctx->lock);
826 malloc_mutex_unlock(tsd_tsdn(tsd), gctx->lock);
823 }
824
825 return (ret.p);
826}
827
827 }
828
829 return (ret.p);
830}
831
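
prof_lookup() above first probes the per-thread bt2tctx table under tdata->lock and only falls back to the global bt2gctx table (via prof_lookup_global()) on a miss, creating and linking a new tctx when needed. The sketch below is a much-simplified, self-contained version of that two-level lookup, with hypothetical fixed-size tables in place of the cuckoo hashes and per-thread storage in place of the locked tdata table.

#include <pthread.h>

#define NKEYS 64

static pthread_mutex_t global_mtx = PTHREAD_MUTEX_INITIALIZER;
static int global_tab[NKEYS];           /* shared; guarded by global_mtx */
static __thread int thread_tab[NKEYS];  /* per-thread; no lock needed here */

static int lookup(unsigned key) {
	int v = thread_tab[key % NKEYS];

	if (v != 0)
		return (v);                     /* per-thread hit */
	pthread_mutex_lock(&global_mtx);
	v = global_tab[key % NKEYS];
	if (v == 0)
		v = global_tab[key % NKEYS] = (int)key + 1;   /* create on miss */
	pthread_mutex_unlock(&global_mtx);
	thread_tab[key % NKEYS] = v;        /* cache for next time */
	return (v);
}
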
832/*
833 * The bodies of this function and prof_leakcheck() are compiled out unless heap
834 * profiling is enabled, so that it is possible to compile jemalloc with
835 * floating point support completely disabled. Avoiding floating point code is
836 * important on memory-constrained systems, but it also enables a workaround for
837 * versions of glibc that don't properly save/restore floating point registers
838 * during dynamic lazy symbol loading (which internally calls into whatever
839 * malloc implementation happens to be integrated into the application). Note
840 * that some compilers (e.g. gcc 4.8) may use floating point registers for fast
841 * memory moves, so jemalloc must be compiled with such optimizations disabled
842 * (e.g.
843 * -mno-sse) in order for the workaround to be complete.
844 */
828void
829prof_sample_threshold_update(prof_tdata_t *tdata)
830{
845void
846prof_sample_threshold_update(prof_tdata_t *tdata)
847{
831 /*
832 * The body of this function is compiled out unless heap profiling is
833 * enabled, so that it is possible to compile jemalloc with floating
834 * point support completely disabled. Avoiding floating point code is
835 * important on memory-constrained systems, but it also enables a
836 * workaround for versions of glibc that don't properly save/restore
837 * floating point registers during dynamic lazy symbol loading (which
838 * internally calls into whatever malloc implementation happens to be
839 * integrated into the application). Note that some compilers (e.g.
840 * gcc 4.8) may use floating point registers for fast memory moves, so
841 * jemalloc must be compiled with such optimizations disabled (e.g.
842 * -mno-sse) in order for the workaround to be complete.
843 */
844#ifdef JEMALLOC_PROF
845 uint64_t r;
846 double u;
847
848 if (!config_prof)
849 return;
850
851 if (lg_prof_sample == 0) {

--- 37 unchanged lines hidden (view full) ---

889
890 return (NULL);
891}
892
893size_t
894prof_tdata_count(void)
895{
896 size_t tdata_count = 0;
901 tsdn_t *tsdn;
897
902
898 malloc_mutex_lock(&tdatas_mtx);
903 tsdn = tsdn_fetch();
904 malloc_mutex_lock(tsdn, &tdatas_mtx);
899 tdata_tree_iter(&tdatas, NULL, prof_tdata_count_iter,
900 (void *)&tdata_count);
905 tdata_tree_iter(&tdatas, NULL, prof_tdata_count_iter,
906 (void *)&tdata_count);
901 malloc_mutex_unlock(&tdatas_mtx);
907 malloc_mutex_unlock(tsdn, &tdatas_mtx);
902
903 return (tdata_count);
904}
905#endif
906
907#ifdef JEMALLOC_JET
908size_t
909prof_bt_count(void)
910{
911 size_t bt_count;
912 tsd_t *tsd;
913 prof_tdata_t *tdata;
914
915 tsd = tsd_fetch();
916 tdata = prof_tdata_get(tsd, false);
917 if (tdata == NULL)
918 return (0);
919
920 malloc_mutex_lock(&bt2gctx_mtx);
926 malloc_mutex_lock(tsd_tsdn(tsd), &bt2gctx_mtx);
921 bt_count = ckh_count(&bt2gctx);
927 bt_count = ckh_count(&bt2gctx);
922 malloc_mutex_unlock(&bt2gctx_mtx);
928 malloc_mutex_unlock(tsd_tsdn(tsd), &bt2gctx_mtx);
923
924 return (bt_count);
925}
926#endif
927
928#ifdef JEMALLOC_JET
929#undef prof_dump_open
930#define prof_dump_open JEMALLOC_N(prof_dump_open_impl)

--- 96 unchanged lines hidden (view full) ---

1027 va_start(ap, format);
1028 malloc_vsnprintf(buf, sizeof(buf), format, ap);
1029 va_end(ap);
1030 ret = prof_dump_write(propagate_err, buf);
1031
1032 return (ret);
1033}
1034
1035/* tctx->tdata->lock is held. */
1036static void
1041static void
1037prof_tctx_merge_tdata(prof_tctx_t *tctx, prof_tdata_t *tdata)
1042prof_tctx_merge_tdata(tsdn_t *tsdn, prof_tctx_t *tctx, prof_tdata_t *tdata)
1038{
1039
1043{
1044
1040 malloc_mutex_lock(tctx->gctx->lock);
1045 malloc_mutex_assert_owner(tsdn, tctx->tdata->lock);
1041
1046
1047 malloc_mutex_lock(tsdn, tctx->gctx->lock);
1048
1042 switch (tctx->state) {
1043 case prof_tctx_state_initializing:
1049 switch (tctx->state) {
1050 case prof_tctx_state_initializing:
1044 malloc_mutex_unlock(tctx->gctx->lock);
1051 malloc_mutex_unlock(tsdn, tctx->gctx->lock);
1045 return;
1046 case prof_tctx_state_nominal:
1047 tctx->state = prof_tctx_state_dumping;
1052 return;
1053 case prof_tctx_state_nominal:
1054 tctx->state = prof_tctx_state_dumping;
1048 malloc_mutex_unlock(tctx->gctx->lock);
1055 malloc_mutex_unlock(tsdn, tctx->gctx->lock);
1049
1050 memcpy(&tctx->dump_cnts, &tctx->cnts, sizeof(prof_cnt_t));
1051
1052 tdata->cnt_summed.curobjs += tctx->dump_cnts.curobjs;
1053 tdata->cnt_summed.curbytes += tctx->dump_cnts.curbytes;
1054 if (opt_prof_accum) {
1055 tdata->cnt_summed.accumobjs +=
1056 tctx->dump_cnts.accumobjs;
1057 tdata->cnt_summed.accumbytes +=
1058 tctx->dump_cnts.accumbytes;
1059 }
1060 break;
1061 case prof_tctx_state_dumping:
1062 case prof_tctx_state_purgatory:
1063 not_reached();
1064 }
1065}
1066
1067/* gctx->lock is held. */
1068static void
1074static void
1069prof_tctx_merge_gctx(prof_tctx_t *tctx, prof_gctx_t *gctx)
1075prof_tctx_merge_gctx(tsdn_t *tsdn, prof_tctx_t *tctx, prof_gctx_t *gctx)
1070{
1071
1076{
1077
1078 malloc_mutex_assert_owner(tsdn, gctx->lock);
1079
1072 gctx->cnt_summed.curobjs += tctx->dump_cnts.curobjs;
1073 gctx->cnt_summed.curbytes += tctx->dump_cnts.curbytes;
1074 if (opt_prof_accum) {
1075 gctx->cnt_summed.accumobjs += tctx->dump_cnts.accumobjs;
1076 gctx->cnt_summed.accumbytes += tctx->dump_cnts.accumbytes;
1077 }
1078}
1079
1080/* tctx->gctx is held. */
1081static prof_tctx_t *
1082prof_tctx_merge_iter(prof_tctx_tree_t *tctxs, prof_tctx_t *tctx, void *arg)
1083{
1088static prof_tctx_t *
1089prof_tctx_merge_iter(prof_tctx_tree_t *tctxs, prof_tctx_t *tctx, void *arg)
1090{
1091 tsdn_t *tsdn = (tsdn_t *)arg;
1084
1092
1093 malloc_mutex_assert_owner(tsdn, tctx->gctx->lock);
1094
1085 switch (tctx->state) {
1086 case prof_tctx_state_nominal:
1087 /* New since dumping started; ignore. */
1088 break;
1089 case prof_tctx_state_dumping:
1090 case prof_tctx_state_purgatory:
1091 prof_tctx_merge_gctx(tctx, tctx->gctx);
1101 prof_tctx_merge_gctx(tsdn, tctx, tctx->gctx);
1092 break;
1093 default:
1094 not_reached();
1095 }
1096
1097 return (NULL);
1098}
1099
1100/* gctx->lock is held. */
1110struct prof_tctx_dump_iter_arg_s {
1111 tsdn_t *tsdn;
1112 bool propagate_err;
1113};
1114
1101static prof_tctx_t *
1115static prof_tctx_t *
1102prof_tctx_dump_iter(prof_tctx_tree_t *tctxs, prof_tctx_t *tctx, void *arg)
1116prof_tctx_dump_iter(prof_tctx_tree_t *tctxs, prof_tctx_t *tctx, void *opaque)
1103{
1117{
1104 bool propagate_err = *(bool *)arg;
1118 struct prof_tctx_dump_iter_arg_s *arg =
1119 (struct prof_tctx_dump_iter_arg_s *)opaque;
1105
1120
1121 malloc_mutex_assert_owner(arg->tsdn, tctx->gctx->lock);
1122
1106 switch (tctx->state) {
1107 case prof_tctx_state_initializing:
1108 case prof_tctx_state_nominal:
1109 /* Not captured by this dump. */
1110 break;
1111 case prof_tctx_state_dumping:
1112 case prof_tctx_state_purgatory:
1113 if (prof_dump_printf(propagate_err,
1130 if (prof_dump_printf(arg->propagate_err,
1114 " t%"FMTu64": %"FMTu64": %"FMTu64" [%"FMTu64": "
1115 "%"FMTu64"]\n", tctx->thr_uid, tctx->dump_cnts.curobjs,
1116 tctx->dump_cnts.curbytes, tctx->dump_cnts.accumobjs,
1117 tctx->dump_cnts.accumbytes))
1118 return (tctx);
1119 break;
1120 default:
1121 not_reached();
1122 }
1123 return (NULL);
1124}
1125
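
The iterator callbacks in this file (prof_tctx_merge_iter, prof_tctx_dump_iter, and the gctx/tdata iterators further down) used to receive a single value through their void * argument; they now receive a small struct such as prof_tctx_dump_iter_arg_s that carries the tsdn handle alongside the original payload. A self-contained sketch of that opaque-struct pattern, with made-up names and a plain loop standing in for the tree walk:

#include <stdio.h>

typedef struct ctx_s { int id; } ctx_t;   /* stand-in for tsdn_t */

struct dump_iter_arg_s {
	ctx_t *ctx;           /* context threaded through the iteration */
	int propagate_err;    /* the payload the callback always needed */
};

static int dump_iter(int node, void *opaque) {
	struct dump_iter_arg_s *arg = (struct dump_iter_arg_s *)opaque;

	printf("node %d (ctx %d, propagate_err %d)\n", node, arg->ctx->id,
	    arg->propagate_err);
	return (0);
}

int main(void) {
	ctx_t ctx = {42};
	struct dump_iter_arg_s arg = {&ctx, 1};
	int n;

	for (n = 0; n < 3; n++)   /* stand-in for tctx_tree_iter() */
		dump_iter(n, (void *)&arg);
	return (0);
}
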
1126/* tctx->gctx is held. */
1127static prof_tctx_t *
1128prof_tctx_finish_iter(prof_tctx_tree_t *tctxs, prof_tctx_t *tctx, void *arg)
1129{
1143static prof_tctx_t *
1144prof_tctx_finish_iter(prof_tctx_tree_t *tctxs, prof_tctx_t *tctx, void *arg)
1145{
1146 tsdn_t *tsdn = (tsdn_t *)arg;
1130 prof_tctx_t *ret;
1131
1147 prof_tctx_t *ret;
1148
1149 malloc_mutex_assert_owner(tsdn, tctx->gctx->lock);
1150
1132 switch (tctx->state) {
1133 case prof_tctx_state_nominal:
1134 /* New since dumping started; ignore. */
1135 break;
1136 case prof_tctx_state_dumping:
1137 tctx->state = prof_tctx_state_nominal;
1138 break;
1139 case prof_tctx_state_purgatory:

--- 4 unchanged lines hidden (view full) ---

1144 }
1145
1146 ret = NULL;
1147label_return:
1148 return (ret);
1149}
1150
1151static void
1152prof_dump_gctx_prep(prof_gctx_t *gctx, prof_gctx_tree_t *gctxs)
1171prof_dump_gctx_prep(tsdn_t *tsdn, prof_gctx_t *gctx, prof_gctx_tree_t *gctxs)
1153{
1154
1155 cassert(config_prof);
1156
1157 malloc_mutex_lock(gctx->lock);
1176 malloc_mutex_lock(tsdn, gctx->lock);
1158
1159 /*
1160 * Increment nlimbo so that gctx won't go away before dump.
1161 * Additionally, link gctx into the dump list so that it is included in
1162 * prof_dump()'s second pass.
1163 */
1164 gctx->nlimbo++;
1165 gctx_tree_insert(gctxs, gctx);
1166
1167 memset(&gctx->cnt_summed, 0, sizeof(prof_cnt_t));
1168
1169 malloc_mutex_unlock(gctx->lock);
1188 malloc_mutex_unlock(tsdn, gctx->lock);
1170}
1171
1189}
1190
1191struct prof_gctx_merge_iter_arg_s {
1192 tsdn_t *tsdn;
1193 size_t leak_ngctx;
1194};
1195
1172static prof_gctx_t *
1196static prof_gctx_t *
1173prof_gctx_merge_iter(prof_gctx_tree_t *gctxs, prof_gctx_t *gctx, void *arg)
1197prof_gctx_merge_iter(prof_gctx_tree_t *gctxs, prof_gctx_t *gctx, void *opaque)
1174{
1198{
1175 size_t *leak_ngctx = (size_t *)arg;
1199 struct prof_gctx_merge_iter_arg_s *arg =
1200 (struct prof_gctx_merge_iter_arg_s *)opaque;
1176
1201
1177 malloc_mutex_lock(gctx->lock);
1178 tctx_tree_iter(&gctx->tctxs, NULL, prof_tctx_merge_iter, NULL);
1202 malloc_mutex_lock(arg->tsdn, gctx->lock);
1203 tctx_tree_iter(&gctx->tctxs, NULL, prof_tctx_merge_iter,
1204 (void *)arg->tsdn);
1179 if (gctx->cnt_summed.curobjs != 0)
1205 if (gctx->cnt_summed.curobjs != 0)
1180 (*leak_ngctx)++;
1181 malloc_mutex_unlock(gctx->lock);
1206 arg->leak_ngctx++;
1207 malloc_mutex_unlock(arg->tsdn, gctx->lock);
1182
1183 return (NULL);
1184}
1185
1186static void
1187prof_gctx_finish(tsd_t *tsd, prof_gctx_tree_t *gctxs)
1188{
1189 prof_tdata_t *tdata = prof_tdata_get(tsd, false);
1190 prof_gctx_t *gctx;
1191
1192 /*
1193 * Standard tree iteration won't work here, because as soon as we
1194 * decrement gctx->nlimbo and unlock gctx, another thread can
1195 * concurrently destroy it, which will corrupt the tree. Therefore,
1196 * tear down the tree one node at a time during iteration.
1197 */
1198 while ((gctx = gctx_tree_first(gctxs)) != NULL) {
1199 gctx_tree_remove(gctxs, gctx);
1200 malloc_mutex_lock(gctx->lock);
1226 malloc_mutex_lock(tsd_tsdn(tsd), gctx->lock);
1201 {
1202 prof_tctx_t *next;
1203
1204 next = NULL;
1205 do {
1206 prof_tctx_t *to_destroy =
1207 tctx_tree_iter(&gctx->tctxs, next,
1208 prof_tctx_finish_iter, NULL);
1234 prof_tctx_finish_iter,
1235 (void *)tsd_tsdn(tsd));
1209 if (to_destroy != NULL) {
1210 next = tctx_tree_next(&gctx->tctxs,
1211 to_destroy);
1212 tctx_tree_remove(&gctx->tctxs,
1213 to_destroy);
1214 idalloctm(tsd, to_destroy,
1215 tcache_get(tsd, false), true, true);
1241 idalloctm(tsd_tsdn(tsd), to_destroy,
1242 NULL, true, true);
1216 } else
1217 next = NULL;
1218 } while (next != NULL);
1219 }
1220 gctx->nlimbo--;
1221 if (prof_gctx_should_destroy(gctx)) {
1222 gctx->nlimbo++;
1223 malloc_mutex_unlock(gctx->lock);
1250 malloc_mutex_unlock(tsd_tsdn(tsd), gctx->lock);
1224 prof_gctx_try_destroy(tsd, tdata, gctx, tdata);
1225 } else
1251 prof_gctx_try_destroy(tsd, tdata, gctx, tdata);
1252 } else
1226 malloc_mutex_unlock(gctx->lock);
1253 malloc_mutex_unlock(tsd_tsdn(tsd), gctx->lock);
1227 }
1228}
1229
1254 }
1255}
1256
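
The comment in prof_gctx_finish() above explains why the dump list cannot be walked in place: visiting a node may destroy it, so the loop repeatedly detaches the first element before processing it. The same shape on a hypothetical singly linked list, purely for illustration:

#include <stdlib.h>

typedef struct node_s { struct node_s *next; } node_t;

static void finish_all(node_t **head) {
	node_t *n;

	while ((n = *head) != NULL) {
		*head = n->next;   /* detach first so freeing n cannot corrupt
		                    * the part of the list still to be visited */
		free(n);
	}
}
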
1257struct prof_tdata_merge_iter_arg_s {
1258 tsdn_t *tsdn;
1259 prof_cnt_t cnt_all;
1260};
1261
1230static prof_tdata_t *
1262static prof_tdata_t *
1231prof_tdata_merge_iter(prof_tdata_tree_t *tdatas, prof_tdata_t *tdata, void *arg)
1263prof_tdata_merge_iter(prof_tdata_tree_t *tdatas, prof_tdata_t *tdata,
1264 void *opaque)
1232{
1265{
1233 prof_cnt_t *cnt_all = (prof_cnt_t *)arg;
1266 struct prof_tdata_merge_iter_arg_s *arg =
1267 (struct prof_tdata_merge_iter_arg_s *)opaque;
1234
1268
1235 malloc_mutex_lock(tdata->lock);
1269 malloc_mutex_lock(arg->tsdn, tdata->lock);
1236 if (!tdata->expired) {
1237 size_t tabind;
1238 union {
1239 prof_tctx_t *p;
1240 void *v;
1241 } tctx;
1242
1243 tdata->dumping = true;
1244 memset(&tdata->cnt_summed, 0, sizeof(prof_cnt_t));
1245 for (tabind = 0; !ckh_iter(&tdata->bt2tctx, &tabind, NULL,
1246 &tctx.v);)
1247 prof_tctx_merge_tdata(tctx.p, tdata);
1281 prof_tctx_merge_tdata(arg->tsdn, tctx.p, tdata);
1248
1282
1249 cnt_all->curobjs += tdata->cnt_summed.curobjs;
1250 cnt_all->curbytes += tdata->cnt_summed.curbytes;
1283 arg->cnt_all.curobjs += tdata->cnt_summed.curobjs;
1284 arg->cnt_all.curbytes += tdata->cnt_summed.curbytes;
1251 if (opt_prof_accum) {
1285 if (opt_prof_accum) {
1252 cnt_all->accumobjs += tdata->cnt_summed.accumobjs;
1253 cnt_all->accumbytes += tdata->cnt_summed.accumbytes;
1286 arg->cnt_all.accumobjs += tdata->cnt_summed.accumobjs;
1287 arg->cnt_all.accumbytes += tdata->cnt_summed.accumbytes;
1254 }
1255 } else
1256 tdata->dumping = false;
1288 }
1289 } else
1290 tdata->dumping = false;
1257 malloc_mutex_unlock(tdata->lock);
1291 malloc_mutex_unlock(arg->tsdn, tdata->lock);
1258
1259 return (NULL);
1260}
1261
1262static prof_tdata_t *
1263prof_tdata_dump_iter(prof_tdata_tree_t *tdatas, prof_tdata_t *tdata, void *arg)
1264{
1265 bool propagate_err = *(bool *)arg;

--- 12 unchanged lines hidden (view full) ---

1278 return (NULL);
1279}
1280
1281#ifdef JEMALLOC_JET
1282#undef prof_dump_header
1283#define prof_dump_header JEMALLOC_N(prof_dump_header_impl)
1284#endif
1285static bool
1286prof_dump_header(bool propagate_err, const prof_cnt_t *cnt_all)
1320prof_dump_header(tsdn_t *tsdn, bool propagate_err, const prof_cnt_t *cnt_all)
1287{
1288 bool ret;
1289
1290 if (prof_dump_printf(propagate_err,
1291 "heap_v2/%"FMTu64"\n"
1292 " t*: %"FMTu64": %"FMTu64" [%"FMTu64": %"FMTu64"]\n",
1293 ((uint64_t)1U << lg_prof_sample), cnt_all->curobjs,
1294 cnt_all->curbytes, cnt_all->accumobjs, cnt_all->accumbytes))
1295 return (true);
1296
1297 malloc_mutex_lock(&tdatas_mtx);
1331 malloc_mutex_lock(tsdn, &tdatas_mtx);
1298 ret = (tdata_tree_iter(&tdatas, NULL, prof_tdata_dump_iter,
1299 (void *)&propagate_err) != NULL);
1332 ret = (tdata_tree_iter(&tdatas, NULL, prof_tdata_dump_iter,
1333 (void *)&propagate_err) != NULL);
1300 malloc_mutex_unlock(&tdatas_mtx);
1334 malloc_mutex_unlock(tsdn, &tdatas_mtx);
1301 return (ret);
1302}
1303#ifdef JEMALLOC_JET
1304#undef prof_dump_header
1305#define prof_dump_header JEMALLOC_N(prof_dump_header)
1306prof_dump_header_t *prof_dump_header = JEMALLOC_N(prof_dump_header_impl);
1307#endif
1308
1309/* gctx->lock is held. */
1310static bool
1343static bool
1311prof_dump_gctx(bool propagate_err, prof_gctx_t *gctx, const prof_bt_t *bt,
1312 prof_gctx_tree_t *gctxs)
1344prof_dump_gctx(tsdn_t *tsdn, bool propagate_err, prof_gctx_t *gctx,
1345 const prof_bt_t *bt, prof_gctx_tree_t *gctxs)
1313{
1314 bool ret;
1315 unsigned i;
1346{
1347 bool ret;
1348 unsigned i;
1349 struct prof_tctx_dump_iter_arg_s prof_tctx_dump_iter_arg;
1316
1317 cassert(config_prof);
1350
1351 cassert(config_prof);
1352 malloc_mutex_assert_owner(tsdn, gctx->lock);
1318
1319 /* Avoid dumping such gctx's that have no useful data. */
1320 if ((!opt_prof_accum && gctx->cnt_summed.curobjs == 0) ||
1321 (opt_prof_accum && gctx->cnt_summed.accumobjs == 0)) {
1322 assert(gctx->cnt_summed.curobjs == 0);
1323 assert(gctx->cnt_summed.curbytes == 0);
1324 assert(gctx->cnt_summed.accumobjs == 0);
1325 assert(gctx->cnt_summed.accumbytes == 0);

--- 17 unchanged lines hidden (view full) ---

1343 "\n"
1344 " t*: %"FMTu64": %"FMTu64" [%"FMTu64": %"FMTu64"]\n",
1345 gctx->cnt_summed.curobjs, gctx->cnt_summed.curbytes,
1346 gctx->cnt_summed.accumobjs, gctx->cnt_summed.accumbytes)) {
1347 ret = true;
1348 goto label_return;
1349 }
1350
1386 prof_tctx_dump_iter_arg.tsdn = tsdn;
1387 prof_tctx_dump_iter_arg.propagate_err = propagate_err;
1351 if (tctx_tree_iter(&gctx->tctxs, NULL, prof_tctx_dump_iter,
1388 if (tctx_tree_iter(&gctx->tctxs, NULL, prof_tctx_dump_iter,
1352 (void *)&propagate_err) != NULL) {
1389 (void *)&prof_tctx_dump_iter_arg) != NULL) {
1353 ret = true;
1354 goto label_return;
1355 }
1356
1357 ret = false;
1358label_return:
1359 return (ret);
1360}

--- 76 unchanged lines hidden (view full) ---

1437
1438 ret = false;
1439label_return:
1440 if (mfd != -1)
1441 close(mfd);
1442 return (ret);
1443}
1444
1482/*
1483 * See prof_sample_threshold_update() comment for why the body of this function
1484 * is conditionally compiled.
1485 */
1445static void
1446prof_leakcheck(const prof_cnt_t *cnt_all, size_t leak_ngctx,
1447 const char *filename)
1448{
1449
1491#ifdef JEMALLOC_PROF
1492 /*
1493 * Scaling is equivalent AdjustSamples() in jeprof, but the result may
1494 * differ slightly from what jeprof reports, because here we scale the
1495 * summary values, whereas jeprof scales each context individually and
1496 * reports the sums of the scaled values.
1497 */
1450 if (cnt_all->curbytes != 0) {
1498 if (cnt_all->curbytes != 0) {
1451 malloc_printf("<jemalloc>: Leak summary: %"FMTu64" byte%s, %"
1452 FMTu64" object%s, %zu context%s\n",
1453 cnt_all->curbytes, (cnt_all->curbytes != 1) ? "s" : "",
1454 cnt_all->curobjs, (cnt_all->curobjs != 1) ? "s" : "",
1455 leak_ngctx, (leak_ngctx != 1) ? "s" : "");
1499 double sample_period = (double)((uint64_t)1 << lg_prof_sample);
1500 double ratio = (((double)cnt_all->curbytes) /
1501 (double)cnt_all->curobjs) / sample_period;
1502 double scale_factor = 1.0 / (1.0 - exp(-ratio));
1503 uint64_t curbytes = (uint64_t)round(((double)cnt_all->curbytes)
1504 * scale_factor);
1505 uint64_t curobjs = (uint64_t)round(((double)cnt_all->curobjs) *
1506 scale_factor);
1507
1508 malloc_printf("<jemalloc>: Leak approximation summary: ~%"FMTu64
1509 " byte%s, ~%"FMTu64" object%s, >= %zu context%s\n",
1510 curbytes, (curbytes != 1) ? "s" : "", curobjs, (curobjs !=
1511 1) ? "s" : "", leak_ngctx, (leak_ngctx != 1) ? "s" : "");
1456 malloc_printf(
1457 "<jemalloc>: Run jeprof on \"%s\" for leak detail\n",
1458 filename);
1459 }
1516#endif
1460}
1461
1517}
1518
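
The block added to prof_leakcheck() above converts the sampled totals into an approximation of actual live usage by multiplying them by 1/(1 - exp(-ratio)), where ratio is the mean sampled object size divided by the sample period (2^lg_prof_sample). The standalone program below just evaluates that formula on made-up numbers so the arithmetic is easy to check; it is not jemalloc code.

#include <math.h>
#include <stdint.h>
#include <stdio.h>

int main(void) {
	uint64_t lg_prof_sample = 19;              /* example: 2^19 byte sample period */
	double sample_period = (double)((uint64_t)1 << lg_prof_sample);
	uint64_t curbytes = 1310720, curobjs = 10; /* hypothetical sampled sums */
	double ratio = (((double)curbytes) / (double)curobjs) / sample_period;
	double scale_factor = 1.0 / (1.0 - exp(-ratio));

	printf("~%.0f bytes, ~%.0f objects\n",
	    round(((double)curbytes) * scale_factor),
	    round(((double)curobjs) * scale_factor));
	return (0);
}
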
1519struct prof_gctx_dump_iter_arg_s {
1520 tsdn_t *tsdn;
1521 bool propagate_err;
1522};
1523
1462static prof_gctx_t *
1524static prof_gctx_t *
1463prof_gctx_dump_iter(prof_gctx_tree_t *gctxs, prof_gctx_t *gctx, void *arg)
1525prof_gctx_dump_iter(prof_gctx_tree_t *gctxs, prof_gctx_t *gctx, void *opaque)
1464{
1465 prof_gctx_t *ret;
1526{
1527 prof_gctx_t *ret;
1466 bool propagate_err = *(bool *)arg;
1528 struct prof_gctx_dump_iter_arg_s *arg =
1529 (struct prof_gctx_dump_iter_arg_s *)opaque;
1467
1530
1468 malloc_mutex_lock(gctx->lock);
1531 malloc_mutex_lock(arg->tsdn, gctx->lock);
1469
1532
1470 if (prof_dump_gctx(propagate_err, gctx, &gctx->bt, gctxs)) {
1533 if (prof_dump_gctx(arg->tsdn, arg->propagate_err, gctx, &gctx->bt,
1534 gctxs)) {
1471 ret = gctx;
1472 goto label_return;
1473 }
1474
1475 ret = NULL;
1476label_return:
1477 malloc_mutex_unlock(gctx->lock);
1541 malloc_mutex_unlock(arg->tsdn, gctx->lock);
1478 return (ret);
1479}
1480
1481static bool
1482prof_dump(tsd_t *tsd, bool propagate_err, const char *filename, bool leakcheck)
1483{
1484 prof_tdata_t *tdata;
1485 prof_cnt_t cnt_all;
1549 struct prof_tdata_merge_iter_arg_s prof_tdata_merge_iter_arg;
1486 size_t tabind;
1487 union {
1488 prof_gctx_t *p;
1489 void *v;
1490 } gctx;
1491 size_t leak_ngctx;
1555 struct prof_gctx_merge_iter_arg_s prof_gctx_merge_iter_arg;
1556 struct prof_gctx_dump_iter_arg_s prof_gctx_dump_iter_arg;
1492 prof_gctx_tree_t gctxs;
1493
1494 cassert(config_prof);
1495
1496 tdata = prof_tdata_get(tsd, true);
1497 if (tdata == NULL)
1498 return (true);
1499
1500 malloc_mutex_lock(&prof_dump_mtx);
1565 malloc_mutex_lock(tsd_tsdn(tsd), &prof_dump_mtx);
1501 prof_enter(tsd, tdata);
1502
1503 /*
1504 * Put gctx's in limbo and clear their counters in preparation for
1505 * summing.
1506 */
1507 gctx_tree_new(&gctxs);
1508 for (tabind = 0; !ckh_iter(&bt2gctx, &tabind, NULL, &gctx.v);)
1509 prof_dump_gctx_prep(gctx.p, &gctxs);
1574 prof_dump_gctx_prep(tsd_tsdn(tsd), gctx.p, &gctxs);
1510
1511 /*
1512 * Iterate over tdatas, and for the non-expired ones snapshot their tctx
1513 * stats and merge them into the associated gctx's.
1514 */
1515 memset(&cnt_all, 0, sizeof(prof_cnt_t));
1516 malloc_mutex_lock(&tdatas_mtx);
1517 tdata_tree_iter(&tdatas, NULL, prof_tdata_merge_iter, (void *)&cnt_all);
1518 malloc_mutex_unlock(&tdatas_mtx);
1580 prof_tdata_merge_iter_arg.tsdn = tsd_tsdn(tsd);
1581 memset(&prof_tdata_merge_iter_arg.cnt_all, 0, sizeof(prof_cnt_t));
1582 malloc_mutex_lock(tsd_tsdn(tsd), &tdatas_mtx);
1583 tdata_tree_iter(&tdatas, NULL, prof_tdata_merge_iter,
1584 (void *)&prof_tdata_merge_iter_arg);
1585 malloc_mutex_unlock(tsd_tsdn(tsd), &tdatas_mtx);
1519
1520 /* Merge tctx stats into gctx's. */
1586
1587 /* Merge tctx stats into gctx's. */
1521 leak_ngctx = 0;
1522 gctx_tree_iter(&gctxs, NULL, prof_gctx_merge_iter, (void *)&leak_ngctx);
1588 prof_gctx_merge_iter_arg.tsdn = tsd_tsdn(tsd);
1589 prof_gctx_merge_iter_arg.leak_ngctx = 0;
1590 gctx_tree_iter(&gctxs, NULL, prof_gctx_merge_iter,
1591 (void *)&prof_gctx_merge_iter_arg);
1523
1524 prof_leave(tsd, tdata);
1525
1526 /* Create dump file. */
1527 if ((prof_dump_fd = prof_dump_open(propagate_err, filename)) == -1)
1528 goto label_open_close_error;
1529
1530 /* Dump profile header. */
1531 if (prof_dump_header(propagate_err, &cnt_all))
1600 if (prof_dump_header(tsd_tsdn(tsd), propagate_err,
1601 &prof_tdata_merge_iter_arg.cnt_all))
1532 goto label_write_error;
1533
1534 /* Dump per gctx profile stats. */
1602 goto label_write_error;
1603
1604 /* Dump per gctx profile stats. */
1605 prof_gctx_dump_iter_arg.tsdn = tsd_tsdn(tsd);
1606 prof_gctx_dump_iter_arg.propagate_err = propagate_err;
1535 if (gctx_tree_iter(&gctxs, NULL, prof_gctx_dump_iter,
1607 if (gctx_tree_iter(&gctxs, NULL, prof_gctx_dump_iter,
1536 (void *)&propagate_err) != NULL)
1608 (void *)&prof_gctx_dump_iter_arg) != NULL)
1537 goto label_write_error;
1538
1539 /* Dump /proc/<pid>/maps if possible. */
1540 if (prof_dump_maps(propagate_err))
1541 goto label_write_error;
1542
1543 if (prof_dump_close(propagate_err))
1544 goto label_open_close_error;
1545
1546 prof_gctx_finish(tsd, &gctxs);
1547 malloc_mutex_unlock(&prof_dump_mtx);
1619 malloc_mutex_unlock(tsd_tsdn(tsd), &prof_dump_mtx);
1548
1620
1549 if (leakcheck)
1550 prof_leakcheck(&cnt_all, leak_ngctx, filename);
1551
1621 if (leakcheck) {
1622 prof_leakcheck(&prof_tdata_merge_iter_arg.cnt_all,
1623 prof_gctx_merge_iter_arg.leak_ngctx, filename);
1624 }
1552 return (false);
1553label_write_error:
1554 prof_dump_close(propagate_err);
1555label_open_close_error:
1556 prof_gctx_finish(tsd, &gctxs);
1557 malloc_mutex_unlock(&prof_dump_mtx);
1630 malloc_mutex_unlock(tsd_tsdn(tsd), &prof_dump_mtx);
1558 return (true);
1559}
1560
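
prof_dump() above holds prof_dump_mtx for the entire dump and funnels both failure paths through labels that release the mutex before returning true. A compact sketch of that control flow, with hypothetical helpers and a pthread mutex; only the shape of the error handling is the point.

#include <pthread.h>
#include <stdbool.h>

static pthread_mutex_t dump_mtx = PTHREAD_MUTEX_INITIALIZER;

static bool do_dump(bool open_ok, bool write_ok) {
	pthread_mutex_lock(&dump_mtx);
	if (!open_ok)
		goto label_open_close_error;
	if (!write_ok)
		goto label_write_error;
	pthread_mutex_unlock(&dump_mtx);
	return (false);                /* success */
label_write_error:
	/* Close the partially written file here. */
label_open_close_error:
	pthread_mutex_unlock(&dump_mtx);
	return (true);                 /* failure */
}
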
1561#define DUMP_FILENAME_BUFSIZE (PATH_MAX + 1)
1562#define VSEQ_INVALID UINT64_C(0xffffffffffffffff)
1563static void
1564prof_dump_filename(char *filename, char v, uint64_t vseq)
1565{

--- 23 unchanged lines hidden (view full) ---

1589 cassert(config_prof);
1590 assert(opt_prof_final);
1591 assert(opt_prof_prefix[0] != '\0');
1592
1593 if (!prof_booted)
1594 return;
1595 tsd = tsd_fetch();
1596
1597 malloc_mutex_lock(&prof_dump_seq_mtx);
1670 malloc_mutex_lock(tsd_tsdn(tsd), &prof_dump_seq_mtx);
1598 prof_dump_filename(filename, 'f', VSEQ_INVALID);
1671 prof_dump_filename(filename, 'f', VSEQ_INVALID);
1599 malloc_mutex_unlock(&prof_dump_seq_mtx);
1672 malloc_mutex_unlock(tsd_tsdn(tsd), &prof_dump_seq_mtx);
1600 prof_dump(tsd, false, filename, opt_prof_leak);
1601}
1602
1603void
1604prof_idump(void)
1677prof_idump(tsdn_t *tsdn)
1605{
1606 tsd_t *tsd;
1607 prof_tdata_t *tdata;
1608
1609 cassert(config_prof);
1610
1611 if (!prof_booted)
1684 if (!prof_booted || tsdn_null(tsdn))
1612 return;
1685 return;
1613 tsd = tsd_fetch();
1686 tsd = tsdn_tsd(tsdn);
1614 tdata = prof_tdata_get(tsd, false);
1615 if (tdata == NULL)
1616 return;
1617 if (tdata->enq) {
1618 tdata->enq_idump = true;
1619 return;
1620 }
1621
1622 if (opt_prof_prefix[0] != '\0') {
1623 char filename[PATH_MAX + 1];
1624 malloc_mutex_lock(&prof_dump_seq_mtx);
1697 malloc_mutex_lock(tsd_tsdn(tsd), &prof_dump_seq_mtx);
1625 prof_dump_filename(filename, 'i', prof_dump_iseq);
1626 prof_dump_iseq++;
1698 prof_dump_filename(filename, 'i', prof_dump_iseq);
1699 prof_dump_iseq++;
1627 malloc_mutex_unlock(&prof_dump_seq_mtx);
1700 malloc_mutex_unlock(tsd_tsdn(tsd), &prof_dump_seq_mtx);
1628 prof_dump(tsd, false, filename, false);
1629 }
1630}
1631
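
prof_idump() above (and prof_gdump() below) now take the tsdn handle from their caller and return early when it is NULL, that is, when the calling thread has no thread state yet, instead of fetching tsd themselves. Reduced to its shape, with simplified stand-in types as in the earlier sketch:

#include <stdbool.h>
#include <stddef.h>

typedef struct tsd_s { int dummy; } tsd_t;
typedef struct tsdn_s tsdn_t;

static bool booted = true;   /* hypothetical stand-in for prof_booted */

static void idump_trigger(tsdn_t *tsdn) {
	tsd_t *tsd;

	if (!booted || tsdn == NULL)
		return;                 /* no thread state yet: skip this trigger */
	tsd = (tsd_t *)tsdn;            /* tsdn_tsd(): known non-NULL here */
	(void)tsd;                      /* the real code goes on to dump */
}
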
1632bool
1633prof_mdump(const char *filename)
1706prof_mdump(tsd_t *tsd, const char *filename)
1634{
1707{
1635 tsd_t *tsd;
1636 char filename_buf[DUMP_FILENAME_BUFSIZE];
1637
1638 cassert(config_prof);
1639
1640 if (!opt_prof || !prof_booted)
1641 return (true);
1642 tsd = tsd_fetch();
1643
1644 if (filename == NULL) {
1645 /* No filename specified, so automatically generate one. */
1646 if (opt_prof_prefix[0] == '\0')
1647 return (true);
1648 malloc_mutex_lock(&prof_dump_seq_mtx);
1719 malloc_mutex_lock(tsd_tsdn(tsd), &prof_dump_seq_mtx);
1649 prof_dump_filename(filename_buf, 'm', prof_dump_mseq);
1650 prof_dump_mseq++;
1720 prof_dump_filename(filename_buf, 'm', prof_dump_mseq);
1721 prof_dump_mseq++;
1651 malloc_mutex_unlock(&prof_dump_seq_mtx);
1722 malloc_mutex_unlock(tsd_tsdn(tsd), &prof_dump_seq_mtx);
1652 filename = filename_buf;
1653 }
1654 return (prof_dump(tsd, true, filename, false));
1655}
1656
1657void
1658prof_gdump(void)
1729prof_gdump(tsdn_t *tsdn)
1659{
1660 tsd_t *tsd;
1661 prof_tdata_t *tdata;
1662
1663 cassert(config_prof);
1664
1665 if (!prof_booted)
1736 if (!prof_booted || tsdn_null(tsdn))
1666 return;
1737 return;
1667 tsd = tsd_fetch();
1738 tsd = tsdn_tsd(tsdn);
1668 tdata = prof_tdata_get(tsd, false);
1669 if (tdata == NULL)
1670 return;
1671 if (tdata->enq) {
1672 tdata->enq_gdump = true;
1673 return;
1674 }
1675
1676 if (opt_prof_prefix[0] != '\0') {
1677 char filename[DUMP_FILENAME_BUFSIZE];
1678 malloc_mutex_lock(&prof_dump_seq_mtx);
1749 malloc_mutex_lock(tsdn, &prof_dump_seq_mtx);
1679 prof_dump_filename(filename, 'u', prof_dump_useq);
1680 prof_dump_useq++;
1750 prof_dump_filename(filename, 'u', prof_dump_useq);
1751 prof_dump_useq++;
1681 malloc_mutex_unlock(&prof_dump_seq_mtx);
1752 malloc_mutex_unlock(tsdn, &prof_dump_seq_mtx);
1682 prof_dump(tsd, false, filename, false);
1683 }
1684}
1685
1686static void
1687prof_bt_hash(const void *key, size_t r_hash[2])
1688{
1689 prof_bt_t *bt = (prof_bt_t *)key;

--- 12 unchanged lines hidden (view full) ---

1702 cassert(config_prof);
1703
1704 if (bt1->len != bt2->len)
1705 return (false);
1706 return (memcmp(bt1->vec, bt2->vec, bt1->len * sizeof(void *)) == 0);
1707}
1708
1709JEMALLOC_INLINE_C uint64_t
1753 prof_dump(tsd, false, filename, false);
1754 }
1755}
1756
1757static void
1758prof_bt_hash(const void *key, size_t r_hash[2])
1759{
1760 prof_bt_t *bt = (prof_bt_t *)key;

--- 12 unchanged lines hidden (view full) ---

1773 cassert(config_prof);
1774
1775 if (bt1->len != bt2->len)
1776 return (false);
1777 return (memcmp(bt1->vec, bt2->vec, bt1->len * sizeof(void *)) == 0);
1778}
1779
1780JEMALLOC_INLINE_C uint64_t
1710prof_thr_uid_alloc(void)
1781prof_thr_uid_alloc(tsdn_t *tsdn)
1711{
1712 uint64_t thr_uid;
1713
1782{
1783 uint64_t thr_uid;
1784
1714 malloc_mutex_lock(&next_thr_uid_mtx);
1785 malloc_mutex_lock(tsdn, &next_thr_uid_mtx);
1715 thr_uid = next_thr_uid;
1716 next_thr_uid++;
1786 thr_uid = next_thr_uid;
1787 next_thr_uid++;
1717 malloc_mutex_unlock(&next_thr_uid_mtx);
1788 malloc_mutex_unlock(tsdn, &next_thr_uid_mtx);
1718
1719 return (thr_uid);
1720}
1721
1722static prof_tdata_t *
1789
1790 return (thr_uid);
1791}
1792
1793static prof_tdata_t *
1723prof_tdata_init_impl(tsd_t *tsd, uint64_t thr_uid, uint64_t thr_discrim,
1794prof_tdata_init_impl(tsdn_t *tsdn, uint64_t thr_uid, uint64_t thr_discrim,
1724 char *thread_name, bool active)
1725{
1726 prof_tdata_t *tdata;
1795 char *thread_name, bool active)
1796{
1797 prof_tdata_t *tdata;
1727 tcache_t *tcache;
1728
1729 cassert(config_prof);
1730
1731 /* Initialize an empty cache for this thread. */
1798
1799 cassert(config_prof);
1800
1801 /* Initialize an empty cache for this thread. */
1732 tcache = tcache_get(tsd, true);
1733 tdata = (prof_tdata_t *)iallocztm(tsd, sizeof(prof_tdata_t),
1734 size2index(sizeof(prof_tdata_t)), false, tcache, true, NULL, true);
1802 tdata = (prof_tdata_t *)iallocztm(tsdn, sizeof(prof_tdata_t),
1803 size2index(sizeof(prof_tdata_t)), false, NULL, true,
1804 arena_get(TSDN_NULL, 0, true), true);
1735 if (tdata == NULL)
1736 return (NULL);
1737
1738 tdata->lock = prof_tdata_mutex_choose(thr_uid);
1739 tdata->thr_uid = thr_uid;
1740 tdata->thr_discrim = thr_discrim;
1741 tdata->thread_name = thread_name;
1742 tdata->attached = true;
1743 tdata->expired = false;
1744 tdata->tctx_uid_next = 0;
1745
1805 if (tdata == NULL)
1806 return (NULL);
1807
1808 tdata->lock = prof_tdata_mutex_choose(thr_uid);
1809 tdata->thr_uid = thr_uid;
1810 tdata->thr_discrim = thr_discrim;
1811 tdata->thread_name = thread_name;
1812 tdata->attached = true;
1813 tdata->expired = false;
1814 tdata->tctx_uid_next = 0;
1815
1746 if (ckh_new(tsd, &tdata->bt2tctx, PROF_CKH_MINITEMS,
1816 if (ckh_new(tsdn, &tdata->bt2tctx, PROF_CKH_MINITEMS,
1747 prof_bt_hash, prof_bt_keycomp)) {
1817 prof_bt_hash, prof_bt_keycomp)) {
1748 idalloctm(tsd, tdata, tcache, true, true);
1818 idalloctm(tsdn, tdata, NULL, true, true);
1749 return (NULL);
1750 }
1751
1752 tdata->prng_state = (uint64_t)(uintptr_t)tdata;
1753 prof_sample_threshold_update(tdata);
1754
1755 tdata->enq = false;
1756 tdata->enq_idump = false;
1757 tdata->enq_gdump = false;
1758
1759 tdata->dumping = false;
1760 tdata->active = active;
1761
1819 return (NULL);
1820 }
1821
1822 tdata->prng_state = (uint64_t)(uintptr_t)tdata;
1823 prof_sample_threshold_update(tdata);
1824
1825 tdata->enq = false;
1826 tdata->enq_idump = false;
1827 tdata->enq_gdump = false;
1828
1829 tdata->dumping = false;
1830 tdata->active = active;
1831
1762 malloc_mutex_lock(&tdatas_mtx);
1832 malloc_mutex_lock(tsdn, &tdatas_mtx);
1763 tdata_tree_insert(&tdatas, tdata);
1833 tdata_tree_insert(&tdatas, tdata);
1764 malloc_mutex_unlock(&tdatas_mtx);
1834 malloc_mutex_unlock(tsdn, &tdatas_mtx);
1765
1766 return (tdata);
1767}
1768
1769prof_tdata_t *
1835
1836 return (tdata);
1837}
1838
1839prof_tdata_t *
1770prof_tdata_init(tsd_t *tsd)
1840prof_tdata_init(tsdn_t *tsdn)
1771{
1772
1841{
1842
1773 return (prof_tdata_init_impl(tsd, prof_thr_uid_alloc(), 0, NULL,
1774 prof_thread_active_init_get()));
1843 return (prof_tdata_init_impl(tsdn, prof_thr_uid_alloc(tsdn), 0, NULL,
1844 prof_thread_active_init_get(tsdn)));
1775}
1776
1845}
1846
1777/* tdata->lock must be held. */
1778static bool
1847static bool
1779prof_tdata_should_destroy(prof_tdata_t *tdata, bool even_if_attached)
1848prof_tdata_should_destroy_unlocked(prof_tdata_t *tdata, bool even_if_attached)
1780{
1781
1782 if (tdata->attached && !even_if_attached)
1783 return (false);
1784 if (ckh_count(&tdata->bt2tctx) != 0)
1785 return (false);
1786 return (true);
1787}
1788
1849{
1850
1851 if (tdata->attached && !even_if_attached)
1852 return (false);
1853 if (ckh_count(&tdata->bt2tctx) != 0)
1854 return (false);
1855 return (true);
1856}
1857
1789/* tdatas_mtx must be held. */
1858static bool
1859prof_tdata_should_destroy(tsdn_t *tsdn, prof_tdata_t *tdata,
1860 bool even_if_attached)
1861{
1862
1863 malloc_mutex_assert_owner(tsdn, tdata->lock);
1864
1865 return (prof_tdata_should_destroy_unlocked(tdata, even_if_attached));
1866}
1867
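The "lock must be held" comments are replaced by malloc_mutex_assert_owner() calls, so the precondition of prof_tdata_should_destroy() is now checked at run time in debug builds. A self-contained sketch of that idiom follows, using an invented owner-tracking wrapper around a pthread mutex rather than jemalloc's witness machinery.

/* Illustration only: record the owning thread so preconditions can assert. */
#include <assert.h>
#include <pthread.h>
#include <stdbool.h>

typedef struct {
        pthread_mutex_t mtx;
        pthread_t owner;
        bool held;
} owned_mutex_t;

static void
owned_lock(owned_mutex_t *m)
{
        pthread_mutex_lock(&m->mtx);
        m->owner = pthread_self();
        m->held = true;
}

static void
owned_unlock(owned_mutex_t *m)
{
        m->held = false;
        pthread_mutex_unlock(&m->mtx);
}

static void
owned_assert_owner(owned_mutex_t *m)
{
        assert(m->held && pthread_equal(m->owner, pthread_self()));
}

/* Analogue of prof_tdata_should_destroy(): the precondition lives in code. */
static bool
should_destroy(owned_mutex_t *m, unsigned refs)
{
        owned_assert_owner(m);
        return (refs == 0);
}

int
main(void)
{
        owned_mutex_t m;

        pthread_mutex_init(&m.mtx, NULL);
        m.held = false;
        owned_lock(&m);
        (void)should_destroy(&m, 0);
        owned_unlock(&m);
        pthread_mutex_destroy(&m.mtx);
        return (0);
}
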
1790static void
1868static void
1791prof_tdata_destroy_locked(tsd_t *tsd, prof_tdata_t *tdata,
1869prof_tdata_destroy_locked(tsdn_t *tsdn, prof_tdata_t *tdata,
1792 bool even_if_attached)
1793{
1870 bool even_if_attached)
1871{
1794 tcache_t *tcache;
1795
1872
1796 assert(prof_tdata_should_destroy(tdata, even_if_attached));
1797 assert(tsd_prof_tdata_get(tsd) != tdata);
1873 malloc_mutex_assert_owner(tsdn, &tdatas_mtx);
1798
1874
1875 assert(tsdn_null(tsdn) || tsd_prof_tdata_get(tsdn_tsd(tsdn)) != tdata);
1876
1799 tdata_tree_remove(&tdatas, tdata);
1800
1877 tdata_tree_remove(&tdatas, tdata);
1878
1801 tcache = tcache_get(tsd, false);
1879 assert(prof_tdata_should_destroy_unlocked(tdata, even_if_attached));
1880
1802 if (tdata->thread_name != NULL)
1881 if (tdata->thread_name != NULL)
1803 idalloctm(tsd, tdata->thread_name, tcache, true, true);
1804 ckh_delete(tsd, &tdata->bt2tctx);
1805 idalloctm(tsd, tdata, tcache, true, true);
1882 idalloctm(tsdn, tdata->thread_name, NULL, true, true);
1883 ckh_delete(tsdn, &tdata->bt2tctx);
1884 idalloctm(tsdn, tdata, NULL, true, true);
1806}
1807
1808static void
1885}
1886
1887static void
1809prof_tdata_destroy(tsd_t *tsd, prof_tdata_t *tdata, bool even_if_attached)
1888prof_tdata_destroy(tsdn_t *tsdn, prof_tdata_t *tdata, bool even_if_attached)
1810{
1811
1889{
1890
1812 malloc_mutex_lock(&tdatas_mtx);
1813 prof_tdata_destroy_locked(tsd, tdata, even_if_attached);
1814 malloc_mutex_unlock(&tdatas_mtx);
1891 malloc_mutex_lock(tsdn, &tdatas_mtx);
1892 prof_tdata_destroy_locked(tsdn, tdata, even_if_attached);
1893 malloc_mutex_unlock(tsdn, &tdatas_mtx);
1815}
1816
1817static void
1818prof_tdata_detach(tsd_t *tsd, prof_tdata_t *tdata)
1819{
1820 bool destroy_tdata;
1821
1894}
1895
1896static void
1897prof_tdata_detach(tsd_t *tsd, prof_tdata_t *tdata)
1898{
1899 bool destroy_tdata;
1900
1822 malloc_mutex_lock(tdata->lock);
1901 malloc_mutex_lock(tsd_tsdn(tsd), tdata->lock);
1823 if (tdata->attached) {
1902 if (tdata->attached) {
1824 destroy_tdata = prof_tdata_should_destroy(tdata, true);
1903 destroy_tdata = prof_tdata_should_destroy(tsd_tsdn(tsd), tdata,
1904 true);
1825 /*
1826 * Only detach if !destroy_tdata, because detaching would allow
1827 * another thread to win the race to destroy tdata.
1828 */
1829 if (!destroy_tdata)
1830 tdata->attached = false;
1831 tsd_prof_tdata_set(tsd, NULL);
1832 } else
1833 destroy_tdata = false;
1905 /*
1906 * Only detach if !destroy_tdata, because detaching would allow
1907 * another thread to win the race to destroy tdata.
1908 */
1909 if (!destroy_tdata)
1910 tdata->attached = false;
1911 tsd_prof_tdata_set(tsd, NULL);
1912 } else
1913 destroy_tdata = false;
1834 malloc_mutex_unlock(tdata->lock);
1914 malloc_mutex_unlock(tsd_tsdn(tsd), tdata->lock);
1835 if (destroy_tdata)
1915 if (destroy_tdata)
1836 prof_tdata_destroy(tsd, tdata, true);
1916 prof_tdata_destroy(tsd_tsdn(tsd), tdata, true);
1837}
1838
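The comment in prof_tdata_detach() above captures the key ordering rule: decide under tdata->lock whether this thread will destroy the tdata, leave it marked attached if so (so no other thread reaches the same conclusion), and perform the destruction only after dropping the lock. A generic sketch of that decide-under-the-lock, destroy-after-unlocking shape, with illustrative types that are not jemalloc's:

#include <pthread.h>
#include <stdbool.h>
#include <stdlib.h>

typedef struct {
        pthread_mutex_t lock;
        bool attached;
        unsigned records;       /* cf. ckh_count(&tdata->bt2tctx) */
} tracker_t;

static void
tracker_detach(tracker_t *t)
{
        bool destroy;

        pthread_mutex_lock(&t->lock);
        destroy = (t->records == 0);
        /*
         * Clear "attached" only when not destroying, so no other thread can
         * also conclude that the tracker is eligible for destruction.
         */
        if (!destroy)
                t->attached = false;
        pthread_mutex_unlock(&t->lock);
        if (destroy) {
                pthread_mutex_destroy(&t->lock);
                free(t);        /* sole owner once the decision is made */
        }
}

int
main(void)
{
        tracker_t *t = malloc(sizeof(*t));

        if (t == NULL)
                return (1);
        pthread_mutex_init(&t->lock, NULL);
        t->attached = true;
        t->records = 0;
        tracker_detach(t);      /* decides to destroy and frees t */
        return (0);
}
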
1839prof_tdata_t *
1840prof_tdata_reinit(tsd_t *tsd, prof_tdata_t *tdata)
1841{
1842 uint64_t thr_uid = tdata->thr_uid;
1843 uint64_t thr_discrim = tdata->thr_discrim + 1;
1844 char *thread_name = (tdata->thread_name != NULL) ?
1917}
1918
1919prof_tdata_t *
1920prof_tdata_reinit(tsd_t *tsd, prof_tdata_t *tdata)
1921{
1922 uint64_t thr_uid = tdata->thr_uid;
1923 uint64_t thr_discrim = tdata->thr_discrim + 1;
1924 char *thread_name = (tdata->thread_name != NULL) ?
1845 prof_thread_name_alloc(tsd, tdata->thread_name) : NULL;
1925 prof_thread_name_alloc(tsd_tsdn(tsd), tdata->thread_name) : NULL;
1846 bool active = tdata->active;
1847
1848 prof_tdata_detach(tsd, tdata);
1926 bool active = tdata->active;
1927
1928 prof_tdata_detach(tsd, tdata);
1849 return (prof_tdata_init_impl(tsd, thr_uid, thr_discrim, thread_name,
1850 active));
1929 return (prof_tdata_init_impl(tsd_tsdn(tsd), thr_uid, thr_discrim,
1930 thread_name, active));
1851}
1852
1853static bool
1931}
1932
1933static bool
1854prof_tdata_expire(prof_tdata_t *tdata)
1934prof_tdata_expire(tsdn_t *tsdn, prof_tdata_t *tdata)
1855{
1856 bool destroy_tdata;
1857
1935{
1936 bool destroy_tdata;
1937
1858 malloc_mutex_lock(tdata->lock);
1938 malloc_mutex_lock(tsdn, tdata->lock);
1859 if (!tdata->expired) {
1860 tdata->expired = true;
1861 destroy_tdata = tdata->attached ? false :
1939 if (!tdata->expired) {
1940 tdata->expired = true;
1941 destroy_tdata = tdata->attached ? false :
1862 prof_tdata_should_destroy(tdata, false);
1942 prof_tdata_should_destroy(tsdn, tdata, false);
1863 } else
1864 destroy_tdata = false;
1943 } else
1944 destroy_tdata = false;
1865 malloc_mutex_unlock(tdata->lock);
1945 malloc_mutex_unlock(tsdn, tdata->lock);
1866
1867 return (destroy_tdata);
1868}
1869
1870static prof_tdata_t *
1871prof_tdata_reset_iter(prof_tdata_tree_t *tdatas, prof_tdata_t *tdata, void *arg)
1872{
1946
1947 return (destroy_tdata);
1948}
1949
1950static prof_tdata_t *
1951prof_tdata_reset_iter(prof_tdata_tree_t *tdatas, prof_tdata_t *tdata, void *arg)
1952{
1953 tsdn_t *tsdn = (tsdn_t *)arg;
1873
1954
1874 return (prof_tdata_expire(tdata) ? tdata : NULL);
1955 return (prof_tdata_expire(tsdn, tdata) ? tdata : NULL);
1875}
1876
1877void
1956}
1957
1958void
1878prof_reset(tsd_t *tsd, size_t lg_sample)
1959prof_reset(tsdn_t *tsdn, size_t lg_sample)
1879{
1880 prof_tdata_t *next;
1881
1882 assert(lg_sample < (sizeof(uint64_t) << 3));
1883
1960{
1961 prof_tdata_t *next;
1962
1963 assert(lg_sample < (sizeof(uint64_t) << 3));
1964
1884 malloc_mutex_lock(&prof_dump_mtx);
1885 malloc_mutex_lock(&tdatas_mtx);
1965 malloc_mutex_lock(tsdn, &prof_dump_mtx);
1966 malloc_mutex_lock(tsdn, &tdatas_mtx);
1886
1887 lg_prof_sample = lg_sample;
1888
1889 next = NULL;
1890 do {
1891 prof_tdata_t *to_destroy = tdata_tree_iter(&tdatas, next,
1967
1968 lg_prof_sample = lg_sample;
1969
1970 next = NULL;
1971 do {
1972 prof_tdata_t *to_destroy = tdata_tree_iter(&tdatas, next,
1892 prof_tdata_reset_iter, NULL);
1973 prof_tdata_reset_iter, (void *)tsdn);
1893 if (to_destroy != NULL) {
1894 next = tdata_tree_next(&tdatas, to_destroy);
1974 if (to_destroy != NULL) {
1975 next = tdata_tree_next(&tdatas, to_destroy);
1895 prof_tdata_destroy_locked(tsd, to_destroy, false);
1976 prof_tdata_destroy_locked(tsdn, to_destroy, false);
1896 } else
1897 next = NULL;
1898 } while (next != NULL);
1899
1977 } else
1978 next = NULL;
1979 } while (next != NULL);
1980
1900 malloc_mutex_unlock(&tdatas_mtx);
1901 malloc_mutex_unlock(&prof_dump_mtx);
1981 malloc_mutex_unlock(tsdn, &tdatas_mtx);
1982 malloc_mutex_unlock(tsdn, &prof_dump_mtx);
1902}
1903
1904void
1905prof_tdata_cleanup(tsd_t *tsd)
1906{
1907 prof_tdata_t *tdata;
1908
1909 if (!config_prof)
1910 return;
1911
1912 tdata = tsd_prof_tdata_get(tsd);
1913 if (tdata != NULL)
1914 prof_tdata_detach(tsd, tdata);
1915}
1916
1917bool
1983}
1984
1985void
1986prof_tdata_cleanup(tsd_t *tsd)
1987{
1988 prof_tdata_t *tdata;
1989
1990 if (!config_prof)
1991 return;
1992
1993 tdata = tsd_prof_tdata_get(tsd);
1994 if (tdata != NULL)
1995 prof_tdata_detach(tsd, tdata);
1996}
1997
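prof_reset() above now takes a tsdn and walks the tdata tree under both prof_dump_mtx and tdatas_mtx while installing the new sample rate. From an application the same operation is reachable through the documented "prof.reset" mallctl; a minimal sketch, with 19 (a 2^19-byte average sample interval) as an arbitrary example and the usual --enable-prof/opt.prof assumptions:

/* Sketch: reset profile data and change the sample rate at run time. */
#include <stddef.h>
#include <jemalloc/jemalloc.h>

int
main(void)
{
        size_t lg_sample = 19;          /* illustrative rate */

        return (mallctl("prof.reset", NULL, NULL, &lg_sample,
            sizeof(lg_sample)));
}
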
1998bool
1918prof_active_get(void)
1999prof_active_get(tsdn_t *tsdn)
1919{
1920 bool prof_active_current;
1921
2000{
2001 bool prof_active_current;
2002
1922 malloc_mutex_lock(&prof_active_mtx);
2003 malloc_mutex_lock(tsdn, &prof_active_mtx);
1923 prof_active_current = prof_active;
2004 prof_active_current = prof_active;
1924 malloc_mutex_unlock(&prof_active_mtx);
2005 malloc_mutex_unlock(tsdn, &prof_active_mtx);
1925 return (prof_active_current);
1926}
1927
1928bool
2006 return (prof_active_current);
2007}
2008
2009bool
1929prof_active_set(bool active)
2010prof_active_set(tsdn_t *tsdn, bool active)
1930{
1931 bool prof_active_old;
1932
2011{
2012 bool prof_active_old;
2013
1933 malloc_mutex_lock(&prof_active_mtx);
2014 malloc_mutex_lock(tsdn, &prof_active_mtx);
1934 prof_active_old = prof_active;
1935 prof_active = active;
2015 prof_active_old = prof_active;
2016 prof_active = active;
1936 malloc_mutex_unlock(&prof_active_mtx);
2017 malloc_mutex_unlock(tsdn, &prof_active_mtx);
1937 return (prof_active_old);
1938}
1939
1940const char *
2018 return (prof_active_old);
2019}
2020
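prof_active_get()/prof_active_set() above now serialize on prof_active_mtx with the caller's tsdn. They back the documented "prof.active" mallctl; a minimal sketch that enables sampling only around an allocation-heavy phase (assumes --enable-prof and prof:true in MALLOC_CONF):

/* Sketch: read the old value and enable profiling in one mallctl call. */
#include <stdbool.h>
#include <stddef.h>
#include <jemalloc/jemalloc.h>

int
main(void)
{
        bool active = true, was_active;
        size_t sz = sizeof(was_active);

        if (mallctl("prof.active", &was_active, &sz, &active, sizeof(active)))
                return (1);
        /* ... allocation-heavy phase of interest ... */
        mallctl("prof.active", NULL, NULL, &was_active, sizeof(was_active));
        return (0);
}
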
2021const char *
1941prof_thread_name_get(void)
2022prof_thread_name_get(tsd_t *tsd)
1942{
2023{
1943 tsd_t *tsd;
1944 prof_tdata_t *tdata;
1945
2024 prof_tdata_t *tdata;
2025
1946 tsd = tsd_fetch();
1947 tdata = prof_tdata_get(tsd, true);
1948 if (tdata == NULL)
1949 return ("");
1950 return (tdata->thread_name != NULL ? tdata->thread_name : "");
1951}
1952
1953static char *
2026 tdata = prof_tdata_get(tsd, true);
2027 if (tdata == NULL)
2028 return ("");
2029 return (tdata->thread_name != NULL ? tdata->thread_name : "");
2030}
2031
2032static char *
1954prof_thread_name_alloc(tsd_t *tsd, const char *thread_name)
2033prof_thread_name_alloc(tsdn_t *tsdn, const char *thread_name)
1955{
1956 char *ret;
1957 size_t size;
1958
1959 if (thread_name == NULL)
1960 return (NULL);
1961
1962 size = strlen(thread_name) + 1;
1963 if (size == 1)
1964 return ("");
1965
2034{
2035 char *ret;
2036 size_t size;
2037
2038 if (thread_name == NULL)
2039 return (NULL);
2040
2041 size = strlen(thread_name) + 1;
2042 if (size == 1)
2043 return ("");
2044
1966 ret = iallocztm(tsd, size, size2index(size), false, tcache_get(tsd,
1967 true), true, NULL, true);
2045 ret = iallocztm(tsdn, size, size2index(size), false, NULL, true,
2046 arena_get(TSDN_NULL, 0, true), true);
1968 if (ret == NULL)
1969 return (NULL);
1970 memcpy(ret, thread_name, size);
1971 return (ret);
1972}
1973
1974int
1975prof_thread_name_set(tsd_t *tsd, const char *thread_name)

--- 10 unchanged lines hidden (view full) ---

1986 if (thread_name == NULL)
1987 return (EFAULT);
1988 for (i = 0; thread_name[i] != '\0'; i++) {
1989 char c = thread_name[i];
1990 if (!isgraph(c) && !isblank(c))
1991 return (EFAULT);
1992 }
1993
2047 if (ret == NULL)
2048 return (NULL);
2049 memcpy(ret, thread_name, size);
2050 return (ret);
2051}
2052
2053int
2054prof_thread_name_set(tsd_t *tsd, const char *thread_name)

--- 10 unchanged lines hidden (view full) ---

2065 if (thread_name == NULL)
2066 return (EFAULT);
2067 for (i = 0; thread_name[i] != '\0'; i++) {
2068 char c = thread_name[i];
2069 if (!isgraph(c) && !isblank(c))
2070 return (EFAULT);
2071 }
2072
1994 s = prof_thread_name_alloc(tsd, thread_name);
2073 s = prof_thread_name_alloc(tsd_tsdn(tsd), thread_name);
1995 if (s == NULL)
1996 return (EAGAIN);
1997
1998 if (tdata->thread_name != NULL) {
2074 if (s == NULL)
2075 return (EAGAIN);
2076
2077 if (tdata->thread_name != NULL) {
1999 idalloctm(tsd, tdata->thread_name, tcache_get(tsd, false),
2000 true, true);
2078 idalloctm(tsd_tsdn(tsd), tdata->thread_name, NULL, true, true);
2001 tdata->thread_name = NULL;
2002 }
2003 if (strlen(s) > 0)
2004 tdata->thread_name = s;
2005 return (0);
2006}
2007
2008bool
2079 tdata->thread_name = NULL;
2080 }
2081 if (strlen(s) > 0)
2082 tdata->thread_name = s;
2083 return (0);
2084}
2085
2086bool
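prof_thread_name_set() above validates the name (printable characters only, per the isgraph()/isblank() check) and replaces the old string through the revised tsdn-based allocation path. It is exposed as the documented "thread.prof.name" mallctl; a minimal sketch that labels the calling thread so it is identifiable in heap dumps:

/* Sketch: name the calling thread for profile output. */
#include <jemalloc/jemalloc.h>

int
main(void)
{
        const char *name = "worker-0";  /* illustrative name */

        return (mallctl("thread.prof.name", NULL, NULL, &name, sizeof(name)));
}
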
2009prof_thread_active_get(void)
2087prof_thread_active_get(tsd_t *tsd)
2010{
2088{
2011 tsd_t *tsd;
2012 prof_tdata_t *tdata;
2013
2089 prof_tdata_t *tdata;
2090
2014 tsd = tsd_fetch();
2015 tdata = prof_tdata_get(tsd, true);
2016 if (tdata == NULL)
2017 return (false);
2018 return (tdata->active);
2019}
2020
2021bool
2091 tdata = prof_tdata_get(tsd, true);
2092 if (tdata == NULL)
2093 return (false);
2094 return (tdata->active);
2095}
2096
2097bool
2022prof_thread_active_set(bool active)
2098prof_thread_active_set(tsd_t *tsd, bool active)
2023{
2099{
2024 tsd_t *tsd;
2025 prof_tdata_t *tdata;
2026
2100 prof_tdata_t *tdata;
2101
2027 tsd = tsd_fetch();
2028 tdata = prof_tdata_get(tsd, true);
2029 if (tdata == NULL)
2030 return (true);
2031 tdata->active = active;
2032 return (false);
2033}
2034
2035bool
2102 tdata = prof_tdata_get(tsd, true);
2103 if (tdata == NULL)
2104 return (true);
2105 tdata->active = active;
2106 return (false);
2107}
2108
2109bool
2036prof_thread_active_init_get(void)
2110prof_thread_active_init_get(tsdn_t *tsdn)
2037{
2038 bool active_init;
2039
2111{
2112 bool active_init;
2113
2040 malloc_mutex_lock(&prof_thread_active_init_mtx);
2114 malloc_mutex_lock(tsdn, &prof_thread_active_init_mtx);
2041 active_init = prof_thread_active_init;
2115 active_init = prof_thread_active_init;
2042 malloc_mutex_unlock(&prof_thread_active_init_mtx);
2116 malloc_mutex_unlock(tsdn, &prof_thread_active_init_mtx);
2043 return (active_init);
2044}
2045
2046bool
2117 return (active_init);
2118}
2119
2120bool
2047prof_thread_active_init_set(bool active_init)
2121prof_thread_active_init_set(tsdn_t *tsdn, bool active_init)
2048{
2049 bool active_init_old;
2050
2122{
2123 bool active_init_old;
2124
2051 malloc_mutex_lock(&prof_thread_active_init_mtx);
2125 malloc_mutex_lock(tsdn, &prof_thread_active_init_mtx);
2052 active_init_old = prof_thread_active_init;
2053 prof_thread_active_init = active_init;
2126 active_init_old = prof_thread_active_init;
2127 prof_thread_active_init = active_init;
2054 malloc_mutex_unlock(&prof_thread_active_init_mtx);
2128 malloc_mutex_unlock(tsdn, &prof_thread_active_init_mtx);
2055 return (active_init_old);
2056}
2057
2058bool
2129 return (active_init_old);
2130}
2131
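prof_thread_active_set() and prof_thread_active_init_set() above back the documented "thread.prof.active" and "prof.thread_active_init" mallctls: the former toggles sampling for the calling thread only, the latter sets the default inherited by threads created afterwards. A minimal sketch combining the two (assumes --enable-prof and opt.prof):

/* Sketch: per-thread sampling control. */
#include <stdbool.h>
#include <jemalloc/jemalloc.h>

int
main(void)
{
        bool off = false, on = true;

        /* New threads start with profiling inactive... */
        mallctl("prof.thread_active_init", NULL, NULL, &off, sizeof(off));
        /* ...but this thread keeps sampling its own allocations. */
        mallctl("thread.prof.active", NULL, NULL, &on, sizeof(on));
        return (0);
}
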
2132bool
2059prof_gdump_get(void)
2133prof_gdump_get(tsdn_t *tsdn)
2060{
2061 bool prof_gdump_current;
2062
2134{
2135 bool prof_gdump_current;
2136
2063 malloc_mutex_lock(&prof_gdump_mtx);
2137 malloc_mutex_lock(tsdn, &prof_gdump_mtx);
2064 prof_gdump_current = prof_gdump_val;
2138 prof_gdump_current = prof_gdump_val;
2065 malloc_mutex_unlock(&prof_gdump_mtx);
2139 malloc_mutex_unlock(tsdn, &prof_gdump_mtx);
2066 return (prof_gdump_current);
2067}
2068
2069bool
2140 return (prof_gdump_current);
2141}
2142
2143bool
2070prof_gdump_set(bool gdump)
2144prof_gdump_set(tsdn_t *tsdn, bool gdump)
2071{
2072 bool prof_gdump_old;
2073
2145{
2146 bool prof_gdump_old;
2147
2074 malloc_mutex_lock(&prof_gdump_mtx);
2148 malloc_mutex_lock(tsdn, &prof_gdump_mtx);
2075 prof_gdump_old = prof_gdump_val;
2076 prof_gdump_val = gdump;
2149 prof_gdump_old = prof_gdump_val;
2150 prof_gdump_val = gdump;
2077 malloc_mutex_unlock(&prof_gdump_mtx);
2151 malloc_mutex_unlock(tsdn, &prof_gdump_mtx);
2078 return (prof_gdump_old);
2079}
2080
2081void
2082prof_boot0(void)
2083{
2084
2085 cassert(config_prof);

--- 24 unchanged lines hidden (view full) ---

2110 if (opt_lg_prof_interval >= 0) {
2111 prof_interval = (((uint64_t)1U) <<
2112 opt_lg_prof_interval);
2113 }
2114 }
2115}
2116
2117bool
2152 return (prof_gdump_old);
2153}
2154
2155void
2156prof_boot0(void)
2157{
2158
2159 cassert(config_prof);

--- 24 unchanged lines hidden (view full) ---

2184 if (opt_lg_prof_interval >= 0) {
2185 prof_interval = (((uint64_t)1U) <<
2186 opt_lg_prof_interval);
2187 }
2188 }
2189}
2190
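The boot code above derives prof_interval from opt_lg_prof_interval, and prof_gdump_set() now guards prof_gdump_val with prof_gdump_mtx. Both knobs are visible to applications; a minimal sketch reading the documented "prof.interval" mallctl and enabling high-water-mark dumps via "prof.gdump" (assumes --enable-prof and opt.prof):

/* Sketch: inspect interval-dump configuration and enable gdump at run time. */
#include <inttypes.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>
#include <jemalloc/jemalloc.h>

int
main(void)
{
        uint64_t interval;
        size_t sz = sizeof(interval);
        bool gdump = true;

        if (mallctl("prof.interval", &interval, &sz, NULL, 0) == 0)
                printf("bytes between interval dumps: %" PRIu64 "\n",
                    interval);
        mallctl("prof.gdump", NULL, NULL, &gdump, sizeof(gdump));
        return (0);
}
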
2191bool
2118prof_boot2(void)
2192prof_boot2(tsdn_t *tsdn)
2119{
2120
2121 cassert(config_prof);
2122
2123 if (opt_prof) {
2193{
2194
2195 cassert(config_prof);
2196
2197 if (opt_prof) {
2124 tsd_t *tsd;
2125 unsigned i;
2126
2127 lg_prof_sample = opt_lg_prof_sample;
2128
2129 prof_active = opt_prof_active;
2198 unsigned i;
2199
2200 lg_prof_sample = opt_lg_prof_sample;
2201
2202 prof_active = opt_prof_active;
2130 if (malloc_mutex_init(&prof_active_mtx))
2203 if (malloc_mutex_init(&prof_active_mtx, "prof_active",
2204 WITNESS_RANK_PROF_ACTIVE))
2131 return (true);
2132
2133 prof_gdump_val = opt_prof_gdump;
2205 return (true);
2206
2207 prof_gdump_val = opt_prof_gdump;
2134 if (malloc_mutex_init(&prof_gdump_mtx))
2208 if (malloc_mutex_init(&prof_gdump_mtx, "prof_gdump",
2209 WITNESS_RANK_PROF_GDUMP))
2135 return (true);
2136
2137 prof_thread_active_init = opt_prof_thread_active_init;
2210 return (true);
2211
2212 prof_thread_active_init = opt_prof_thread_active_init;
2138 if (malloc_mutex_init(&prof_thread_active_init_mtx))
2213 if (malloc_mutex_init(&prof_thread_active_init_mtx,
2214 "prof_thread_active_init",
2215 WITNESS_RANK_PROF_THREAD_ACTIVE_INIT))
2139 return (true);
2140
2216 return (true);
2217
2141 tsd = tsd_fetch();
2142 if (ckh_new(tsd, &bt2gctx, PROF_CKH_MINITEMS, prof_bt_hash,
2218 if (ckh_new(tsdn, &bt2gctx, PROF_CKH_MINITEMS, prof_bt_hash,
2143 prof_bt_keycomp))
2144 return (true);
2219 prof_bt_keycomp))
2220 return (true);
2145 if (malloc_mutex_init(&bt2gctx_mtx))
2221 if (malloc_mutex_init(&bt2gctx_mtx, "prof_bt2gctx",
2222 WITNESS_RANK_PROF_BT2GCTX))
2146 return (true);
2147
2148 tdata_tree_new(&tdatas);
2223 return (true);
2224
2225 tdata_tree_new(&tdatas);
2149 if (malloc_mutex_init(&tdatas_mtx))
2226 if (malloc_mutex_init(&tdatas_mtx, "prof_tdatas",
2227 WITNESS_RANK_PROF_TDATAS))
2150 return (true);
2151
2152 next_thr_uid = 0;
2228 return (true);
2229
2230 next_thr_uid = 0;
2153 if (malloc_mutex_init(&next_thr_uid_mtx))
2231 if (malloc_mutex_init(&next_thr_uid_mtx, "prof_next_thr_uid",
2232 WITNESS_RANK_PROF_NEXT_THR_UID))
2154 return (true);
2155
2233 return (true);
2234
2156 if (malloc_mutex_init(&prof_dump_seq_mtx))
2235 if (malloc_mutex_init(&prof_dump_seq_mtx, "prof_dump_seq",
2236 WITNESS_RANK_PROF_DUMP_SEQ))
2157 return (true);
2237 return (true);
2158 if (malloc_mutex_init(&prof_dump_mtx))
2238 if (malloc_mutex_init(&prof_dump_mtx, "prof_dump",
2239 WITNESS_RANK_PROF_DUMP))
2159 return (true);
2160
2161 if (opt_prof_final && opt_prof_prefix[0] != '\0' &&
2162 atexit(prof_fdump) != 0) {
2163 malloc_write("<jemalloc>: Error in atexit()\n");
2164 if (opt_abort)
2165 abort();
2166 }
2167
2240 return (true);
2241
2242 if (opt_prof_final && opt_prof_prefix[0] != '\0' &&
2243 atexit(prof_fdump) != 0) {
2244 malloc_write("<jemalloc>: Error in atexit()\n");
2245 if (opt_abort)
2246 abort();
2247 }
2248
2168 gctx_locks = (malloc_mutex_t *)base_alloc(PROF_NCTX_LOCKS *
2169 sizeof(malloc_mutex_t));
2249 gctx_locks = (malloc_mutex_t *)base_alloc(tsdn, PROF_NCTX_LOCKS
2250 * sizeof(malloc_mutex_t));
2170 if (gctx_locks == NULL)
2171 return (true);
2172 for (i = 0; i < PROF_NCTX_LOCKS; i++) {
2251 if (gctx_locks == NULL)
2252 return (true);
2253 for (i = 0; i < PROF_NCTX_LOCKS; i++) {
2173 if (malloc_mutex_init(&gctx_locks[i]))
2254 if (malloc_mutex_init(&gctx_locks[i], "prof_gctx",
2255 WITNESS_RANK_PROF_GCTX))
2174 return (true);
2175 }
2176
2256 return (true);
2257 }
2258
2177 tdata_locks = (malloc_mutex_t *)base_alloc(PROF_NTDATA_LOCKS *
2178 sizeof(malloc_mutex_t));
2259 tdata_locks = (malloc_mutex_t *)base_alloc(tsdn,
2260 PROF_NTDATA_LOCKS * sizeof(malloc_mutex_t));
2179 if (tdata_locks == NULL)
2180 return (true);
2181 for (i = 0; i < PROF_NTDATA_LOCKS; i++) {
2261 if (tdata_locks == NULL)
2262 return (true);
2263 for (i = 0; i < PROF_NTDATA_LOCKS; i++) {
2182 if (malloc_mutex_init(&tdata_locks[i]))
2264 if (malloc_mutex_init(&tdata_locks[i], "prof_tdata",
2265 WITNESS_RANK_PROF_TDATA))
2183 return (true);
2184 }
2185 }
2186
2187#ifdef JEMALLOC_PROF_LIBGCC
2188 /*
2189 * Cause the backtracing machinery to allocate its internal state
2190 * before enabling profiling.
2191 */
2192 _Unwind_Backtrace(prof_unwind_init_callback, NULL);
2193#endif
2194
2195 prof_booted = true;
2196
2197 return (false);
2198}
2199
2200void
2266 return (true);
2267 }
2268 }
2269
2270#ifdef JEMALLOC_PROF_LIBGCC
2271 /*
2272 * Cause the backtracing machinery to allocate its internal state
2273 * before enabling profiling.
2274 */
2275 _Unwind_Backtrace(prof_unwind_init_callback, NULL);
2276#endif
2277
2278 prof_booted = true;
2279
2280 return (false);
2281}
2282
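Every malloc_mutex_init() call in prof_boot2() now supplies a name and a WITNESS_RANK_* value, so lock acquisitions can be checked against a global rank order in debug builds. The sketch below is a self-contained illustration of that rank-order discipline using plain pthreads; the ranked_mutex_t type is invented for the example and is not jemalloc's witness API.

/* Illustration only: assert that locks are taken in increasing rank order. */
#include <assert.h>
#include <pthread.h>

typedef struct {
        pthread_mutex_t mtx;
        unsigned rank;                  /* cf. WITNESS_RANK_PROF_* */
} ranked_mutex_t;

static _Thread_local unsigned rank_stack[8];
static _Thread_local unsigned rank_depth;

static void
ranked_lock(ranked_mutex_t *m)
{
        /* Taking a lower or equal rank while holding one is a deadlock risk. */
        assert(rank_depth == 0 || m->rank > rank_stack[rank_depth - 1]);
        pthread_mutex_lock(&m->mtx);
        rank_stack[rank_depth++] = m->rank;
}

static void
ranked_unlock(ranked_mutex_t *m)
{
        pthread_mutex_unlock(&m->mtx);
        rank_depth--;                   /* assumes LIFO release */
}

int
main(void)
{
        ranked_mutex_t a = { PTHREAD_MUTEX_INITIALIZER, 1 };
        ranked_mutex_t b = { PTHREAD_MUTEX_INITIALIZER, 2 };

        ranked_lock(&a);
        ranked_lock(&b);        /* ok: higher rank while holding lower */
        ranked_unlock(&b);
        ranked_unlock(&a);
        /* Swapping the two ranked_lock() calls would trip the assert. */
        return (0);
}
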
2283void
2201prof_prefork(void)
2284prof_prefork0(tsdn_t *tsdn)
2202{
2203
2204 if (opt_prof) {
2205 unsigned i;
2206
2285{
2286
2287 if (opt_prof) {
2288 unsigned i;
2289
2207 malloc_mutex_prefork(&tdatas_mtx);
2208 malloc_mutex_prefork(&bt2gctx_mtx);
2209 malloc_mutex_prefork(&next_thr_uid_mtx);
2210 malloc_mutex_prefork(&prof_dump_seq_mtx);
2211 for (i = 0; i < PROF_NCTX_LOCKS; i++)
2212 malloc_mutex_prefork(&gctx_locks[i]);
2290 malloc_mutex_prefork(tsdn, &prof_dump_mtx);
2291 malloc_mutex_prefork(tsdn, &bt2gctx_mtx);
2292 malloc_mutex_prefork(tsdn, &tdatas_mtx);
2213 for (i = 0; i < PROF_NTDATA_LOCKS; i++)
2293 for (i = 0; i < PROF_NTDATA_LOCKS; i++)
2214 malloc_mutex_prefork(&tdata_locks[i]);
2294 malloc_mutex_prefork(tsdn, &tdata_locks[i]);
2295 for (i = 0; i < PROF_NCTX_LOCKS; i++)
2296 malloc_mutex_prefork(tsdn, &gctx_locks[i]);
2215 }
2216}
2217
2218void
2297 }
2298}
2299
2300void
2219prof_postfork_parent(void)
2301prof_prefork1(tsdn_t *tsdn)
2220{
2221
2222 if (opt_prof) {
2302{
2303
2304 if (opt_prof) {
2305 malloc_mutex_prefork(tsdn, &prof_active_mtx);
2306 malloc_mutex_prefork(tsdn, &prof_dump_seq_mtx);
2307 malloc_mutex_prefork(tsdn, &prof_gdump_mtx);
2308 malloc_mutex_prefork(tsdn, &next_thr_uid_mtx);
2309 malloc_mutex_prefork(tsdn, &prof_thread_active_init_mtx);
2310 }
2311}
2312
2313void
2314prof_postfork_parent(tsdn_t *tsdn)
2315{
2316
2317 if (opt_prof) {
2223 unsigned i;
2224
2318 unsigned i;
2319
2225 for (i = 0; i < PROF_NTDATA_LOCKS; i++)
2226 malloc_mutex_postfork_parent(&tdata_locks[i]);
2320 malloc_mutex_postfork_parent(tsdn,
2321 &prof_thread_active_init_mtx);
2322 malloc_mutex_postfork_parent(tsdn, &next_thr_uid_mtx);
2323 malloc_mutex_postfork_parent(tsdn, &prof_gdump_mtx);
2324 malloc_mutex_postfork_parent(tsdn, &prof_dump_seq_mtx);
2325 malloc_mutex_postfork_parent(tsdn, &prof_active_mtx);
2227 for (i = 0; i < PROF_NCTX_LOCKS; i++)
2326 for (i = 0; i < PROF_NCTX_LOCKS; i++)
2228 malloc_mutex_postfork_parent(&gctx_locks[i]);
2229 malloc_mutex_postfork_parent(&prof_dump_seq_mtx);
2230 malloc_mutex_postfork_parent(&next_thr_uid_mtx);
2231 malloc_mutex_postfork_parent(&bt2gctx_mtx);
2232 malloc_mutex_postfork_parent(&tdatas_mtx);
2327 malloc_mutex_postfork_parent(tsdn, &gctx_locks[i]);
2328 for (i = 0; i < PROF_NTDATA_LOCKS; i++)
2329 malloc_mutex_postfork_parent(tsdn, &tdata_locks[i]);
2330 malloc_mutex_postfork_parent(tsdn, &tdatas_mtx);
2331 malloc_mutex_postfork_parent(tsdn, &bt2gctx_mtx);
2332 malloc_mutex_postfork_parent(tsdn, &prof_dump_mtx);
2233 }
2234}
2235
2236void
2333 }
2334}
2335
2336void
2237prof_postfork_child(void)
2337prof_postfork_child(tsdn_t *tsdn)
2238{
2239
2240 if (opt_prof) {
2241 unsigned i;
2242
2338{
2339
2340 if (opt_prof) {
2341 unsigned i;
2342
2243 for (i = 0; i < PROF_NTDATA_LOCKS; i++)
2244 malloc_mutex_postfork_child(&tdata_locks[i]);
2343 malloc_mutex_postfork_child(tsdn, &prof_thread_active_init_mtx);
2344 malloc_mutex_postfork_child(tsdn, &next_thr_uid_mtx);
2345 malloc_mutex_postfork_child(tsdn, &prof_gdump_mtx);
2346 malloc_mutex_postfork_child(tsdn, &prof_dump_seq_mtx);
2347 malloc_mutex_postfork_child(tsdn, &prof_active_mtx);
2245 for (i = 0; i < PROF_NCTX_LOCKS; i++)
2348 for (i = 0; i < PROF_NCTX_LOCKS; i++)
2246 malloc_mutex_postfork_child(&gctx_locks[i]);
2247 malloc_mutex_postfork_child(&prof_dump_seq_mtx);
2248 malloc_mutex_postfork_child(&next_thr_uid_mtx);
2249 malloc_mutex_postfork_child(&bt2gctx_mtx);
2250 malloc_mutex_postfork_child(&tdatas_mtx);
2349 malloc_mutex_postfork_child(tsdn, &gctx_locks[i]);
2350 for (i = 0; i < PROF_NTDATA_LOCKS; i++)
2351 malloc_mutex_postfork_child(tsdn, &tdata_locks[i]);
2352 malloc_mutex_postfork_child(tsdn, &tdatas_mtx);
2353 malloc_mutex_postfork_child(tsdn, &bt2gctx_mtx);
2354 malloc_mutex_postfork_child(tsdn, &prof_dump_mtx);
2251 }
2252}
2253
2254/******************************************************************************/
2355 }
2356}
2357
2358/******************************************************************************/
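
The single prof_prefork() is split into prof_prefork0() and prof_prefork1() so the prof mutexes can be interleaved with other subsystems' locks at the right ranks, and both postfork paths now release in exactly the reverse of the acquisition order. A self-contained sketch of the underlying pthread_atfork() discipline, with two illustrative locks standing in for the mutex arrays above:

/*
 * Sketch: acquire every lock in a fixed order before fork() so the child
 * never inherits a lock held by a thread that does not exist in it, then
 * release in reverse order in both parent and child.
 */
#include <pthread.h>
#include <unistd.h>

static pthread_mutex_t lock_low = PTHREAD_MUTEX_INITIALIZER;   /* e.g. prof_dump_mtx */
static pthread_mutex_t lock_high = PTHREAD_MUTEX_INITIALIZER;  /* e.g. a gctx lock */

static void
prefork(void)
{
        /* Same fixed order every time, mirroring prof_prefork0()/1(). */
        pthread_mutex_lock(&lock_low);
        pthread_mutex_lock(&lock_high);
}

static void
postfork(void)
{
        /* Reverse order, mirroring prof_postfork_parent()/_child(). */
        pthread_mutex_unlock(&lock_high);
        pthread_mutex_unlock(&lock_low);
}

int
main(void)
{
        pthread_atfork(prefork, postfork, postfork);
        if (fork() == 0)
                _exit(0);       /* child starts with every lock released */
        return (0);
}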