#define JEMALLOC_MUTEX_C_
#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_includes.h"

#include "jemalloc/internal/assert.h"
#include "jemalloc/internal/malloc_io.h"
#include "jemalloc/internal/spin.h"

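/*
 * Spin count for Windows CRITICAL_SECTIONs created via
 * InitializeCriticalSectionAndSpinCount() in malloc_mutex_init() below; this
 * fallback definition only takes effect when the CRT headers do not already
 * provide one.
 */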
#ifndef _CRT_SPINCOUNT
#define _CRT_SPINCOUNT 4000
#endif

/******************************************************************************/
/* Data. */

#ifdef JEMALLOC_LAZY_LOCK
bool isthreaded = false;
#endif
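/*
 * On configurations that initialize mutexes through a libc callback
 * (JEMALLOC_MUTEX_INIT_CB), mutexes created before bootstrap_calloc is usable
 * are chained onto postponed_mutexes and initialized later by
 * malloc_mutex_boot().
 */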
#ifdef JEMALLOC_MUTEX_INIT_CB
static bool		postpone_init = true;
static malloc_mutex_t	*postponed_mutexes = NULL;
#endif

/******************************************************************************/
/*
 * We intercept pthread_create() calls in order to toggle isthreaded if the
 * process goes multi-threaded.
 */
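/*
 * pthread_create_wrapper() (defined elsewhere in the library) is expected to
 * set isthreaded before chaining to the real pthread_create(); while
 * isthreaded remains false, lock and unlock operations are elided entirely.
 */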

#if defined(JEMALLOC_LAZY_LOCK) && !defined(_WIN32)
JEMALLOC_EXPORT int
pthread_create(pthread_t *__restrict thread,
    const pthread_attr_t *__restrict attr, void *(*start_routine)(void *),
    void *__restrict arg) {
	return pthread_create_wrapper(thread, attr, start_routine, arg);
}
#endif

/******************************************************************************/

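/*
 * JEMALLOC_MUTEX_INIT_CB is FreeBSD-specific: libc initializes mutexes through
 * _pthread_mutex_init_calloc_cb() so that the allocation a mutex needs comes
 * from bootstrap_calloc() rather than recursing into malloc.  The weak
 * definition below forwards to libc's interposing table when the real libthr
 * symbol is not linked in.
 */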
#ifdef JEMALLOC_MUTEX_INIT_CB
JEMALLOC_EXPORT int	_pthread_mutex_init_calloc_cb(pthread_mutex_t *mutex,
    void *(calloc_cb)(size_t, size_t));

#pragma weak _pthread_mutex_init_calloc_cb
int
_pthread_mutex_init_calloc_cb(pthread_mutex_t *mutex,
    void *(calloc_cb)(size_t, size_t))
{

	return (((int (*)(pthread_mutex_t *, void *(*)(size_t, size_t)))
	    __libc_interposing[INTERPOS__pthread_mutex_init_calloc_cb])(mutex,
	    calloc_cb));
}
#endif

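/*
 * Slow path, taken after an initial trylock fails: spin for up to
 * MALLOC_MUTEX_MAX_SPIN iterations (unless there is only one CPU, in which
 * case spinning cannot help), then block in malloc_mutex_lock_final(),
 * recording spin/wait statistics when config_stats is enabled.
 */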
void
malloc_mutex_lock_slow(malloc_mutex_t *mutex) {
	mutex_prof_data_t *data = &mutex->prof_data;
	UNUSED nstime_t before = NSTIME_ZERO_INITIALIZER;

	if (ncpus == 1) {
		goto label_spin_done;
	}

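	/* Bounded spin: the owner may release the lock within a few cycles. */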
	int cnt = 0, max_cnt = MALLOC_MUTEX_MAX_SPIN;
	do {
		spin_cpu_spinwait();
		if (!malloc_mutex_trylock_final(mutex)) {
			data->n_spin_acquired++;
			return;
		}
	} while (cnt++ < max_cnt);

	if (!config_stats) {
		/* Spinning is the only useful optimization when stats are off. */
		malloc_mutex_lock_final(mutex);
		return;
	}
label_spin_done:
	nstime_update(&before);
	/* Copy before to after to avoid clock skew. */
	nstime_t after;
	nstime_copy(&after, &before);
	uint32_t n_thds = atomic_fetch_add_u32(&data->n_waiting_thds, 1,
	    ATOMIC_RELAXED) + 1;
	/* One last try, since the two calls above may have taken a while. */
	if (!malloc_mutex_trylock_final(mutex)) {
		atomic_fetch_sub_u32(&data->n_waiting_thds, 1, ATOMIC_RELAXED);
		data->n_spin_acquired++;
		return;
	}

	/* True slow path. */
	malloc_mutex_lock_final(mutex);
	/* Update remaining slow-path-only counters. */
	atomic_fetch_sub_u32(&data->n_waiting_thds, 1, ATOMIC_RELAXED);
	nstime_update(&after);

	nstime_t delta;
	nstime_copy(&delta, &after);
	nstime_subtract(&delta, &before);

	data->n_wait_times++;
	nstime_add(&data->tot_wait_time, &delta);
	if (nstime_compare(&data->max_wait_time, &delta) < 0) {
		nstime_copy(&data->max_wait_time, &delta);
	}
	if (n_thds > data->max_n_thds) {
		data->max_n_thds = n_thds;
	}
}

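/*
 * Zero all counters.  The explicit nstime_init() and NULL stores are
 * technically redundant after the memset() on typical platforms, but they make
 * the intended initial values explicit.
 */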
static void
mutex_prof_data_init(mutex_prof_data_t *data) {
	memset(data, 0, sizeof(mutex_prof_data_t));
	nstime_init(&data->max_wait_time, 0);
	nstime_init(&data->tot_wait_time, 0);
	data->prev_owner = NULL;
}

void
malloc_mutex_prof_data_reset(tsdn_t *tsdn, malloc_mutex_t *mutex) {
	malloc_mutex_assert_owner(tsdn, mutex);
	mutex_prof_data_init(&mutex->prof_data);
}

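/*
 * Witness comparison callback for address-ordered mutexes: mutexes of equal
 * rank may be held at the same time as long as they are acquired in
 * increasing address order.
 */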
static int
mutex_addr_comp(const witness_t *witness1, void *mutex1,
    const witness_t *witness2, void *mutex2) {
	assert(mutex1 != NULL);
	assert(mutex2 != NULL);
	uintptr_t mu1int = (uintptr_t)mutex1;
	uintptr_t mu2int = (uintptr_t)mutex2;
	if (mu1int < mu2int) {
		return -1;
	} else if (mu1int == mu2int) {
		return 0;
	} else {
		return 1;
	}
}

bool
malloc_mutex_first_thread(void) {
#ifndef JEMALLOC_MUTEX_INIT_CB
	/*
	 * There is no libc callback machinery to consult here, so approximate
	 * "still single-threaded" with the lazy-locking flag (isthreaded is
	 * defined to true when lazy locking is unavailable).
	 */
	return (!isthreaded);
#else
	return (false);
#endif
}

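/*
 * Initialize a mutex using whichever primitive the platform provides: an SRW
 * lock on Windows Vista and newer, a CRITICAL_SECTION on older Windows,
 * os_unfair_lock or OSSpinLock on Darwin, the postponed libc-callback scheme
 * on FreeBSD, and otherwise a pthread mutex of type MALLOC_MUTEX_TYPE.
 * Returns true on error.  A usage sketch (the name and rank here are
 * hypothetical):
 *
 *	static malloc_mutex_t example_mtx;
 *	if (malloc_mutex_init(&example_mtx, "example", WITNESS_RANK_OMIT,
 *	    malloc_mutex_rank_exclusive)) {
 *		... handle bootstrap failure ...
 *	}
 */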
bool
malloc_mutex_init(malloc_mutex_t *mutex, const char *name,
    witness_rank_t rank, malloc_mutex_lock_order_t lock_order) {
	mutex_prof_data_init(&mutex->prof_data);
#ifdef _WIN32
#  if _WIN32_WINNT >= 0x0600
	InitializeSRWLock(&mutex->lock);
#  else
	if (!InitializeCriticalSectionAndSpinCount(&mutex->lock,
	    _CRT_SPINCOUNT)) {
		return true;
	}
#  endif
#elif (defined(JEMALLOC_OS_UNFAIR_LOCK))
	mutex->lock = OS_UNFAIR_LOCK_INIT;
#elif (defined(JEMALLOC_OSSPIN))
	mutex->lock = 0;
#elif (defined(JEMALLOC_MUTEX_INIT_CB))
	if (postpone_init) {
		mutex->postponed_next = postponed_mutexes;
		postponed_mutexes = mutex;
	} else {
		if (_pthread_mutex_init_calloc_cb(&mutex->lock,
		    bootstrap_calloc) != 0) {
			return true;
		}
	}
#else
	pthread_mutexattr_t attr;

	if (pthread_mutexattr_init(&attr) != 0) {
		return true;
	}
	pthread_mutexattr_settype(&attr, MALLOC_MUTEX_TYPE);
	if (pthread_mutex_init(&mutex->lock, &attr) != 0) {
		pthread_mutexattr_destroy(&attr);
		return true;
	}
	pthread_mutexattr_destroy(&attr);
#endif
	if (config_debug) {
		mutex->lock_order = lock_order;
		if (lock_order == malloc_mutex_address_ordered) {
			witness_init(&mutex->witness, name, rank,
			    mutex_addr_comp, mutex);
		} else {
			witness_init(&mutex->witness, name, rank, NULL, NULL);
		}
	}
	return false;
}

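/*
 * Fork protection: the forking thread acquires every mutex before fork() so
 * that no other thread can hold one (with its protected data in an
 * inconsistent state) across the fork.  The parent simply unlocks afterwards;
 * the child re-initializes each mutex, since inherited lock state is
 * unusable, except on JEMALLOC_MUTEX_INIT_CB configurations where unlocking
 * is sufficient.
 */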
void
malloc_mutex_prefork(tsdn_t *tsdn, malloc_mutex_t *mutex) {
	malloc_mutex_lock(tsdn, mutex);
}

void
malloc_mutex_postfork_parent(tsdn_t *tsdn, malloc_mutex_t *mutex) {
	malloc_mutex_unlock(tsdn, mutex);
}

void
malloc_mutex_postfork_child(tsdn_t *tsdn, malloc_mutex_t *mutex) {
#ifdef JEMALLOC_MUTEX_INIT_CB
	malloc_mutex_unlock(tsdn, mutex);
#else
	if (malloc_mutex_init(mutex, mutex->witness.name,
	    mutex->witness.rank, mutex->lock_order)) {
		malloc_printf("<jemalloc>: Error re-initializing mutex in "
		    "child\n");
		if (opt_abort) {
			abort();
		}
	}
#endif
}

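/*
 * Called once bootstrap_calloc is usable: flush the list of mutexes whose
 * initialization was postponed, and disable further postponement.
 */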
bool
malloc_mutex_boot(void) {
#ifdef JEMALLOC_MUTEX_INIT_CB
	postpone_init = false;
	while (postponed_mutexes != NULL) {
		if (_pthread_mutex_init_calloc_cb(&postponed_mutexes->lock,
		    bootstrap_calloc) != 0) {
			return true;
		}
		postponed_mutexes = postponed_mutexes->postponed_next;
	}
#endif
	return false;
}