/*-
 * Copyright 1999, 2000 John D. Polstra.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *	from: FreeBSD: src/libexec/rtld-elf/sparc64/lockdflt.c,v 1.3 2002/10/09
 * $FreeBSD: head/libexec/rtld-elf/rtld_lock.c 191292 2009-04-19 23:03:57Z rwatson $
 */

/*
 * Thread locking implementation for the dynamic linker.
 *
 * We use the "simple, non-scalable reader-preference lock" from:
 *
 *   J. M. Mellor-Crummey and M. L. Scott. "Scalable Reader-Writer
 *   Synchronization for Shared-Memory Multiprocessors." 3rd ACM Symp. on
 *   Principles and Practice of Parallel Programming, April 1991.
 *
 * In this algorithm the lock is a single word.  Its low-order bit is
 * set when a writer holds the lock.  The remaining high-order bits
 * contain a count of readers desiring the lock.  The algorithm requires
 * atomic "compare_and_store" and "add" operations, which we obtain from
 * the machine-dependent atomic operations (atomic_cmpset_acq_int(),
 * atomic_add_acq_int() and atomic_add_rel_int()).
 */

#include <machine/param.h>

#include <signal.h>
#include <stdlib.h>
#include <time.h>

#include "debug.h"
#include "rtld.h"
#include "rtld_machdep.h"

#define WAFLAG		0x1	/* A writer holds the lock */
#define RC_INCR		0x2	/* Adjusts count of readers desiring lock */

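/*
 * A Lock is the lock word described above, together with the address of
 * the malloc()ed block it lives in, so that def_lock_destroy() can free it.
 */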
typedef struct Struct_Lock {
	volatile u_int lock;
	void *base;
} Lock;

static sigset_t fullsigmask, oldsigmask;
static int thread_flag;

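/*
 * Allocate and initialize a default lock, arranging for the lock word to
 * occupy its own cache line.
 */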
static void *
def_lock_create()
{
    void *base;
    char *p;
    uintptr_t r;
    Lock *l;

    /*
     * Arrange for the lock to occupy its own cache line.  First, we
     * optimistically allocate just a cache line, hoping that malloc
     * will give us a well-aligned block of memory.  If that doesn't
     * work, we allocate a larger block and take a well-aligned cache
     * line from it.
     */
    base = xmalloc(CACHE_LINE_SIZE);
    p = (char *)base;
    if ((uintptr_t)p % CACHE_LINE_SIZE != 0) {
	free(base);
	base = xmalloc(2 * CACHE_LINE_SIZE);
	p = (char *)base;
	if ((r = (uintptr_t)p % CACHE_LINE_SIZE) != 0)
	    p += CACHE_LINE_SIZE - r;
    }
    l = (Lock *)p;
    l->base = base;
    l->lock = 0;
    return l;
}

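/* Release the storage for a default lock by freeing the original block. */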
static void
def_lock_destroy(void *lock)
{
    Lock *l = (Lock *)lock;

    free(l->base);
}

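/*
 * Acquire a default lock for reading: atomically announce this reader by
 * adding RC_INCR, then spin until no writer holds WAFLAG.
 */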
static void
def_rlock_acquire(void *lock)
{
    Lock *l = (Lock *)lock;

    atomic_add_acq_int(&l->lock, RC_INCR);
    while (l->lock & WAFLAG)
	    ;	/* Spin */
}

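/*
 * Acquire a default lock for writing.  The signals in fullsigmask are
 * blocked before each attempt so that a signal handler cannot re-enter the
 * dynamic linker while the write lock is held; the mask in effect when the
 * lock is finally taken is saved for def_lock_release() to restore.
 */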
static void
def_wlock_acquire(void *lock)
{
    Lock *l = (Lock *)lock;
    sigset_t tmp_oldsigmask;

    for ( ; ; ) {
	sigprocmask(SIG_BLOCK, &fullsigmask, &tmp_oldsigmask);
	if (atomic_cmpset_acq_int(&l->lock, 0, WAFLAG))
	    break;
	sigprocmask(SIG_SETMASK, &tmp_oldsigmask, NULL);
    }
    oldsigmask = tmp_oldsigmask;
}

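/*
 * Release a default lock.  A reader (WAFLAG clear) simply drops its
 * RC_INCR; the writer clears WAFLAG and restores the signal mask saved by
 * def_wlock_acquire().
 */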
static void
def_lock_release(void *lock)
{
    Lock *l = (Lock *)lock;

    if ((l->lock & WAFLAG) == 0)
    	atomic_add_rel_int(&l->lock, -RC_INCR);
    else {
    	atomic_add_rel_int(&l->lock, -WAFLAG);
    	sigprocmask(SIG_SETMASK, &oldsigmask, NULL);
    }
}

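/*
 * Default flag handling for the single-threaded case: the lock flags are
 * simply kept in a global.  A threads library can replace these, via
 * _rtld_thread_init(), with versions that keep per-thread state.
 */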
static int
def_thread_set_flag(int mask)
{
	int old_val = thread_flag;
	thread_flag |= mask;
	return (old_val);
}

static int
def_thread_clr_flag(int mask)
{
	int old_val = thread_flag;
	thread_flag &= ~mask;
	return (old_val);
}

/*
 * Public interface exposed to the rest of the dynamic linker.
 */
static struct RtldLockInfo lockinfo;
static struct RtldLockInfo deflockinfo;

static __inline int
thread_mask_set(int mask)
{
	return lockinfo.thread_set_flag(mask);
}

static __inline void
thread_mask_clear(int mask)
{
	lockinfo.thread_clr_flag(mask);
}

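/*
 * The locks used within the dynamic linker.  Each lock has a distinct mask
 * bit; the acquire/release wrappers below use these bits in the thread flag
 * word to detect recursive acquisition.
 */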
#define	RTLD_LOCK_CNT	3
struct rtld_lock {
	void	*handle;
	int	 mask;
} rtld_locks[RTLD_LOCK_CNT];

rtld_lock_t	rtld_bind_lock = &rtld_locks[0];
rtld_lock_t	rtld_libc_lock = &rtld_locks[1];
rtld_lock_t	rtld_phdr_lock = &rtld_locks[2];

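/*
 * Lock wrappers called by the rest of the dynamic linker.  If the thread
 * flag word shows the lock is already held, the request is recursive:
 * nothing is acquired and 0 is returned so that the matching release
 * becomes a no-op.
 */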
int
rlock_acquire(rtld_lock_t lock)
{
	if (thread_mask_set(lock->mask) & lock->mask) {
	    dbg("rlock_acquire: recursed");
	    return (0);
	}
	lockinfo.rlock_acquire(lock->handle);
	return (1);
}

int
wlock_acquire(rtld_lock_t lock)
{
	if (thread_mask_set(lock->mask) & lock->mask) {
	    dbg("wlock_acquire: recursed");
	    return (0);
	}
	lockinfo.wlock_acquire(lock->handle);
	return (1);
}

void
rlock_release(rtld_lock_t lock, int locked)
{
	if (locked == 0)
	    return;
	thread_mask_clear(lock->mask);
	lockinfo.lock_release(lock->handle);
}

void
wlock_release(rtld_lock_t lock, int locked)
{
	if (locked == 0)
	    return;
	thread_mask_clear(lock->mask);
	lockinfo.lock_release(lock->handle);
}

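/*
 * Install the default locking methods and build the signal mask used by
 * def_wlock_acquire().  This runs before a threads library has had a chance
 * to register its own methods via _rtld_thread_init().
 */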
void
lockdflt_init()
{
    int i;

    deflockinfo.rtli_version  = RTLI_VERSION;
    deflockinfo.lock_create   = def_lock_create;
    deflockinfo.lock_destroy  = def_lock_destroy;
    deflockinfo.rlock_acquire = def_rlock_acquire;
    deflockinfo.wlock_acquire = def_wlock_acquire;
    deflockinfo.lock_release  = def_lock_release;
    deflockinfo.thread_set_flag = def_thread_set_flag;
    deflockinfo.thread_clr_flag = def_thread_clr_flag;
    deflockinfo.at_fork = NULL;

    for (i = 0; i < RTLD_LOCK_CNT; i++) {
	    rtld_locks[i].mask   = (1 << i);
	    rtld_locks[i].handle = NULL;
    }

    memcpy(&lockinfo, &deflockinfo, sizeof(lockinfo));
    _rtld_thread_init(NULL);
    /*
     * Construct a mask to block all signals except traps which might
     * conceivably be generated within the dynamic linker itself.
     */
    sigfillset(&fullsigmask);
    sigdelset(&fullsigmask, SIGILL);
    sigdelset(&fullsigmask, SIGTRAP);
    sigdelset(&fullsigmask, SIGABRT);
    sigdelset(&fullsigmask, SIGEMT);
    sigdelset(&fullsigmask, SIGFPE);
    sigdelset(&fullsigmask, SIGBUS);
    sigdelset(&fullsigmask, SIGSEGV);
    sigdelset(&fullsigmask, SIGSYS);
}

/*
 * Callback function that allows a threads implementation to register its
 * own locking primitives when the default ones are not suitable.
 * The calling thread must be the only thread executing at the time of
 * the call.
 */
void
_rtld_thread_init(struct RtldLockInfo *pli)
{
	int flags, i;
	void *locks[RTLD_LOCK_CNT];

	/* disable all locking while this function is running */
	flags = thread_mask_set(~0);

	if (pli == NULL)
		pli = &deflockinfo;

	/* Create the replacement locks with the new primitives. */
	for (i = 0; i < RTLD_LOCK_CNT; i++)
		if ((locks[i] = pli->lock_create()) == NULL)
			break;

	if (i < RTLD_LOCK_CNT) {
		while (--i >= 0)
			pli->lock_destroy(locks[i]);
		abort();
	}

	/* Release and destroy the old locks. */
	for (i = 0; i < RTLD_LOCK_CNT; i++) {
		if (rtld_locks[i].handle == NULL)
			continue;
		if (flags & rtld_locks[i].mask)
			lockinfo.lock_release(rtld_locks[i].handle);
		lockinfo.lock_destroy(rtld_locks[i].handle);
	}

	/* Install the new locks, re-acquiring (as write locks) any held. */
	for (i = 0; i < RTLD_LOCK_CNT; i++) {
		rtld_locks[i].handle = locks[i];
		if (flags & rtld_locks[i].mask)
			pli->wlock_acquire(rtld_locks[i].handle);
	}

	lockinfo.lock_create = pli->lock_create;
	lockinfo.lock_destroy = pli->lock_destroy;
	lockinfo.rlock_acquire = pli->rlock_acquire;
	lockinfo.wlock_acquire = pli->wlock_acquire;
	lockinfo.lock_release  = pli->lock_release;
	lockinfo.thread_set_flag = pli->thread_set_flag;
	lockinfo.thread_clr_flag = pli->thread_clr_flag;
	lockinfo.at_fork = pli->at_fork;

	/* restore thread locking state, this time with new locks */
	thread_mask_clear(~0);
	thread_mask_set(flags);
	dbg("_rtld_thread_init: done");
}

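/*
 * Hooks to be called immediately before and after fork() so that the phdr
 * and bind locks are held, and therefore consistent, across the fork.  The
 * locks[] array records which of them the calling thread actually acquired.
 */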
void
_rtld_atfork_pre(int *locks)
{

	locks[2] = wlock_acquire(rtld_phdr_lock);
	locks[0] = rlock_acquire(rtld_bind_lock);
}

void
_rtld_atfork_post(int *locks)
{

	rlock_release(rtld_bind_lock, locks[0]);
	wlock_release(rtld_phdr_lock, locks[2]);
}