/*-
 * Copyright (c) 2001, 2003 Daniel Eischen <deischen@freebsd.org>.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */
28113657Sdeischen
29113657Sdeischen#include <sys/types.h>
30113657Sdeischen#include <machine/atomic.h>
31113657Sdeischen#include <assert.h>
32113657Sdeischen#include <stdlib.h>
33113657Sdeischen
34113657Sdeischen#include "atomic_ops.h"
35113657Sdeischen#include "lock.h"
36113657Sdeischen
/* LCK_ASSERT() checks lock invariants only when lock debugging is built in. */
#ifdef _LOCK_DEBUG
#define	LCK_ASSERT(e)	assert(e)
#else
#define LCK_ASSERT(e)
#endif

/* Spins _lock_acquire() performs on an adaptive lock before sleeping. */
#define	MAX_SPINS	500
44113657Sdeischen
45113657Sdeischenvoid
46113657Sdeischen_lock_destroy(struct lock *lck)
47113657Sdeischen{
48113657Sdeischen	if ((lck != NULL) && (lck->l_head != NULL)) {
49113657Sdeischen		free(lck->l_head);
50113657Sdeischen		lck->l_head = NULL;
51113657Sdeischen		lck->l_tail = NULL;
52113657Sdeischen	}
53113657Sdeischen}
54113657Sdeischen
55113657Sdeischenint
56113657Sdeischen_lock_init(struct lock *lck, enum lock_type ltype,
57173967Sjasone    lock_handler_t *waitfunc, lock_handler_t *wakeupfunc,
58173967Sjasone    void *(calloc_cb)(size_t, size_t))
59113657Sdeischen{
60113657Sdeischen	if (lck == NULL)
61113657Sdeischen		return (-1);
62173967Sjasone	else if ((lck->l_head = calloc_cb(1, sizeof(struct lockreq))) == NULL)
63113657Sdeischen		return (-1);
64113657Sdeischen	else {
65113657Sdeischen		lck->l_type = ltype;
66113657Sdeischen		lck->l_wait = waitfunc;
67113657Sdeischen		lck->l_wakeup = wakeupfunc;
68113657Sdeischen		lck->l_head->lr_locked = 0;
69113657Sdeischen		lck->l_head->lr_watcher = NULL;
70113657Sdeischen		lck->l_head->lr_owner = NULL;
71115080Sdeischen		lck->l_head->lr_active = 1;
72113657Sdeischen		lck->l_tail = lck->l_head;
73113657Sdeischen	}
74113657Sdeischen	return (0);
75113657Sdeischen}
76113657Sdeischen
77113657Sdeischenint
78122074Sdeischen_lock_reinit(struct lock *lck, enum lock_type ltype,
79122074Sdeischen    lock_handler_t *waitfunc, lock_handler_t *wakeupfunc)
80122074Sdeischen{
81122074Sdeischen	if (lck == NULL)
82122074Sdeischen		return (-1);
83122074Sdeischen	else if (lck->l_head == NULL)
84173967Sjasone		return (_lock_init(lck, ltype, waitfunc, wakeupfunc, calloc));
85122074Sdeischen	else {
86122074Sdeischen		lck->l_head->lr_locked = 0;
87122074Sdeischen		lck->l_head->lr_watcher = NULL;
88122074Sdeischen		lck->l_head->lr_owner = NULL;
89122074Sdeischen		lck->l_head->lr_active = 1;
90122074Sdeischen		lck->l_tail = lck->l_head;
91122074Sdeischen	}
92122074Sdeischen	return (0);
93122074Sdeischen}
94122074Sdeischen
95122074Sdeischenint
96113657Sdeischen_lockuser_init(struct lockuser *lu, void *priv)
97113657Sdeischen{
98113657Sdeischen	if (lu == NULL)
99113657Sdeischen		return (-1);
100113657Sdeischen	else if ((lu->lu_myreq == NULL) &&
101113657Sdeischen	    ((lu->lu_myreq = malloc(sizeof(struct lockreq))) == NULL))
102113657Sdeischen		return (-1);
103113657Sdeischen	else {
104113657Sdeischen		lu->lu_myreq->lr_locked = 1;
105113657Sdeischen		lu->lu_myreq->lr_watcher = NULL;
106113657Sdeischen		lu->lu_myreq->lr_owner = lu;
107115080Sdeischen		lu->lu_myreq->lr_active = 0;
108113657Sdeischen		lu->lu_watchreq = NULL;
109113657Sdeischen		lu->lu_priority = 0;
110113657Sdeischen		lu->lu_private = priv;
111113657Sdeischen		lu->lu_private2 = NULL;
112113657Sdeischen	}
113113657Sdeischen	return (0);
114113657Sdeischen}
115113657Sdeischen
116122074Sdeischenint
117122074Sdeischen_lockuser_reinit(struct lockuser *lu, void *priv)
118122074Sdeischen{
119122074Sdeischen	if (lu == NULL)
120122074Sdeischen		return (-1);
121175864Sdeischen	if (lu->lu_watchreq != NULL) {
122175864Sdeischen		/*
123175864Sdeischen		 * In this case the lock is active.  All lockusers
124175864Sdeischen		 * keep their watch request and drop their own
125175864Sdeischen		 * (lu_myreq) request.  Their own request is either
126175864Sdeischen		 * some other lockuser's watch request or is the
127175864Sdeischen		 * head of the lock.
128175864Sdeischen		 */
129175864Sdeischen		lu->lu_myreq = lu->lu_watchreq;
130175864Sdeischen		lu->lu_watchreq = NULL;
131175864Sdeischen	}
132122074Sdeischen	if (lu->lu_myreq == NULL)
133175864Sdeischen		/*
134175864Sdeischen		 * Oops, something isn't quite right.  Try to
135175864Sdeischen		 * allocate one.
136175864Sdeischen		 */
137122074Sdeischen		return (_lockuser_init(lu, priv));
138122074Sdeischen	else {
139122074Sdeischen		lu->lu_myreq->lr_locked = 1;
140122074Sdeischen		lu->lu_myreq->lr_watcher = NULL;
141122074Sdeischen		lu->lu_myreq->lr_owner = lu;
142122074Sdeischen		lu->lu_myreq->lr_active = 0;
143122074Sdeischen		lu->lu_watchreq = NULL;
144122074Sdeischen		lu->lu_priority = 0;
145122074Sdeischen		lu->lu_private = priv;
146122074Sdeischen		lu->lu_private2 = NULL;
147122074Sdeischen	}
148122074Sdeischen	return (0);
149122074Sdeischen}
150122074Sdeischen
151113657Sdeischenvoid
152113657Sdeischen_lockuser_destroy(struct lockuser *lu)
153113657Sdeischen{
154113657Sdeischen	if ((lu != NULL) && (lu->lu_myreq != NULL))
155113657Sdeischen		free(lu->lu_myreq);
156113657Sdeischen}
157113657Sdeischen
/*
 * Acquire a lock waiting (spin or sleep) for it to become available.
 *
 * This is a swap-based queue lock: the caller atomically exchanges its
 * own request (lu_myreq) with the lock's head pointer, then waits on
 * the request it received back (lu_watchreq) until the previous owner
 * clears that request's lr_locked word in _lock_release().
 *
 * lr_locked values as used in this file: 0 = released/granted,
 * 1 = busy, 2 = a waiter stored this before sleeping so the releaser
 * knows to call l_wakeup, 3 = stored by _lock_grant().
 */
void
_lock_acquire(struct lock *lck, struct lockuser *lu, int prio)
{
	int i;
	int lval;

	/**
	 * XXX - We probably want to remove these checks to optimize
	 *       performance.  It is also a bug if any one of the
	 *       checks fail, so it's probably better to just let it
	 *       SEGV and fix it.
	 */
#if 0
	if (lck == NULL || lu == NULL || lck->l_head == NULL)
		return;
#endif
	if ((lck->l_type & LCK_PRIORITY) != 0) {
		LCK_ASSERT(lu->lu_myreq->lr_locked == 1);
		LCK_ASSERT(lu->lu_myreq->lr_watcher == NULL);
		LCK_ASSERT(lu->lu_myreq->lr_owner == lu);
		LCK_ASSERT(lu->lu_watchreq == NULL);

		/* Recorded so _lock_release() can pick the highest
		 * priority waiter when handing the lock off. */
		lu->lu_priority = prio;
	}
	/*
	 * Atomically swap the head of the lock request with
	 * this request.
	 */
	atomic_swap_ptr((void *)&lck->l_head, lu->lu_myreq,
	    (void *)&lu->lu_watchreq);

	if (lu->lu_watchreq->lr_locked != 0) {
		/* The lock is held: publish ourselves as the watcher of
		 * the request we wait on, so the owner can find us. */
		atomic_store_rel_ptr
		    ((volatile uintptr_t *)(void *)&lu->lu_watchreq->lr_watcher,
		    (uintptr_t)lu);
		if ((lck->l_wait == NULL) ||
		    ((lck->l_type & LCK_ADAPTIVE) == 0)) {
			/* Non-adaptive lock: busy-wait until released. */
			while (lu->lu_watchreq->lr_locked != 0)
				;	/* spin, then yield? */
		} else {
			/*
			 * Spin for a bit before invoking the wait function.
			 *
			 * We should be a little smarter here.  If we're
			 * running on a single processor, then the lock
			 * owner got preempted and spinning will accomplish
			 * nothing but waste time.  If we're running on
			 * multiple processors, the owner could be running
			 * on another CPU and we might acquire the lock if
			 * we spin for a bit.
			 *
			 * The other thing to keep in mind is that threads
			 * acquiring these locks are considered to be in
			 * critical regions; they will not be preempted by
			 * the _UTS_ until they release the lock.  It is
			 * therefore safe to assume that if a lock can't
			 * be acquired, it is currently held by a thread
			 * running in another KSE.
			 */
			for (i = 0; i < MAX_SPINS; i++) {
				if (lu->lu_watchreq->lr_locked == 0)
					/* NOTE(review): this early return
					 * skips the lr_active store at the
					 * bottom -- confirm intentional. */
					return;
				/* Owner not running: spinning is futile,
				 * fall through to sleeping. */
				if (lu->lu_watchreq->lr_active == 0)
					break;
			}
			/* Announce we are about to sleep (1 -> 2).  If
			 * the word was already 0 the lock was released
			 * meanwhile: restore 0 and take it awake. */
			atomic_swap_int(&lu->lu_watchreq->lr_locked,
			    2, &lval);
			if (lval == 0)
				lu->lu_watchreq->lr_locked = 0;
			else
				lck->l_wait(lck, lu);

		}
	}
	/* Lock acquired; mark our own (new head) request active. */
	lu->lu_myreq->lr_active = 1;
}
237113657Sdeischen
/*
 * Release a lock.
 *
 * The releaser hands its old request (lu_myreq) to the next waiter and
 * adopts the request it was watching (lu_watchreq) as its new lu_myreq,
 * so every lockuser always owns exactly one request.  For LCK_PRIORITY
 * locks the releaser first unlinks itself from the request chain, then
 * walks the chain from l_tail to grant the lock to the waiter with the
 * highest recorded lu_priority.  A stored lr_locked value of 2 means
 * the waiter went to sleep and must be woken via l_wakeup.
 */
void
_lock_release(struct lock *lck, struct lockuser *lu)
{
	struct lockuser *lu_tmp, *lu_h;
	struct lockreq *myreq;
	int prio_h;
	int lval;

	/**
	 * XXX - We probably want to remove these checks to optimize
	 *       performance.  It is also a bug if any one of the
	 *       checks fail, so it's probably better to just let it
	 *       SEGV and fix it.
	 */
#if 0
	if ((lck == NULL) || (lu == NULL))
		return;
#endif
	if ((lck->l_type & LCK_PRIORITY) != 0) {
		prio_h = 0;
		lu_h = NULL;

		/* Update tail if our request is last. */
		if (lu->lu_watchreq->lr_owner == NULL) {
			atomic_store_rel_ptr((volatile uintptr_t *)
			    (void *)&lck->l_tail,
			    (uintptr_t)lu->lu_myreq);
			atomic_store_rel_ptr((volatile uintptr_t *)
			    (void *)&lu->lu_myreq->lr_owner,
			    (uintptr_t)NULL);
		} else {
			/* Remove ourselves from the list. */
			atomic_store_rel_ptr((volatile uintptr_t *)
			    (void *)&lu->lu_myreq->lr_owner,
			    (uintptr_t)lu->lu_watchreq->lr_owner);
			atomic_store_rel_ptr((volatile uintptr_t *)
			    (void *)&lu->lu_watchreq->lr_owner->lu_myreq,
			    (uintptr_t)lu->lu_myreq);
		}
		/*
		 * The watch request now becomes our own because we've
		 * traded away our previous request.  Save our previous
		 * request so that we can grant the lock.
		 */
		myreq = lu->lu_myreq;
		lu->lu_myreq = lu->lu_watchreq;
		lu->lu_watchreq = NULL;
		lu->lu_myreq->lr_locked = 1;
		lu->lu_myreq->lr_owner = lu;
		lu->lu_myreq->lr_watcher = NULL;
		/*
		 * Traverse the list of lock requests in reverse order
		 * looking for the user with the highest priority.
		 */
		for (lu_tmp = lck->l_tail->lr_watcher; lu_tmp != NULL;
		     lu_tmp = lu_tmp->lu_myreq->lr_watcher) {
			if (lu_tmp->lu_priority > prio_h) {
				lu_h = lu_tmp;
				prio_h = lu_tmp->lu_priority;
			}
		}
		if (lu_h != NULL) {
			/* Give the lock to the highest priority user. */
			if (lck->l_wakeup != NULL) {
				/* Swap in 0 (granted); the old value
				 * tells us whether the waiter sleeps. */
				atomic_swap_int(
				    &lu_h->lu_watchreq->lr_locked,
				    0, &lval);
				if (lval == 2)
					/* Notify the sleeper */
					lck->l_wakeup(lck,
					    lu_h->lu_myreq->lr_watcher);
			}
			else
				atomic_store_rel_int(
				    &lu_h->lu_watchreq->lr_locked, 0);
		} else {
			if (lck->l_wakeup != NULL) {
				atomic_swap_int(&myreq->lr_locked,
				    0, &lval);
				if (lval == 2)
					/* Notify the sleeper */
					lck->l_wakeup(lck, myreq->lr_watcher);
			}
			else
				/* Give the lock to the previous request. */
				atomic_store_rel_int(&myreq->lr_locked, 0);
		}
	} else {
		/*
		 * The watch request now becomes our own because we've
		 * traded away our previous request.  Save our previous
		 * request so that we can grant the lock.
		 */
		myreq = lu->lu_myreq;
		lu->lu_myreq = lu->lu_watchreq;
		lu->lu_watchreq = NULL;
		lu->lu_myreq->lr_locked = 1;
		if (lck->l_wakeup) {
			atomic_swap_int(&myreq->lr_locked, 0, &lval);
			if (lval == 2)
				/* Notify the sleeper */
				lck->l_wakeup(lck, myreq->lr_watcher);
		}
		else
			/* Give the lock to the previous request. */
			atomic_store_rel_int(&myreq->lr_locked, 0);
	}
	/* Our adopted request is no longer the active owner's request. */
	lu->lu_myreq->lr_active = 0;
}
350115080Sdeischen
/*
 * Grant the lock directly to a waiting lockuser by storing 3 into the
 * request it is watching (the lock argument itself is unused).  The
 * distinct value 3 (rather than 0) presumably lets the KSE-specific
 * wait/wakeup handlers distinguish a direct grant from a normal
 * release -- NOTE(review): confirm against the l_wait/l_wakeup
 * implementations, which are not visible in this file.
 */
void
_lock_grant(struct lock *lck __unused /* unused */, struct lockuser *lu)
{
	atomic_store_rel_int(&lu->lu_watchreq->lr_locked, 3);
}
356115080Sdeischen
/*
 * Mark this lockuser's own request active (nonzero) or inactive (0).
 * _lock_acquire() polls lr_active on adaptive locks to decide whether
 * continuing to spin is worthwhile.
 */
void
_lockuser_setactive(struct lockuser *lu, int active)
{
	lu->lu_myreq->lr_active = active;
}
362115080Sdeischen
363