/*-
 * Copyright (c) 2001, 2003 Daniel Eischen <deischen@freebsd.org>.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: head/lib/libkse/sys/lock.c 115278 2003-05-24 02:29:25Z deischen $
 */

#include <sys/types.h>
#include <machine/atomic.h>
#include <assert.h>
#include <stdlib.h>

#include "atomic_ops.h"
#include "lock.h"

#define LCK_ASSERT      assert
#define MAX_SPINS       500

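/*
 * Overview of the scheme implemented below: every lock user owns a lockreq.
 * To acquire, the user atomically swaps its own request (lu_myreq) in as the
 * new l_head and then watches the request it displaced (lu_watchreq); the
 * lock is free once the watched request's lr_locked drops to 0.  To release,
 * the user adopts the watched request as its new lu_myreq and clears
 * lr_locked on the request it gave up, handing the lock to whoever is
 * watching that one (or, for LCK_PRIORITY locks, directly to the
 * highest-priority waiter).
 */
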
void
_lock_destroy(struct lock *lck)
{

        if ((lck != NULL) && (lck->l_head != NULL)) {
                free(lck->l_head);
                lck->l_head = NULL;
                lck->l_tail = NULL;
        }
}

int
_lock_init(struct lock *lck, enum lock_type ltype,
    lock_handler_t *waitfunc, lock_handler_t *wakeupfunc)
{

        if (lck == NULL)
                return (-1);
        else if ((lck->l_head = malloc(sizeof(struct lockreq))) == NULL)
                return (-1);
        else {
                lck->l_type = ltype;
                lck->l_wait = waitfunc;
                lck->l_wakeup = wakeupfunc;
                lck->l_head->lr_locked = 0;
                lck->l_head->lr_watcher = NULL;
                lck->l_head->lr_owner = NULL;
                lck->l_head->lr_active = 1;
                lck->l_tail = lck->l_head;
        }
        return (0);
}

int
_lockuser_init(struct lockuser *lu, void *priv)
{

        if (lu == NULL)
                return (-1);
        else if ((lu->lu_myreq == NULL) &&
            ((lu->lu_myreq = malloc(sizeof(struct lockreq))) == NULL))
                return (-1);
        else {
                lu->lu_myreq->lr_locked = 1;
                lu->lu_myreq->lr_watcher = NULL;
                lu->lu_myreq->lr_owner = lu;
                lu->lu_myreq->lr_active = 0;
                lu->lu_watchreq = NULL;
                lu->lu_priority = 0;
                lu->lu_private = priv;
                lu->lu_private2 = NULL;
        }
        return (0);
}

void
_lockuser_destroy(struct lockuser *lu)
{

        if ((lu != NULL) && (lu->lu_myreq != NULL))
                free(lu->lu_myreq);
}
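
/*
 * Usage sketch (illustrative only, not part of the library): a plain FIFO
 * spin lock with no wait/wakeup handlers.  The LCK_DEFAULT type is assumed
 * to come from lock.h, and "example_lock"/"example_user"/"example" are
 * made-up names.  Note that _lockuser_init() only allocates lu_myreq when
 * it is NULL, so lock users should start out zeroed (static storage, or
 * calloc'd memory).
 */
#if 0
static struct lock example_lock;
static struct lockuser example_user;

static void
example(void)
{
        /* Set up the lock and this user's request once. */
        if ((_lock_init(&example_lock, LCK_DEFAULT, NULL, NULL) != 0) ||
            (_lockuser_init(&example_user, NULL) != 0))
                abort();

        /* Bracket the critical section; priority is ignored here. */
        _lock_acquire(&example_lock, &example_user, 0);
        /* ... critical section ... */
        _lock_release(&example_lock, &example_user);

        /* Tear down in the reverse order. */
        _lockuser_destroy(&example_user);
        _lock_destroy(&example_lock);
}
#endif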

/*
 * Acquire a lock waiting (spin or sleep) for it to become available.
 */
void
_lock_acquire(struct lock *lck, struct lockuser *lu, int prio)
{
        int i;
        long lval;

        /**
         * XXX - We probably want to remove these checks to optimize
         *       performance.  It is also a bug if any one of the
         *       checks fails, so it's probably better to just let it
         *       SEGV and fix it.
         */
#if 0
        if (lck == NULL || lu == NULL || lck->l_head == NULL)
                return;
#endif
        if ((lck->l_type & LCK_PRIORITY) == 0)
                atomic_swap_ptr(&lck->l_head, lu->lu_myreq, &lu->lu_watchreq);
        else {
                LCK_ASSERT(lu->lu_myreq->lr_locked == 1);
                LCK_ASSERT(lu->lu_myreq->lr_watcher == NULL);
                LCK_ASSERT(lu->lu_myreq->lr_owner == lu);
                LCK_ASSERT(lu->lu_watchreq == NULL);

                lu->lu_priority = prio;
                /*
                 * Atomically swap the head of the lock request with
                 * this request.
                 */
                atomic_swap_ptr(&lck->l_head, lu->lu_myreq, &lu->lu_watchreq);
        }

        if (lu->lu_watchreq->lr_locked != 0) {
                atomic_store_rel_ptr(&lu->lu_watchreq->lr_watcher, lu);
                if ((lck->l_wait == NULL) ||
                    ((lck->l_type & LCK_ADAPTIVE) == 0)) {
                        while (lu->lu_watchreq->lr_locked != 0)
                                ;       /* spin, then yield? */
                } else {
                        /*
                         * Spin for a bit before invoking the wait function.
                         *
                         * We should be a little smarter here.  If we're
                         * running on a single processor, then the lock
                         * owner got preempted and spinning will accomplish
                         * nothing but waste time.  If we're running on
                         * multiple processors, the owner could be running
                         * on another CPU and we might acquire the lock if
                         * we spin for a bit.
                         *
                         * The other thing to keep in mind is that threads
                         * acquiring these locks are considered to be in
                         * critical regions; they will not be preempted by
                         * the _UTS_ until they release the lock.  It is
                         * therefore safe to assume that if a lock can't
                         * be acquired, it is currently held by a thread
                         * running in another KSE.
                         */
                        for (i = 0; i < MAX_SPINS; i++) {
                                if (lu->lu_watchreq->lr_locked == 0)
                                        return;
                                if (lu->lu_watchreq->lr_active == 0)
                                        break;
                        }
                        atomic_swap_long((long *)&lu->lu_watchreq->lr_locked,
                            2, &lval);
                        if (lval == 0)
                                lu->lu_watchreq->lr_locked = 0;
                        else
                                lck->l_wait(lck, lu);

                }
        }
        lu->lu_myreq->lr_active = 1;
}
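
/*
 * Waiters form a chain from l_tail (the oldest request) to l_head (the most
 * recent): a request's lr_watcher points at the lock user spinning or
 * sleeping on it (it is only filled in when that user actually has to
 * wait), and that user's own lu_myreq is the next link.  The priority
 * release path below walks this chain to find the highest-priority watcher.
 */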

/*
 * Release a lock.
 */
void
_lock_release(struct lock *lck, struct lockuser *lu)
{
        struct lockuser *lu_tmp, *lu_h;
        struct lockreq *myreq;
        int prio_h;
        long lval;

        /**
         * XXX - We probably want to remove these checks to optimize
         *       performance.  It is also a bug if any one of the
         *       checks fails, so it's probably better to just let it
         *       SEGV and fix it.
         */
#if 0
        if ((lck == NULL) || (lu == NULL))
                return;
#endif
        if ((lck->l_type & LCK_PRIORITY) != 0) {
                prio_h = 0;
                lu_h = NULL;

                /* Update tail if our request is last. */
                if (lu->lu_watchreq->lr_owner == NULL) {
                        atomic_store_rel_ptr(&lck->l_tail, lu->lu_myreq);
                        atomic_store_rel_ptr(&lu->lu_myreq->lr_owner, NULL);
                } else {
                        /* Remove ourselves from the list. */
                        atomic_store_rel_ptr(&lu->lu_myreq->lr_owner,
                            lu->lu_watchreq->lr_owner);
                        atomic_store_rel_ptr(
                            &lu->lu_watchreq->lr_owner->lu_myreq, lu->lu_myreq);
                }
                /*
                 * The watch request now becomes our own because we've
                 * traded away our previous request.  Save our previous
                 * request so that we can grant the lock.
                 */
                myreq = lu->lu_myreq;
                lu->lu_myreq = lu->lu_watchreq;
                lu->lu_watchreq = NULL;
                lu->lu_myreq->lr_locked = 1;
                lu->lu_myreq->lr_owner = lu;
                lu->lu_myreq->lr_watcher = NULL;
                /*
                 * Traverse the list of lock requests in reverse order
                 * looking for the user with the highest priority.
                 */
                for (lu_tmp = lck->l_tail->lr_watcher; lu_tmp != NULL;
                    lu_tmp = lu_tmp->lu_myreq->lr_watcher) {
                        if (lu_tmp->lu_priority > prio_h) {
                                lu_h = lu_tmp;
                                prio_h = lu_tmp->lu_priority;
                        }
                }
                if (lu_h != NULL) {
                        /* Give the lock to the highest priority user. */
                        if (lck->l_wakeup != NULL) {
                                atomic_swap_long(
                                    (long *)&lu_h->lu_watchreq->lr_locked,
                                    0, &lval);
                                if (lval == 2)
                                        /* Notify the sleeper */
                                        lck->l_wakeup(lck,
                                            lu_h->lu_myreq->lr_watcher);
                        }
                        else
                                atomic_store_rel_long(
                                    &lu_h->lu_watchreq->lr_locked, 0);
                } else {
                        if (lck->l_wakeup != NULL) {
                                atomic_swap_long((long *)&myreq->lr_locked,
                                    0, &lval);
                                if (lval == 2)
                                        /* Notify the sleeper */
                                        lck->l_wakeup(lck, myreq->lr_watcher);
                        }
                        else
                                /* Give the lock to the previous request. */
                                atomic_store_rel_long(&myreq->lr_locked, 0);
                }
        } else {
                /*
                 * The watch request now becomes our own because we've
                 * traded away our previous request.  Save our previous
                 * request so that we can grant the lock.
                 */
                myreq = lu->lu_myreq;
                lu->lu_myreq = lu->lu_watchreq;
                lu->lu_watchreq = NULL;
                lu->lu_myreq->lr_locked = 1;
                if (lck->l_wakeup) {
                        atomic_swap_long((long *)&myreq->lr_locked, 0, &lval);
                        if (lval == 2)
                                /* Notify the sleeper */
                                lck->l_wakeup(lck, myreq->lr_watcher);
                }
                else
                        /* Give the lock to the previous request. */
                        atomic_store_rel_long(&myreq->lr_locked, 0);
        }
        lu->lu_myreq->lr_active = 0;
}
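
/*
 * Note on the lr_locked handshake: 1 means the watched request is still
 * held, 0 means it has been released, and 2 is set by a waiter that gave up
 * spinning and is about to call l_wait.  A releaser that swaps in 0 and
 * reads back 2 knows there is a sleeper to notify through l_wakeup; such a
 * wakeup handler can use _lock_grant() below, whose distinct value 3
 * presumably lets the sleeper tell an explicit grant apart from an ordinary
 * release (the spin paths above only ever test for 0).
 */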

void
_lock_grant(struct lock *lck /* unused */, struct lockuser *lu)
{
        atomic_store_rel_long(&lu->lu_watchreq->lr_locked, 3);
}

void
_lockuser_setactive(struct lockuser *lu, int active)
{
        lu->lu_myreq->lr_active = active;
}