/*-
 * Copyright (c) 2001, 2003 Daniel Eischen <deischen@freebsd.org>.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: head/lib/libkse/sys/lock.c 115080 2003-05-16 19:58:30Z deischen $
 */

#include <sys/types.h>
#include <machine/atomic.h>
#include <assert.h>
#include <stdlib.h>

#include "atomic_ops.h"
#include "lock.h"

#define LCK_ASSERT	assert
#define MAX_SPINS	500

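/*
 * Free the request structure allocated by _lock_init().  The lock
 * structure itself is caller-supplied and is not freed here.
 */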
void
_lock_destroy(struct lock *lck)
{

	if ((lck != NULL) && (lck->l_head != NULL)) {
		free(lck->l_head);
		lck->l_head = NULL;
		lck->l_tail = NULL;
	}
}

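/*
 * Initialize a lock.  The lock starts out with a single request
 * structure that forms the tail of its request queue; acquiring the
 * lock amounts to atomically swapping a new request into l_head and
 * then watching the request that was previously there until its
 * holder clears lr_locked.
 *
 * Rough usage sketch (my_wait/my_wakeup and the private pointer are
 * the caller's own, not defined in this file):
 *
 *	static struct lock lck;
 *	static struct lockuser lu;
 *
 *	_lock_init(&lck, LCK_ADAPTIVE, my_wait, my_wakeup);
 *	_lockuser_init(&lu, private_pointer);
 *	_lock_acquire(&lck, &lu, priority);
 *	... critical section ...
 *	_lock_release(&lck, &lu);
 */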
int
_lock_init(struct lock *lck, enum lock_type ltype,
    lock_handler_t *waitfunc, lock_handler_t *wakeupfunc)
{

	if (lck == NULL)
		return (-1);
	else if ((lck->l_head = malloc(sizeof(struct lockreq))) == NULL)
		return (-1);
	else {
		lck->l_type = ltype;
		lck->l_wait = waitfunc;
		lck->l_wakeup = wakeupfunc;
		lck->l_head->lr_locked = 0;
		lck->l_head->lr_watcher = NULL;
		lck->l_head->lr_owner = NULL;
		lck->l_head->lr_waiting = 0;
		lck->l_head->lr_active = 1;
		lck->l_tail = lck->l_head;
	}
	return (0);
}

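/*
 * Initialize a per-user (per-thread) lock structure.  Each lock user
 * owns one request structure, which it trades with the lock's queue
 * each time it acquires and releases the lock; "priv" is an opaque
 * pointer saved for the caller in lu_private.
 */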
int
_lockuser_init(struct lockuser *lu, void *priv)
{

	if (lu == NULL)
		return (-1);
	else if ((lu->lu_myreq == NULL) &&
	    ((lu->lu_myreq = malloc(sizeof(struct lockreq))) == NULL))
		return (-1);
	else {
		lu->lu_myreq->lr_locked = 1;
		lu->lu_myreq->lr_watcher = NULL;
		lu->lu_myreq->lr_owner = lu;
		lu->lu_myreq->lr_waiting = 0;
		lu->lu_myreq->lr_active = 0;
		lu->lu_watchreq = NULL;
		lu->lu_priority = 0;
		lu->lu_private = priv;
		lu->lu_private2 = NULL;
	}
	return (0);
}

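/*
 * Free the lock user's request structure.
 */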
void
_lockuser_destroy(struct lockuser *lu)
{

	if ((lu != NULL) && (lu->lu_myreq != NULL))
		free(lu->lu_myreq);
}

/*
 * Acquire a lock waiting (spin or sleep) for it to become available.
 */
void
_lock_acquire(struct lock *lck, struct lockuser *lu, int prio)
{
	int i;

	/**
	 * XXX - We probably want to remove these checks to optimize
	 *       performance.  It is also a bug if any one of the
	 *       checks fail, so it's probably better to just let it
	 *       SEGV and fix it.
	 */
#if 0
	if (lck == NULL || lu == NULL || lck->l_head == NULL)
		return;
#endif
	if ((lck->l_type & LCK_PRIORITY) == 0)
		atomic_swap_ptr(&lck->l_head, lu->lu_myreq, &lu->lu_watchreq);
	else {
		LCK_ASSERT(lu->lu_myreq->lr_locked == 1);
		LCK_ASSERT(lu->lu_myreq->lr_watcher == NULL);
		LCK_ASSERT(lu->lu_myreq->lr_owner == lu);
		LCK_ASSERT(lu->lu_myreq->lr_waiting == 0);
		LCK_ASSERT(lu->lu_watchreq == NULL);

		lu->lu_priority = prio;
		/*
		 * Atomically swap the head of the lock request with
		 * this request.
		 */
		atomic_swap_ptr(&lck->l_head, lu->lu_myreq, &lu->lu_watchreq);
	}

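	/*
	 * The request we swapped out of l_head is the one we now watch.
	 * If it is still marked locked, its owner has not yet released
	 * the lock, so register ourselves as the watcher and wait for
	 * lr_locked to be cleared.
	 */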
	if (lu->lu_watchreq->lr_locked != 0) {
		atomic_store_rel_ptr(&lu->lu_watchreq->lr_watcher, lu);
		if ((lck->l_wait == NULL) ||
		    ((lck->l_type & LCK_ADAPTIVE) == 0)) {
			while (lu->lu_watchreq->lr_locked != 0)
				;	/* spin, then yield? */
		} else {
			/*
			 * Spin for a bit before invoking the wait function.
			 *
			 * We should be a little smarter here.  If we're
			 * running on a single processor, then the lock
			 * owner got preempted and spinning will accomplish
			 * nothing but waste time.  If we're running on
			 * multiple processors, the owner could be running
			 * on another CPU and we might acquire the lock if
			 * we spin for a bit.
			 *
			 * The other thing to keep in mind is that threads
			 * acquiring these locks are considered to be in
			 * critical regions; they will not be preempted by
			 * the _UTS_ until they release the lock.  It is
			 * therefore safe to assume that if a lock can't
			 * be acquired, it is currently held by a thread
			 * running in another KSE.
			 */
			for (i = 0; i < MAX_SPINS; i++) {
				if (lu->lu_watchreq->lr_locked == 0)
					return;
				if (lu->lu_watchreq->lr_active == 0)
					break;
			}
			atomic_store_rel_long(&lu->lu_watchreq->lr_waiting, 1);
			while (lu->lu_watchreq->lr_locked != 0)
				lck->l_wait(lck, lu);
			atomic_store_rel_long(&lu->lu_watchreq->lr_waiting, 0);
		}
	}
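	/*
	 * The lock is now ours.  Mark our request active so that other
	 * waiters know the holder is running and spinning is worthwhile.
	 */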
	lu->lu_myreq->lr_active = 1;
}

/*
 * Release a lock.
 */
void
_lock_release(struct lock *lck, struct lockuser *lu)
{
	struct lockuser *lu_tmp, *lu_h;
	struct lockreq *myreq;
	int prio_h;

	/**
	 * XXX - We probably want to remove these checks to optimize
	 *       performance.  It is also a bug if any one of the
	 *       checks fail, so it's probably better to just let it
	 *       SEGV and fix it.
	 */
#if 0
	if ((lck == NULL) || (lu == NULL))
		return;
#endif
	if ((lck->l_type & LCK_PRIORITY) != 0) {
		prio_h = 0;
		lu_h = NULL;

		/* Update tail if our request is last. */
		if (lu->lu_watchreq->lr_owner == NULL) {
			atomic_store_rel_ptr(&lck->l_tail, lu->lu_myreq);
			atomic_store_rel_ptr(&lu->lu_myreq->lr_owner, NULL);
		} else {
			/* Remove ourselves from the list. */
			atomic_store_rel_ptr(&lu->lu_myreq->lr_owner,
			    lu->lu_watchreq->lr_owner);
			atomic_store_rel_ptr(
			    &lu->lu_watchreq->lr_owner->lu_myreq, lu->lu_myreq);
		}
		/*
		 * The watch request now becomes our own because we've
		 * traded away our previous request.  Save our previous
		 * request so that we can grant the lock.
		 */
		myreq = lu->lu_myreq;
		lu->lu_myreq = lu->lu_watchreq;
		lu->lu_watchreq = NULL;
		lu->lu_myreq->lr_locked = 1;
		lu->lu_myreq->lr_owner = lu;
		lu->lu_myreq->lr_watcher = NULL;
		lu->lu_myreq->lr_waiting = 0;
		/*
		 * Traverse the list of lock requests in reverse order
		 * looking for the user with the highest priority.
		 */
		for (lu_tmp = lck->l_tail->lr_watcher; lu_tmp != NULL;
		    lu_tmp = lu_tmp->lu_myreq->lr_watcher) {
			if (lu_tmp->lu_priority > prio_h) {
				lu_h = lu_tmp;
				prio_h = lu_tmp->lu_priority;
			}
		}
		if (lu_h != NULL) {
			/* Give the lock to the highest priority user. */
			if ((lu_h->lu_watchreq->lr_waiting != 0) &&
			    (lck->l_wakeup != NULL))
				/* Notify the sleeper */
				lck->l_wakeup(lck, lu_h->lu_myreq->lr_watcher);
			else
				atomic_store_rel_long(
				    &lu_h->lu_watchreq->lr_locked, 0);
		} else {
			if ((myreq->lr_waiting != 0) &&
			    (lck->l_wakeup != NULL))
				/* Notify the sleeper */
				lck->l_wakeup(lck, myreq->lr_watcher);
			else
				/* Give the lock to the previous request. */
				atomic_store_rel_long(&myreq->lr_locked, 0);
		}
	} else {
		/*
		 * The watch request now becomes our own because we've
		 * traded away our previous request.  Save our previous
		 * request so that we can grant the lock.
		 */
		myreq = lu->lu_myreq;
		lu->lu_myreq = lu->lu_watchreq;
		lu->lu_watchreq = NULL;
		lu->lu_myreq->lr_locked = 1;
		lu->lu_myreq->lr_waiting = 0;
		if ((myreq->lr_waiting != 0) && (lck->l_wakeup != NULL))
			/* Notify the sleeper */
			lck->l_wakeup(lck, myreq->lr_watcher);
		else
			/* Give the lock to the previous request. */
			atomic_store_rel_long(&myreq->lr_locked, 0);
	}
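	/*
	 * Reset the active flag on our recycled request so it is ready
	 * for the next acquisition (_lock_acquire() sets it again once
	 * the lock is obtained).
	 */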
	lu->lu_myreq->lr_active = 0;
}

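/*
 * Grant the lock to a lock user that is waiting on it by clearing the
 * lr_locked flag of the request it is watching.  The lock argument is
 * unused.
 */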
void
_lock_grant(struct lock *lck /* unused */, struct lockuser *lu)
{
	atomic_store_rel_long(&lu->lu_watchreq->lr_locked, 0);
}

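/*
 * Record whether this lock user is currently running.  Lock waiters
 * use the flag to decide between spinning and calling the wait
 * handler (see the spin loop in _lock_acquire()).
 */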
void
_lockuser_setactive(struct lockuser *lu, int active)
{
	lu->lu_myreq->lr_active = active;
}