head/lib/libkse/sys/lock.c: changes between r115080 (2003-05-16) and r115278 (2003-05-24). Deleted lines are marked '-', added lines '+'.
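This revision reworks the sleep/wakeup handshake in libkse's low-level locks: r115080 announced a sleeper through a separate lr_waiting field that the releasing thread inspected after unlocking, while r115278 deletes that field and folds the waiter state into lr_locked itself, transferring it with atomic_swap_long() so the announcement and the lock state are always read and written as a single word. Reading the constants out of the diff, the lr_locked values now form a small state machine; a minimal sketch with invented names (the source uses only the bare constants):

	/*
	 * Hypothetical labels for the lr_locked values in this diff;
	 * the source itself uses the bare constants 0-3.
	 */
	enum lr_locked_state {
		LR_AVAILABLE	= 0,	/* released; the watcher may proceed */
		LR_HELD		= 1,	/* still held by the preceding request */
		LR_HELD_WAITER	= 2,	/* held, and the watcher went to sleep */
		LR_GRANTED	= 3	/* handed off explicitly by _lock_grant() */
	};

The meaning of 3 is inferred from _lock_grant() at the bottom of the diff; the code that tests for it lives outside this file.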
/*-
 * Copyright (c) 2001, 2003 Daniel Eischen <deischen@freebsd.org>.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright

--- 9 unchanged lines hidden ---

 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
- * $FreeBSD: head/lib/libkse/sys/lock.c 115080 2003-05-16 19:58:30Z deischen $
+ * $FreeBSD: head/lib/libkse/sys/lock.c 115278 2003-05-24 02:29:25Z deischen $
 */

#include <sys/types.h>
#include <machine/atomic.h>
#include <assert.h>
#include <stdlib.h>

#include "atomic_ops.h"

--- 24 unchanged lines hidden ---

		return (-1);
	else {
		lck->l_type = ltype;
		lck->l_wait = waitfunc;
		lck->l_wakeup = wakeupfunc;
		lck->l_head->lr_locked = 0;
		lck->l_head->lr_watcher = NULL;
		lck->l_head->lr_owner = NULL;
-		lck->l_head->lr_waiting = 0;
		lck->l_head->lr_active = 1;
		lck->l_tail = lck->l_head;
	}
	return (0);
}

int
_lockuser_init(struct lockuser *lu, void *priv)
{

	if (lu == NULL)
		return (-1);
	else if ((lu->lu_myreq == NULL) &&
	    ((lu->lu_myreq = malloc(sizeof(struct lockreq))) == NULL))
		return (-1);
	else {
		lu->lu_myreq->lr_locked = 1;
		lu->lu_myreq->lr_watcher = NULL;
		lu->lu_myreq->lr_owner = lu;
-		lu->lu_myreq->lr_waiting = 0;
		lu->lu_myreq->lr_active = 0;
		lu->lu_watchreq = NULL;
		lu->lu_priority = 0;
		lu->lu_private = priv;
		lu->lu_private2 = NULL;
	}
	return (0);
}

--- 8 unchanged lines hidden ---

/*
 * Acquire a lock waiting (spin or sleep) for it to become available.
 */
void
_lock_acquire(struct lock *lck, struct lockuser *lu, int prio)
{
	int i;
+	long lval;

	/**
	 * XXX - We probably want to remove these checks to optimize
	 *       performance.  It is also a bug if any one of the
	 *       checks fail, so it's probably better to just let it
	 *       SEGV and fix it.
	 */
#if 0
	if (lck == NULL || lu == NULL || lck->l_head == NULL)
		return;
#endif
	if ((lck->l_type & LCK_PRIORITY) == 0)
		atomic_swap_ptr(&lck->l_head, lu->lu_myreq, &lu->lu_watchreq);
	else {
		LCK_ASSERT(lu->lu_myreq->lr_locked == 1);
		LCK_ASSERT(lu->lu_myreq->lr_watcher == NULL);
		LCK_ASSERT(lu->lu_myreq->lr_owner == lu);
-		LCK_ASSERT(lu->lu_myreq->lr_waiting == 0);
		LCK_ASSERT(lu->lu_watchreq == NULL);

		lu->lu_priority = prio;
		/*
		 * Atomically swap the head of the lock request with
		 * this request.
		 */
		atomic_swap_ptr(&lck->l_head, lu->lu_myreq, &lu->lu_watchreq);

--- 26 unchanged lines hidden ---

			 * running in another KSE.
			 */
			for (i = 0; i < MAX_SPINS; i++) {
				if (lu->lu_watchreq->lr_locked == 0)
					return;
				if (lu->lu_watchreq->lr_active == 0)
					break;
			}
-			atomic_store_rel_long(&lu->lu_watchreq->lr_waiting, 1);
-			while (lu->lu_watchreq->lr_locked != 0)
-				lck->l_wait(lck, lu);
-			atomic_store_rel_long(&lu->lu_watchreq->lr_waiting, 0);
+			atomic_swap_long((long *)&lu->lu_watchreq->lr_locked,
+			    2, &lval);
+			if (lval == 0)
+				lu->lu_watchreq->lr_locked = 0;
+			else
+				lck->l_wait(lck, lu);
+
		}
	}
	lu->lu_myreq->lr_active = 1;
}
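The hunk above is the heart of the change. The old code stored lr_waiting = 1 and then separately re-read lr_locked; the new code performs one atomic exchange that simultaneously marks the watcher as a sleeper (2) and samples the previous state, backing the mark out (restoring 0) if the lock had already been released. A condensed sketch of both sides of the handshake, modeled here with C11 atomics rather than the <machine/atomic.h> primitives the file really uses; sleep_until_woken() and wake_sleeper() are hypothetical stand-ins for the lck->l_wait and lck->l_wakeup callbacks:

	#include <stdatomic.h>

	/* Hypothetical stand-ins for lck->l_wait()/lck->l_wakeup(). */
	static void sleep_until_woken(void) { }
	static void wake_sleeper(void) { }

	static void
	waiter(atomic_long *lr_locked)
	{
		long lval;

		/* One exchange both marks us as a sleeper (2) and samples
		 * the previous state, so a release cannot slip in between. */
		lval = atomic_exchange(lr_locked, 2);
		if (lval == 0)
			atomic_store(lr_locked, 0);	/* already free: undo the mark */
		else
			sleep_until_woken();		/* was 1: the owner will see 2 */
	}

	static void
	releaser(atomic_long *lr_locked)
	{
		/* Only a displaced value of 2 proves a sleeper registered. */
		if (atomic_exchange(lr_locked, 0) == 2)
			wake_sleeper();
	}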

/*
 * Release a lock.
 */
void
_lock_release(struct lock *lck, struct lockuser *lu)
{
	struct lockuser *lu_tmp, *lu_h;
	struct lockreq *myreq;
	int prio_h;
+	long lval;

	/**
	 * XXX - We probably want to remove these checks to optimize
	 *       performance.  It is also a bug if any one of the
	 *       checks fail, so it's probably better to just let it
	 *       SEGV and fix it.
	 */
#if 0

--- 21 unchanged lines hidden ---

		 * request so that we can grant the lock.
		 */
		myreq = lu->lu_myreq;
		lu->lu_myreq = lu->lu_watchreq;
		lu->lu_watchreq = NULL;
		lu->lu_myreq->lr_locked = 1;
		lu->lu_myreq->lr_owner = lu;
		lu->lu_myreq->lr_watcher = NULL;
-		lu->lu_myreq->lr_waiting = 0;
		/*
		 * Traverse the list of lock requests in reverse order
		 * looking for the user with the highest priority.
		 */
		for (lu_tmp = lck->l_tail->lr_watcher; lu_tmp != NULL;
		    lu_tmp = lu_tmp->lu_myreq->lr_watcher) {
			if (lu_tmp->lu_priority > prio_h) {
				lu_h = lu_tmp;
				prio_h = lu_tmp->lu_priority;
			}
		}
		if (lu_h != NULL) {
			/* Give the lock to the highest priority user. */
-			if ((lu_h->lu_watchreq->lr_waiting != 0) &&
-			    (lck->l_wakeup != NULL))
-				/* Notify the sleeper */
-				lck->l_wakeup(lck, lu_h->lu_myreq->lr_watcher);
+			if (lck->l_wakeup != NULL) {
+				atomic_swap_long(
+				    (long *)&lu_h->lu_watchreq->lr_locked,
+				    0, &lval);
+				if (lval == 2)
+					/* Notify the sleeper */
+					lck->l_wakeup(lck,
+					    lu_h->lu_myreq->lr_watcher);
+			}
			else
-				atomic_store_rel_long(&lu_h->lu_watchreq->lr_locked, 0);
+				atomic_store_rel_long(
+				    &lu_h->lu_watchreq->lr_locked, 0);
		} else {
-			if ((myreq->lr_waiting != 0) &&
-			    (lck->l_wakeup != NULL))
-				/* Notify the sleeper */
-				lck->l_wakeup(lck, myreq->lr_watcher);
+			if (lck->l_wakeup != NULL) {
+				atomic_swap_long((long *)&myreq->lr_locked,
+				    0, &lval);
+				if (lval == 2)
+					/* Notify the sleeper */
+					lck->l_wakeup(lck, myreq->lr_watcher);
+			}
			else
				/* Give the lock to the previous request. */
				atomic_store_rel_long(&myreq->lr_locked, 0);
		}
	} else {
		/*
		 * The watch request now becomes our own because we've
		 * traded away our previous request. Save our previous
		 * request so that we can grant the lock.
		 */
		myreq = lu->lu_myreq;
		lu->lu_myreq = lu->lu_watchreq;
		lu->lu_watchreq = NULL;
		lu->lu_myreq->lr_locked = 1;
-		lu->lu_myreq->lr_waiting = 0;
-		if (myreq->lr_waiting != 0 && lck->l_wakeup)
-			/* Notify the sleeper */
-			lck->l_wakeup(lck, myreq->lr_watcher);
+		if (lck->l_wakeup) {
+			atomic_swap_long((long *)&myreq->lr_locked, 0, &lval);
+			if (lval == 2)
+				/* Notify the sleeper */
+				lck->l_wakeup(lck, myreq->lr_watcher);
+		}
		else
			/* Give the lock to the previous request. */
			atomic_store_rel_long(&myreq->lr_locked, 0);
	}
	lu->lu_myreq->lr_active = 0;
}
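All three wakeup sites in _lock_release() change the same way: rather than storing 0 and then consulting the separate lr_waiting flag, the releaser swaps 0 into lr_locked and issues the wakeup only if the value it displaced was 2, a registered sleeper. When no wakeup callback is installed, the plain atomic_store_rel_long() release survives unchanged, since there is no sleeper to notify. The swap appears to matter because the old protocol was a two-variable Dekker-style exchange, and atomic_store_rel_long() orders the stores and loads that precede it but does not keep the load that follows it from being satisfied before the store is visible to the other CPU. On a weakly ordered SMP machine both sides could therefore read stale values, roughly:

	/*
	 * The lost-wakeup interleaving the swap eliminates (r115080 code,
	 * two CPUs; each load may complete before the other CPU's store
	 * becomes visible):
	 *
	 *	waiter (_lock_acquire)		releaser (_lock_release)
	 *	----------------------		------------------------
	 *	lr_waiting = 1;			lr_locked = 0;
	 *	while (lr_locked != 0)		if (lr_waiting != 0)
	 *	    ^-- reads stale 1		    ^-- reads stale 0
	 *	    l_wait(); sleeps forever	    -- no l_wakeup() sent
	 *
	 * With both facts held in one word, a single atomic_swap_long()
	 * on each side makes such a split reading impossible.
	 */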

void
_lock_grant(struct lock *lck /* unused */, struct lockuser *lu)
{
-	atomic_store_rel_long(&lu->lu_watchreq->lr_locked, 0);
+	atomic_store_rel_long(&lu->lu_watchreq->lr_locked, 3);
}

void
_lockuser_setactive(struct lockuser *lu, int active)
{
	lu->lu_myreq->lr_active = active;
}
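For orientation, a minimal usage sketch of this API. The _lock_acquire(), _lock_release(), and _lockuser_init() signatures appear in the diff itself; the _lock_init() argument order is inferred from the l_type/l_wait/l_wakeup assignments in the first hunk and the callbacks are hypothetical (libkse's real ones live in the threads library and block or unblock the lockuser's thread or KSE), so treat both as assumptions:

	#include "lock.h"	/* assumed: declares the structs and _lock_*() */

	/* Hypothetical wait/wakeup callbacks. */
	static void
	my_wait(struct lock *lck, struct lockuser *lu)
	{
		(void)lck; (void)lu;	/* a real callback would sleep here */
	}

	static void
	my_wakeup(struct lock *lck, struct lockuser *lu)
	{
		(void)lck; (void)lu;	/* ...and wake the sleeper here */
	}

	static struct lock	lk;
	static struct lockuser	me;	/* zeroed, so _lockuser_init() mallocs lu_myreq */

	int
	main(void)
	{
		/* Assumed argument order, mirroring the l_type/l_wait/l_wakeup
		 * assignments in _lock_init() above. */
		if (_lock_init(&lk, LCK_PRIORITY, my_wait, my_wakeup) != 0 ||
		    _lockuser_init(&me, NULL) != 0)
			return (1);
		_lock_acquire(&lk, &me, 0);	/* prio 0; queued via atomic_swap_ptr */
		/* ...critical section... */
		_lock_release(&lk, &me);
		return (0);
	}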