--- lib/libkse/sys/lock.c	(revision 115080)
+++ lib/libkse/sys/lock.c	(revision 115278)
@@ -1,288 +1,300 @@
 /*-
  * Copyright (c) 2001, 2003 Daniel Eischen <deischen@freebsd.org>.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
  * 1. Redistributions of source code must retain the above copyright
  *    notice, this list of conditions and the following disclaimer.
  * 2. Redistributions in binary form must reproduce the above copyright
  *    notice, this list of conditions and the following disclaimer in the
  *    documentation and/or other materials provided with the distribution.
  *
  * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  * SUCH DAMAGE.
  *
- * $FreeBSD: head/lib/libkse/sys/lock.c 115080 2003-05-16 19:58:30Z deischen $
+ * $FreeBSD: head/lib/libkse/sys/lock.c 115278 2003-05-24 02:29:25Z deischen $
  */
 
 #include <sys/types.h>
 #include <machine/atomic.h>
 #include <assert.h>
 #include <stdlib.h>
 
 #include "atomic_ops.h"
 #include "lock.h"
 
 #define	LCK_ASSERT	assert
 #define	MAX_SPINS	500
 
 void
 _lock_destroy(struct lock *lck)
 {
 
 	if ((lck != NULL) && (lck->l_head != NULL)) {
 		free(lck->l_head);
 		lck->l_head = NULL;
 		lck->l_tail = NULL;
 	}
 }
 
 int
 _lock_init(struct lock *lck, enum lock_type ltype,
     lock_handler_t *waitfunc, lock_handler_t *wakeupfunc)
 {
 
 	if (lck == NULL)
 		return (-1);
 	else if ((lck->l_head = malloc(sizeof(struct lockreq))) == NULL)
 		return (-1);
 	else {
 		lck->l_type = ltype;
 		lck->l_wait = waitfunc;
 		lck->l_wakeup = wakeupfunc;
 		lck->l_head->lr_locked = 0;
 		lck->l_head->lr_watcher = NULL;
 		lck->l_head->lr_owner = NULL;
-		lck->l_head->lr_waiting = 0;
 		lck->l_head->lr_active = 1;
 		lck->l_tail = lck->l_head;
 	}
 	return (0);
 }
 
 int
 _lockuser_init(struct lockuser *lu, void *priv)
 {
 
 	if (lu == NULL)
 		return (-1);
 	else if ((lu->lu_myreq == NULL) &&
 	    ((lu->lu_myreq = malloc(sizeof(struct lockreq))) == NULL))
 		return (-1);
 	else {
 		lu->lu_myreq->lr_locked = 1;
 		lu->lu_myreq->lr_watcher = NULL;
 		lu->lu_myreq->lr_owner = lu;
-		lu->lu_myreq->lr_waiting = 0;
 		lu->lu_myreq->lr_active = 0;
 		lu->lu_watchreq = NULL;
 		lu->lu_priority = 0;
 		lu->lu_private = priv;
 		lu->lu_private2 = NULL;
 	}
 	return (0);
 }
 
 void
 _lockuser_destroy(struct lockuser *lu)
 {
 
 	if ((lu != NULL) && (lu->lu_myreq != NULL))
 		free(lu->lu_myreq);
 }
 
 /*
  * Acquire a lock waiting (spin or sleep) for it to become available.
  */
 void
 _lock_acquire(struct lock *lck, struct lockuser *lu, int prio)
 {
 	int	i;
+	long	lval;
 
 	/**
 	 * XXX - We probably want to remove these checks to optimize
 	 *       performance.  It is also a bug if any one of the
 	 *       checks fail, so it's probably better to just let it
 	 *       SEGV and fix it.
 	 */
 #if 0
 	if (lck == NULL || lu == NULL || lck->l_head == NULL)
 		return;
 #endif
 	if ((lck->l_type & LCK_PRIORITY) == 0)
 		atomic_swap_ptr(&lck->l_head, lu->lu_myreq, &lu->lu_watchreq);
 	else {
 		LCK_ASSERT(lu->lu_myreq->lr_locked == 1);
 		LCK_ASSERT(lu->lu_myreq->lr_watcher == NULL);
 		LCK_ASSERT(lu->lu_myreq->lr_owner == lu);
-		LCK_ASSERT(lu->lu_myreq->lr_waiting == 0);
 		LCK_ASSERT(lu->lu_watchreq == NULL);
 
 		lu->lu_priority = prio;
 		/*
 		 * Atomically swap the head of the lock request with
 		 * this request.
 		 */
 		atomic_swap_ptr(&lck->l_head, lu->lu_myreq, &lu->lu_watchreq);
 	}
 
 	if (lu->lu_watchreq->lr_locked != 0) {
 		atomic_store_rel_ptr(&lu->lu_watchreq->lr_watcher, lu);
 		if ((lck->l_wait == NULL) ||
 		    ((lck->l_type & LCK_ADAPTIVE) == 0)) {
 			while (lu->lu_watchreq->lr_locked != 0)
 				;	/* spin, then yield? */
 		} else {
 			/*
 			 * Spin for a bit before invoking the wait function.
 			 *
 			 * We should be a little smarter here.  If we're
 			 * running on a single processor, then the lock
 			 * owner got preempted and spinning will accomplish
 			 * nothing but waste time.  If we're running on
 			 * multiple processors, the owner could be running
 			 * on another CPU and we might acquire the lock if
 			 * we spin for a bit.
 			 *
 			 * The other thing to keep in mind is that threads
 			 * acquiring these locks are considered to be in
 			 * critical regions; they will not be preempted by
 			 * the _UTS_ until they release the lock.  It is
 			 * therefore safe to assume that if a lock can't
 			 * be acquired, it is currently held by a thread
 			 * running in another KSE.
 			 */
 			for (i = 0; i < MAX_SPINS; i++) {
 				if (lu->lu_watchreq->lr_locked == 0)
 					return;
 				if (lu->lu_watchreq->lr_active == 0)
 					break;
 			}
-			atomic_store_rel_long(&lu->lu_watchreq->lr_waiting, 1);
-			while (lu->lu_watchreq->lr_locked != 0)
+			atomic_swap_long((long *)&lu->lu_watchreq->lr_locked,
+			    2, &lval);
+			if (lval == 0)
+				lu->lu_watchreq->lr_locked = 0;
+			else
 				lck->l_wait(lck, lu);
-			atomic_store_rel_long(&lu->lu_watchreq->lr_waiting, 0);
+
 		}
 	}
 	lu->lu_myreq->lr_active = 1;
 }
 
 /*
  * Release a lock.
  */
 void
 _lock_release(struct lock *lck, struct lockuser *lu)
 {
 	struct lockuser *lu_tmp, *lu_h;
 	struct lockreq *myreq;
 	int	prio_h;
+	long	lval;
 
 	/**
 	 * XXX - We probably want to remove these checks to optimize
 	 *       performance.  It is also a bug if any one of the
 	 *       checks fail, so it's probably better to just let it
 	 *       SEGV and fix it.
 	 */
 #if 0
 	if ((lck == NULL) || (lu == NULL))
 		return;
 #endif
 	if ((lck->l_type & LCK_PRIORITY) != 0) {
 		prio_h = 0;
 		lu_h = NULL;
 
 		/* Update tail if our request is last. */
 		if (lu->lu_watchreq->lr_owner == NULL) {
 			atomic_store_rel_ptr(&lck->l_tail, lu->lu_myreq);
 			atomic_store_rel_ptr(&lu->lu_myreq->lr_owner, NULL);
 		} else {
 			/* Remove ourselves from the list. */
 			atomic_store_rel_ptr(&lu->lu_myreq->lr_owner,
 			    lu->lu_watchreq->lr_owner);
 			atomic_store_rel_ptr(
 			    &lu->lu_watchreq->lr_owner->lu_myreq, lu->lu_myreq);
 		}
 		/*
 		 * The watch request now becomes our own because we've
 		 * traded away our previous request.  Save our previous
 		 * request so that we can grant the lock.
 		 */
 		myreq = lu->lu_myreq;
 		lu->lu_myreq = lu->lu_watchreq;
 		lu->lu_watchreq = NULL;
 		lu->lu_myreq->lr_locked = 1;
 		lu->lu_myreq->lr_owner = lu;
 		lu->lu_myreq->lr_watcher = NULL;
-		lu->lu_myreq->lr_waiting = 0;
 		/*
 		 * Traverse the list of lock requests in reverse order
 		 * looking for the user with the highest priority.
 		 */
 		for (lu_tmp = lck->l_tail->lr_watcher; lu_tmp != NULL;
 		     lu_tmp = lu_tmp->lu_myreq->lr_watcher) {
 			if (lu_tmp->lu_priority > prio_h) {
 				lu_h = lu_tmp;
 				prio_h = lu_tmp->lu_priority;
 			}
 		}
 		if (lu_h != NULL) {
 			/* Give the lock to the highest priority user. */
-			if ((lu_h->lu_watchreq->lr_waiting != 0) &&
-			    (lck->l_wakeup != NULL))
-				/* Notify the sleeper */
-				lck->l_wakeup(lck, lu_h->lu_myreq->lr_watcher);
+			if (lck->l_wakeup != NULL) {
+				atomic_swap_long(
+				    (long *)&lu_h->lu_watchreq->lr_locked,
+				    0, &lval);
+				if (lval == 2)
+					/* Notify the sleeper */
+					lck->l_wakeup(lck,
+					    lu_h->lu_myreq->lr_watcher);
+			}
 			else
-				atomic_store_rel_long(&lu_h->lu_watchreq->lr_locked, 0);
+				atomic_store_rel_long(
+				    &lu_h->lu_watchreq->lr_locked, 0);
 		} else {
-			if ((myreq->lr_waiting != 0) &&
-			    (lck->l_wakeup != NULL))
-				/* Notify the sleeper */
-				lck->l_wakeup(lck, myreq->lr_watcher);
+			if (lck->l_wakeup != NULL) {
+				atomic_swap_long((long *)&myreq->lr_locked,
+				    0, &lval);
+				if (lval == 2)
+					/* Notify the sleeper */
+					lck->l_wakeup(lck, myreq->lr_watcher);
+			}
 			else
 				/* Give the lock to the previous request. */
 				atomic_store_rel_long(&myreq->lr_locked, 0);
 		}
 	} else {
 		/*
 		 * The watch request now becomes our own because we've
 		 * traded away our previous request.  Save our previous
 		 * request so that we can grant the lock.
 		 */
 		myreq = lu->lu_myreq;
 		lu->lu_myreq = lu->lu_watchreq;
 		lu->lu_watchreq = NULL;
 		lu->lu_myreq->lr_locked = 1;
-		lu->lu_myreq->lr_waiting = 0;
-		if (myreq->lr_waiting != 0 && lck->l_wakeup)
-			/* Notify the sleeper */
-			lck->l_wakeup(lck, myreq->lr_watcher);
+		if (lck->l_wakeup) {
+			atomic_swap_long((long *)&myreq->lr_locked, 0, &lval);
+			if (lval == 2)
+				/* Notify the sleeper */
+				lck->l_wakeup(lck, myreq->lr_watcher);
+		}
 		else
 			/* Give the lock to the previous request. */
 			atomic_store_rel_long(&myreq->lr_locked, 0);
 	}
 	lu->lu_myreq->lr_active = 0;
 }
 
 void
 _lock_grant(struct lock *lck /* unused */, struct lockuser *lu)
 {
-	atomic_store_rel_long(&lu->lu_watchreq->lr_locked, 0);
+	atomic_store_rel_long(&lu->lu_watchreq->lr_locked, 3);
 }
 
 void
 _lockuser_setactive(struct lockuser *lu, int active)
 {
 	lu->lu_myreq->lr_active = active;
 }
 
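
Usage note: the sketch below shows one minimal way to drive the API defined above. It is an illustration, not code from the FreeBSD tree, and it assumes the declarations in "lock.h" match the way lock.c uses them: the wait/wakeup handlers are invoked as lck->l_wait(lck, lu) and lck->l_wakeup(lck, lu), and a lock type value of 0 is assumed to leave the LCK_PRIORITY and LCK_ADAPTIVE bits clear, so _lock_acquire() takes the plain spinning path and its prio argument is unused. The function name example() and the static variables are hypothetical.

#include <stddef.h>

#include "lock.h"

static struct lock	example_lock;
static struct lockuser	example_user;

static void
example(void)
{
	/*
	 * Type 0 (assumed: LCK_PRIORITY and LCK_ADAPTIVE clear) with no
	 * wait/wakeup handlers gives a simple FIFO spin lock.
	 */
	if (_lock_init(&example_lock, (enum lock_type)0, NULL, NULL) != 0)
		return;

	/* lu_myreq must start out NULL so _lockuser_init() allocates it. */
	example_user.lu_myreq = NULL;
	if (_lockuser_init(&example_user, NULL) != 0) {
		_lock_destroy(&example_lock);
		return;
	}

	_lock_acquire(&example_lock, &example_user, 0);
	/* ... critical section ... */
	_lock_release(&example_lock, &example_user);

	_lockuser_destroy(&example_user);
	_lock_destroy(&example_lock);
}

With non-NULL handlers and LCK_ADAPTIVE set in the lock type, _lock_acquire() instead spins up to MAX_SPINS times and then calls the l_wait handler, and _lock_release() wakes any sleeper through l_wakeup.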