sys/kern/kern_rwlock.c: diff between r160771 (2006-07-27, jhb) and r164159 (2006-11-11, kmacy).
Lines deleted by r164159 are prefixed "-", lines added are prefixed "+"; unchanged context
is unprefixed, and elided runs are marked "--- N unchanged lines hidden ---".
/*-
 * Copyright (c) 2006 John Baldwin <jhb@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright

--- 18 unchanged lines hidden ---

 * SUCH DAMAGE.
 */

/*
 * Machine independent bits of reader/writer lock implementation.
 */

#include <sys/cdefs.h>
-__FBSDID("$FreeBSD: head/sys/kern/kern_rwlock.c 160771 2006-07-27 21:45:55Z jhb $");
+__FBSDID("$FreeBSD: head/sys/kern/kern_rwlock.c 164159 2006-11-11 03:18:07Z kmacy $");

#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/rwlock.h>
#include <sys/systm.h>
#include <sys/turnstile.h>
-
+#include <sys/lock_profile.h>
#include <machine/cpu.h>

#ifdef DDB
#include <ddb/ddb.h>

static void	db_show_rwlock(struct lock_object *lock);
#endif

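The new <sys/lock_profile.h> include pulls in the lock-profiling hooks that the
rest of this diff wires into the rwlock code. For reference, these are the six
hooks as they are invoked below; argument shapes are taken from the call sites
in this diff, and the authoritative prototypes live in sys/lock_profile.h:

	/* object lifecycle, from rw_init()/rw_destroy() */
	lock_profile_object_init(&rw->rw_object, name);
	lock_profile_object_destroy(&rw->rw_object);
	/* acquisition timing: stamp the attempt, then report the outcome */
	lock_profile_waitstart(&waitstart);
	lock_profile_obtain_lock_success(&rw->rw_object, waitstart, file, line);
	lock_profile_obtain_lock_failed(&rw->rw_object, &contested);
	/* end of the profiled hold period */
	lock_profile_release_lock(&rw->rw_object);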

--- 25 unchanged lines hidden ---

#endif

void
rw_init(struct rwlock *rw, const char *name)
{

	rw->rw_lock = RW_UNLOCKED;

+	lock_profile_object_init(&rw->rw_object, name);
	lock_init(&rw->rw_object, &lock_class_rw, name, NULL, LO_WITNESS |
	    LO_RECURSABLE | LO_UPGRADABLE);
}

void
rw_destroy(struct rwlock *rw)
{

	KASSERT(rw->rw_lock == RW_UNLOCKED, ("rw lock not unlocked"));
+	lock_profile_object_destroy(&rw->rw_object);
	lock_destroy(&rw->rw_object);
}

void
rw_sysinit(void *arg)
{
	struct rw_args *args = arg;

	rw_init(args->ra_rw, args->ra_desc);
}

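With the two hunks above, each rwlock's profiling state is created in rw_init()
and torn down in rw_destroy(), so any consumer that pairs the two calls gets
profiling with no further changes. A minimal sketch, where foo_lock and the
foo_modinit()/foo_modfini() pair are hypothetical:

	static struct rwlock foo_lock;

	static void
	foo_modinit(void)
	{
		/* Also sets up the lock_profile object for "foo". */
		rw_init(&foo_lock, "foo");
	}

	static void
	foo_modfini(void)
	{
		/* Asserts the lock is unlocked, then tears down profiling state. */
		rw_destroy(&foo_lock);
	}
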
void
_rw_wlock(struct rwlock *rw, const char *file, int line)
{
+	uint64_t waitstart;

	MPASS(curthread != NULL);
	KASSERT(rw_wowner(rw) != curthread,
	    ("%s (%s): wlock already held @ %s:%d", __func__,
	    rw->rw_object.lo_name, file, line));
	WITNESS_CHECKORDER(&rw->rw_object, LOP_NEWORDER | LOP_EXCLUSIVE, file,
	    line);
+	lock_profile_waitstart(&waitstart);
	__rw_wlock(rw, curthread, file, line);
+	lock_profile_obtain_lock_success(&rw->rw_object, waitstart, file, line);
	LOCK_LOG_LOCK("WLOCK", &rw->rw_object, 0, 0, file, line);
	WITNESS_LOCK(&rw->rw_object, LOP_EXCLUSIVE, file, line);
	curthread->td_locks++;
}

void
_rw_wunlock(struct rwlock *rw, const char *file, int line)
{

	MPASS(curthread != NULL);
	_rw_assert(rw, RA_WLOCKED, file, line);
	curthread->td_locks--;
	WITNESS_UNLOCK(&rw->rw_object, LOP_EXCLUSIVE, file, line);
	LOCK_LOG_LOCK("WUNLOCK", &rw->rw_object, 0, 0, file, line);
+	lock_profile_release_lock(&rw->rw_object);
	__rw_wunlock(rw, curthread, file, line);
}

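The _rw_wlock()/_rw_wunlock() hunks above show the instrumentation pattern used
on every path in this diff: stamp a timestamp before the acquisition attempt,
hand it to the profiler on success so the elapsed interval can be charged as
wait time, and mark the release so the interval in between can be charged as
hold time. A self-contained userland analogue of that bookkeeping, using
pthreads and CLOCK_MONOTONIC purely for illustration:

	#include <pthread.h>
	#include <stdint.h>
	#include <stdio.h>
	#include <time.h>

	static pthread_mutex_t m = PTHREAD_MUTEX_INITIALIZER;

	static uint64_t
	now_ns(void)
	{
		struct timespec ts;

		clock_gettime(CLOCK_MONOTONIC, &ts);
		return ((uint64_t)ts.tv_sec * 1000000000ULL + ts.tv_nsec);
	}

	int
	main(void)
	{
		uint64_t waitstart, acquired, released;

		waitstart = now_ns();		/* cf. lock_profile_waitstart() */
		pthread_mutex_lock(&m);
		acquired = now_ns();		/* cf. lock_profile_obtain_lock_success() */
		/* ... critical section ... */
		released = now_ns();
		pthread_mutex_unlock(&m);	/* cf. lock_profile_release_lock() */
		printf("waited %ju ns, held %ju ns\n",
		    (uintmax_t)(acquired - waitstart),
		    (uintmax_t)(released - acquired));
		return (0);
	}
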
void
_rw_rlock(struct rwlock *rw, const char *file, int line)
{
#ifdef SMP
	volatile struct thread *owner;
#endif
+	uint64_t waitstart;
+	int contested;
	uintptr_t x;

	KASSERT(rw_wowner(rw) != curthread,
	    ("%s (%s): wlock already held @ %s:%d", __func__,
	    rw->rw_object.lo_name, file, line));
	WITNESS_CHECKORDER(&rw->rw_object, LOP_NEWORDER, file, line);

	/*
	 * Note that we don't make any attempt to try to block read
	 * locks once a writer has blocked on the lock. The reason is
	 * that we currently allow for read locks to recurse and we
	 * don't keep track of all the holders of read locks. Thus, if
	 * we were to block readers once a writer blocked and a reader
	 * tried to recurse on their reader lock after a writer had
	 * blocked we would end up in a deadlock since the reader would
	 * be blocked on the writer, and the writer would be blocked
	 * waiting for the reader to release its original read lock.
	 */
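	/*
	 * Illustrative interleaving of the deadlock described above, had
	 * readers been blocked behind a waiting writer (threads A and B
	 * are hypothetical):
	 *
	 *	A: rw_rlock(&rw);	A now holds a read lock
	 *	B: rw_wlock(&rw);	B blocks waiting for A
	 *	A: rw_rlock(&rw);	recursive read; if this blocked
	 *				behind B, A would be waiting on B
	 *				while B waits on A -- deadlock.
	 */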
+	lock_profile_waitstart(&waitstart);
	for (;;) {
		/*
		 * Handle the easy case. If no other thread has a write
		 * lock, then try to bump up the count of read locks. Note
		 * that we have to preserve the current state of the
		 * RW_LOCK_WRITE_WAITERS flag. If we fail to acquire a
		 * read lock, then rw_lock must have changed, so restart
		 * the loop. Note that this handles the case of a

--- 6 unchanged lines hidden ---

			/*
			 * The RW_LOCK_READ_WAITERS flag should only be set
			 * if another thread currently holds a write lock,
			 * and in that case RW_LOCK_READ should be clear.
			 */
			MPASS((x & RW_LOCK_READ_WAITERS) == 0);
			if (atomic_cmpset_acq_ptr(&rw->rw_lock, x,
			    x + RW_ONE_READER)) {
+				lock_profile_obtain_lock_success(&rw->rw_object, waitstart, file, line);
				if (LOCK_LOG_TEST(&rw->rw_object, 0))
					CTR4(KTR_LOCK,
					    "%s: %p succeed %p -> %p", __func__,
					    rw, (void *)x,
					    (void *)(x + RW_ONE_READER));
				break;
			}
			cpu_spinwait();
+			lock_profile_obtain_lock_failed(&rw->rw_object, &contested);
			continue;
		}

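		/*
		 * The easy-path CAS above works because rw_lock packs the
		 * reader count and the flag bits into one word: the attempt
		 * is made only while RW_LOCK_READ is set, and adding
		 * RW_ONE_READER bumps the count while leaving the flag bits
		 * (notably RW_LOCK_WRITE_WAITERS) intact.  In C11-atomics
		 * pseudocode:
		 *
		 *	uintptr_t x = atomic_load(&rw_lock);
		 *	if ((x & RW_LOCK_READ) &&
		 *	    atomic_compare_exchange_strong(&rw_lock, &x,
		 *	        x + RW_ONE_READER))
		 *		-> read lock acquired;
		 *	otherwise the state changed: retry, or fall through
		 *	to the hard case below.
		 */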
		/*
		 * Okay, now it's the hard case. Some other thread already
		 * has a write lock, so acquire the turnstile lock so we can
		 * begin the process of blocking.
		 */

--- 32 unchanged lines hidden ---

#ifdef SMP
		/*
		 * If the owner is running on another CPU, spin until
		 * the owner stops running or the state of the lock
		 * changes.
		 */
		owner = (struct thread *)RW_OWNER(x);
		if (TD_IS_RUNNING(owner)) {
+			lock_profile_obtain_lock_failed(&rw->rw_object, &contested);
			turnstile_release(&rw->rw_object);
			if (LOCK_LOG_TEST(&rw->rw_object, 0))
				CTR3(KTR_LOCK, "%s: spinning on %p held by %p",
				    __func__, rw, owner);
			while ((struct thread *)RW_OWNER(rw->rw_lock) == owner &&
			    TD_IS_RUNNING(owner))
				cpu_spinwait();
			continue;

--- 49 unchanged lines hidden ---

				if (LOCK_LOG_TEST(&rw->rw_object, 0))
					CTR4(KTR_LOCK,
					    "%s: %p succeeded %p -> %p",
					    __func__, rw, (void *)x,
					    (void *)(x - RW_ONE_READER));
				break;
			}
			continue;
-		}
+		} else
+			lock_profile_release_lock(&rw->rw_object);

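		/*
		 * The new "else" arm runs only when the simple reader-count
		 * decrement above was not taken, i.e. when this caller
		 * appears to be the last reader, so the profiled hold period
		 * for a read lock ends once, as the lock goes free, rather
		 * than on every shared unlock.
		 */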
		/*
		 * We should never have read waiters while at least one
		 * thread holds a read lock. (See note above)
		 */
		KASSERT(!(x & RW_LOCK_READ_WAITERS),
		    ("%s: waiting readers", __func__));

		/*

--- 78 unchanged lines hidden ---

 * read or write lock.
 */
void
_rw_wlock_hard(struct rwlock *rw, uintptr_t tid, const char *file, int line)
{
#ifdef SMP
	volatile struct thread *owner;
#endif
+	int contested;
	uintptr_t v;

	if (LOCK_LOG_TEST(&rw->rw_object, 0))
		CTR5(KTR_LOCK, "%s: %s contested (lock=%p) at %s:%d", __func__,
		    rw->rw_object.lo_name, (void *)rw->rw_lock, file, line);

	while (!_rw_write_lock(rw, tid)) {
		turnstile_lock(&rw->rw_object);

--- 25 unchanged lines hidden ---

			    tid | RW_LOCK_WRITE_WAITERS)) {
				turnstile_claim(&rw->rw_object);
				CTR2(KTR_LOCK, "%s: %p claimed by new writer",
				    __func__, rw);
				break;
			}
			turnstile_release(&rw->rw_object);
			cpu_spinwait();
+			lock_profile_obtain_lock_failed(&rw->rw_object, &contested);
			continue;
		}

		/*
		 * If the RW_LOCK_WRITE_WAITERS flag isn't set, then try to
		 * set it. If we fail to set it, then loop back and try
		 * again.
		 */
		if (!(v & RW_LOCK_WRITE_WAITERS)) {
			if (!atomic_cmpset_ptr(&rw->rw_lock, v,
			    v | RW_LOCK_WRITE_WAITERS)) {
				turnstile_release(&rw->rw_object);
				cpu_spinwait();
+				lock_profile_obtain_lock_failed(&rw->rw_object, &contested);
				continue;
			}
			if (LOCK_LOG_TEST(&rw->rw_object, 0))
				CTR2(KTR_LOCK, "%s: %p set write waiters flag",
				    __func__, rw);
		}

#ifdef SMP
		/*
		 * If the lock is write locked and the owner is
		 * running on another CPU, spin until the owner stops
		 * running or the state of the lock changes.
		 */
		owner = (struct thread *)RW_OWNER(v);
		if (!(v & RW_LOCK_READ) && TD_IS_RUNNING(owner)) {
+			lock_profile_obtain_lock_failed(&rw->rw_object, &contested);
			turnstile_release(&rw->rw_object);
			if (LOCK_LOG_TEST(&rw->rw_object, 0))
				CTR3(KTR_LOCK, "%s: spinning on %p held by %p",
				    __func__, rw, owner);
			while ((struct thread *)RW_OWNER(rw->rw_lock) == owner &&
			    TD_IS_RUNNING(owner))
				cpu_spinwait();
			continue;

--- 344 unchanged lines hidden ---
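
Taken together, the hunks in this diff give every acquisition path the same
shape: lock_profile_waitstart() stamps the attempt, each failed pass through a
retry loop reports lock_profile_obtain_lock_failed(&rw->rw_object, &contested),
and the eventual acquisition reports lock_profile_obtain_lock_success(). A
compact sketch of that retry shape; the timestamp()/record_*() helpers are
hypothetical stand-ins for the kernel hooks:

	#include <stdatomic.h>
	#include <stdint.h>

	static atomic_flag lk = ATOMIC_FLAG_INIT;	/* toy spinlock */

	static uint64_t timestamp(void) { return (0); }	/* stub clock */
	static void record_failed(int *contested) { (*contested)++; }
	static void record_success(uint64_t waitstart) { (void)waitstart; }

	static void
	profiled_lock(void)
	{
		uint64_t waitstart;
		int contested = 0;

		waitstart = timestamp();	/* lock_profile_waitstart() */
		while (atomic_flag_test_and_set(&lk)) {
			/* one failed pass == one contention report */
			record_failed(&contested);
		}
		record_success(waitstart);	/* ..._obtain_lock_success() */
	}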