kern_rwlock.c (155162, Deleted) vs. kern_rwlock.c (157826, Added)
1/*-
2 * Copyright (c) 2006 John Baldwin <jhb@FreeBSD.org>
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright

--- 18 unchanged lines hidden ---

27 * SUCH DAMAGE.
28 */
29
30/*
31 * Machine independent bits of reader/writer lock implementation.
32 */
33
34#include <sys/cdefs.h>
35__FBSDID("$FreeBSD: head/sys/kern/kern_rwlock.c 155162 2006-02-01 04:18:07Z scottl $");
35__FBSDID("$FreeBSD: head/sys/kern/kern_rwlock.c 157826 2006-04-17 21:11:01Z jhb $");
36
37#include "opt_ddb.h"
38
39#include <sys/param.h>
40#include <sys/ktr.h>
41#include <sys/lock.h>
42#include <sys/mutex.h>
43#include <sys/proc.h>

--- 12 unchanged lines hidden ---

56struct lock_class lock_class_rw = {
57 "rw",
58 LC_SLEEPLOCK | LC_RECURSABLE /* | LC_UPGRADABLE */,
59#ifdef DDB
60 db_show_rwlock
61#endif
62};
63
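A minimal usage sketch of the rw(9) API this file implements, assuming only the public wrappers declared in sys/rwlock.h (rw_init(), rw_rlock()/rw_runlock(), rw_wlock()/rw_wunlock()); the example_softc structure, the function names, and the lock name "example" are hypothetical:

#include <sys/param.h>
#include <sys/lock.h>
#include <sys/rwlock.h>

/* Hypothetical consumer data protected by a reader/writer lock. */
struct example_softc {
        struct rwlock   sc_lock;
        int             sc_count;
};

static void
example_init(struct example_softc *sc)
{
        /* "example" is the name shown by witness and DDB. */
        rw_init(&sc->sc_lock, "example");
}

static int
example_read(struct example_softc *sc)
{
        int v;

        rw_rlock(&sc->sc_lock);         /* shared (read) acquire */
        v = sc->sc_count;
        rw_runlock(&sc->sc_lock);
        return (v);
}

static void
example_write(struct example_softc *sc)
{
        rw_wlock(&sc->sc_lock);         /* exclusive (write) acquire */
        sc->sc_count++;
        rw_wunlock(&sc->sc_lock);
}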
Deleted (155162):
 64 #define rw_owner(rw)                                                \
 65     ((rw)->rw_lock & RW_LOCK_READ ? NULL :                          \
 66         (struct thread *)RW_OWNER((rw)->rw_lock))
 67
Added (157826):
 64 /*
 65  * Return a pointer to the owning thread if the lock is write-locked or
 66  * NULL if the lock is unlocked or read-locked.
 67  */
 68 #define rw_wowner(rw)                                               \
 69     ((rw)->rw_lock & RW_LOCK_READ ? NULL :                          \
 70         (struct thread *)RW_OWNER((rw)->rw_lock))
 71
 72 /*
 73  * Return a pointer to the owning thread for this lock who should receive
 74  * any priority lent by threads that block on this lock.  Currently this
 75  * is identical to rw_wowner().
 76  */
 77 #define rw_owner(rw)    rw_wowner(rw)
 78
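The rw_wowner() macro above hinges on how the lock word is encoded: the low bits are flags (RW_LOCK_READ among them) and, when the lock is write-locked, the remaining bits are the owning thread pointer. A standalone userland model of that encoding is sketched below; the TOY_* flag values and toy_* names are made up for illustration and are not the real sys/rwlock.h definitions:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/*
 * Toy model of the rwlock word: the low bits are flags and the rest is
 * either the owning thread pointer (write-locked) or a reader count.
 */
#define TOY_LOCK_READ           0x01UL          /* read-locked or unlocked */
#define TOY_LOCK_FLAGMASK       0x07UL
#define TOY_OWNER(x)            ((x) & ~TOY_LOCK_FLAGMASK)

struct toy_thread {
        _Alignas(8) int tid;            /* keep the low flag bits clear */
};

/* Analogue of rw_wowner(): owner pointer only when write-locked. */
static struct toy_thread *
toy_wowner(uintptr_t lockword)
{

        return ((lockword & TOY_LOCK_READ) ? NULL :
            (struct toy_thread *)TOY_OWNER(lockword));
}

int
main(void)
{
        static struct toy_thread td = { .tid = 100123 };
        uintptr_t wlocked = (uintptr_t)&td;             /* write-locked by td */
        uintptr_t rlocked = TOY_LOCK_READ | (2UL << 4); /* two readers */

        assert(toy_wowner(wlocked) == &td);
        assert(toy_wowner(rlocked) == NULL);
        printf("write owner tid: %d\n", toy_wowner(wlocked)->tid);
        return (0);
}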
68#ifndef INVARIANTS
69#define _rw_assert(rw, what, file, line)
70#endif
71
72void
73rw_init(struct rwlock *rw, const char *name)
74{
75

--- 19 unchanged lines hidden ---

95 rw_init(args->ra_rw, args->ra_desc);
96}
97
98void
99_rw_wlock(struct rwlock *rw, const char *file, int line)
100{
101
102 MPASS(curthread != NULL);
103 KASSERT(rw_owner(rw) != curthread,
114 KASSERT(rw_wowner(rw) != curthread,
104 ("%s (%s): wlock already held @ %s:%d", __func__,
105 rw->rw_object.lo_name, file, line));
106 WITNESS_CHECKORDER(&rw->rw_object, LOP_NEWORDER | LOP_EXCLUSIVE, file,
107 line);
108 __rw_wlock(rw, curthread, file, line);
109 LOCK_LOG_LOCK("WLOCK", &rw->rw_object, 0, 0, file, line);
110 WITNESS_LOCK(&rw->rw_object, LOP_EXCLUSIVE, file, line);
111}

--- 9 unchanged lines hidden ---

121 __rw_wunlock(rw, curthread, file, line);
122}
123
124void
125_rw_rlock(struct rwlock *rw, const char *file, int line)
126{
127 uintptr_t x;
128
115 ("%s (%s): wlock already held @ %s:%d", __func__,
116 rw->rw_object.lo_name, file, line));
117 WITNESS_CHECKORDER(&rw->rw_object, LOP_NEWORDER | LOP_EXCLUSIVE, file,
118 line);
119 __rw_wlock(rw, curthread, file, line);
120 LOCK_LOG_LOCK("WLOCK", &rw->rw_object, 0, 0, file, line);
121 WITNESS_LOCK(&rw->rw_object, LOP_EXCLUSIVE, file, line);
122}

--- 9 unchanged lines hidden (view full) ---

132 __rw_wunlock(rw, curthread, file, line);
133}
134
135void
136_rw_rlock(struct rwlock *rw, const char *file, int line)
137{
138 uintptr_t x;
139
129 KASSERT(rw_owner(rw) != curthread,
140 KASSERT(rw_wowner(rw) != curthread,
130 ("%s (%s): wlock already held @ %s:%d", __func__,
131 rw->rw_object.lo_name, file, line));
132 WITNESS_CHECKORDER(&rw->rw_object, LOP_NEWORDER, file, line);
133
134 /*
135 * Note that we don't make any attempt to try to block read
136 * locks once a writer has blocked on the lock. The reason is
137 * that we currently allow for read locks to recurse and we

--- 55 unchanged lines hidden ---

193 }
194
195 /*
196 * Ok, it's still a write lock. If the RW_LOCK_READ_WAITERS
197 * flag is already set, then we can go ahead and block. If
198 * it is not set then try to set it. If we fail to set it
199 * drop the turnstile lock and restart the loop.
200 */
141 ("%s (%s): wlock already held @ %s:%d", __func__,
142 rw->rw_object.lo_name, file, line));
143 WITNESS_CHECKORDER(&rw->rw_object, LOP_NEWORDER, file, line);
144
145 /*
146 * Note that we don't make any attempt to try to block read
147 * locks once a writer has blocked on the lock. The reason is
148 * that we currently allow for read locks to recurse and we

--- 55 unchanged lines hidden (view full) ---

204 }
205
206 /*
207 * Ok, it's still a write lock. If the RW_LOCK_READ_WAITERS
208 * flag is already set, then we can go ahead and block. If
209 * it is not set then try to set it. If we fail to set it
210 * drop the turnstile lock and restart the loop.
211 */
Deleted (155162):
201         if (!(x & RW_LOCK_READ_WAITERS) &&
202             !atomic_cmpset_ptr(&rw->rw_lock, x,
203             x | RW_LOCK_READ_WAITERS)) {
204                 turnstile_release(&rw->rw_object);
205                 continue;
206         }
207         if (!(x & RW_LOCK_READ_WAITERS) &&
208             LOCK_LOG_TEST(&rw->rw_object, 0))
209                 CTR2(KTR_LOCK, "%s: %p set read waiters flag", __func__,
210                     rw);
Added (157826):
212         if (!(x & RW_LOCK_READ_WAITERS)) {
213                 if (!atomic_cmpset_ptr(&rw->rw_lock, x,
214                     x | RW_LOCK_READ_WAITERS)) {
215                         turnstile_release(&rw->rw_object);
216                         cpu_spinwait();
217                         continue;
218                 }
219                 if (LOCK_LOG_TEST(&rw->rw_object, 0))
220                         CTR2(KTR_LOCK, "%s: %p set read waiters flag",
221                             __func__, rw);
222         }
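The added block follows a set-flag-or-retry pattern: if RW_LOCK_READ_WAITERS is already set it is safe to block; otherwise try to set it with a compare-and-swap, and on a lost race release the turnstile chain lock, spin-wait briefly, and restart the loop (the write-waiters hunk further down is restructured the same way). A standalone C11 model of just that step, with hypothetical toy_* names and no turnstile machinery, is sketched below:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define TOY_READ_WAITERS        0x02UL          /* illustrative flag bit */

/*
 * Try to record "readers are waiting" against a snapshot of the lock
 * word.  Returns true when the flag is known to be set so the caller
 * may block; returns false when the word changed underneath us, in
 * which case the caller must re-read the lock and retry, much as
 * _rw_rlock() drops the turnstile lock, spin-waits, and restarts.
 */
static bool
toy_set_read_waiters(_Atomic uintptr_t *lockword, uintptr_t snapshot)
{

        if (snapshot & TOY_READ_WAITERS)
                return (true);          /* already set: safe to block */
        return (atomic_compare_exchange_strong(lockword, &snapshot,
            snapshot | TOY_READ_WAITERS));
}

int
main(void)
{
        _Atomic uintptr_t word = 0x10;  /* pretend write-locked value */
        uintptr_t snap = atomic_load(&word);

        while (!toy_set_read_waiters(&word, snap)) {
                /* kernel equivalent: turnstile_release(); cpu_spinwait(); */
                snap = atomic_load(&word);
        }
        printf("lock word now: %#lx\n", (unsigned long)atomic_load(&word));
        return (0);
}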
211
212 /*
213 * We were unable to acquire the lock and the read waiters
214 * flag is set, so we must block on the turnstile.
215 */
216 if (LOCK_LOG_TEST(&rw->rw_object, 0))
217 CTR2(KTR_LOCK, "%s: %p blocking on turnstile", __func__,
218 rw);

--- 178 unchanged lines hidden ---

397 continue;
398 }
399
400 /*
401 * If the RW_LOCK_WRITE_WAITERS flag isn't set, then try to
402 * set it. If we fail to set it, then loop back and try
403 * again.
404 */
Deleted (155162):
405         if (!(v & RW_LOCK_WRITE_WAITERS) &&
406             !atomic_cmpset_ptr(&rw->rw_lock, v,
407             v | RW_LOCK_WRITE_WAITERS)) {
408                 turnstile_release(&rw->rw_object);
409                 cpu_spinwait();
410                 continue;
411         }
412         if (!(v & RW_LOCK_WRITE_WAITERS) &&
413             LOCK_LOG_TEST(&rw->rw_object, 0))
414                 CTR2(KTR_LOCK, "%s: %p set write waiters flag",
415                     __func__, rw);
Added (157826):
417         if (!(v & RW_LOCK_WRITE_WAITERS)) {
418                 if (!atomic_cmpset_ptr(&rw->rw_lock, v,
419                     v | RW_LOCK_WRITE_WAITERS)) {
420                         turnstile_release(&rw->rw_object);
421                         cpu_spinwait();
422                         continue;
423                 }
424                 if (LOCK_LOG_TEST(&rw->rw_object, 0))
425                         CTR2(KTR_LOCK, "%s: %p set write waiters flag",
426                             __func__, rw);
427         }
416
417 /* XXX: Adaptively spin if current wlock owner on another CPU? */
418
419 /*
420 * We were unable to acquire the lock and the write waiters
421 * flag is set, so we must block on the turnstile.
422 */
423 if (LOCK_LOG_TEST(&rw->rw_object, 0))

--- 88 unchanged lines hidden ---

512#else
513 /*
514 * If some other thread has a write lock or we have one
515 * and are asserting a read lock, fail. Also, if no one
516 * has a lock at all, fail.
517 */
518 if (rw->rw_lock == RW_UNLOCKED ||
519 (!(rw->rw_lock & RW_LOCK_READ) && (what == RA_RLOCKED ||
520 rw_owner(rw) != curthread)))
532 rw_wowner(rw) != curthread)))
521 panic("Lock %s not %slocked @ %s:%d\n",
522 rw->rw_object.lo_name, (what == RA_RLOCKED) ?
523 "read " : "", file, line);
524#endif
525 break;
526 case RA_WLOCKED:
533 panic("Lock %s not %slocked @ %s:%d\n",
534 rw->rw_object.lo_name, (what == RA_RLOCKED) ?
535 "read " : "", file, line);
536#endif
537 break;
538 case RA_WLOCKED:
527 if (rw_owner(rw) != curthread)
539 if (rw_wowner(rw) != curthread)
528 panic("Lock %s not exclusively locked @ %s:%d\n",
529 rw->rw_object.lo_name, file, line);
530 break;
531 case RA_UNLOCKED:
532#ifdef WITNESS
533 witness_assert(&rw->rw_object, what, file, line);
534#else
535 /*
536 * If we hold a write lock fail. We can't reliably check
537 * to see if we hold a read lock or not.
538 */
540 panic("Lock %s not exclusively locked @ %s:%d\n",
541 rw->rw_object.lo_name, file, line);
542 break;
543 case RA_UNLOCKED:
544#ifdef WITNESS
545 witness_assert(&rw->rw_object, what, file, line);
546#else
547 /*
548 * If we hold a write lock fail. We can't reliably check
549 * to see if we hold a read lock or not.
550 */
539 if (rw_owner(rw) == curthread)
551 if (rw_wowner(rw) == curthread)
540 panic("Lock %s exclusively locked @ %s:%d\n",
541 rw->rw_object.lo_name, file, line);
542#endif
543 break;
544 default:
545 panic("Unknown rw lock assertion: %d @ %s:%d", what, file,
546 line);
547 }
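For context on the assertion cases above: with INVARIANTS enabled, consumers reach _rw_assert() through the rw_assert() macro from sys/rwlock.h (without INVARIANTS it compiles away, as the #ifndef at the top of the file shows). A brief consumer-side sketch; example_lock and the function names are hypothetical:

#include <sys/param.h>
#include <sys/lock.h>
#include <sys/rwlock.h>

/* Hypothetical lock owned by some other part of the code. */
extern struct rwlock example_lock;

static void
example_read_path(void)
{

        rw_assert(&example_lock, RA_RLOCKED);   /* must hold a read lock */
        /* ... read state protected by example_lock ... */
}

static void
example_write_path(void)
{

        rw_assert(&example_lock, RA_WLOCKED);   /* must hold the write lock */
        /* ... modify state protected by example_lock ... */
}

static void
example_may_sleep(void)
{

        rw_assert(&example_lock, RA_UNLOCKED);  /* must not hold the lock
                                                   (only the write side can be
                                                   checked without WITNESS) */
        /* ... safe to sleep here ... */
}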

--- 11 unchanged lines hidden ---

559
560 db_printf(" state: ");
561 if (rw->rw_lock == RW_UNLOCKED)
562 db_printf("UNLOCKED\n");
563 else if (rw->rw_lock & RW_LOCK_READ)
564 db_printf("RLOCK: %jd locks\n",
565 (intmax_t)(RW_READERS(rw->rw_lock)));
566 else {
552 panic("Lock %s exclusively locked @ %s:%d\n",
553 rw->rw_object.lo_name, file, line);
554#endif
555 break;
556 default:
557 panic("Unknown rw lock assertion: %d @ %s:%d", what, file,
558 line);
559 }

--- 11 unchanged lines hidden (view full) ---

571
572 db_printf(" state: ");
573 if (rw->rw_lock == RW_UNLOCKED)
574 db_printf("UNLOCKED\n");
575 else if (rw->rw_lock & RW_LOCK_READ)
576 db_printf("RLOCK: %jd locks\n",
577 (intmax_t)(RW_READERS(rw->rw_lock)));
578 else {
567 td = rw_owner(rw);
579 td = rw_wowner(rw);
568 db_printf("WLOCK: %p (tid %d, pid %d, \"%s\")\n", td,
569 td->td_tid, td->td_proc->p_pid, td->td_proc->p_comm);
570 }
571 db_printf(" waiters: ");
572 switch (rw->rw_lock & (RW_LOCK_READ_WAITERS | RW_LOCK_WRITE_WAITERS)) {
573 case RW_LOCK_READ_WAITERS:
574 db_printf("readers\n");
575 break;

--- 13 unchanged lines hidden ---
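For illustration only, output assembled from the db_printf() format strings visible above for a write-locked lock with waiting readers; the pointer, tid, pid, and process name are made up, and lines hidden in this diff may print additional detail:

 state: WLOCK: 0xc2f4b620 (tid 100057, pid 731, "syslogd")
 waiters: readers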
580 db_printf("WLOCK: %p (tid %d, pid %d, \"%s\")\n", td,
581 td->td_tid, td->td_proc->p_pid, td->td_proc->p_comm);
582 }
583 db_printf(" waiters: ");
584 switch (rw->rw_lock & (RW_LOCK_READ_WAITERS | RW_LOCK_WRITE_WAITERS)) {
585 case RW_LOCK_READ_WAITERS:
586 db_printf("readers\n");
587 break;

--- 13 unchanged lines hidden ---