--- kern_rwlock.c	(revision 171052)
+++ kern_rwlock.c	(revision 171516)
/*-
 * Copyright (c) 2006 John Baldwin <jhb@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright

--- 18 unchanged lines hidden ---

 * SUCH DAMAGE.
 */

/*
 * Machine independent bits of reader/writer lock implementation.
 */

#include <sys/cdefs.h>
-__FBSDID("$FreeBSD: head/sys/kern/kern_rwlock.c 171052 2007-06-26 21:31:56Z attilio $");
+__FBSDID("$FreeBSD: head/sys/kern/kern_rwlock.c 171516 2007-07-20 08:43:42Z attilio $");

#include "opt_ddb.h"
#include "opt_no_adaptive_rwlocks.h"

#include <sys/param.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/rwlock.h>
#include <sys/systm.h>
#include <sys/turnstile.h>
+#include <sys/lock_profile.h>
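
The single added include supplies the lock_profile_*() hooks that the rest of this diff sprinkles through the acquire and release paths. As a reading aid, here is a minimal sketch of the shape such a header typically takes; it is an assumption for illustration, not the actual contents of <sys/lock_profile.h>:

    /*
     * Illustrative only -- not the actual contents of <sys/lock_profile.h>.
     * The shape such headers typically take: with profiling disabled each
     * hook expands to nothing, so call sites cost nothing in normal builds.
     */
    #include <stdint.h>

    struct lock_object;

    #ifdef LOCK_PROFILING
    void	_lock_profile_obtain_lock_failed(struct lock_object *lo,
    	    int *contested, uint64_t *waittime);
    #define	lock_profile_obtain_lock_failed(lo, c, w)		\
    	_lock_profile_obtain_lock_failed((lo), (c), (w))
    #else
    #define	lock_profile_obtain_lock_failed(lo, c, w)	(void)0
    #endif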

#include <machine/cpu.h>

CTASSERT((RW_RECURSE & LO_CLASSFLAGS) == RW_RECURSE);

#if defined(SMP) && !defined(NO_ADAPTIVE_RWLOCKS)
#define	ADAPTIVE_RWLOCKS
#endif

--- 159 unchanged lines hidden ---


void
_rw_rlock(struct rwlock *rw, const char *file, int line)
{
	struct turnstile *ts;
#ifdef ADAPTIVE_RWLOCKS
	volatile struct thread *owner;
#endif
+#ifdef LOCK_PROFILING_SHARED
	uint64_t waittime = 0;
	int contested = 0;
+#endif
	uintptr_t x;

	KASSERT(rw->rw_lock != RW_DESTROYED,
	    ("rw_rlock() of destroyed rwlock @ %s:%d", file, line));
	KASSERT(rw_wowner(rw) != curthread,
	    ("%s (%s): wlock already held @ %s:%d", __func__,
	    rw->lock_object.lo_name, file, line));
	WITNESS_CHECKORDER(&rw->lock_object, LOP_NEWORDER, file, line);

--- 26 unchanged lines hidden ---

			/*
			 * The RW_LOCK_READ_WAITERS flag should only be set
			 * if another thread currently holds a write lock,
			 * and in that case RW_LOCK_READ should be clear.
			 */
			MPASS((x & RW_LOCK_READ_WAITERS) == 0);
			if (atomic_cmpset_acq_ptr(&rw->rw_lock, x,
			    x + RW_ONE_READER)) {
+#ifdef LOCK_PROFILING_SHARED
+				if (RW_READERS(x) == 0)
+					lock_profile_obtain_lock_success(
+					    &rw->lock_object, contested,
+					    waittime, file, line);
+#endif
				if (LOCK_LOG_TEST(&rw->lock_object, 0))
					CTR4(KTR_LOCK,
					    "%s: %p succeed %p -> %p", __func__,
					    rw, (void *)x,
					    (void *)(x + RW_ONE_READER));
-				if (RW_READERS(x) == 0)
-					lock_profile_obtain_lock_success(
-					    &rw->lock_object, contested, waittime,
-					    file, line);
				break;
			}
			cpu_spinwait();
			continue;
		}
-		lock_profile_obtain_lock_failed(&rw->lock_object, &contested,
-		    &waittime);
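
The shared-path success sample is now gated twice. It compiles only under LOCK_PROFILING_SHARED, which is also why the waittime/contested declarations above gained the same guard (they would otherwise be unused), and at run time it fires only when RW_READERS(x) == 0, i.e. for the reader that takes the lock out of the unowned state; readers stacking onto an already-read-locked lock record nothing. A standalone model of that gating, using a toy flag layout rather than the kernel's:

    /*
     * Standalone model (not kernel code) of the gated read acquire above.
     * TOY_* names and the flag layout are hypothetical stand-ins.
     */
    #include <stdatomic.h>
    #include <stdint.h>

    #define	TOY_ONE_READER	0x10
    #define	TOY_READERS(x)	((x) >> 4)

    _Atomic uintptr_t toy_rw;	/* low bits flags, high bits reader count */

    void
    toy_rlock(void)
    {
    	uintptr_t x;

    	for (;;) {
    		x = atomic_load(&toy_rw);
    		/* Try to bump the reader count, as the CAS above does. */
    		if (atomic_compare_exchange_strong(&toy_rw, &x,
    		    x + TOY_ONE_READER)) {
    #ifdef LOCK_PROFILING_SHARED
    			/* Only the reader that found no readers samples. */
    			if (TOY_READERS(x) == 0)
    				;	/* record the success sample here */
    #endif
    			break;
    		}
    		/* Lost the race; retry with the fresh value. */
    	}
    }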

		/*
		 * Okay, now it's the hard case.  Some other thread already
		 * has a write lock, so acquire the turnstile lock so we can
		 * begin the process of blocking.
		 */
		ts = turnstile_trywait(&rw->lock_object);

--- 34 unchanged lines hidden ---

		 * changes.
		 */
		owner = (struct thread *)RW_OWNER(x);
		if (TD_IS_RUNNING(owner)) {
			turnstile_cancel(ts);
			if (LOCK_LOG_TEST(&rw->lock_object, 0))
				CTR3(KTR_LOCK, "%s: spinning on %p held by %p",
				    __func__, rw, owner);
+#ifdef LOCK_PROFILING_SHARED
+			lock_profile_obtain_lock_failed(&rw->lock_object,
+			    &contested, &waittime);
+#endif
			while ((struct thread *)RW_OWNER(rw->rw_lock) == owner &&
			    TD_IS_RUNNING(owner))
				cpu_spinwait();
			continue;
		}
#endif

		/*
		 * We were unable to acquire the lock and the read waiters
		 * flag is set, so we must block on the turnstile.
		 */
		if (LOCK_LOG_TEST(&rw->lock_object, 0))
			CTR2(KTR_LOCK, "%s: %p blocking on turnstile", __func__,
			    rw);
+#ifdef LOCK_PROFILING_SHARED
+		lock_profile_obtain_lock_failed(&rw->lock_object, &contested,
+		    &waittime);
+#endif
		turnstile_wait(ts, rw_owner(rw), TS_SHARED_QUEUE);
		if (LOCK_LOG_TEST(&rw->lock_object, 0))
			CTR2(KTR_LOCK, "%s: %p resuming from turnstile",
			    __func__, rw);
	}

	/*
	 * TODO: acquire "owner of record" here.  Here be turnstile dragons

--- 42 unchanged lines hidden ---


		/*
		 * We should never have read waiters while at least one
		 * thread holds a read lock.  (See note above)
		 */
		KASSERT(!(x & RW_LOCK_READ_WAITERS),
		    ("%s: waiting readers", __func__));
+#ifdef LOCK_PROFILING_SHARED
+		lock_profile_release_lock(&rw->lock_object);
+#endif
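
The release side moves under the same option: lock_profile_release_lock() previously ran unconditionally at the bottom of _rw_runlock() (the deletion further down in this hunk), whereas now the shared-unlock path records a release only on kernels built with LOCK_PROFILING_SHARED.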

		/*
		 * If there aren't any waiters for a write lock, then try
		 * to drop it quickly.
		 */
		if (!(x & RW_LOCK_WRITE_WAITERS)) {

			/*

--- 60 unchanged lines hidden ---

		 */
		ts = turnstile_lookup(&rw->lock_object);
		MPASS(ts != NULL);
		turnstile_broadcast(ts, TS_EXCLUSIVE_QUEUE);
		turnstile_unpend(ts, TS_SHARED_LOCK);
		turnstile_chain_unlock(&rw->lock_object);
		break;
	}
-	lock_profile_release_lock(&rw->lock_object);
}

/*
 * This function is called when we are unable to obtain a write lock on the
 * first try.  This means that at least one other thread holds either a
 * read or write lock.
 */
void
_rw_wlock_hard(struct rwlock *rw, uintptr_t tid, const char *file, int line)
{
	struct turnstile *ts;
#ifdef ADAPTIVE_RWLOCKS
	volatile struct thread *owner;
#endif
+	uint64_t waittime = 0;
	uintptr_t v;
+	int contested = 0;

	if (rw_wlocked(rw)) {
		KASSERT(rw->lock_object.lo_flags & RW_RECURSE,
		    ("%s: recursing but non-recursive rw %s @ %s:%d\n",
		    __func__, rw->lock_object.lo_name, file, line));
		rw->rw_recurse++;
		atomic_set_ptr(&rw->rw_lock, RW_LOCK_RECURSED);
		if (LOCK_LOG_TEST(&rw->lock_object, 0))

--- 67 unchanged lines hidden ---

		 * running or the state of the lock changes.
		 */
		owner = (struct thread *)RW_OWNER(v);
		if (!(v & RW_LOCK_READ) && TD_IS_RUNNING(owner)) {
			turnstile_cancel(ts);
			if (LOCK_LOG_TEST(&rw->lock_object, 0))
				CTR3(KTR_LOCK, "%s: spinning on %p held by %p",
				    __func__, rw, owner);
+			lock_profile_obtain_lock_failed(&rw->lock_object,
+			    &contested, &waittime);
			while ((struct thread *)RW_OWNER(rw->rw_lock) == owner &&
			    TD_IS_RUNNING(owner))
				cpu_spinwait();
			continue;
		}
#endif

		/*
		 * We were unable to acquire the lock and the write waiters
		 * flag is set, so we must block on the turnstile.
		 */
		if (LOCK_LOG_TEST(&rw->lock_object, 0))
			CTR2(KTR_LOCK, "%s: %p blocking on turnstile", __func__,
			    rw);
+		lock_profile_obtain_lock_failed(&rw->lock_object, &contested,
+		    &waittime);
		turnstile_wait(ts, rw_owner(rw), TS_EXCLUSIVE_QUEUE);
		if (LOCK_LOG_TEST(&rw->lock_object, 0))
			CTR2(KTR_LOCK, "%s: %p resuming from turnstile",
			    __func__, rw);
	}
+	lock_profile_obtain_lock_success(&rw->lock_object, contested, waittime,
+	    file, line);
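
Note the asymmetry this leaves: _rw_wlock_hard() gains failed and success samples with no guard beyond whatever LOCK_PROFILING itself provides, and its waittime/contested locals are declared unconditionally, while every shared-path sample in this file additionally requires LOCK_PROFILING_SHARED. A hypothetical wrapper, not code from this file, that states the resulting policy in one place (it assumes this file's includes for struct lock_object and the profiling macro):

    /*
     * Hypothetical helper: shared-side samples need the extra
     * LOCK_PROFILING_SHARED opt-in; exclusive-side samples do not.
     */
    static __inline void
    rw_profile_lock_failed(struct lock_object *lo, int shared, int *contested,
        uint64_t *waittime)
    {
    #ifdef LOCK_PROFILING_SHARED
    	/* Shared opt-in present: sample shared and exclusive alike. */
    	lock_profile_obtain_lock_failed(lo, contested, waittime);
    	(void)shared;
    #else
    	/* Default: only exclusive acquires are sampled. */
    	if (!shared)
    		lock_profile_obtain_lock_failed(lo, contested, waittime);
    #endif
    }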
}

/*
 * This function is called if the first try at releasing a write lock failed.
 * This means that one of the 2 waiter bits must be set indicating that at
 * least one thread is waiting on this lock.
 */
void

--- 364 unchanged lines hidden ---
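
Taken together, the contested/waittime pair threaded through both hard paths follows a simple protocol: note contention and start the clock on the first failed attempt, then emit a single success sample carrying the total wait once the lock is finally taken. A self-contained userland toy of that flow; every name is a hypothetical stand-in, and the real lock_profile hooks keep this bookkeeping internal:

    /*
     * Userland toy of the failed/success sample flow in _rw_wlock_hard().
     * Build with: cc -Wall -pthread toy_rwprof.c
     */
    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <time.h>
    #include <unistd.h>

    static _Atomic int toy_lock;	/* 0 = free, 1 = write-owned */

    static uint64_t
    now_ns(void)
    {
    	struct timespec ts;

    	clock_gettime(CLOCK_MONOTONIC, &ts);
    	return ((uint64_t)ts.tv_sec * 1000000000 + (uint64_t)ts.tv_nsec);
    }

    static void
    toy_wlock(void)
    {
    	uint64_t waittime = 0;	/* start of the wait, once contested */
    	int contested = 0;
    	int expected;

    	for (;;) {
    		expected = 0;
    		if (atomic_compare_exchange_strong(&toy_lock, &expected, 1))
    			break;
    		/* "obtain_lock_failed": note contention, start the clock. */
    		if (!contested) {
    			contested = 1;
    			waittime = now_ns();
    		}
    	}
    	/* "obtain_lock_success": one sample with the total wait. */
    	if (contested)
    		printf("contested acquire: waited %llu ns\n",
    		    (unsigned long long)(now_ns() - waittime));
    }

    static void *
    holder(void *arg)
    {
    	(void)arg;
    	toy_wlock();			/* normally uncontested */
    	usleep(10000);			/* hold for ~10 ms */
    	atomic_store(&toy_lock, 0);	/* release */
    	return (NULL);
    }

    int
    main(void)
    {
    	pthread_t t;

    	if (pthread_create(&t, NULL, holder, NULL) != 0)
    		return (1);
    	usleep(1000);			/* let the holder win, usually */
    	toy_wlock();			/* should report a contested wait */
    	atomic_store(&toy_lock, 0);
    	pthread_join(t, NULL);
    	return (0);
    }

Run it and the main thread's acquire should report a wait close to the holder's ten-millisecond hold; the uncontested acquire prints nothing, mirroring how an uncontested kernel acquire contributes no contention sample.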