kern_rwlock.c revision 177843
/*-
 * Copyright (c) 2006 John Baldwin <jhb@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Machine independent bits of reader/writer lock implementation.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/kern/kern_rwlock.c 177843 2008-04-01 20:31:55Z attilio $");

#include "opt_ddb.h"
#include "opt_no_adaptive_rwlocks.h"

#include <sys/param.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/rwlock.h>
#include <sys/systm.h>
#include <sys/turnstile.h>

#include <machine/cpu.h>

CTASSERT((RW_RECURSE & LO_CLASSFLAGS) == RW_RECURSE);

#if defined(SMP) && !defined(NO_ADAPTIVE_RWLOCKS)
#define	ADAPTIVE_RWLOCKS
#endif

#ifdef DDB
#include <ddb/ddb.h>

static void	db_show_rwlock(struct lock_object *lock);
#endif
static void	assert_rw(struct lock_object *lock, int what);
static void	lock_rw(struct lock_object *lock, int how);
static int	unlock_rw(struct lock_object *lock);

struct lock_class lock_class_rw = {
	.lc_name = "rw",
	.lc_flags = LC_SLEEPLOCK | LC_RECURSABLE | LC_UPGRADABLE,
	.lc_assert = assert_rw,
#ifdef DDB
	.lc_ddb_show = db_show_rwlock,
#endif
	.lc_lock = lock_rw,
	.lc_unlock = unlock_rw,
};

/*
 * Return a pointer to the owning thread if the lock is write-locked or
 * NULL if the lock is unlocked or read-locked.
 */
#define	rw_wowner(rw)							\
	((rw)->rw_lock & RW_LOCK_READ ? NULL :				\
	    (struct thread *)RW_OWNER((rw)->rw_lock))

/*
 * Return whether a write owner is recursed.  Write ownership is not
 * assured here and should be checked beforehand.
 */
#define	rw_recursed(rw)		((rw)->rw_recurse != 0)

/*
 * Return true if curthread holds the lock.
 */
#define	rw_wlocked(rw)		(rw_wowner((rw)) == curthread)

/*
 * Return a pointer to the owning thread for this lock who should receive
 * any priority lent by threads that block on this lock.  Currently this
 * is identical to rw_wowner().
 */
#define	rw_owner(rw)		rw_wowner(rw)
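
/*
 * Illustrative note (not part of the original logic): because an unlocked
 * lock is encoded as a read lock with no waiters (see the comment in
 * _rw_rlock() below), rw_wowner() returns NULL for both unlocked and
 * read-locked locks, so a NULL result alone does not distinguish the two:
 *
 *	struct thread *td = rw_wowner(rw);
 *
 *	if (td != NULL)
 *		... td write-owns the lock ...
 *	else
 *		... the lock is unlocked or read-locked ...
 */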

#ifndef INVARIANTS
#define	_rw_assert(rw, what, file, line)
#endif

void
assert_rw(struct lock_object *lock, int what)
{

	rw_assert((struct rwlock *)lock, what);
}

void
lock_rw(struct lock_object *lock, int how)
{
	struct rwlock *rw;

	rw = (struct rwlock *)lock;
	if (how)
		rw_wlock(rw);
	else
		rw_rlock(rw);
}

int
unlock_rw(struct lock_object *lock)
{
	struct rwlock *rw;

	rw = (struct rwlock *)lock;
	rw_assert(rw, RA_LOCKED | LA_NOTRECURSED);
	if (rw->rw_lock & RW_LOCK_READ) {
		rw_runlock(rw);
		return (0);
	} else {
		rw_wunlock(rw);
		return (1);
	}
}

void
rw_init_flags(struct rwlock *rw, const char *name, int opts)
{
	int flags;

	MPASS((opts & ~(RW_DUPOK | RW_NOPROFILE | RW_NOWITNESS | RW_QUIET |
	    RW_RECURSE)) == 0);

	flags = LO_UPGRADABLE | LO_RECURSABLE;
	if (opts & RW_DUPOK)
		flags |= LO_DUPOK;
	if (opts & RW_NOPROFILE)
		flags |= LO_NOPROFILE;
	if (!(opts & RW_NOWITNESS))
		flags |= LO_WITNESS;
	if (opts & RW_QUIET)
		flags |= LO_QUIET;
	flags |= opts & RW_RECURSE;

	rw->rw_lock = RW_UNLOCKED;
	rw->rw_recurse = 0;
	lock_init(&rw->lock_object, &lock_class_rw, name, NULL, flags);
}

void
rw_destroy(struct rwlock *rw)
{

	KASSERT(rw->rw_lock == RW_UNLOCKED, ("rw lock not unlocked"));
	KASSERT(rw->rw_recurse == 0, ("rw lock still recursed"));
	rw->rw_lock = RW_DESTROYED;
	lock_destroy(&rw->lock_object);
}

void
rw_sysinit(void *arg)
{
	struct rw_args *args = arg;

	rw_init(args->ra_rw, args->ra_desc);
}

int
rw_wowned(struct rwlock *rw)
{

	return (rw_wowner(rw) == curthread);
}

void
_rw_wlock(struct rwlock *rw, const char *file, int line)
{

	MPASS(curthread != NULL);
	KASSERT(rw->rw_lock != RW_DESTROYED,
	    ("rw_wlock() of destroyed rwlock @ %s:%d", file, line));
	WITNESS_CHECKORDER(&rw->lock_object, LOP_NEWORDER | LOP_EXCLUSIVE, file,
	    line);
	__rw_wlock(rw, curthread, file, line);
	LOCK_LOG_LOCK("WLOCK", &rw->lock_object, 0, rw->rw_recurse, file, line);
	WITNESS_LOCK(&rw->lock_object, LOP_EXCLUSIVE, file, line);
	curthread->td_locks++;
}
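
/*
 * A minimal usage sketch of the consumer-facing API (illustrative only;
 * "foo_lock" is a hypothetical name, and the rw_*lock() macros from
 * <sys/rwlock.h> expand to the _rw_*() functions in this file):
 *
 *	static struct rwlock foo_lock;
 *
 *	rw_init(&foo_lock, "foo");
 *
 *	rw_rlock(&foo_lock);
 *	... read shared data ...
 *	rw_runlock(&foo_lock);
 *
 *	rw_wlock(&foo_lock);
 *	... modify shared data ...
 *	rw_wunlock(&foo_lock);
 *
 *	rw_destroy(&foo_lock);
 */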

int
_rw_try_wlock(struct rwlock *rw, const char *file, int line)
{
	int rval;

	KASSERT(rw->rw_lock != RW_DESTROYED,
	    ("rw_try_wlock() of destroyed rwlock @ %s:%d", file, line));

	if (rw_wlocked(rw) && (rw->lock_object.lo_flags & RW_RECURSE) != 0) {
		rw->rw_recurse++;
		rval = 1;
	} else
		rval = atomic_cmpset_acq_ptr(&rw->rw_lock, RW_UNLOCKED,
		    (uintptr_t)curthread);

	LOCK_LOG_TRY("WLOCK", &rw->lock_object, 0, rval, file, line);
	if (rval) {
		WITNESS_LOCK(&rw->lock_object, LOP_EXCLUSIVE | LOP_TRYLOCK,
		    file, line);
		curthread->td_locks++;
	}
	return (rval);
}

void
_rw_wunlock(struct rwlock *rw, const char *file, int line)
{

	MPASS(curthread != NULL);
	KASSERT(rw->rw_lock != RW_DESTROYED,
	    ("rw_wunlock() of destroyed rwlock @ %s:%d", file, line));
	_rw_assert(rw, RA_WLOCKED, file, line);
	curthread->td_locks--;
	WITNESS_UNLOCK(&rw->lock_object, LOP_EXCLUSIVE, file, line);
	LOCK_LOG_LOCK("WUNLOCK", &rw->lock_object, 0, rw->rw_recurse, file,
	    line);
	if (!rw_recursed(rw))
		lock_profile_release_lock(&rw->lock_object);
	__rw_wunlock(rw, curthread, file, line);
}

/*
 * Determine whether a new reader can acquire the lock.  Succeed if the
 * caller already holds a read lock and the lock is read-locked, to
 * prevent deadlock from reader recursion.  Also succeed if the lock is
 * unlocked and has no write waiters or spinners.  Failing otherwise
 * gives writers priority over readers.
 */
#define	RW_CAN_READ(_rw)						\
    ((curthread->td_rw_rlocks && (_rw) & RW_LOCK_READ) || ((_rw) &	\
    (RW_LOCK_READ | RW_LOCK_WRITE_WAITERS | RW_LOCK_WRITE_SPINNER)) ==	\
    RW_LOCK_READ)
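
/*
 * Worked example of the predicate above (illustrative): an unlocked lock
 * is stored as RW_LOCK_READ with no waiter or spinner bits set, so the
 * second clause succeeds and a new reader may enter.  Once a writer sets
 * RW_LOCK_WRITE_WAITERS (or RW_LOCK_WRITE_SPINNER), the second clause
 * fails and only threads that already hold read locks
 * (curthread->td_rw_rlocks != 0) are admitted by the first clause, which
 * is what keeps recursive readers from deadlocking against a waiting
 * writer.
 */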

void
_rw_rlock(struct rwlock *rw, const char *file, int line)
{
	struct turnstile *ts;
#ifdef ADAPTIVE_RWLOCKS
	volatile struct thread *owner;
#endif
	uint64_t waittime = 0;
	int contested = 0;
	uintptr_t v;

	KASSERT(rw->rw_lock != RW_DESTROYED,
	    ("rw_rlock() of destroyed rwlock @ %s:%d", file, line));
	KASSERT(rw_wowner(rw) != curthread,
	    ("%s (%s): wlock already held @ %s:%d", __func__,
	    rw->lock_object.lo_name, file, line));
	WITNESS_CHECKORDER(&rw->lock_object, LOP_NEWORDER, file, line);

	for (;;) {
		/*
		 * Handle the easy case.  If no other thread has a write
		 * lock, then try to bump up the count of read locks.  Note
		 * that we have to preserve the current state of the
		 * RW_LOCK_WRITE_WAITERS flag.  If we fail to acquire a
		 * read lock, then rw_lock must have changed, so restart
		 * the loop.  Note that this handles the case of a
		 * completely unlocked rwlock since such a lock is encoded
		 * as a read lock with no waiters.
		 */
		v = rw->rw_lock;
		if (RW_CAN_READ(v)) {
			/*
			 * The RW_LOCK_READ_WAITERS flag should only be set
			 * if the lock has been unlocked and write waiters
			 * were present.
			 */
			if (atomic_cmpset_acq_ptr(&rw->rw_lock, v,
			    v + RW_ONE_READER)) {
				if (LOCK_LOG_TEST(&rw->lock_object, 0))
					CTR4(KTR_LOCK,
					    "%s: %p succeed %p -> %p", __func__,
					    rw, (void *)v,
					    (void *)(v + RW_ONE_READER));
				break;
			}
			cpu_spinwait();
			continue;
		}
		lock_profile_obtain_lock_failed(&rw->lock_object,
		    &contested, &waittime);

#ifdef ADAPTIVE_RWLOCKS
		/*
		 * If the owner is running on another CPU, spin until
		 * the owner stops running or the state of the lock
		 * changes.
		 */
		if ((v & RW_LOCK_READ) == 0) {
			owner = (struct thread *)RW_OWNER(v);
			if (TD_IS_RUNNING(owner)) {
				if (LOCK_LOG_TEST(&rw->lock_object, 0))
					CTR3(KTR_LOCK,
					    "%s: spinning on %p held by %p",
					    __func__, rw, owner);
				while ((struct thread*)RW_OWNER(rw->rw_lock) ==
				    owner && TD_IS_RUNNING(owner))
					cpu_spinwait();
				continue;
			}
		}
#endif

		/*
		 * Okay, now it's the hard case.  Some other thread already
		 * has a write lock or there are write waiters present, so
		 * acquire the turnstile lock in order to begin the process
		 * of blocking.
		 */
		ts = turnstile_trywait(&rw->lock_object);

		/*
		 * The lock might have been released while we spun, so
		 * recheck its state and restart the loop if needed.
		 */
		v = rw->rw_lock;
		if (RW_CAN_READ(v)) {
			turnstile_cancel(ts);
			cpu_spinwait();
			continue;
		}

#ifdef ADAPTIVE_RWLOCKS
		/*
		 * If the current owner of the lock is executing on another
		 * CPU, quit the hard path and try to spin.
		 */
		if ((v & RW_LOCK_READ) == 0) {
			owner = (struct thread *)RW_OWNER(v);
			if (TD_IS_RUNNING(owner)) {
				turnstile_cancel(ts);
				cpu_spinwait();
				continue;
			}
		}
#endif

		/*
		 * The lock is held in write mode or it already has waiters.
		 */
		MPASS(!RW_CAN_READ(v));

		/*
		 * If the RW_LOCK_READ_WAITERS flag is already set, then
		 * we can go ahead and block.  If it is not set then try
		 * to set it.  If we fail to set it, drop the turnstile
		 * lock and restart the loop.
		 */
		if (!(v & RW_LOCK_READ_WAITERS)) {
			if (!atomic_cmpset_ptr(&rw->rw_lock, v,
			    v | RW_LOCK_READ_WAITERS)) {
				turnstile_cancel(ts);
				cpu_spinwait();
				continue;
			}
			if (LOCK_LOG_TEST(&rw->lock_object, 0))
				CTR2(KTR_LOCK, "%s: %p set read waiters flag",
				    __func__, rw);
		}

		/*
		 * We were unable to acquire the lock and the read waiters
		 * flag is set, so we must block on the turnstile.
		 */
		if (LOCK_LOG_TEST(&rw->lock_object, 0))
			CTR2(KTR_LOCK, "%s: %p blocking on turnstile", __func__,
			    rw);
		turnstile_wait(ts, rw_owner(rw), TS_SHARED_QUEUE);
		if (LOCK_LOG_TEST(&rw->lock_object, 0))
			CTR2(KTR_LOCK, "%s: %p resuming from turnstile",
			    __func__, rw);
	}

	/*
	 * TODO: acquire "owner of record" here.  Here be turnstile dragons
	 * however.  turnstiles don't like owners changing between calls to
	 * turnstile_wait() currently.
	 */
	lock_profile_obtain_lock_success(&rw->lock_object, contested,
	    waittime, file, line);
	LOCK_LOG_LOCK("RLOCK", &rw->lock_object, 0, 0, file, line);
	WITNESS_LOCK(&rw->lock_object, 0, file, line);
	curthread->td_locks++;
	curthread->td_rw_rlocks++;
}
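
/*
 * The fast path of _rw_rlock() above is a classic compare-and-swap retry
 * loop.  A standalone sketch of the same pattern (illustrative only;
 * "can_proceed" stands in for RW_CAN_READ() and "p" for &rw->rw_lock):
 *
 *	for (;;) {
 *		v = *p;
 *		if (!can_proceed(v))
 *			break;		fall through to the hard path
 *		if (atomic_cmpset_acq_ptr(p, v, v + 1))
 *			break;		acquired one reference
 *		cpu_spinwait();		lost a race with another CPU; retry
 *	}
 */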

int
_rw_try_rlock(struct rwlock *rw, const char *file, int line)
{
	uintptr_t x;

	for (;;) {
		x = rw->rw_lock;
		KASSERT(rw->rw_lock != RW_DESTROYED,
		    ("rw_try_rlock() of destroyed rwlock @ %s:%d", file, line));
		if (!(x & RW_LOCK_READ))
			break;
		if (atomic_cmpset_acq_ptr(&rw->rw_lock, x, x + RW_ONE_READER)) {
			LOCK_LOG_TRY("RLOCK", &rw->lock_object, 0, 1, file,
			    line);
			WITNESS_LOCK(&rw->lock_object, LOP_TRYLOCK, file, line);
			curthread->td_locks++;
			curthread->td_rw_rlocks++;
			return (1);
		}
	}

	LOCK_LOG_TRY("RLOCK", &rw->lock_object, 0, 0, file, line);
	return (0);
}
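
/*
 * Sketch of try-lock usage (illustrative; "foo_lock" is hypothetical).
 * The try variants never sleep, which makes them suitable in contexts
 * where blocking is not allowed:
 *
 *	if (rw_try_rlock(&foo_lock)) {
 *		... read shared data ...
 *		rw_runlock(&foo_lock);
 *	} else {
 *		... back off without blocking ...
 *	}
 */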

void
_rw_runlock(struct rwlock *rw, const char *file, int line)
{
	struct turnstile *ts;
	uintptr_t x, v, queue;

	KASSERT(rw->rw_lock != RW_DESTROYED,
	    ("rw_runlock() of destroyed rwlock @ %s:%d", file, line));
	_rw_assert(rw, RA_RLOCKED, file, line);
	curthread->td_locks--;
	curthread->td_rw_rlocks--;
	WITNESS_UNLOCK(&rw->lock_object, 0, file, line);
	LOCK_LOG_LOCK("RUNLOCK", &rw->lock_object, 0, 0, file, line);

	/* TODO: drop "owner of record" here. */

	for (;;) {
		/*
		 * See if there is more than one read lock held.  If so,
		 * just drop one and return.
		 */
		x = rw->rw_lock;
		if (RW_READERS(x) > 1) {
			if (atomic_cmpset_ptr(&rw->rw_lock, x,
			    x - RW_ONE_READER)) {
				if (LOCK_LOG_TEST(&rw->lock_object, 0))
					CTR4(KTR_LOCK,
					    "%s: %p succeeded %p -> %p",
					    __func__, rw, (void *)x,
					    (void *)(x - RW_ONE_READER));
				break;
			}
			continue;
		}
		/*
		 * If there aren't any waiters for a write lock, then try
		 * to drop it quickly.
		 */
		if (!(x & RW_LOCK_WAITERS)) {
			MPASS((x & ~RW_LOCK_WRITE_SPINNER) ==
			    RW_READERS_LOCK(1));
			if (atomic_cmpset_ptr(&rw->rw_lock, x, RW_UNLOCKED)) {
				if (LOCK_LOG_TEST(&rw->lock_object, 0))
					CTR2(KTR_LOCK, "%s: %p last succeeded",
					    __func__, rw);
				break;
			}
			continue;
		}
		/*
		 * Ok, we know we have waiters and we think we are the
		 * last reader, so grab the turnstile lock.
		 */
		turnstile_chain_lock(&rw->lock_object);
		v = rw->rw_lock & (RW_LOCK_WAITERS | RW_LOCK_WRITE_SPINNER);
		MPASS(v & RW_LOCK_WAITERS);

		/*
		 * Try to drop our lock leaving the lock in an unlocked
		 * state.
		 *
		 * If you wanted to do explicit lock handoff you'd have to
		 * do it here.  You'd also want to use turnstile_signal()
		 * and you'd have to handle the race where a higher
		 * priority thread blocks on the write lock before the
		 * thread you wakeup actually runs and have the new thread
		 * "steal" the lock.  For now it's a lot simpler to just
		 * wakeup all of the waiters.
		 *
		 * As above, if we fail, then another thread might have
		 * acquired a read lock, so drop the turnstile lock and
		 * restart.
		 */
		x = RW_UNLOCKED;
		if (v & RW_LOCK_WRITE_WAITERS) {
			queue = TS_EXCLUSIVE_QUEUE;
			x |= (v & RW_LOCK_READ_WAITERS);
		} else
			queue = TS_SHARED_QUEUE;
		if (!atomic_cmpset_ptr(&rw->rw_lock, RW_READERS_LOCK(1) | v,
		    x)) {
			turnstile_chain_unlock(&rw->lock_object);
			continue;
		}
		if (LOCK_LOG_TEST(&rw->lock_object, 0))
			CTR2(KTR_LOCK, "%s: %p last succeeded with waiters",
			    __func__, rw);

		/*
		 * Ok.  The lock is released and all that's left is to
		 * wake up the waiters.  Note that the lock might not be
		 * free anymore, but in that case the writers will just
		 * block again if they run before the new lock holder(s)
		 * release the lock.
		 */
		ts = turnstile_lookup(&rw->lock_object);
		MPASS(ts != NULL);
		turnstile_broadcast(ts, queue);
		turnstile_unpend(ts, TS_SHARED_LOCK);
		turnstile_chain_unlock(&rw->lock_object);
		break;
	}
	lock_profile_release_lock(&rw->lock_object);
}
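
/*
 * Descriptive summary of _rw_runlock() above: there are three exits from
 * the loop.  With more than one reader, one reference is dropped
 * atomically; as the last reader with no waiters, the word is reset to
 * RW_UNLOCKED; as the last reader with waiters queued, the turnstile is
 * locked and one queue of waiters is awakened, preferring the exclusive
 * queue when write waiters exist.
 */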

/*
 * This function is called when we are unable to obtain a write lock on the
 * first try.  This means that at least one other thread holds either a
 * read or write lock.
 */
void
_rw_wlock_hard(struct rwlock *rw, uintptr_t tid, const char *file, int line)
{
	struct turnstile *ts;
#ifdef ADAPTIVE_RWLOCKS
	volatile struct thread *owner;
	int spintries = 0;
	int i;
#endif
	uint64_t waittime = 0;
	uintptr_t v, x;
	int contested = 0;

	if (rw_wlocked(rw)) {
		KASSERT(rw->lock_object.lo_flags & RW_RECURSE,
		    ("%s: recursing but non-recursive rw %s @ %s:%d\n",
		    __func__, rw->lock_object.lo_name, file, line));
		rw->rw_recurse++;
		if (LOCK_LOG_TEST(&rw->lock_object, 0))
			CTR2(KTR_LOCK, "%s: %p recursing", __func__, rw);
		return;
	}

	if (LOCK_LOG_TEST(&rw->lock_object, 0))
		CTR5(KTR_LOCK, "%s: %s contested (lock=%p) at %s:%d", __func__,
		    rw->lock_object.lo_name, (void *)rw->rw_lock, file, line);

	while (!_rw_write_lock(rw, tid)) {
		lock_profile_obtain_lock_failed(&rw->lock_object,
		    &contested, &waittime);
#ifdef ADAPTIVE_RWLOCKS
		/*
		 * If the lock is write locked and the owner is
		 * running on another CPU, spin until the owner stops
		 * running or the state of the lock changes.
		 */
		v = rw->rw_lock;
		owner = (struct thread *)RW_OWNER(v);
		if (!(v & RW_LOCK_READ) && TD_IS_RUNNING(owner)) {
			if (LOCK_LOG_TEST(&rw->lock_object, 0))
				CTR3(KTR_LOCK, "%s: spinning on %p held by %p",
				    __func__, rw, owner);
			while ((struct thread*)RW_OWNER(rw->rw_lock) == owner &&
			    TD_IS_RUNNING(owner))
				cpu_spinwait();
			continue;
		}
		if ((v & RW_LOCK_READ) && RW_READERS(v) && spintries < 100) {
			if (!(v & RW_LOCK_WRITE_SPINNER)) {
				if (!atomic_cmpset_ptr(&rw->rw_lock, v,
				    v | RW_LOCK_WRITE_SPINNER)) {
					cpu_spinwait();
					continue;
				}
			}
			spintries++;
			for (i = 100000; i > 0; i--) {
				if ((rw->rw_lock & RW_LOCK_WRITE_SPINNER) == 0)
					break;
				cpu_spinwait();
			}
			if (i)
				continue;
		}
#endif
		ts = turnstile_trywait(&rw->lock_object);
		v = rw->rw_lock;

#ifdef ADAPTIVE_RWLOCKS
		/*
		 * If the current owner of the lock is executing on another
		 * CPU, quit the hard path and try to spin.
		 */
		if (!(v & RW_LOCK_READ)) {
			owner = (struct thread *)RW_OWNER(v);
			if (TD_IS_RUNNING(owner)) {
				turnstile_cancel(ts);
				cpu_spinwait();
				continue;
			}
		}
#endif
		/*
		 * If the lock was released while waiting for the turnstile
		 * chain lock, retry.
		 */
		x = v & (RW_LOCK_WAITERS | RW_LOCK_WRITE_SPINNER);
		if ((v & ~x) == RW_UNLOCKED) {
			x &= ~RW_LOCK_WRITE_SPINNER;
			if (atomic_cmpset_acq_ptr(&rw->rw_lock, v, tid | x)) {
				if (x)
					turnstile_claim(ts);
				else
					turnstile_cancel(ts);
				break;
			}
			turnstile_cancel(ts);
			cpu_spinwait();
			continue;
		}
		/*
		 * If the RW_LOCK_WRITE_WAITERS flag isn't set, then try to
		 * set it.  If we fail to set it, then loop back and try
		 * again.
		 */
		if (!(v & RW_LOCK_WRITE_WAITERS)) {
			if (!atomic_cmpset_ptr(&rw->rw_lock, v,
			    v | RW_LOCK_WRITE_WAITERS)) {
				turnstile_cancel(ts);
				cpu_spinwait();
				continue;
			}
			if (LOCK_LOG_TEST(&rw->lock_object, 0))
				CTR2(KTR_LOCK, "%s: %p set write waiters flag",
				    __func__, rw);
		}
		/*
		 * We were unable to acquire the lock and the write waiters
		 * flag is set, so we must block on the turnstile.
		 */
		if (LOCK_LOG_TEST(&rw->lock_object, 0))
			CTR2(KTR_LOCK, "%s: %p blocking on turnstile", __func__,
			    rw);
		turnstile_wait(ts, rw_owner(rw), TS_EXCLUSIVE_QUEUE);
		if (LOCK_LOG_TEST(&rw->lock_object, 0))
			CTR2(KTR_LOCK, "%s: %p resuming from turnstile",
			    __func__, rw);
#ifdef ADAPTIVE_RWLOCKS
		spintries = 0;
#endif
	}
	lock_profile_obtain_lock_success(&rw->lock_object, contested, waittime,
	    file, line);
}
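
/*
 * Descriptive note on the adaptive section above: the write-spinner path
 * is bounded twice, by the "spintries < 100" attempt counter and by the
 * inner 100000-iteration watch for the RW_LOCK_WRITE_SPINNER bit to
 * clear, so a writer cannot spin indefinitely against a steady stream of
 * readers before falling back to the turnstile.
 */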

/*
 * This function is called if the first try at releasing a write lock failed.
 * This means that one of the two waiter bits must be set, indicating that at
 * least one thread is waiting on this lock.
 */
void
_rw_wunlock_hard(struct rwlock *rw, uintptr_t tid, const char *file, int line)
{
	struct turnstile *ts;
	uintptr_t v;
	int queue;

	if (rw_wlocked(rw) && rw_recursed(rw)) {
		rw->rw_recurse--;
		if (LOCK_LOG_TEST(&rw->lock_object, 0))
			CTR2(KTR_LOCK, "%s: %p unrecursing", __func__, rw);
		return;
	}
	v = rw->rw_lock;

	KASSERT(rw->rw_lock & (RW_LOCK_READ_WAITERS | RW_LOCK_WRITE_WAITERS),
	    ("%s: neither of the waiter flags are set", __func__));

	if (LOCK_LOG_TEST(&rw->lock_object, 0))
		CTR2(KTR_LOCK, "%s: %p contested", __func__, rw);

	turnstile_chain_lock(&rw->lock_object);
	ts = turnstile_lookup(&rw->lock_object);

	MPASS(ts != NULL);

	/*
	 * Use the same algo as sx locks for now: prefer waking up the
	 * writers if any are waiting, and the readers otherwise.  This
	 * is probably not ideal.
	 *
	 * 'v' is the value we are going to write back to rw_lock.  If we
	 * have waiters on both queues, we need to preserve the state of
	 * the waiter flag for the queue we don't wake up.  For now this is
	 * hardcoded for the algorithm mentioned above.
	 *
	 * In the case of both readers and writers waiting we wake up the
	 * writers but leave the RW_LOCK_READ_WAITERS flag set; when a
	 * woken writer later releases the lock it will in turn wake up
	 * the readers.  There is probably a potential priority inversion
	 * in there that could be worked around either by waking both
	 * queues of waiters or doing some complicated lock handoff
	 * gymnastics.
	 */
	v = RW_UNLOCKED;
	if (rw->rw_lock & RW_LOCK_WRITE_WAITERS) {
		queue = TS_EXCLUSIVE_QUEUE;
		v |= (rw->rw_lock & RW_LOCK_READ_WAITERS);
	} else
		queue = TS_SHARED_QUEUE;

	/* Wake up all waiters for the specific queue. */
	if (LOCK_LOG_TEST(&rw->lock_object, 0))
		CTR3(KTR_LOCK, "%s: %p waking up %s waiters", __func__, rw,
		    queue == TS_SHARED_QUEUE ? "read" : "write");
	turnstile_broadcast(ts, queue);
	atomic_store_rel_ptr(&rw->rw_lock, v);
	turnstile_unpend(ts, TS_EXCLUSIVE_LOCK);
	turnstile_chain_unlock(&rw->lock_object);
}

/*
 * Attempt to do a non-blocking upgrade from a read lock to a write
 * lock.  This will only succeed if this thread holds a single read
 * lock.  Returns true if the upgrade succeeded and false otherwise.
 */
int
_rw_try_upgrade(struct rwlock *rw, const char *file, int line)
{
	uintptr_t v, x, tid;
	struct turnstile *ts;
	int success;

	KASSERT(rw->rw_lock != RW_DESTROYED,
	    ("rw_try_upgrade() of destroyed rwlock @ %s:%d", file, line));
	_rw_assert(rw, RA_RLOCKED, file, line);

	/*
	 * Attempt to switch from one reader to a writer.  If there
	 * are any write waiters, then we will have to lock the
	 * turnstile first to prevent races with another writer
	 * calling turnstile_wait() before we have claimed this
	 * turnstile.  So, do the simple case of no waiters first.
	 */
	tid = (uintptr_t)curthread;
	success = 0;
	for (;;) {
		v = rw->rw_lock;
		if (RW_READERS(v) > 1)
			break;
		if (!(v & RW_LOCK_WAITERS)) {
			success = atomic_cmpset_ptr(&rw->rw_lock, v, tid);
			if (!success)
				continue;
			break;
		}

		/*
		 * Ok, we think we have waiters, so lock the turnstile.
		 */
		ts = turnstile_trywait(&rw->lock_object);
		v = rw->rw_lock;
		if (RW_READERS(v) > 1) {
			turnstile_cancel(ts);
			break;
		}
		/*
		 * Try to switch from one reader to a writer again.  This time
		 * we honor the current state of the waiters flags.
		 * If we obtain the lock with the flags set, then claim
		 * ownership of the turnstile.
		 */
		x = rw->rw_lock & RW_LOCK_WAITERS;
		success = atomic_cmpset_ptr(&rw->rw_lock, v, tid | x);
		if (success) {
			if (x)
				turnstile_claim(ts);
			else
				turnstile_cancel(ts);
			break;
		}
		turnstile_cancel(ts);
	}
	LOCK_LOG_TRY("WUPGRADE", &rw->lock_object, 0, success, file, line);
	if (success) {
		curthread->td_rw_rlocks--;
		WITNESS_UPGRADE(&rw->lock_object, LOP_EXCLUSIVE | LOP_TRYLOCK,
		    file, line);
	}
	return (success);
}
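
/*
 * Sketch of upgrade usage (illustrative; names are hypothetical).  Since
 * the upgrade can fail, callers typically retry the whole operation with
 * a write lock when it does:
 *
 *	rw_rlock(&foo_lock);
 *	if (needs_update && !rw_try_upgrade(&foo_lock)) {
 *		rw_runlock(&foo_lock);
 *		rw_wlock(&foo_lock);
 *		... re-validate state, it may have changed ...
 *	}
 */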

/*
 * Downgrade a write lock into a single read lock.
 */
void
_rw_downgrade(struct rwlock *rw, const char *file, int line)
{
	struct turnstile *ts;
	uintptr_t tid, v;
	int rwait, wwait;

	KASSERT(rw->rw_lock != RW_DESTROYED,
	    ("rw_downgrade() of destroyed rwlock @ %s:%d", file, line));
	_rw_assert(rw, RA_WLOCKED | RA_NOTRECURSED, file, line);
#ifndef INVARIANTS
	if (rw_recursed(rw))
		panic("downgrade of a recursed lock");
#endif

	WITNESS_DOWNGRADE(&rw->lock_object, 0, file, line);

	/*
	 * Convert from a writer to a single reader.  First we handle
	 * the easy case with no waiters.  If there are any waiters, we
	 * lock the turnstile and "disown" the lock.
	 */
	tid = (uintptr_t)curthread;
	if (atomic_cmpset_rel_ptr(&rw->rw_lock, tid, RW_READERS_LOCK(1)))
		goto out;

	/*
	 * Ok, we think we have waiters, so lock the turnstile so we can
	 * read the waiter flags without any races.
	 */
	turnstile_chain_lock(&rw->lock_object);
	v = rw->rw_lock & RW_LOCK_WAITERS;
	rwait = v & RW_LOCK_READ_WAITERS;
	wwait = v & RW_LOCK_WRITE_WAITERS;
	MPASS(rwait | wwait);

	/*
	 * Downgrade from a write lock while preserving waiters flag
	 * and give up ownership of the turnstile.
	 */
	ts = turnstile_lookup(&rw->lock_object);
	MPASS(ts != NULL);
	if (!wwait)
		v &= ~RW_LOCK_READ_WAITERS;
	atomic_store_rel_ptr(&rw->rw_lock, RW_READERS_LOCK(1) | v);
	/*
	 * Wake other readers if there are no writers pending.  Otherwise they
	 * won't be able to acquire the lock anyway.
	 */
	if (rwait && !wwait) {
		turnstile_broadcast(ts, TS_SHARED_QUEUE);
		turnstile_unpend(ts, TS_EXCLUSIVE_LOCK);
	} else
		turnstile_disown(ts);
	turnstile_chain_unlock(&rw->lock_object);
out:
	curthread->td_rw_rlocks++;
	LOCK_LOG_LOCK("WDOWNGRADE", &rw->lock_object, 0, 0, file, line);
}
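
/*
 * Sketch of downgrade usage (illustrative; names are hypothetical).
 * A writer that has finished modifying state but still needs a stable
 * view can demote itself without ever releasing the lock:
 *
 *	rw_wlock(&foo_lock);
 *	... modify shared data ...
 *	rw_downgrade(&foo_lock);
 *	... continue reading the data just written ...
 *	rw_runlock(&foo_lock);
 */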

#ifdef INVARIANT_SUPPORT
#ifndef INVARIANTS
#undef _rw_assert
#endif

/*
 * In the non-WITNESS case, rw_assert() can only detect that at least
 * *some* thread owns an rlock, but it cannot guarantee that *this*
 * thread owns an rlock.
 */
void
_rw_assert(struct rwlock *rw, int what, const char *file, int line)
{

	if (panicstr != NULL)
		return;
	switch (what) {
	case RA_LOCKED:
	case RA_LOCKED | RA_RECURSED:
	case RA_LOCKED | RA_NOTRECURSED:
	case RA_RLOCKED:
#ifdef WITNESS
		witness_assert(&rw->lock_object, what, file, line);
#else
		/*
		 * If some other thread has a write lock or we have one
		 * and are asserting a read lock, fail.  Also, if no one
		 * has a lock at all, fail.
		 */
		if (rw->rw_lock == RW_UNLOCKED ||
		    (!(rw->rw_lock & RW_LOCK_READ) && (what == RA_RLOCKED ||
		    rw_wowner(rw) != curthread)))
			panic("Lock %s not %slocked @ %s:%d\n",
			    rw->lock_object.lo_name, (what == RA_RLOCKED) ?
			    "read " : "", file, line);

		if (!(rw->rw_lock & RW_LOCK_READ)) {
			if (rw_recursed(rw)) {
				if (what & RA_NOTRECURSED)
					panic("Lock %s recursed @ %s:%d\n",
					    rw->lock_object.lo_name, file,
					    line);
			} else if (what & RA_RECURSED)
				panic("Lock %s not recursed @ %s:%d\n",
				    rw->lock_object.lo_name, file, line);
		}
#endif
		break;
	case RA_WLOCKED:
	case RA_WLOCKED | RA_RECURSED:
	case RA_WLOCKED | RA_NOTRECURSED:
		if (rw_wowner(rw) != curthread)
			panic("Lock %s not exclusively locked @ %s:%d\n",
			    rw->lock_object.lo_name, file, line);
		if (rw_recursed(rw)) {
			if (what & RA_NOTRECURSED)
				panic("Lock %s recursed @ %s:%d\n",
				    rw->lock_object.lo_name, file, line);
		} else if (what & RA_RECURSED)
			panic("Lock %s not recursed @ %s:%d\n",
			    rw->lock_object.lo_name, file, line);
		break;
	case RA_UNLOCKED:
#ifdef WITNESS
		witness_assert(&rw->lock_object, what, file, line);
#else
		/*
		 * If we hold a write lock fail.  We can't reliably check
		 * to see if we hold a read lock or not.
		 */
		if (rw_wowner(rw) == curthread)
			panic("Lock %s exclusively locked @ %s:%d\n",
			    rw->lock_object.lo_name, file, line);
#endif
		break;
	default:
		panic("Unknown rw lock assertion: %d @ %s:%d", what, file,
		    line);
	}
}
#endif /* INVARIANT_SUPPORT */
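
/*
 * Sketch of assertion usage (illustrative; "struct foo" and its members
 * are hypothetical).  Subsystems sprinkle these into functions that have
 * locking requirements so that INVARIANTS kernels panic at the call site
 * instead of corrupting data later:
 *
 *	void
 *	foo_modify(struct foo *f)
 *	{
 *
 *		rw_assert(&f->foo_lock, RA_WLOCKED);
 *		... modify f ...
 *	}
 */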

#ifdef DDB
void
db_show_rwlock(struct lock_object *lock)
{
	struct rwlock *rw;
	struct thread *td;

	rw = (struct rwlock *)lock;

	db_printf(" state: ");
	if (rw->rw_lock == RW_UNLOCKED)
		db_printf("UNLOCKED\n");
	else if (rw->rw_lock == RW_DESTROYED) {
		db_printf("DESTROYED\n");
		return;
	} else if (rw->rw_lock & RW_LOCK_READ)
		db_printf("RLOCK: %ju locks\n",
		    (uintmax_t)(RW_READERS(rw->rw_lock)));
	else {
		td = rw_wowner(rw);
		db_printf("WLOCK: %p (tid %d, pid %d, \"%s\")\n", td,
		    td->td_tid, td->td_proc->p_pid, td->td_name);
		if (rw_recursed(rw))
			db_printf(" recursed: %u\n", rw->rw_recurse);
	}
	db_printf(" waiters: ");
	switch (rw->rw_lock & (RW_LOCK_READ_WAITERS | RW_LOCK_WRITE_WAITERS)) {
	case RW_LOCK_READ_WAITERS:
		db_printf("readers\n");
		break;
	case RW_LOCK_WRITE_WAITERS:
		db_printf("writers\n");
		break;
	case RW_LOCK_READ_WAITERS | RW_LOCK_WRITE_WAITERS:
		db_printf("readers and writers\n");
		break;
	default:
		db_printf("none\n");
		break;
	}
}

#endif