/*	$NetBSD: kern_sleepq.c,v 1.44 2011/10/31 12:18:32 yamt Exp $	*/

/*-
 * Copyright (c) 2006, 2007, 2008, 2009 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Sleep queue implementation, used by turnstiles and general sleep/wakeup
 * interfaces.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: kern_sleepq.c,v 1.44 2011/10/31 12:18:32 yamt Exp $");

#include "opt_sa.h"

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/cpu.h>
#include <sys/pool.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/sa.h>
#include <sys/savar.h>
#include <sys/sched.h>
#include <sys/systm.h>
#include <sys/sleepq.h>
#include <sys/ktrace.h>

static int	sleepq_sigtoerror(lwp_t *, int);

/* General purpose sleep table, used by mtsleep() and condition variables. */
sleeptab_t	sleeptab	__cacheline_aligned;

/*
 * sleeptab_init:
 *
 *	Initialize a sleep table.
 */
void
sleeptab_init(sleeptab_t *st)
{
	sleepq_t *sq;
	int i;

	for (i = 0; i < SLEEPTAB_HASH_SIZE; i++) {
		sq = &st->st_queues[i].st_queue;
		st->st_queues[i].st_mutex =
		    mutex_obj_alloc(MUTEX_DEFAULT, IPL_SCHED);
		sleepq_init(sq);
	}
}

/*
 * sleepq_init:
 *
 *	Prepare a sleep queue for use.
 */
void
sleepq_init(sleepq_t *sq)
{

	TAILQ_INIT(sq);
}
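
/*
 * Example: how a wait channel maps to a queue and its lock.  This is
 * a sketch of the lookup step performed by sleeptab_lookup() in
 * sys/sleepq.h, assuming that header's SLEEPTAB_HASH() bucket macro;
 * it is illustrative rather than a drop-in replacement.
 */
static __unused sleepq_t *
example_lookup(sleeptab_t *st, wchan_t wchan, kmutex_t **mp)
{
	kmutex_t *mtx;

	/* Hash the channel address to select a bucket. */
	mtx = st->st_queues[SLEEPTAB_HASH(wchan)].st_mutex;

	/* The bucket's spin mutex covers the bucket's queue. */
	mutex_spin_enter(mtx);
	*mp = mtx;
	return &st->st_queues[SLEEPTAB_HASH(wchan)].st_queue;
}
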
/*
 * sleepq_remove:
 *
 *	Remove an LWP from a sleep queue and wake it up.
 */
void
sleepq_remove(sleepq_t *sq, lwp_t *l)
{
	struct schedstate_percpu *spc;
	struct cpu_info *ci;

	KASSERT(lwp_locked(l, NULL));

	TAILQ_REMOVE(sq, l, l_sleepchain);
	l->l_syncobj = &sched_syncobj;
	l->l_wchan = NULL;
	l->l_sleepq = NULL;
	l->l_flag &= ~LW_SINTR;

	ci = l->l_cpu;
	spc = &ci->ci_schedstate;

	/*
	 * If not sleeping, the LWP must have been suspended.  Let whoever
	 * is holding it stopped set it running again.
	 */
	if (l->l_stat != LSSLEEP) {
		KASSERT(l->l_stat == LSSTOP || l->l_stat == LSSUSPENDED);
		lwp_setlock(l, spc->spc_lwplock);
		return;
	}

	/*
	 * If the LWP is still on the CPU, mark it as LSONPROC.  It may be
	 * about to call mi_switch(), in which case it will yield.
	 */
	if ((l->l_pflag & LP_RUNNING) != 0) {
		l->l_stat = LSONPROC;
		l->l_slptime = 0;
		lwp_setlock(l, spc->spc_lwplock);
		return;
	}

	/* Update the sleep time delta and call the scheduler's wake-up hook. */
	l->l_slpticksum += (hardclock_ticks - l->l_slpticks);
	sched_wakeup(l);

	/* Look for a CPU to wake up on. */
	l->l_cpu = sched_takecpu(l);
	ci = l->l_cpu;
	spc = &ci->ci_schedstate;

	/*
	 * Set it running.
	 */
	spc_lock(ci);
	lwp_setlock(l, spc->spc_mutex);
#ifdef KERN_SA
	if (l->l_proc->p_sa != NULL)
		sa_awaken(l);
#endif /* KERN_SA */
	sched_setrunnable(l);
	l->l_stat = LSRUN;
	l->l_slptime = 0;
	sched_enqueue(l, false);
	spc_unlock(ci);
}

/*
 * sleepq_insert:
 *
 *	Insert an LWP into the sleep queue, optionally sorting by priority.
 */
void
sleepq_insert(sleepq_t *sq, lwp_t *l, syncobj_t *sobj)
{

	if ((sobj->sobj_flag & SOBJ_SLEEPQ_SORTED) != 0) {
		lwp_t *l2;
		const int pri = lwp_eprio(l);

		TAILQ_FOREACH(l2, sq, l_sleepchain) {
			if (lwp_eprio(l2) < pri) {
				TAILQ_INSERT_BEFORE(l2, l, l_sleepchain);
				return;
			}
		}
	}

	if ((sobj->sobj_flag & SOBJ_SLEEPQ_LIFO) != 0)
		TAILQ_INSERT_HEAD(sq, l, l_sleepchain);
	else
		TAILQ_INSERT_TAIL(sq, l, l_sleepchain);
}

/*
 * sleepq_enqueue:
 *
 *	Enter an LWP into the sleep queue and prepare for sleep.  The sleep
 *	queue must already be locked, and any interlock (such as the kernel
 *	lock) must have been released (see sleeptab_lookup(), sleepq_enter()).
 */
void
sleepq_enqueue(sleepq_t *sq, wchan_t wchan, const char *wmesg, syncobj_t *sobj)
{
	lwp_t *l = curlwp;

	KASSERT(lwp_locked(l, NULL));
	KASSERT(l->l_stat == LSONPROC);
	KASSERT(l->l_wchan == NULL && l->l_sleepq == NULL);

	l->l_syncobj = sobj;
	l->l_wchan = wchan;
	l->l_sleepq = sq;
	l->l_wmesg = wmesg;
	l->l_slptime = 0;
	l->l_stat = LSSLEEP;
	l->l_sleeperr = 0;

	sleepq_insert(sq, l, sobj);

	/* Record the time at which the LWP went to sleep. */
	l->l_slpticks = hardclock_ticks;
	sched_slept(l);
}
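
/*
 * Example: the canonical sleep sequence, in the style of mtsleep().
 * A minimal sketch: "ident", "itlock" and the "example" wmesg are
 * hypothetical, the LWP sleeps interruptibly, and the caller deals
 * with the returned error.
 */
static __unused int
example_sleep(wchan_t ident, kmutex_t *itlock, int timo)
{
	lwp_t *l = curlwp;
	kmutex_t *mp;
	sleepq_t *sq;
	int error;

	/* Find and lock the queue for the wait channel. */
	sq = sleeptab_lookup(&sleeptab, ident, &mp);

	/* Exchange the LWP's lock for the sleep queue lock and queue it. */
	sleepq_enter(sq, l, mp);
	sleepq_enqueue(sq, ident, "example", &sleep_syncobj);

	/*
	 * With the LWP on the queue the interlock can be released; a
	 * wakeup cannot be lost, as the queue stays locked until
	 * mi_switch().
	 */
	mutex_exit(itlock);

	error = sleepq_block(timo, true);
	mutex_enter(itlock);
	return error;
}
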
/*
 * sleepq_block:
 *
 *	After any intermediate step such as releasing an interlock, switch.
 *	sleepq_block() may return early under exceptional conditions, for
 *	example if the LWP's containing process is exiting.
 */
int
sleepq_block(int timo, bool catch)
{
	int error = 0, sig;
	struct proc *p;
	lwp_t *l = curlwp;
	bool early = false;
	int biglocks = l->l_biglocks;

	ktrcsw(1, 0);

	/*
	 * If sleeping interruptibly, check for pending signals, exits or
	 * core dump events.
	 */
	if (catch) {
		l->l_flag |= LW_SINTR;
		if ((l->l_flag & (LW_CANCELLED|LW_WEXIT|LW_WCORE)) != 0) {
			l->l_flag &= ~LW_CANCELLED;
			error = EINTR;
			early = true;
		} else if ((l->l_flag & LW_PENDSIG) != 0 && sigispending(l, 0))
			early = true;
	}

	if (early) {
		/* lwp_unsleep() will release the lock. */
		lwp_unsleep(l, true);
	} else {
		if (timo)
			callout_schedule(&l->l_timeout_ch, timo);

#ifdef KERN_SA
		if (((l->l_flag & LW_SA) != 0) && (~l->l_pflag & LP_SA_NOBLOCK))
			sa_switch(l);
		else
#endif
			mi_switch(l);

		/* The LWP and sleep queue are now unlocked. */
		if (timo) {
			/*
			 * Even if the callout appears to have fired, we
			 * need to stop it in order to synchronise with
			 * other CPUs.
			 */
			if (callout_halt(&l->l_timeout_ch, NULL))
				error = EWOULDBLOCK;
		}
	}

	if (catch && error == 0) {
		p = l->l_proc;
		if ((l->l_flag & (LW_CANCELLED | LW_WEXIT | LW_WCORE)) != 0)
			error = EINTR;
		else if ((l->l_flag & LW_PENDSIG) != 0) {
			/*
			 * Acquiring p_lock may cause us to recurse
			 * through the sleep path and back into this
			 * routine, but is safe because LWPs sleeping
			 * on locks are non-interruptible.  We will
			 * not recurse again.
			 */
			mutex_enter(p->p_lock);
			if (((sig = sigispending(l, 0)) != 0 &&
			    (sigprop[sig] & SA_STOP) == 0) ||
			    (sig = issignal(l)) != 0)
				error = sleepq_sigtoerror(l, sig);
			mutex_exit(p->p_lock);
		}
	}

	ktrcsw(0, 0);
	if (__predict_false(biglocks != 0)) {
		KERNEL_LOCK(biglocks, NULL);
	}
	return error;
}

/*
 * sleepq_wake:
 *
 *	Wake zero or more LWPs blocked on a single wait channel.
 */
lwp_t *
sleepq_wake(sleepq_t *sq, wchan_t wchan, u_int expected, kmutex_t *mp)
{
	lwp_t *l, *next;

	KASSERT(mutex_owned(mp));

	for (l = TAILQ_FIRST(sq); l != NULL; l = next) {
		KASSERT(l->l_sleepq == sq);
		KASSERT(l->l_mutex == mp);
		next = TAILQ_NEXT(l, l_sleepchain);
		if (l->l_wchan != wchan)
			continue;
		sleepq_remove(sq, l);
		if (--expected == 0)
			break;
	}

	mutex_spin_exit(mp);
	return l;
}
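
/*
 * Example: the wakeup side.  A minimal sketch with a hypothetical
 * channel "ident": an expected count of 1 wakes a single LWP, while
 * (u_int)-1 in effect wakes all waiters.  sleepq_wake() drops the
 * queue lock in either case.
 */
static __unused void
example_wake(wchan_t ident, bool wakeall)
{
	kmutex_t *mp;
	sleepq_t *sq;

	sq = sleeptab_lookup(&sleeptab, ident, &mp);
	(void)sleepq_wake(sq, ident, wakeall ? (u_int)-1 : 1, mp);
}
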
/*
 * sleepq_unsleep:
 *
 *	Remove an LWP from its sleep queue and set it runnable again.
 *	sleepq_unsleep() is called with the LWP's mutex held, and will
 *	always release it.
 */
void
sleepq_unsleep(lwp_t *l, bool cleanup)
{
	sleepq_t *sq = l->l_sleepq;
	kmutex_t *mp = l->l_mutex;

	KASSERT(lwp_locked(l, mp));
	KASSERT(l->l_wchan != NULL);

	sleepq_remove(sq, l);
	if (cleanup) {
		mutex_spin_exit(mp);
	}
}

/*
 * sleepq_timeout:
 *
 *	Entered via the callout(9) subsystem to time out an LWP that is on a
 *	sleep queue.
 */
void
sleepq_timeout(void *arg)
{
	lwp_t *l = arg;

	/*
	 * Lock the LWP.  Assuming it's still on the sleep queue, its
	 * current mutex will also be the sleep queue mutex.
	 */
	lwp_lock(l);

	if (l->l_wchan == NULL) {
		/* Somebody beat us to it. */
		lwp_unlock(l);
		return;
	}

	lwp_unsleep(l, true);
}

/*
 * sleepq_sigtoerror:
 *
 *	Given a signal number, interpret and return an error code.
 */
static int
sleepq_sigtoerror(lwp_t *l, int sig)
{
	struct proc *p = l->l_proc;
	int error;

	KASSERT(mutex_owned(p->p_lock));

	/*
	 * If this sleep was canceled, don't let the syscall restart.
	 */
	if ((SIGACTION(p, sig).sa_flags & SA_RESTART) == 0)
		error = EINTR;
	else
		error = ERESTART;

	return error;
}

/*
 * sleepq_abort:
 *
 *	After a panic or during autoconfiguration, lower the interrupt
 *	priority level to give pending interrupts a chance to run, and
 *	then return.  Called if sleepq_dontsleep() returns non-zero, and
 *	always returns zero.
 */
int
sleepq_abort(kmutex_t *mtx, int unlock)
{
	extern int safepri;
	int s;

	s = splhigh();
	splx(safepri);
	splx(s);
	if (mtx != NULL && unlock != 0)
		mutex_exit(mtx);

	return 0;
}

/*
 * sleepq_reinsert:
 *
 *	Move the position of the LWP in the sleep queue after a possible
 *	change of the LWP's effective priority.
 */
static void
sleepq_reinsert(sleepq_t *sq, lwp_t *l)
{

	KASSERT(l->l_sleepq == sq);
	if ((l->l_syncobj->sobj_flag & SOBJ_SLEEPQ_SORTED) == 0) {
		return;
	}

	/*
	 * Don't let the sleep queue become empty, even briefly.
	 * cv_signal() and cv_broadcast() inspect it without the
	 * sleep queue lock held and need to see a non-empty queue
	 * head if there are waiters.
	 */
	if (TAILQ_FIRST(sq) == l && TAILQ_NEXT(l, l_sleepchain) == NULL) {
		return;
	}
	TAILQ_REMOVE(sq, l, l_sleepchain);
	sleepq_insert(sq, l, l->l_syncobj);
}

/*
 * sleepq_changepri:
 *
 *	Adjust the priority of an LWP residing on a sleepq.
 */
void
sleepq_changepri(lwp_t *l, pri_t pri)
{
	sleepq_t *sq = l->l_sleepq;

	KASSERT(lwp_locked(l, NULL));

	l->l_priority = pri;
	sleepq_reinsert(sq, l);
}

/*
 * sleepq_lendpri:
 *
 *	Adjust the lent priority of an LWP residing on a sleepq.
 */
void
sleepq_lendpri(lwp_t *l, pri_t pri)
{
	sleepq_t *sq = l->l_sleepq;

	KASSERT(lwp_locked(l, NULL));

	l->l_inheritedprio = pri;
	sleepq_reinsert(sq, l);
}
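
/*
 * Example: how the hooks above are consumed.  A synchronization
 * object wires them into its ops vector; this sketch is modeled on
 * sleep_syncobj in kern_synch.c, assuming the usual syncobj_t field
 * order.  SOBJ_SLEEPQ_SORTED makes sleepq_insert() keep the queue
 * ordered by effective priority, so priority lending via
 * sleepq_lendpri() reorders waiters as turnstiles require.
 */
static syncobj_t example_syncobj __unused = {
	SOBJ_SLEEPQ_SORTED,	/* sobj_flag */
	sleepq_unsleep,		/* sobj_unsleep */
	sleepq_changepri,	/* sobj_changepri */
	sleepq_lendpri,		/* sobj_lendpri */
	syncobj_noowner,	/* sobj_owner */
};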