1/* 2 * Copyright (c) 2000-2006 Apple Computer, Inc. All rights reserved. 3 * 4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ 5 * 6 * This file contains Original Code and/or Modifications of Original Code 7 * as defined in and that are subject to the Apple Public Source License 8 * Version 2.0 (the 'License'). You may not use this file except in 9 * compliance with the License. The rights granted to you under the License 10 * may not be used to create, or enable the creation or redistribution of, 11 * unlawful or unlicensed copies of an Apple operating system, or to 12 * circumvent, violate, or enable the circumvention or violation of, any 13 * terms of an Apple operating system software license agreement. 14 * 15 * Please obtain a copy of the License at 16 * http://www.opensource.apple.com/apsl/ and read it before using this file. 17 * 18 * The Original Code and all software distributed under the License are 19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER 20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, 21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, 22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. 23 * Please see the License for the specific language governing rights and 24 * limitations under the License. 25 * 26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ 27 */ 28/* 29 * Mach Operating System 30 * Copyright (c) 1987 Carnegie-Mellon University 31 * All rights reserved. The CMU software License Agreement specifies 32 * the terms and conditions for use and redistribution. 
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc_internal.h>
#include <sys/user.h>
#include <sys/file_internal.h>
#include <sys/vnode.h>
#include <sys/kernel.h>

#include <machine/spl.h>

#include <kern/queue.h>
#include <sys/lock.h>
#include <kern/thread.h>
#include <kern/sched_prim.h>
#include <kern/ast.h>

#include <kern/cpu_number.h>
#include <vm/vm_kern.h>

#include <kern/task.h>
#include <mach/time_value.h>
#include <kern/lock.h>

#include <sys/systm.h>			/* for unix_syscall_return() */
#include <libkern/OSAtomic.h>

extern void compute_averunnable(void *);	/* XXX */


/*
 * Continuation resumed after the thread_block() issued by _sleep() when
 * the caller supplied a continuation routine.  The original stack frame
 * is gone, so the sleep parameters are recovered from the uthread
 * fields that _sleep() saved (uu_pri, uu_mtx, uu_continuation).  The
 * wakeup reason in 'wresult' is translated into an errno value
 * (0, EWOULDBLOCK, EINTR or ERESTART), the caller's mutex is reacquired
 * unless PDROP was specified, and the saved continuation is invoked via
 * unix_syscall_return() -- this function never returns to its caller.
 */
static void
_sleep_continue( __unused void *parameter, wait_result_t wresult)
{
	struct proc *p = current_proc();
	thread_t self = current_thread();
	struct uthread * ut;
	int sig, catch;
	int error = 0;
	int dropmutex, spinmutex;

	ut = get_bsdthread_info(self);
	/* recover the flag bits _sleep() stashed in uu_pri */
	catch = ut->uu_pri & PCATCH;
	dropmutex = ut->uu_pri & PDROP;
	spinmutex = ut->uu_pri & PSPIN;

	switch (wresult) {
		case THREAD_TIMED_OUT:
			error = EWOULDBLOCK;
			break;
		case THREAD_AWAKENED:
			/*
			 * Posix implies any signal should be delivered
			 * first, regardless of whether awakened due
			 * to receiving event.
			 */
			if (!catch)
				break;
			/* else fall through */
		case THREAD_INTERRUPTED:
			if (catch) {
				if (thread_should_abort(self)) {
					error = EINTR;
				} else if (SHOULDissignal(p,ut)) {
					if ((sig = CURSIG(p)) != 0) {
						/* EINTR if the handler asks for interruption, else restart the call */
						if (p->p_sigacts->ps_sigintr & sigmask(sig))
							error = EINTR;
						else
							error = ERESTART;
					}
					if (thread_should_abort(self)) {
						error = EINTR;
					}
				} else if( (ut->uu_flag & ( UT_CANCELDISABLE | UT_CANCEL | UT_CANCELED)) == UT_CANCEL) {
					/* due to thread cancel */
					error = EINTR;
				}
			} else
				error = EINTR;
			break;
	}

	/* post a BSD AST so the signal/restart is processed on the way out */
	if (error == EINTR || error == ERESTART)
		act_set_astbsd(self);

	/* re-take the caller's mutex unless PDROP asked us to leave it dropped */
	if (ut->uu_mtx && !dropmutex) {
		if (spinmutex)
			lck_mtx_lock_spin(ut->uu_mtx);
		else
			lck_mtx_lock(ut->uu_mtx);
	}
	ut->uu_wchan = NULL;
	ut->uu_wmesg = NULL;

	unix_syscall_return((*ut->uu_continuation)(error));
}

/*
 * Give up the processor till a wakeup occurs
 * on chan, at which time the process
 * enters the scheduling queue at priority pri.
 * The most important effect of pri is that when
 * pri<=PZERO a signal cannot disturb the sleep;
 * if pri>PZERO signals will be processed.
 * If pri&PCATCH is set, signals will cause sleep
 * to return 1, rather than longjmp.
 * Callers of this routine must be prepared for
 * premature return, and check that the reason for
 * sleeping has gone away.
 *
 * if msleep was the entry point, then we have a mutex to deal with
 *
 * The mutex is unlocked before the caller is blocked, and
 * relocked before msleep returns unless the priority includes the PDROP
 * flag... if PDROP is specified, _sleep returns with the mutex unlocked
 * regardless of whether it actually blocked or not.
 */

/*
 * _sleep
 *
 *   chan         wait channel to sleep on (may be NULL)
 *   pri          scheduling priority, OR'd with PCATCH/PDROP/PSPIN flags
 *   wmsg         wait message recorded in the uthread ("unknown" if NULL)
 *   abstime      absolute deadline for the sleep; 0 means no timeout
 *   continuation if non-NULL, routine resumed on wakeup instead of
 *                returning here (the stack frame is not preserved)
 *   mtx          mutex to release while blocked, or NULL
 *
 * Returns 0 on normal wakeup, EWOULDBLOCK on timeout, EINTR or ERESTART
 * when interrupted (PCATCH only).
 */
static int
_sleep(
	caddr_t		chan,
	int		pri,
	const char	*wmsg,
	u_int64_t	abstime,
	int		(*continuation)(int),
	lck_mtx_t	*mtx)
{
	struct proc *p;
	thread_t self = current_thread();
	struct uthread * ut;
	int sig, catch;
	int dropmutex = pri & PDROP;
	int spinmutex = pri & PSPIN;
	int wait_result;
	int error = 0;

	ut = get_bsdthread_info(self);

	p = current_proc();
	p->p_priority = pri & PRIMASK;
	/* It can still block in proc_exit() after the teardown. */
	if (p->p_stats != NULL)
		OSIncrementAtomicLong(&p->p_stats->p_ru.ru_nvcsw);

	/* PCATCH makes the wait abortable by signals/thread abort */
	if (pri & PCATCH)
		catch = THREAD_ABORTSAFE;
	else
		catch = THREAD_UNINT;

	/* set wait message & channel */
	ut->uu_wchan = chan;
	ut->uu_wmesg = wmsg ? wmsg : "unknown";

	/*
	 * Fast path: with a mutex, a channel and no continuation, let the
	 * lock package do the unlock/sleep/relock as a single operation.
	 */
	if (mtx != NULL && chan != NULL && (thread_continue_t)continuation == THREAD_CONTINUE_NULL) {
		int	flags;

		if (dropmutex)
			flags = LCK_SLEEP_UNLOCK;
		else
			flags = LCK_SLEEP_DEFAULT;

		if (spinmutex)
			flags |= LCK_SLEEP_SPIN;

		if (abstime)
			wait_result = lck_mtx_sleep_deadline(mtx, flags, chan, catch, abstime);
		else
			wait_result = lck_mtx_sleep(mtx, flags, chan, catch);
	}
	else {
		if (chan != NULL)
			assert_wait_deadline(chan, catch, abstime);
		if (mtx)
			lck_mtx_unlock(mtx);

		if (catch == THREAD_ABORTSAFE) {
			/*
			 * Check for already-pending signals before blocking.
			 * If clear_wait() fails, a wakeup raced in and removed
			 * us from the wait queue, so just block normally.
			 */
			if (SHOULDissignal(p,ut)) {
				if ((sig = CURSIG(p)) != 0) {
					if (clear_wait(self, THREAD_INTERRUPTED) == KERN_FAILURE)
						goto block;
					if (p->p_sigacts->ps_sigintr & sigmask(sig))
						error = EINTR;
					else
						error = ERESTART;
					if (mtx && !dropmutex) {
						if (spinmutex)
							lck_mtx_lock_spin(mtx);
						else
							lck_mtx_lock(mtx);
					}
					goto out;
				}
			}
			if (thread_should_abort(self)) {
				if (clear_wait(self, THREAD_INTERRUPTED) == KERN_FAILURE)
					goto block;
				error = EINTR;

				if (mtx && !dropmutex) {
					if (spinmutex)
						lck_mtx_lock_spin(mtx);
					else
						lck_mtx_lock(mtx);
				}
				goto out;
			}
		}

block:
		if ((thread_continue_t)continuation != THREAD_CONTINUE_NULL) {
			/*
			 * Continuation-style block: stash state in the uthread
			 * for _sleep_continue(); thread_block() does not return.
			 */
			ut->uu_continuation = continuation;
			ut->uu_pri = pri;
			ut->uu_timo = abstime? 1: 0;
			ut->uu_mtx = mtx;
			(void) thread_block(_sleep_continue);
			/* NOTREACHED */
		}

		wait_result = thread_block(THREAD_CONTINUE_NULL);

		if (mtx && !dropmutex) {
			if (spinmutex)
				lck_mtx_lock_spin(mtx);
			else
				lck_mtx_lock(mtx);
		}
	}

	switch (wait_result) {
		case THREAD_TIMED_OUT:
			error = EWOULDBLOCK;
			break;
		case THREAD_AWAKENED:
			/*
			 * Posix implies any signal should be delivered
			 * first, regardless of whether awakened due
			 * to receiving event.
			 */
			if (catch != THREAD_ABORTSAFE)
				break;
			/* else fall through */
		case THREAD_INTERRUPTED:
			if (catch == THREAD_ABORTSAFE) {
				if (thread_should_abort(self)) {
					error = EINTR;
				} else if (SHOULDissignal(p, ut)) {
					if ((sig = CURSIG(p)) != 0) {
						if (p->p_sigacts->ps_sigintr & sigmask(sig))
							error = EINTR;
						else
							error = ERESTART;
					}
					if (thread_should_abort(self)) {
						error = EINTR;
					}
				} else if( (ut->uu_flag & ( UT_CANCELDISABLE | UT_CANCEL | UT_CANCELED)) == UT_CANCEL) {
					/* due to thread cancel */
					error = EINTR;
				}
			} else
				error = EINTR;
			break;
	}
out:
	/* arrange for signal delivery / syscall restart on return to user mode */
	if (error == EINTR || error == ERESTART)
		act_set_astbsd(self);
	ut->uu_wchan = NULL;
	ut->uu_wmesg = NULL;

	return (error);
}

/*
 * Historical BSD sleep(): no wait message, no timeout, no mutex.
 */
int
sleep(
	void	*chan,
	int		pri)
{
	return _sleep((caddr_t)chan, pri, (char *)NULL, 0, (int (*)(int))0, (lck_mtx_t *)0);
}

/*
 * msleep with a tick-count timeout and an optional continuation.
 * 'timo' is in clock ticks; NSEC_PER_SEC / hz nanoseconds per tick.
 */
int
msleep0(
	void		*chan,
	lck_mtx_t	*mtx,
	int		pri,
	const char	*wmsg,
	int		timo,
	int		(*continuation)(int))
{
	u_int64_t	abstime = 0;

	if (timo)
		clock_interval_to_deadline(timo, NSEC_PER_SEC / hz, &abstime);

	return _sleep((caddr_t)chan, pri, wmsg, abstime, continuation, mtx);
}

/*
 * msleep with an optional struct timespec timeout (relative).
 */
int
msleep(
	void		*chan,
	lck_mtx_t	*mtx,
	int		pri,
	const char	*wmsg,
	struct timespec		*ts)
{
	u_int64_t	abstime = 0;

	if (ts && (ts->tv_sec || ts->tv_nsec)) {
		/* convert the relative timespec into an absolute deadline */
		nanoseconds_to_absolutetime((uint64_t)ts->tv_sec * NSEC_PER_SEC + ts->tv_nsec, &abstime );
		clock_absolutetime_interval_to_deadline( abstime, &abstime );
	}

	return _sleep((caddr_t)chan, pri, wmsg, abstime, (int (*)(int))0, mtx);
}

/*
 * msleep with an absolute deadline (mach absolute time).
 */
int
msleep1(
	void		*chan,
	lck_mtx_t	*mtx,
	int		pri,
	const char	*wmsg,
	u_int64_t	abstime)
{
	return _sleep((caddr_t)chan, pri, wmsg, abstime, (int (*)(int))0, mtx);
}

/*
 * tsleep with a tick-count timeout; no mutex.
 */
int
tsleep(
	void		*chan,
	int		pri,
	const char	*wmsg,
	int		timo)
{
	u_int64_t	abstime = 0;

	if (timo)
		clock_interval_to_deadline(timo, NSEC_PER_SEC / hz, &abstime);
	return _sleep((caddr_t)chan, pri, wmsg, abstime, (int (*)(int))0, (lck_mtx_t *)0);
}

/*
 * tsleep with a tick-count timeout and a continuation; no mutex.
 */
int
tsleep0(
	void		*chan,
	int		pri,
	const char	*wmsg,
	int		timo,
	int		(*continuation)(int))
{
	u_int64_t	abstime = 0;

	if (timo)
		clock_interval_to_deadline(timo, NSEC_PER_SEC / hz, &abstime);
	return _sleep((caddr_t)chan, pri, wmsg, abstime, continuation, (lck_mtx_t *)0);
}

/*
 * tsleep with an absolute deadline and a continuation; no mutex.
 */
int
tsleep1(
	void		*chan,
	int		pri,
	const char	*wmsg,
	u_int64_t	abstime,
	int		(*continuation)(int))
{
	return _sleep((caddr_t)chan, pri, wmsg, abstime, continuation, (lck_mtx_t *)0);
}

/*
 * Wake up all processes sleeping on chan.
 */
void
wakeup(void *chan)
{
	thread_wakeup((caddr_t)chan);
}

/*
 * Wake up the first process sleeping on chan.
 *
 * Be very sure that the first process is really
 * the right one to wakeup.
415 */ 416void 417wakeup_one(caddr_t chan) 418{ 419 thread_wakeup_one((caddr_t)chan); 420} 421 422/* 423 * Compute the priority of a process when running in user mode. 424 * Arrange to reschedule if the resulting priority is better 425 * than that of the current process. 426 */ 427void 428resetpriority(struct proc *p) 429{ 430 (void)task_importance(p->task, -p->p_nice); 431} 432 433struct loadavg averunnable = 434 { {0, 0, 0}, FSCALE }; /* load average, of runnable procs */ 435/* 436 * Constants for averages over 1, 5, and 15 minutes 437 * when sampling at 5 second intervals. 438 */ 439static fixpt_t cexp[3] = { 440 (fixpt_t)(0.9200444146293232 * FSCALE), /* exp(-1/12) */ 441 (fixpt_t)(0.9834714538216174 * FSCALE), /* exp(-1/60) */ 442 (fixpt_t)(0.9944598480048967 * FSCALE), /* exp(-1/180) */ 443}; 444 445void 446compute_averunnable(void *arg) 447{ 448 unsigned int nrun = *(unsigned int *)arg; 449 struct loadavg *avg = &averunnable; 450 int i; 451 452 for (i = 0; i < 3; i++) 453 avg->ldavg[i] = (cexp[i] * avg->ldavg[i] + 454 nrun * FSCALE * (FSCALE - cexp[i])) >> FSHIFT; 455} 456