/*	$NetBSD: kern_time.c,v 1.154 2008/10/15 06:51:20 wrstuden Exp $	*/

/*-
 * Copyright (c) 2000, 2004, 2005, 2007, 2008 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Christopher G. Demetriou.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_time.c	8.4 (Berkeley) 5/26/95
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: kern_time.c,v 1.154 2008/10/15 06:51:20 wrstuden Exp $");

#include <sys/param.h>
#include <sys/resourcevar.h>
#include <sys/kernel.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <sys/signalvar.h>
#include <sys/syslog.h>
#include <sys/timetc.h>
#include <sys/timex.h>
#include <sys/kauth.h>
#include <sys/mount.h>
#include <sys/sa.h>
#include <sys/savar.h>
#include <sys/syscallargs.h>
#include <sys/cpu.h>

#include <uvm/uvm_extern.h>

#include "opt_sa.h"

static void	timer_intr(void *);
static void	itimerfire(struct ptimer *);
static void	itimerfree(struct ptimers *, int);

kmutex_t	timer_lock;

static void	*timer_sih;
static TAILQ_HEAD(, ptimer) timer_queue;

POOL_INIT(ptimer_pool, sizeof(struct ptimer), 0, 0, 0, "ptimerpl",
    &pool_allocator_nointr, IPL_NONE);
POOL_INIT(ptimers_pool, sizeof(struct ptimers), 0, 0, 0, "ptimerspl",
    &pool_allocator_nointr, IPL_NONE);

/*
 * Initialize timekeeping.
 */
void
time_init(void)
{

	/* nothing yet */
}

void
time_init2(void)
{

	TAILQ_INIT(&timer_queue);
	mutex_init(&timer_lock, MUTEX_DEFAULT, IPL_SCHED);
	timer_sih = softint_establish(SOFTINT_CLOCK | SOFTINT_MPSAFE,
	    timer_intr, NULL);
}

/* Time of day and interval timer support.
 *
 * These routines provide the kernel entry points to get and set
 * the time-of-day and per-process interval timers.  Subroutines
 * here provide support for adding and subtracting timeval structures
 * and decrementing interval timers, optionally reloading the interval
 * timers when they expire.
 */

/* This function is used by clock_settime and settimeofday */
static int
settime1(struct proc *p, struct timespec *ts, bool check_kauth)
{
	struct timeval delta, tv;
	struct timeval now;
	struct timespec ts1;
	struct bintime btdelta;
	lwp_t *l;
	int s;

	TIMESPEC_TO_TIMEVAL(&tv, ts);

	/* WHAT DO WE DO ABOUT PENDING REAL-TIME TIMEOUTS??? */
	s = splclock();
	microtime(&now);
	timersub(&tv, &now, &delta);

	if (check_kauth && kauth_authorize_system(kauth_cred_get(),
	    KAUTH_SYSTEM_TIME, KAUTH_REQ_SYSTEM_TIME_SYSTEM, ts, &delta,
	    KAUTH_ARG(check_kauth ? false : true)) != 0) {
		splx(s);
		return (EPERM);
	}

#ifdef notyet
	if ((delta.tv_sec < 86400) && securelevel > 0) { /* XXX elad - notyet */
		splx(s);
		return (EPERM);
	}
#endif

	TIMEVAL_TO_TIMESPEC(&tv, &ts1);
	tc_setclock(&ts1);

	timeradd(&boottime, &delta, &boottime);

	/*
	 * XXXSMP: There is a short race between setting the time above
	 * and adjusting LWP's run times.  Fixing this properly means
	 * pausing all CPUs while we adjust the clock.
	 */
	timeval2bintime(&delta, &btdelta);
	mutex_enter(proc_lock);
	LIST_FOREACH(l, &alllwp, l_list) {
		lwp_lock(l);
		bintime_add(&l->l_stime, &btdelta);
		lwp_unlock(l);
	}
	mutex_exit(proc_lock);
	resettodr();
	splx(s);

	return (0);
}

int
settime(struct proc *p, struct timespec *ts)
{
	return (settime1(p, ts, true));
}
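
/*
 * Worked example for settime1() above (values invented for
 * exposition): if the clock currently reads 900s and the caller asks
 * for 1000s, delta is +100s.  That delta is folded into boottime, so
 * uptime-derived values stay consistent, and added to each LWP's
 * l_stime so that per-LWP run time accounting does not suddenly gain
 * or lose 100 seconds.
 */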

/* ARGSUSED */
int
sys_clock_gettime(struct lwp *l, const struct sys_clock_gettime_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(clockid_t) clock_id;
		syscallarg(struct timespec *) tp;
	} */
	clockid_t clock_id;
	struct timespec ats;

	clock_id = SCARG(uap, clock_id);
	switch (clock_id) {
	case CLOCK_REALTIME:
		nanotime(&ats);
		break;
	case CLOCK_MONOTONIC:
		nanouptime(&ats);
		break;
	default:
		return (EINVAL);
	}

	return copyout(&ats, SCARG(uap, tp), sizeof(ats));
}

/* ARGSUSED */
int
sys_clock_settime(struct lwp *l, const struct sys_clock_settime_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(clockid_t) clock_id;
		syscallarg(const struct timespec *) tp;
	} */

	return clock_settime1(l->l_proc, SCARG(uap, clock_id), SCARG(uap, tp),
	    true);
}

int
clock_settime1(struct proc *p, clockid_t clock_id, const struct timespec *tp,
    bool check_kauth)
{
	struct timespec ats;
	int error;

	if ((error = copyin(tp, &ats, sizeof(ats))) != 0)
		return (error);

	switch (clock_id) {
	case CLOCK_REALTIME:
		if ((error = settime1(p, &ats, check_kauth)) != 0)
			return (error);
		break;
	case CLOCK_MONOTONIC:
		return (EINVAL);	/* read-only clock */
	default:
		return (EINVAL);
	}

	return 0;
}

int
sys_clock_getres(struct lwp *l, const struct sys_clock_getres_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(clockid_t) clock_id;
		syscallarg(struct timespec *) tp;
	} */
	clockid_t clock_id;
	struct timespec ts;
	int error = 0;

	clock_id = SCARG(uap, clock_id);
	switch (clock_id) {
	case CLOCK_REALTIME:
	case CLOCK_MONOTONIC:
		ts.tv_sec = 0;
		if (tc_getfrequency() > 1000000000)
			ts.tv_nsec = 1;
		else
			ts.tv_nsec = 1000000000 / tc_getfrequency();
		break;
	default:
		return (EINVAL);
	}

	if (SCARG(uap, tp))
		error = copyout(&ts, SCARG(uap, tp), sizeof(ts));

	return error;
}
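
/*
 * Worked example for the resolution computation in sys_clock_getres()
 * above (frequencies invented for exposition): a 1000000 Hz
 * timecounter yields 1000000000 / 1000000 = 1000, so the reported
 * resolution is { 0, 1000 }, i.e. 1us.  Any counter faster than
 * 1 GHz is clamped to the finest reportable resolution, { 0, 1 }.
 */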

/* ARGSUSED */
int
sys_nanosleep(struct lwp *l, const struct sys_nanosleep_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(struct timespec *) rqtp;
		syscallarg(struct timespec *) rmtp;
	} */
	struct timespec rmt, rqt;
	int error, error1;

	error = copyin(SCARG(uap, rqtp), &rqt, sizeof(struct timespec));
	if (error)
		return (error);

	error = nanosleep1(l, &rqt, SCARG(uap, rmtp) ? &rmt : NULL);
	if (SCARG(uap, rmtp) == NULL || (error != 0 && error != EINTR))
		return error;

	error1 = copyout(&rmt, SCARG(uap, rmtp), sizeof(rmt));
	return error1 ? error1 : error;
}

int
nanosleep1(struct lwp *l, struct timespec *rqt, struct timespec *rmt)
{
	struct timespec rmtstart;
	int error, timo;

	if (itimespecfix(rqt))
		return (EINVAL);

	timo = tstohz(rqt);
	/*
	 * Avoid inadvertently sleeping forever
	 */
	if (timo == 0)
		timo = 1;
	getnanouptime(&rmtstart);
again:
	error = kpause("nanoslp", true, timo, NULL);
	if (rmt != NULL || error == 0) {
		struct timespec rmtend;
		struct timespec t0;
		struct timespec *t;

		getnanouptime(&rmtend);
		t = (rmt != NULL) ? rmt : &t0;
		timespecsub(&rmtend, &rmtstart, t);
		timespecsub(rqt, t, t);
		if (t->tv_sec < 0)
			timespecclear(t);
		if (error == 0) {
			timo = tstohz(t);
			if (timo > 0)
				goto again;
		}
	}

	if (error == ERESTART)
		error = EINTR;
	if (error == EWOULDBLOCK)
		error = 0;

	return error;
}
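
/*
 * Sketch of the retry loop in nanosleep1() above (timings invented
 * for exposition): a 2.5s request with HZ=100 becomes roughly 251
 * ticks.  If kpause() returns 0 after only 1.0s (a wakeup that was
 * neither a timeout nor a signal), the elapsed time is subtracted
 * from the request and the sleep resumes for the remaining 1.5s.
 * On a signal, the unslept remainder is reported through *rmt; a
 * normal timeout (EWOULDBLOCK) is mapped to success.
 */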
415 */ 416 if (utzp) 417 log(LOG_WARNING, "pid %d attempted to set the " 418 "(obsolete) kernel time zone\n", l->l_proc->p_pid); 419 420 if (utv == NULL) 421 return 0; 422 423 if (userspace) { 424 if ((error = copyin(utv, &atv, sizeof(atv))) != 0) 425 return error; 426 utv = &atv; 427 } 428 429 TIMEVAL_TO_TIMESPEC(utv, &ts); 430 return settime1(l->l_proc, &ts, check_kauth); 431} 432 433int time_adjusted; /* set if an adjustment is made */ 434 435/* ARGSUSED */ 436int 437sys_adjtime(struct lwp *l, const struct sys_adjtime_args *uap, 438 register_t *retval) 439{ 440 /* { 441 syscallarg(const struct timeval *) delta; 442 syscallarg(struct timeval *) olddelta; 443 } */ 444 int error; 445 446 if ((error = kauth_authorize_system(l->l_cred, KAUTH_SYSTEM_TIME, 447 KAUTH_REQ_SYSTEM_TIME_ADJTIME, NULL, NULL, NULL)) != 0) 448 return (error); 449 450 return adjtime1(SCARG(uap, delta), SCARG(uap, olddelta), l->l_proc); 451} 452 453int 454adjtime1(const struct timeval *delta, struct timeval *olddelta, struct proc *p) 455{ 456 struct timeval atv; 457 int error = 0; 458 459 extern int64_t time_adjtime; /* in kern_ntptime.c */ 460 461 if (olddelta) { 462 mutex_spin_enter(&timecounter_lock); 463 atv.tv_sec = time_adjtime / 1000000; 464 atv.tv_usec = time_adjtime % 1000000; 465 mutex_spin_exit(&timecounter_lock); 466 if (atv.tv_usec < 0) { 467 atv.tv_usec += 1000000; 468 atv.tv_sec--; 469 } 470 error = copyout(&atv, olddelta, sizeof(struct timeval)); 471 if (error) 472 return (error); 473 } 474 475 if (delta) { 476 error = copyin(delta, &atv, sizeof(struct timeval)); 477 if (error) 478 return (error); 479 480 mutex_spin_enter(&timecounter_lock); 481 time_adjtime = (int64_t)atv.tv_sec * 1000000 + 482 atv.tv_usec; 483 if (time_adjtime) { 484 /* We need to save the system time during shutdown */ 485 time_adjusted |= 1; 486 } 487 mutex_spin_exit(&timecounter_lock); 488 } 489 490 return error; 491} 492 493/* 494 * Interval timer support. Both the BSD getitimer() family and the POSIX 495 * timer_*() family of routines are supported. 496 * 497 * All timers are kept in an array pointed to by p_timers, which is 498 * allocated on demand - many processes don't use timers at all. The 499 * first three elements in this array are reserved for the BSD timers: 500 * element 0 is ITIMER_REAL, element 1 is ITIMER_VIRTUAL, and element 501 * 2 is ITIMER_PROF. The rest may be allocated by the timer_create() 502 * syscall. 503 * 504 * Realtime timers are kept in the ptimer structure as an absolute 505 * time; virtual time timers are kept as a linked list of deltas. 506 * Virtual time timers are processed in the hardclock() routine of 507 * kern_clock.c. The real time timer is processed by a callout 508 * routine, called from the softclock() routine. Since a callout may 509 * be delayed in real time due to interrupt processing in the system, 510 * it is possible for the real time timeout routine (realtimeexpire, 511 * given below), to be delayed in real time past when it is supposed 512 * to occur. It does not suffice, therefore, to reload the real timer 513 * .it_value from the real time timers .it_interval. Rather, we 514 * compute the next time in absolute time the timer should go off. */ 515 516/* Allocate a POSIX realtime timer. 

/*
 * Interval timer support. Both the BSD getitimer() family and the POSIX
 * timer_*() family of routines are supported.
 *
 * All timers are kept in an array pointed to by p_timers, which is
 * allocated on demand - many processes don't use timers at all. The
 * first three elements in this array are reserved for the BSD timers:
 * element 0 is ITIMER_REAL, element 1 is ITIMER_VIRTUAL, and element
 * 2 is ITIMER_PROF. The rest may be allocated by the timer_create()
 * syscall.
 *
 * Realtime timers are kept in the ptimer structure as an absolute
 * time; virtual time timers are kept as a linked list of deltas.
 * Virtual time timers are processed in the hardclock() routine of
 * kern_clock.c.  The real time timer is processed by a callout
 * routine, called from the softclock() routine.  Since a callout may
 * be delayed in real time due to interrupt processing in the system,
 * it is possible for the real time timeout routine (realtimerexpire,
 * given below) to be delayed in real time past when it is supposed
 * to occur.  It does not suffice, therefore, to reload the real timer
 * .it_value from the real time timer's .it_interval.  Rather, we
 * compute the next time in absolute time the timer should go off.
 */
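
/*
 * Illustration of the delta list described above (values invented
 * for exposition): two virtual timers due at 5s and 8s of process
 * virtual time are kept on pts_virtual as the deltas 5 and 3, each
 * relative to its predecessor.  Only the head's delta is decremented
 * as the process accumulates virtual time; when it reaches zero the
 * timer fires and the next entry becomes the head.  timer_settime()
 * below maintains this invariant on insertion and removal.
 */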

/* Allocate a POSIX realtime timer. */
int
sys_timer_create(struct lwp *l, const struct sys_timer_create_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(clockid_t) clock_id;
		syscallarg(struct sigevent *) evp;
		syscallarg(timer_t *) timerid;
	} */

	return timer_create1(SCARG(uap, timerid), SCARG(uap, clock_id),
	    SCARG(uap, evp), copyin, l);
}

int
timer_create1(timer_t *tid, clockid_t id, struct sigevent *evp,
    copyin_t fetch_event, struct lwp *l)
{
	int error;
	timer_t timerid;
	struct ptimers *pts;
	struct ptimer *pt;
	struct proc *p;

	p = l->l_proc;

	if (id < CLOCK_REALTIME || id > CLOCK_PROF)
		return (EINVAL);

	if ((pts = p->p_timers) == NULL)
		pts = timers_alloc(p);

	pt = pool_get(&ptimer_pool, PR_WAITOK);
	if (evp != NULL) {
		if (((error =
		    (*fetch_event)(evp, &pt->pt_ev, sizeof(pt->pt_ev))) != 0) ||
		    ((pt->pt_ev.sigev_notify < SIGEV_NONE) ||
			(pt->pt_ev.sigev_notify > SIGEV_SA))) {
			pool_put(&ptimer_pool, pt);
			return (error ? error : EINVAL);
		}
	}

	/* Find a free timer slot, skipping those reserved for setitimer(). */
	mutex_spin_enter(&timer_lock);
	for (timerid = 3; timerid < TIMER_MAX; timerid++)
		if (pts->pts_timers[timerid] == NULL)
			break;
	if (timerid == TIMER_MAX) {
		mutex_spin_exit(&timer_lock);
		pool_put(&ptimer_pool, pt);
		return EAGAIN;
	}
	if (evp == NULL) {
		pt->pt_ev.sigev_notify = SIGEV_SIGNAL;
		switch (id) {
		case CLOCK_REALTIME:
			pt->pt_ev.sigev_signo = SIGALRM;
			break;
		case CLOCK_VIRTUAL:
			pt->pt_ev.sigev_signo = SIGVTALRM;
			break;
		case CLOCK_PROF:
			pt->pt_ev.sigev_signo = SIGPROF;
			break;
		}
		pt->pt_ev.sigev_value.sival_int = timerid;
	}
	pt->pt_info.ksi_signo = pt->pt_ev.sigev_signo;
	pt->pt_info.ksi_errno = 0;
	pt->pt_info.ksi_code = 0;
	pt->pt_info.ksi_pid = p->p_pid;
	pt->pt_info.ksi_uid = kauth_cred_getuid(l->l_cred);
	pt->pt_info.ksi_value = pt->pt_ev.sigev_value;
	pt->pt_type = id;
	pt->pt_proc = p;
	pt->pt_overruns = 0;
	pt->pt_poverruns = 0;
	pt->pt_entry = timerid;
	pt->pt_queued = false;
	timespecclear(&pt->pt_time.it_value);
	if (id == CLOCK_REALTIME)
		callout_init(&pt->pt_ch, 0);
	else
		pt->pt_active = 0;

	pts->pts_timers[timerid] = pt;
	mutex_spin_exit(&timer_lock);

	return copyout(&timerid, tid, sizeof(timerid));
}

/* Delete a POSIX realtime timer */
int
sys_timer_delete(struct lwp *l, const struct sys_timer_delete_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(timer_t) timerid;
	} */
	struct proc *p = l->l_proc;
	timer_t timerid;
	struct ptimers *pts;
	struct ptimer *pt, *ptn;

	timerid = SCARG(uap, timerid);
	pts = p->p_timers;

	if (pts == NULL || timerid < 2 || timerid >= TIMER_MAX)
		return (EINVAL);

	mutex_spin_enter(&timer_lock);
	if ((pt = pts->pts_timers[timerid]) == NULL) {
		mutex_spin_exit(&timer_lock);
		return (EINVAL);
	}
	if (pt->pt_type != CLOCK_REALTIME) {
		if (pt->pt_active) {
			ptn = LIST_NEXT(pt, pt_list);
			LIST_REMOVE(pt, pt_list);
			for ( ; ptn; ptn = LIST_NEXT(ptn, pt_list))
				timespecadd(&pt->pt_time.it_value,
				    &ptn->pt_time.it_value,
				    &ptn->pt_time.it_value);
			pt->pt_active = 0;
		}
	}
	itimerfree(pts, timerid);

	return (0);
}

/*
 * Set up the given timer.  The value in pt->pt_time.it_value is taken
 * to be an absolute time for CLOCK_REALTIME timers and a relative
 * time for virtual timers.
 * Must be called with timer_lock held.
 */
void
timer_settime(struct ptimer *pt)
{
	struct ptimer *ptn, *pptn;
	struct ptlist *ptl;

	KASSERT(mutex_owned(&timer_lock));

	if (pt->pt_type == CLOCK_REALTIME) {
		callout_stop(&pt->pt_ch);
		if (timespecisset(&pt->pt_time.it_value)) {
			/*
			 * Don't need to check tshzto() return value, here.
			 * callout_reset() does it for us.
			 */
			callout_reset(&pt->pt_ch, tshzto(&pt->pt_time.it_value),
			    realtimerexpire, pt);
		}
	} else {
		if (pt->pt_active) {
			ptn = LIST_NEXT(pt, pt_list);
			LIST_REMOVE(pt, pt_list);
			for ( ; ptn; ptn = LIST_NEXT(ptn, pt_list))
				timespecadd(&pt->pt_time.it_value,
				    &ptn->pt_time.it_value,
				    &ptn->pt_time.it_value);
		}
		if (timespecisset(&pt->pt_time.it_value)) {
			if (pt->pt_type == CLOCK_VIRTUAL)
				ptl = &pt->pt_proc->p_timers->pts_virtual;
			else
				ptl = &pt->pt_proc->p_timers->pts_prof;

			for (ptn = LIST_FIRST(ptl), pptn = NULL;
			     ptn && timespeccmp(&pt->pt_time.it_value,
				 &ptn->pt_time.it_value, >);
			     pptn = ptn, ptn = LIST_NEXT(ptn, pt_list))
				timespecsub(&pt->pt_time.it_value,
				    &ptn->pt_time.it_value,
				    &pt->pt_time.it_value);

			if (pptn)
				LIST_INSERT_AFTER(pptn, pt, pt_list);
			else
				LIST_INSERT_HEAD(ptl, pt, pt_list);

			for ( ; ptn ; ptn = LIST_NEXT(ptn, pt_list))
				timespecsub(&ptn->pt_time.it_value,
				    &pt->pt_time.it_value,
				    &ptn->pt_time.it_value);

			pt->pt_active = 1;
		} else
			pt->pt_active = 0;
	}
}
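
/*
 * Worked example of the insertion above (values invented for
 * exposition): arming a third virtual timer with a 7s value while
 * the list holds the deltas 5 and 3 (absolute expiries 5s and 8s):
 * the first loop walks past the 5-entry, reducing 7 to 2, and stops
 * at the 3-entry because 2 < 3.  The timer is inserted with delta 2
 * and the 3-entry is shrunk to 1, preserving the absolute expiries
 * 5s, 7s and 8s.
 */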
728 */ 729 if (timespecisset(&aits->it_value)) { 730 getnanotime(&now); 731 if (timespeccmp(&aits->it_value, &now, <)) 732 timespecclear(&aits->it_value); 733 else 734 timespecsub(&aits->it_value, &now, 735 &aits->it_value); 736 } 737 } else if (pt->pt_active) { 738 if (pt->pt_type == CLOCK_VIRTUAL) 739 ptn = LIST_FIRST(&pt->pt_proc->p_timers->pts_virtual); 740 else 741 ptn = LIST_FIRST(&pt->pt_proc->p_timers->pts_prof); 742 for ( ; ptn && ptn != pt; ptn = LIST_NEXT(ptn, pt_list)) 743 timespecadd(&aits->it_value, 744 &ptn->pt_time.it_value, &aits->it_value); 745 KASSERT(ptn != NULL); /* pt should be findable on the list */ 746 } else 747 timespecclear(&aits->it_value); 748} 749 750 751 752/* Set and arm a POSIX realtime timer */ 753int 754sys_timer_settime(struct lwp *l, const struct sys_timer_settime_args *uap, 755 register_t *retval) 756{ 757 /* { 758 syscallarg(timer_t) timerid; 759 syscallarg(int) flags; 760 syscallarg(const struct itimerspec *) value; 761 syscallarg(struct itimerspec *) ovalue; 762 } */ 763 int error; 764 struct itimerspec value, ovalue, *ovp = NULL; 765 766 if ((error = copyin(SCARG(uap, value), &value, 767 sizeof(struct itimerspec))) != 0) 768 return (error); 769 770 if (SCARG(uap, ovalue)) 771 ovp = &ovalue; 772 773 if ((error = dotimer_settime(SCARG(uap, timerid), &value, ovp, 774 SCARG(uap, flags), l->l_proc)) != 0) 775 return error; 776 777 if (ovp) 778 return copyout(&ovalue, SCARG(uap, ovalue), 779 sizeof(struct itimerspec)); 780 return 0; 781} 782 783int 784dotimer_settime(int timerid, struct itimerspec *value, 785 struct itimerspec *ovalue, int flags, struct proc *p) 786{ 787 struct timespec now; 788 struct itimerspec val, oval; 789 struct ptimers *pts; 790 struct ptimer *pt; 791 792 pts = p->p_timers; 793 794 if (pts == NULL || timerid < 2 || timerid >= TIMER_MAX) 795 return EINVAL; 796 val = *value; 797 if (itimespecfix(&val.it_value) || itimespecfix(&val.it_interval)) 798 return EINVAL; 799 800 mutex_spin_enter(&timer_lock); 801 if ((pt = pts->pts_timers[timerid]) == NULL) { 802 mutex_spin_exit(&timer_lock); 803 return EINVAL; 804 } 805 806 oval = pt->pt_time; 807 pt->pt_time = val; 808 809 /* 810 * If we've been passed a relative time for a realtime timer, 811 * convert it to absolute; if an absolute time for a virtual 812 * timer, convert it to relative and make sure we don't set it 813 * to zero, which would cancel the timer, or let it go 814 * negative, which would confuse the comparison tests. 815 */ 816 if (timespecisset(&pt->pt_time.it_value)) { 817 if (pt->pt_type == CLOCK_REALTIME) { 818 if ((flags & TIMER_ABSTIME) == 0) { 819 getnanotime(&now); 820 timespecadd(&pt->pt_time.it_value, &now, 821 &pt->pt_time.it_value); 822 } 823 } else { 824 if ((flags & TIMER_ABSTIME) != 0) { 825 getnanotime(&now); 826 timespecsub(&pt->pt_time.it_value, &now, 827 &pt->pt_time.it_value); 828 if (!timespecisset(&pt->pt_time.it_value) || 829 pt->pt_time.it_value.tv_sec < 0) { 830 pt->pt_time.it_value.tv_sec = 0; 831 pt->pt_time.it_value.tv_nsec = 1; 832 } 833 } 834 } 835 } 836 837 timer_settime(pt); 838 mutex_spin_exit(&timer_lock); 839 840 if (ovalue) 841 *ovalue = oval; 842 843 return (0); 844} 845 846/* Return the time remaining until a POSIX timer fires. 

/* Return the time remaining until a POSIX timer fires. */
int
sys_timer_gettime(struct lwp *l, const struct sys_timer_gettime_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(timer_t) timerid;
		syscallarg(struct itimerspec *) value;
	} */
	struct itimerspec its;
	int error;

	if ((error = dotimer_gettime(SCARG(uap, timerid), l->l_proc,
	    &its)) != 0)
		return error;

	return copyout(&its, SCARG(uap, value), sizeof(its));
}

int
dotimer_gettime(int timerid, struct proc *p, struct itimerspec *its)
{
	struct ptimer *pt;
	struct ptimers *pts;

	pts = p->p_timers;
	if (pts == NULL || timerid < 2 || timerid >= TIMER_MAX)
		return (EINVAL);
	mutex_spin_enter(&timer_lock);
	if ((pt = pts->pts_timers[timerid]) == NULL) {
		mutex_spin_exit(&timer_lock);
		return (EINVAL);
	}
	timer_gettime(pt, its);
	mutex_spin_exit(&timer_lock);

	return 0;
}

/*
 * Return the number of times a periodic timer expired while a
 * notification was already pending.  The counter is reset when
 * a timer expires and a notification can be posted.
 */
int
sys_timer_getoverrun(struct lwp *l, const struct sys_timer_getoverrun_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(timer_t) timerid;
	} */
	struct proc *p = l->l_proc;
	struct ptimers *pts;
	int timerid;
	struct ptimer *pt;

	timerid = SCARG(uap, timerid);

	pts = p->p_timers;
	if (pts == NULL || timerid < 2 || timerid >= TIMER_MAX)
		return (EINVAL);
	mutex_spin_enter(&timer_lock);
	if ((pt = pts->pts_timers[timerid]) == NULL) {
		mutex_spin_exit(&timer_lock);
		return (EINVAL);
	}
	*retval = pt->pt_poverruns;
	mutex_spin_exit(&timer_lock);

	return (0);
}

#ifdef KERN_SA
/* Glue function that triggers an upcall; called from userret(). */
void
timerupcall(struct lwp *l)
{
	struct ptimers *pt = l->l_proc->p_timers;
	struct proc *p = l->l_proc;
	unsigned int i, fired, done;

	KDASSERT(l->l_proc->p_sa);
	/* Bail out if we do not own the virtual processor */
	if (l->l_savp->savp_lwp != l)
		return;

	mutex_enter(p->p_lock);

	fired = pt->pts_fired;
	done = 0;
	while ((i = ffs(fired)) != 0) {
		siginfo_t *si;
		int mask = 1 << --i;
		int f;

		f = ~l->l_pflag & LP_SA_NOBLOCK;
		l->l_pflag |= LP_SA_NOBLOCK;
		si = siginfo_alloc(PR_WAITOK);
		si->_info = pt->pts_timers[i]->pt_info.ksi_info;
		if (sa_upcall(l, SA_UPCALL_SIGEV | SA_UPCALL_DEFER, NULL, l,
		    sizeof(*si), si, siginfo_free) != 0) {
			siginfo_free(si);
			/* XXX What do we do here?? */
		} else
			done |= mask;
		fired &= ~mask;
		l->l_pflag ^= f;
	}
	pt->pts_fired &= ~done;
	if (pt->pts_fired == 0)
		l->l_proc->p_timerpend = 0;

	mutex_exit(p->p_lock);
}
#endif /* KERN_SA */
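
/*
 * Overrun accounting example (values invented for exposition): a
 * periodic timer with a 10ms interval whose signal stays blocked for
 * about 35ms expires three more times while the first notification
 * is still pending; each of those expiries bumps pt_overruns.  Once
 * the signal can be queued, the count is latched into pt_poverruns,
 * which is what sys_timer_getoverrun() reports, and pt_overruns
 * restarts from zero.
 */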

/*
 * Real interval timer expired:
 * send process whose timer expired an alarm signal.
 * If time is not set up to reload, then just return.
 * Else compute next time timer should go off which is > current time.
 * This is where delay in processing this timeout causes multiple
 * SIGALRM calls to be compressed into one.
 */
void
realtimerexpire(void *arg)
{
	uint64_t last_val, next_val, interval, now_ns;
	struct timespec now, next;
	struct ptimer *pt;
	int backwards;

	pt = arg;

	mutex_spin_enter(&timer_lock);
	itimerfire(pt);

	if (!timespecisset(&pt->pt_time.it_interval)) {
		timespecclear(&pt->pt_time.it_value);
		mutex_spin_exit(&timer_lock);
		return;
	}

	getnanotime(&now);
	backwards = (timespeccmp(&pt->pt_time.it_value, &now, >));
	timespecadd(&pt->pt_time.it_value, &pt->pt_time.it_interval, &next);
	/* Handle the easy case of non-overflown timers first. */
	if (!backwards && timespeccmp(&next, &now, >)) {
		pt->pt_time.it_value = next;
	} else {
		now_ns = timespec2ns(&now);
		last_val = timespec2ns(&pt->pt_time.it_value);
		interval = timespec2ns(&pt->pt_time.it_interval);

		next_val = now_ns +
		    (now_ns - last_val + interval - 1) % interval;

		if (backwards)
			next_val += interval;
		else
			pt->pt_overruns += (now_ns - last_val) / interval;

		pt->pt_time.it_value.tv_sec = next_val / 1000000000;
		pt->pt_time.it_value.tv_nsec = next_val % 1000000000;
	}

	/*
	 * Don't need to check tshzto() return value, here.
	 * callout_reset() does it for us.
	 */
	callout_reset(&pt->pt_ch, tshzto(&pt->pt_time.it_value),
	    realtimerexpire, pt);
	mutex_spin_exit(&timer_lock);
}
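
/*
 * Worked example for the reload arithmetic above (values invented for
 * exposition, treated as nanosecond counts): with last_val = 100,
 * interval = 50 and now_ns = 230, the callout is 130ns late, so two
 * whole extra intervals have elapsed and pt_overruns is advanced by
 * 130 / 50 = 2.  The modular expression gives (230 - 100 + 49) % 50
 * = 29, so next_val = 259: the new expiry always lands within one
 * interval after the current time, guaranteeing forward progress.
 * If the clock was stepped backwards, a full interval is added
 * instead and no overruns are counted.
 */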

/* BSD routine to get the value of an interval timer. */
/* ARGSUSED */
int
sys_getitimer(struct lwp *l, const struct sys_getitimer_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(int) which;
		syscallarg(struct itimerval *) itv;
	} */
	struct proc *p = l->l_proc;
	struct itimerval aitv;
	int error;

	error = dogetitimer(p, SCARG(uap, which), &aitv);
	if (error)
		return error;
	return (copyout(&aitv, SCARG(uap, itv), sizeof(struct itimerval)));
}

int
dogetitimer(struct proc *p, int which, struct itimerval *itvp)
{
	struct ptimers *pts;
	struct ptimer *pt;
	struct itimerspec its;

	if ((u_int)which > ITIMER_PROF)
		return (EINVAL);

	mutex_spin_enter(&timer_lock);
	pts = p->p_timers;
	if (pts == NULL || (pt = pts->pts_timers[which]) == NULL) {
		timerclear(&itvp->it_value);
		timerclear(&itvp->it_interval);
	} else {
		timer_gettime(pt, &its);
		TIMESPEC_TO_TIMEVAL(&itvp->it_value, &its.it_value);
		TIMESPEC_TO_TIMEVAL(&itvp->it_interval, &its.it_interval);
	}
	mutex_spin_exit(&timer_lock);

	return 0;
}

/* BSD routine to set/arm an interval timer. */
/* ARGSUSED */
int
sys_setitimer(struct lwp *l, const struct sys_setitimer_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(int) which;
		syscallarg(const struct itimerval *) itv;
		syscallarg(struct itimerval *) oitv;
	} */
	struct proc *p = l->l_proc;
	int which = SCARG(uap, which);
	struct sys_getitimer_args getargs;
	const struct itimerval *itvp;
	struct itimerval aitv;
	int error;

	if ((u_int)which > ITIMER_PROF)
		return (EINVAL);
	itvp = SCARG(uap, itv);
	if (itvp &&
	    (error = copyin(itvp, &aitv, sizeof(struct itimerval))) != 0)
		return (error);
	if (SCARG(uap, oitv) != NULL) {
		SCARG(&getargs, which) = which;
		SCARG(&getargs, itv) = SCARG(uap, oitv);
		if ((error = sys_getitimer(l, &getargs, retval)) != 0)
			return (error);
	}
	if (itvp == 0)
		return (0);

	return dosetitimer(p, which, &aitv);
}

int
dosetitimer(struct proc *p, int which, struct itimerval *itvp)
{
	struct timespec now;
	struct ptimers *pts;
	struct ptimer *pt, *spare;

	if (itimerfix(&itvp->it_value) || itimerfix(&itvp->it_interval))
		return (EINVAL);

	/*
	 * Don't bother allocating data structures if the process just
	 * wants to clear the timer.
	 */
	spare = NULL;
	pts = p->p_timers;
 retry:
	if (!timerisset(&itvp->it_value) && (pts == NULL ||
	    pts->pts_timers[which] == NULL))
		return (0);
	if (pts == NULL)
		pts = timers_alloc(p);
	mutex_spin_enter(&timer_lock);
	pt = pts->pts_timers[which];
	if (pt == NULL) {
		if (spare == NULL) {
			mutex_spin_exit(&timer_lock);
			spare = pool_get(&ptimer_pool, PR_WAITOK);
			goto retry;
		}
		pt = spare;
		spare = NULL;
		pt->pt_ev.sigev_notify = SIGEV_SIGNAL;
		pt->pt_ev.sigev_value.sival_int = which;
		pt->pt_overruns = 0;
		pt->pt_proc = p;
		pt->pt_type = which;
		pt->pt_entry = which;
		pt->pt_queued = false;
		if (pt->pt_type == CLOCK_REALTIME)
			callout_init(&pt->pt_ch, CALLOUT_MPSAFE);
		else
			pt->pt_active = 0;

		switch (which) {
		case ITIMER_REAL:
			pt->pt_ev.sigev_signo = SIGALRM;
			break;
		case ITIMER_VIRTUAL:
			pt->pt_ev.sigev_signo = SIGVTALRM;
			break;
		case ITIMER_PROF:
			pt->pt_ev.sigev_signo = SIGPROF;
			break;
		}
		pts->pts_timers[which] = pt;
	}

	TIMEVAL_TO_TIMESPEC(&itvp->it_value, &pt->pt_time.it_value);
	TIMEVAL_TO_TIMESPEC(&itvp->it_interval, &pt->pt_time.it_interval);

	if ((which == ITIMER_REAL) && timespecisset(&pt->pt_time.it_value)) {
		/* Convert to absolute time */
		/* XXX need to wrap in splclock for timecounters case? */
		getnanotime(&now);
		timespecadd(&pt->pt_time.it_value, &now, &pt->pt_time.it_value);
	}
	timer_settime(pt);
	mutex_spin_exit(&timer_lock);
	if (spare != NULL)
		pool_put(&ptimer_pool, spare);

	return (0);
}
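
/*
 * A note on the retry dance in dosetitimer() above: pool_get() with
 * PR_WAITOK may sleep, which is not permitted while holding the
 * timer_lock spin mutex.  So when no ptimer exists yet, the lock is
 * dropped, a spare entry is allocated, and the lookup is retried from
 * scratch in case the timer was installed in the meantime; an unused
 * spare is returned to the pool at the end.
 */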

/* Utility routines to manage the array of pointers to timers. */
struct ptimers *
timers_alloc(struct proc *p)
{
	struct ptimers *pts;
	int i;

	pts = pool_get(&ptimers_pool, PR_WAITOK);
	LIST_INIT(&pts->pts_virtual);
	LIST_INIT(&pts->pts_prof);
	for (i = 0; i < TIMER_MAX; i++)
		pts->pts_timers[i] = NULL;
	pts->pts_fired = 0;
	mutex_spin_enter(&timer_lock);
	if (p->p_timers == NULL) {
		p->p_timers = pts;
		mutex_spin_exit(&timer_lock);
		return pts;
	}
	mutex_spin_exit(&timer_lock);
	pool_put(&ptimers_pool, pts);
	return p->p_timers;
}

/*
 * Clean up the per-process timers.  If "which" is set to TIMERS_ALL,
 * then clean up all timers and free all the data structures.  If
 * "which" is set to TIMERS_POSIX, only clean up the timers allocated
 * by timer_create(), not the BSD setitimer() timers, and only free the
 * structure if none of those remain.
 */
void
timers_free(struct proc *p, int which)
{
	struct ptimers *pts;
	struct ptimer *ptn;
	struct timespec ts;
	int i;

	if (p->p_timers == NULL)
		return;

	pts = p->p_timers;
	mutex_spin_enter(&timer_lock);
	if (which == TIMERS_ALL) {
		p->p_timers = NULL;
		i = 0;
	} else {
		timespecclear(&ts);
		for (ptn = LIST_FIRST(&pts->pts_virtual);
		     ptn && ptn != pts->pts_timers[ITIMER_VIRTUAL];
		     ptn = LIST_NEXT(ptn, pt_list)) {
			KASSERT(ptn->pt_type != CLOCK_REALTIME);
			timespecadd(&ts, &ptn->pt_time.it_value, &ts);
		}
		LIST_FIRST(&pts->pts_virtual) = NULL;
		if (ptn) {
			KASSERT(ptn->pt_type != CLOCK_REALTIME);
			timespecadd(&ts, &ptn->pt_time.it_value,
			    &ptn->pt_time.it_value);
			LIST_INSERT_HEAD(&pts->pts_virtual, ptn, pt_list);
		}
		timespecclear(&ts);
		for (ptn = LIST_FIRST(&pts->pts_prof);
		     ptn && ptn != pts->pts_timers[ITIMER_PROF];
		     ptn = LIST_NEXT(ptn, pt_list)) {
			KASSERT(ptn->pt_type != CLOCK_REALTIME);
			timespecadd(&ts, &ptn->pt_time.it_value, &ts);
		}
		LIST_FIRST(&pts->pts_prof) = NULL;
		if (ptn) {
			KASSERT(ptn->pt_type != CLOCK_REALTIME);
			timespecadd(&ts, &ptn->pt_time.it_value,
			    &ptn->pt_time.it_value);
			LIST_INSERT_HEAD(&pts->pts_prof, ptn, pt_list);
		}
		i = 3;
	}
	for ( ; i < TIMER_MAX; i++) {
		if (pts->pts_timers[i] != NULL) {
			itimerfree(pts, i);
			mutex_spin_enter(&timer_lock);
		}
	}
	if (pts->pts_timers[0] == NULL && pts->pts_timers[1] == NULL &&
	    pts->pts_timers[2] == NULL) {
		p->p_timers = NULL;
		mutex_spin_exit(&timer_lock);
		pool_put(&ptimers_pool, pts);
	} else
		mutex_spin_exit(&timer_lock);
}

static void
itimerfree(struct ptimers *pts, int index)
{
	struct ptimer *pt;

	KASSERT(mutex_owned(&timer_lock));

	pt = pts->pts_timers[index];
	pts->pts_timers[index] = NULL;
	if (pt->pt_type == CLOCK_REALTIME)
		callout_halt(&pt->pt_ch, &timer_lock);
	else if (pt->pt_queued)
		TAILQ_REMOVE(&timer_queue, pt, pt_chain);
	mutex_spin_exit(&timer_lock);
	if (pt->pt_type == CLOCK_REALTIME)
		callout_destroy(&pt->pt_ch);
	pool_put(&ptimer_pool, pt);
}

/*
 * Decrement an interval timer by a specified number
 * of nanoseconds, which must be less than a second,
 * i.e. < 1000000000.  If the timer expires, then reload
 * it.  In this case, carry over (nsec - old value) to
 * reduce the value reloaded into the timer so that
 * the timer does not drift.  This routine assumes
 * that it is called in a context where the timers
 * on which it is operating cannot change in value.
 */
static int
itimerdecr(struct ptimer *pt, int nsec)
{
	struct itimerspec *itp;

	KASSERT(mutex_owned(&timer_lock));

	itp = &pt->pt_time;
	if (itp->it_value.tv_nsec < nsec) {
		if (itp->it_value.tv_sec == 0) {
			/* expired, and already in next interval */
			nsec -= itp->it_value.tv_nsec;
			goto expire;
		}
		itp->it_value.tv_nsec += 1000000000;
		itp->it_value.tv_sec--;
	}
	itp->it_value.tv_nsec -= nsec;
	nsec = 0;
	if (timespecisset(&itp->it_value))
		return (1);
	/* expired, exactly at end of interval */
expire:
	if (timespecisset(&itp->it_interval)) {
		itp->it_value = itp->it_interval;
		itp->it_value.tv_nsec -= nsec;
		if (itp->it_value.tv_nsec < 0) {
			itp->it_value.tv_nsec += 1000000000;
			itp->it_value.tv_sec--;
		}
		timer_settime(pt);
	} else
		itp->it_value.tv_nsec = 0;	/* sec is already 0 */
	return (0);
}
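
/*
 * Worked example for itimerdecr() above (values invented for
 * exposition): with it_value = { 0, 5000000 } (5ms remaining) and
 * nsec = 10000000 (one 10ms tick), tv_sec is zero, so the timer has
 * expired with a 5ms overshoot.  Given a 50ms it_interval, it is
 * reloaded to 50 - 5 = 45ms so the long-run expiry schedule does not
 * drift, and 0 is returned to tell the caller to fire the timer.
 */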

static void
itimerfire(struct ptimer *pt)
{

	KASSERT(mutex_owned(&timer_lock));

	/*
	 * XXX Can overrun, but we don't do signal queueing yet, anyway.
	 * XXX Relying on the clock interrupt is stupid.
	 */
	if ((pt->pt_ev.sigev_notify == SIGEV_SA && pt->pt_proc->p_sa == NULL) ||
	    (pt->pt_ev.sigev_notify != SIGEV_SIGNAL &&
	    pt->pt_ev.sigev_notify != SIGEV_SA) || pt->pt_queued)
		return;
	TAILQ_INSERT_TAIL(&timer_queue, pt, pt_chain);
	pt->pt_queued = true;
	softint_schedule(timer_sih);
}

void
timer_tick(lwp_t *l, bool user)
{
	struct ptimers *pts;
	struct ptimer *pt;
	proc_t *p;

	p = l->l_proc;
	if (p->p_timers == NULL)
		return;

	mutex_spin_enter(&timer_lock);
	if ((pts = l->l_proc->p_timers) != NULL) {
		/*
		 * Run current process's virtual and profile time, as needed.
		 */
		if (user && (pt = LIST_FIRST(&pts->pts_virtual)) != NULL)
			if (itimerdecr(pt, tick * 1000) == 0)
				itimerfire(pt);
		if ((pt = LIST_FIRST(&pts->pts_prof)) != NULL)
			if (itimerdecr(pt, tick * 1000) == 0)
				itimerfire(pt);
	}
	mutex_spin_exit(&timer_lock);
}
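
/*
 * timer_tick() is driven by hardclock(), so each call accounts for
 * one clock tick of CPU time: tick is in microseconds, hence the
 * tick * 1000 nanoseconds passed to itimerdecr() (10000000ns per
 * tick with the traditional HZ=100).  Virtual time advances only for
 * ticks taken in user mode; profiling time advances for every tick
 * the process consumes.
 */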

#ifdef KERN_SA
/*
 * timer_sa_intr:
 *
 *	SIGEV_SA handling for timer_intr(). We are called (and return)
 *	with the timer lock held. We know that the process had SA enabled
 *	when this timer was enqueued. As timer_intr() is a soft interrupt
 *	handler, SA should still be enabled by the time we get here.
 *
 * XXX Is it legit to lock p_lock and the lwp at this time?
 */
static void
timer_sa_intr(struct ptimer *pt, proc_t *p)
{
	unsigned int i;
	struct sadata_vp *vp;

	/* Cause the process to generate an upcall when it returns. */
	if (!p->p_timerpend) {
		/*
		 * XXX stop signals can be processed inside tsleep,
		 * which can be inside sa_yield's inner loop, which
		 * makes testing for sa_idle alone insufficient to
		 * determine if we really should call setrunnable.
		 */
		pt->pt_poverruns = pt->pt_overruns;
		pt->pt_overruns = 0;
		i = 1 << pt->pt_entry;
		p->p_timers->pts_fired = i;
		p->p_timerpend = 1;

		mutex_enter(p->p_lock);
		SLIST_FOREACH(vp, &p->p_sa->sa_vps, savp_next) {
			lwp_lock(vp->savp_lwp);
			lwp_need_userret(vp->savp_lwp);
			if (vp->savp_lwp->l_flag & LW_SA_IDLE) {
				vp->savp_lwp->l_flag &= ~LW_SA_IDLE;
				lwp_unsleep(vp->savp_lwp, true);
				break;
			}
			lwp_unlock(vp->savp_lwp);
		}
		mutex_exit(p->p_lock);
	} else {
		i = 1 << pt->pt_entry;
		if ((p->p_timers->pts_fired & i) == 0) {
			pt->pt_poverruns = pt->pt_overruns;
			pt->pt_overruns = 0;
			p->p_timers->pts_fired |= i;
		} else
			pt->pt_overruns++;
	}
}
#endif /* KERN_SA */

static void
timer_intr(void *cookie)
{
	ksiginfo_t ksi;
	struct ptimer *pt;
	proc_t *p;

	mutex_spin_enter(&timer_lock);
	while ((pt = TAILQ_FIRST(&timer_queue)) != NULL) {
		TAILQ_REMOVE(&timer_queue, pt, pt_chain);
		KASSERT(pt->pt_queued);
		pt->pt_queued = false;

		if (pt->pt_proc->p_timers == NULL) {
			/* Process is dying. */
			continue;
		}
		p = pt->pt_proc;
#ifdef KERN_SA
		if (pt->pt_ev.sigev_notify == SIGEV_SA) {
			timer_sa_intr(pt, p);
			continue;
		}
#endif /* KERN_SA */
		if (pt->pt_ev.sigev_notify != SIGEV_SIGNAL)
			continue;
		if (sigismember(&p->p_sigpend.sp_set, pt->pt_ev.sigev_signo)) {
			pt->pt_overruns++;
			continue;
		}

		KSI_INIT(&ksi);
		ksi.ksi_signo = pt->pt_ev.sigev_signo;
		ksi.ksi_code = SI_TIMER;
		ksi.ksi_value = pt->pt_ev.sigev_value;
		pt->pt_poverruns = pt->pt_overruns;
		pt->pt_overruns = 0;
		mutex_spin_exit(&timer_lock);

		mutex_enter(proc_lock);
		kpsignal(p, &ksi, NULL);
		mutex_exit(proc_lock);

		mutex_spin_enter(&timer_lock);
	}
	mutex_spin_exit(&timer_lock);
}