/*	$NetBSD: kern_time.c,v 1.170 2011/10/27 16:12:52 christos Exp $	*/

/*-
 * Copyright (c) 2000, 2004, 2005, 2007, 2008, 2009 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Christopher G. Demetriou, and by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * @(#)kern_time.c	8.4 (Berkeley) 5/26/95
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: kern_time.c,v 1.170 2011/10/27 16:12:52 christos Exp $");

#include <sys/param.h>
#include <sys/resourcevar.h>
#include <sys/kernel.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <sys/signalvar.h>
#include <sys/syslog.h>
#include <sys/timetc.h>
#include <sys/timex.h>
#include <sys/kauth.h>
#include <sys/mount.h>
#include <sys/sa.h>
#include <sys/savar.h>
#include <sys/syscallargs.h>
#include <sys/cpu.h>

#include "opt_sa.h"

static void	timer_intr(void *);
static void	itimerfire(struct ptimer *);
static void	itimerfree(struct ptimers *, int);

kmutex_t	timer_lock;

static void	*timer_sih;
static TAILQ_HEAD(, ptimer) timer_queue;

struct pool ptimer_pool, ptimers_pool;

#define	CLOCK_VIRTUAL_P(clockid)	\
	((clockid) == CLOCK_VIRTUAL || (clockid) == CLOCK_PROF)

CTASSERT(ITIMER_REAL == CLOCK_REALTIME);
CTASSERT(ITIMER_VIRTUAL == CLOCK_VIRTUAL);
CTASSERT(ITIMER_PROF == CLOCK_PROF);
CTASSERT(ITIMER_MONOTONIC == CLOCK_MONOTONIC);

/*
 * Initialize timekeeping.
 */
void
time_init(void)
{

	pool_init(&ptimer_pool, sizeof(struct ptimer), 0, 0, 0, "ptimerpl",
	    &pool_allocator_nointr, IPL_NONE);
	pool_init(&ptimers_pool, sizeof(struct ptimers), 0, 0, 0, "ptimerspl",
	    &pool_allocator_nointr, IPL_NONE);
}

void
time_init2(void)
{

	TAILQ_INIT(&timer_queue);
	mutex_init(&timer_lock, MUTEX_DEFAULT, IPL_SCHED);
	timer_sih = softint_establish(SOFTINT_CLOCK | SOFTINT_MPSAFE,
	    timer_intr, NULL);
}

/* Time of day and interval timer support.
 *
 * These routines provide the kernel entry points to get and set
 * the time-of-day and per-process interval timers.  Subroutines
 * here provide support for adding and subtracting timeval structures
 * and decrementing interval timers, optionally reloading the interval
 * timers when they expire.
 */

/* This function is used by clock_settime and settimeofday */
static int
settime1(struct proc *p, const struct timespec *ts, bool check_kauth)
{
	struct timespec delta, now;
	int s;

	/* WHAT DO WE DO ABOUT PENDING REAL-TIME TIMEOUTS??? */
	s = splclock();
	nanotime(&now);
	timespecsub(ts, &now, &delta);

	if (check_kauth && kauth_authorize_system(kauth_cred_get(),
	    KAUTH_SYSTEM_TIME, KAUTH_REQ_SYSTEM_TIME_SYSTEM, __UNCONST(ts),
	    &delta, KAUTH_ARG(check_kauth ? false : true)) != 0) {
		splx(s);
		return (EPERM);
	}

#ifdef notyet
	if ((delta.tv_sec < 86400) && securelevel > 0) {	/* XXX elad - notyet */
		splx(s);
		return (EPERM);
	}
#endif

	tc_setclock(ts);

	timespecadd(&boottime, &delta, &boottime);

	resettodr();
	splx(s);

	return (0);
}

int
settime(struct proc *p, struct timespec *ts)
{
	return (settime1(p, ts, true));
}

/* ARGSUSED */
int
sys___clock_gettime50(struct lwp *l,
    const struct sys___clock_gettime50_args *uap, register_t *retval)
{
	/* {
		syscallarg(clockid_t) clock_id;
		syscallarg(struct timespec *) tp;
	} */
	int error;
	struct timespec ats;

	error = clock_gettime1(SCARG(uap, clock_id), &ats);
	if (error != 0)
		return error;

	return copyout(&ats, SCARG(uap, tp), sizeof(ats));
}

int
clock_gettime1(clockid_t clock_id, struct timespec *ts)
{

	switch (clock_id) {
	case CLOCK_REALTIME:
		nanotime(ts);
		break;
	case CLOCK_MONOTONIC:
		nanouptime(ts);
		break;
	default:
		return EINVAL;
	}

	return 0;
}

/* ARGSUSED */
int
sys___clock_settime50(struct lwp *l,
    const struct sys___clock_settime50_args *uap, register_t *retval)
{
	/* {
		syscallarg(clockid_t) clock_id;
		syscallarg(const struct timespec *) tp;
	} */
	int error;
	struct timespec ats;

	if ((error = copyin(SCARG(uap, tp), &ats, sizeof(ats))) != 0)
		return error;

	return clock_settime1(l->l_proc, SCARG(uap, clock_id), &ats, true);
}

int
clock_settime1(struct proc *p, clockid_t clock_id, const struct timespec *tp,
    bool check_kauth)
{
	int error;

	switch (clock_id) {
	case CLOCK_REALTIME:
		if ((error = settime1(p, tp, check_kauth)) != 0)
			return (error);
		break;
	case CLOCK_MONOTONIC:
		return (EINVAL);	/* read-only clock */
	default:
		return (EINVAL);
	}

	return 0;
}

int
sys___clock_getres50(struct lwp *l, const struct sys___clock_getres50_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(clockid_t) clock_id;
		syscallarg(struct timespec *) tp;
	} */
	struct timespec ts;
	int error = 0;

	if ((error = clock_getres1(SCARG(uap, clock_id), &ts)) != 0)
		return error;

	if (SCARG(uap, tp))
		error = copyout(&ts, SCARG(uap, tp), sizeof(ts));

	return error;
}

int
clock_getres1(clockid_t clock_id, struct timespec *ts)
{

	switch (clock_id) {
	case CLOCK_REALTIME:
	case CLOCK_MONOTONIC:
		ts->tv_sec = 0;
		if (tc_getfrequency() > 1000000000)
			ts->tv_nsec = 1;
		else
			ts->tv_nsec = 1000000000 / tc_getfrequency();
		break;
	default:
		return EINVAL;
	}

	return 0;
}

/* ARGSUSED */
int
sys___nanosleep50(struct lwp *l, const struct sys___nanosleep50_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(struct timespec *) rqtp;
		syscallarg(struct timespec *) rmtp;
	} */
	struct timespec rmt, rqt;
	int error, error1;

	error = copyin(SCARG(uap, rqtp), &rqt, sizeof(struct timespec));
	if (error)
		return (error);

	error = nanosleep1(l, &rqt, SCARG(uap, rmtp) ? &rmt : NULL);
	if (SCARG(uap, rmtp) == NULL || (error != 0 && error != EINTR))
		return error;

	error1 = copyout(&rmt, SCARG(uap, rmtp), sizeof(rmt));
	return error1 ? error1 : error;
}
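
/*
 * Common implementation of nanosleep(2): sleep for the interval in
 * *rqt; if rmt is not NULL, the unslept remainder is written there.
 */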
int
nanosleep1(struct lwp *l, struct timespec *rqt, struct timespec *rmt)
{
	struct timespec rmtstart;
	int error, timo;

	if ((error = itimespecfix(rqt)) != 0)
		return error;

	timo = tstohz(rqt);
	/*
	 * Avoid inadvertently sleeping forever
	 */
	if (timo == 0)
		timo = 1;
	getnanouptime(&rmtstart);
again:
	error = kpause("nanoslp", true, timo, NULL);
	if (rmt != NULL || error == 0) {
		struct timespec rmtend;
		struct timespec t0;
		struct timespec *t;

		getnanouptime(&rmtend);
		t = (rmt != NULL) ? rmt : &t0;
		timespecsub(&rmtend, &rmtstart, t);
		timespecsub(rqt, t, t);
		if (t->tv_sec < 0)
			timespecclear(t);
		if (error == 0) {
			timo = tstohz(t);
			if (timo > 0)
				goto again;
		}
	}

	if (error == ERESTART)
		error = EINTR;
	if (error == EWOULDBLOCK)
		error = 0;

	return error;
}

/* ARGSUSED */
int
sys___gettimeofday50(struct lwp *l, const struct sys___gettimeofday50_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(struct timeval *) tp;
		syscallarg(void *) tzp;		really "struct timezone *";
	} */
	struct timeval atv;
	int error = 0;
	struct timezone tzfake;

	if (SCARG(uap, tp)) {
		microtime(&atv);
		error = copyout(&atv, SCARG(uap, tp), sizeof(atv));
		if (error)
			return (error);
	}
	if (SCARG(uap, tzp)) {
		/*
		 * NetBSD has no kernel notion of time zone, so we just
		 * fake up a timezone struct and return it if demanded.
		 */
		tzfake.tz_minuteswest = 0;
		tzfake.tz_dsttime = 0;
		error = copyout(&tzfake, SCARG(uap, tzp), sizeof(tzfake));
	}
	return (error);
}

/* ARGSUSED */
int
sys___settimeofday50(struct lwp *l, const struct sys___settimeofday50_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(const struct timeval *) tv;
		syscallarg(const void *) tzp;	really "const struct timezone *";
	} */

	return settimeofday1(SCARG(uap, tv), true, SCARG(uap, tzp), l, true);
}

int
settimeofday1(const struct timeval *utv, bool userspace,
    const void *utzp, struct lwp *l, bool check_kauth)
{
	struct timeval atv;
	struct timespec ts;
	int error;

	/* Verify all parameters before changing time. */

	/*
	 * NetBSD has no kernel notion of time zone, and only an
	 * obsolete program would try to set it, so we log a warning.
	 */
	if (utzp)
		log(LOG_WARNING, "pid %d attempted to set the "
		    "(obsolete) kernel time zone\n", l->l_proc->p_pid);

	if (utv == NULL)
		return 0;

	if (userspace) {
		if ((error = copyin(utv, &atv, sizeof(atv))) != 0)
			return error;
		utv = &atv;
	}

	TIMEVAL_TO_TIMESPEC(utv, &ts);
	return settime1(l->l_proc, &ts, check_kauth);
}

int	time_adjusted;			/* set if an adjustment is made */

/* ARGSUSED */
int
sys___adjtime50(struct lwp *l, const struct sys___adjtime50_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(const struct timeval *) delta;
		syscallarg(struct timeval *) olddelta;
	} */
	int error = 0;
	struct timeval atv, oldatv;

	if ((error = kauth_authorize_system(l->l_cred, KAUTH_SYSTEM_TIME,
	    KAUTH_REQ_SYSTEM_TIME_ADJTIME, NULL, NULL, NULL)) != 0)
		return error;

	if (SCARG(uap, delta)) {
		error = copyin(SCARG(uap, delta), &atv,
		    sizeof(*SCARG(uap, delta)));
		if (error)
			return (error);
	}
	adjtime1(SCARG(uap, delta) ? &atv : NULL,
	    SCARG(uap, olddelta) ? &oldatv : NULL, l->l_proc);
	if (SCARG(uap, olddelta))
		error = copyout(&oldatv, SCARG(uap, olddelta),
		    sizeof(*SCARG(uap, olddelta)));
	return error;
}

void
adjtime1(const struct timeval *delta, struct timeval *olddelta, struct proc *p)
{
	extern int64_t time_adjtime;	/* in kern_ntptime.c */

	if (olddelta) {
		mutex_spin_enter(&timecounter_lock);
		olddelta->tv_sec = time_adjtime / 1000000;
		olddelta->tv_usec = time_adjtime % 1000000;
		if (olddelta->tv_usec < 0) {
			olddelta->tv_usec += 1000000;
			olddelta->tv_sec--;
		}
		mutex_spin_exit(&timecounter_lock);
	}

	if (delta) {
		mutex_spin_enter(&timecounter_lock);
		time_adjtime = delta->tv_sec * 1000000 + delta->tv_usec;

		if (time_adjtime) {
			/* We need to save the system time during shutdown */
			time_adjusted |= 1;
		}
		mutex_spin_exit(&timecounter_lock);
	}
}

/*
 * Interval timer support. Both the BSD getitimer() family and the POSIX
 * timer_*() family of routines are supported.
 *
 * All timers are kept in an array pointed to by p_timers, which is
 * allocated on demand - many processes don't use timers at all. The
 * first four elements in this array are reserved for the BSD timers:
 * element 0 is ITIMER_REAL, element 1 is ITIMER_VIRTUAL, element
 * 2 is ITIMER_PROF, and element 3 is ITIMER_MONOTONIC. The rest may be
 * allocated by the timer_create() syscall.
 *
 * Realtime timers are kept in the ptimer structure as an absolute
 * time; virtual time timers are kept as a linked list of deltas (for
 * example, virtual timers due after 5, 12 and 20 seconds of virtual
 * time are queued as deltas of 5, 7 and 8 seconds).
 * Virtual time timers are processed in the hardclock() routine of
 * kern_clock.c. The real time timer is processed by a callout
 * routine, called from the softclock() routine. Since a callout may
 * be delayed in real time due to interrupt processing in the system,
 * it is possible for the real time timeout routine (realtimerexpire,
 * given below) to be delayed in real time past when it is supposed
 * to occur. It does not suffice, therefore, to reload the real timer
 * .it_value from the real time timer's .it_interval. Rather, we
 * compute the next time in absolute time the timer should go off.
 */

/* Allocate a POSIX realtime timer. */
int
sys_timer_create(struct lwp *l, const struct sys_timer_create_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(clockid_t) clock_id;
		syscallarg(struct sigevent *) evp;
		syscallarg(timer_t *) timerid;
	} */

	return timer_create1(SCARG(uap, timerid), SCARG(uap, clock_id),
	    SCARG(uap, evp), copyin, l);
}

int
timer_create1(timer_t *tid, clockid_t id, struct sigevent *evp,
    copyin_t fetch_event, struct lwp *l)
{
	int error;
	timer_t timerid;
	struct ptimers *pts;
	struct ptimer *pt;
	struct proc *p;

	p = l->l_proc;

	if ((u_int)id > CLOCK_MONOTONIC)
		return (EINVAL);

	if ((pts = p->p_timers) == NULL)
		pts = timers_alloc(p);

	pt = pool_get(&ptimer_pool, PR_WAITOK);
	if (evp != NULL) {
		if (((error =
		    (*fetch_event)(evp, &pt->pt_ev, sizeof(pt->pt_ev))) != 0) ||
		    ((pt->pt_ev.sigev_notify < SIGEV_NONE) ||
		    (pt->pt_ev.sigev_notify > SIGEV_SA)) ||
		    (pt->pt_ev.sigev_notify == SIGEV_SIGNAL &&
		    (pt->pt_ev.sigev_signo <= 0 ||
		    pt->pt_ev.sigev_signo >= NSIG))) {
			pool_put(&ptimer_pool, pt);
			return (error ? error : EINVAL);
		}
	}

	/* Find a free timer slot, skipping those reserved for setitimer(). */
	mutex_spin_enter(&timer_lock);
	for (timerid = 3; timerid < TIMER_MAX; timerid++)
		if (pts->pts_timers[timerid] == NULL)
			break;
	if (timerid == TIMER_MAX) {
		mutex_spin_exit(&timer_lock);
		pool_put(&ptimer_pool, pt);
		return EAGAIN;
	}
	if (evp == NULL) {
		pt->pt_ev.sigev_notify = SIGEV_SIGNAL;
		switch (id) {
		case CLOCK_REALTIME:
		case CLOCK_MONOTONIC:
			pt->pt_ev.sigev_signo = SIGALRM;
			break;
		case CLOCK_VIRTUAL:
			pt->pt_ev.sigev_signo = SIGVTALRM;
			break;
		case CLOCK_PROF:
			pt->pt_ev.sigev_signo = SIGPROF;
			break;
		}
		pt->pt_ev.sigev_value.sival_int = timerid;
	}
	pt->pt_info.ksi_signo = pt->pt_ev.sigev_signo;
	pt->pt_info.ksi_errno = 0;
	pt->pt_info.ksi_code = 0;
	pt->pt_info.ksi_pid = p->p_pid;
	pt->pt_info.ksi_uid = kauth_cred_getuid(l->l_cred);
	pt->pt_info.ksi_value = pt->pt_ev.sigev_value;
	pt->pt_type = id;
	pt->pt_proc = p;
	pt->pt_overruns = 0;
	pt->pt_poverruns = 0;
	pt->pt_entry = timerid;
	pt->pt_queued = false;
	timespecclear(&pt->pt_time.it_value);
	if (!CLOCK_VIRTUAL_P(id))
		callout_init(&pt->pt_ch, CALLOUT_MPSAFE);
	else
		pt->pt_active = 0;

	pts->pts_timers[timerid] = pt;
	mutex_spin_exit(&timer_lock);

	return copyout(&timerid, tid, sizeof(timerid));
}

/* Delete a POSIX realtime timer */
int
sys_timer_delete(struct lwp *l, const struct sys_timer_delete_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(timer_t) timerid;
	} */
	struct proc *p = l->l_proc;
	timer_t timerid;
	struct ptimers *pts;
	struct ptimer *pt, *ptn;

	timerid = SCARG(uap, timerid);
	pts = p->p_timers;

	if (pts == NULL || timerid < 2 || timerid >= TIMER_MAX)
		return (EINVAL);

	mutex_spin_enter(&timer_lock);
	if ((pt = pts->pts_timers[timerid]) == NULL) {
		mutex_spin_exit(&timer_lock);
		return (EINVAL);
	}
	if (CLOCK_VIRTUAL_P(pt->pt_type)) {
		if (pt->pt_active) {
			ptn = LIST_NEXT(pt, pt_list);
			LIST_REMOVE(pt, pt_list);
			for ( ; ptn; ptn = LIST_NEXT(ptn, pt_list))
				timespecadd(&pt->pt_time.it_value,
				    &ptn->pt_time.it_value,
				    &ptn->pt_time.it_value);
			pt->pt_active = 0;
		}
	}
	itimerfree(pts, timerid);

	return (0);
}

/*
 * Set up the given timer. The value in pt->pt_time.it_value is taken
 * to be an absolute time for CLOCK_REALTIME/CLOCK_MONOTONIC timers and
 * a relative time for CLOCK_VIRTUAL/CLOCK_PROF timers.
 */
void
timer_settime(struct ptimer *pt)
{
	struct ptimer *ptn, *pptn;
	struct ptlist *ptl;

	KASSERT(mutex_owned(&timer_lock));

	if (!CLOCK_VIRTUAL_P(pt->pt_type)) {
		callout_halt(&pt->pt_ch, &timer_lock);
		if (timespecisset(&pt->pt_time.it_value)) {
			/*
			 * Don't need to check tshzto() return value here.
			 * callout_reset() does it for us.
			 */
			callout_reset(&pt->pt_ch, tshzto(&pt->pt_time.it_value),
			    realtimerexpire, pt);
		}
	} else {
		if (pt->pt_active) {
			ptn = LIST_NEXT(pt, pt_list);
			LIST_REMOVE(pt, pt_list);
			for ( ; ptn; ptn = LIST_NEXT(ptn, pt_list))
				timespecadd(&pt->pt_time.it_value,
				    &ptn->pt_time.it_value,
				    &ptn->pt_time.it_value);
		}
		if (timespecisset(&pt->pt_time.it_value)) {
			if (pt->pt_type == CLOCK_VIRTUAL)
				ptl = &pt->pt_proc->p_timers->pts_virtual;
			else
				ptl = &pt->pt_proc->p_timers->pts_prof;

			for (ptn = LIST_FIRST(ptl), pptn = NULL;
			    ptn && timespeccmp(&pt->pt_time.it_value,
			    &ptn->pt_time.it_value, >);
			    pptn = ptn, ptn = LIST_NEXT(ptn, pt_list))
				timespecsub(&pt->pt_time.it_value,
				    &ptn->pt_time.it_value,
				    &pt->pt_time.it_value);

			if (pptn)
				LIST_INSERT_AFTER(pptn, pt, pt_list);
			else
				LIST_INSERT_HEAD(ptl, pt, pt_list);

			for ( ; ptn ; ptn = LIST_NEXT(ptn, pt_list))
				timespecsub(&ptn->pt_time.it_value,
				    &pt->pt_time.it_value,
				    &ptn->pt_time.it_value);

			pt->pt_active = 1;
		} else
			pt->pt_active = 0;
	}
}

void
timer_gettime(struct ptimer *pt, struct itimerspec *aits)
{
	struct timespec now;
	struct ptimer *ptn;

	KASSERT(mutex_owned(&timer_lock));

	*aits = pt->pt_time;
	if (!CLOCK_VIRTUAL_P(pt->pt_type)) {
		/*
		 * Convert from absolute to relative time in .it_value
		 * part of real time timer. If the time for the real time
		 * timer has passed, report zero; otherwise report the
		 * difference between the current time and the time the
		 * timer is set to go off.
		 */
		if (timespecisset(&aits->it_value)) {
			if (pt->pt_type == CLOCK_REALTIME) {
				getnanotime(&now);
			} else { /* CLOCK_MONOTONIC */
				getnanouptime(&now);
			}
			if (timespeccmp(&aits->it_value, &now, <))
				timespecclear(&aits->it_value);
			else
				timespecsub(&aits->it_value, &now,
				    &aits->it_value);
		}
	} else if (pt->pt_active) {
		if (pt->pt_type == CLOCK_VIRTUAL)
			ptn = LIST_FIRST(&pt->pt_proc->p_timers->pts_virtual);
		else
			ptn = LIST_FIRST(&pt->pt_proc->p_timers->pts_prof);
		for ( ; ptn && ptn != pt; ptn = LIST_NEXT(ptn, pt_list))
			timespecadd(&aits->it_value,
			    &ptn->pt_time.it_value, &aits->it_value);
		KASSERT(ptn != NULL); /* pt should be findable on the list */
	} else
		timespecclear(&aits->it_value);
}

/* Set and arm a POSIX realtime timer */
int
sys___timer_settime50(struct lwp *l,
    const struct sys___timer_settime50_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(timer_t) timerid;
		syscallarg(int) flags;
		syscallarg(const struct itimerspec *) value;
		syscallarg(struct itimerspec *) ovalue;
	} */
	int error;
	struct itimerspec value, ovalue, *ovp = NULL;

	if ((error = copyin(SCARG(uap, value), &value,
	    sizeof(struct itimerspec))) != 0)
		return (error);

	if (SCARG(uap, ovalue))
		ovp = &ovalue;

	if ((error = dotimer_settime(SCARG(uap, timerid), &value, ovp,
	    SCARG(uap, flags), l->l_proc)) != 0)
		return error;

	if (ovp)
		return copyout(&ovalue, SCARG(uap, ovalue),
		    sizeof(struct itimerspec));
	return 0;
}

int
dotimer_settime(int timerid, struct itimerspec *value,
    struct itimerspec *ovalue, int flags, struct proc *p)
{
	struct timespec now;
	struct itimerspec val, oval;
	struct ptimers *pts;
	struct ptimer *pt;
	int error;

	pts = p->p_timers;

	if (pts == NULL || timerid < 2 || timerid >= TIMER_MAX)
		return EINVAL;
	val = *value;
	if ((error = itimespecfix(&val.it_value)) != 0 ||
	    (error = itimespecfix(&val.it_interval)) != 0)
		return error;

	mutex_spin_enter(&timer_lock);
	if ((pt = pts->pts_timers[timerid]) == NULL) {
		mutex_spin_exit(&timer_lock);
		return EINVAL;
	}

	oval = pt->pt_time;
	pt->pt_time = val;

	/*
	 * If we've been passed a relative time for a realtime timer,
	 * convert it to absolute; if an absolute time for a virtual
	 * timer, convert it to relative and make sure we don't set it
	 * to zero, which would cancel the timer, or let it go
	 * negative, which would confuse the comparison tests.
	 */
	if (timespecisset(&pt->pt_time.it_value)) {
		if (!CLOCK_VIRTUAL_P(pt->pt_type)) {
			if ((flags & TIMER_ABSTIME) == 0) {
				if (pt->pt_type == CLOCK_REALTIME) {
					getnanotime(&now);
				} else { /* CLOCK_MONOTONIC */
					getnanouptime(&now);
				}
				timespecadd(&pt->pt_time.it_value, &now,
				    &pt->pt_time.it_value);
			}
		} else {
			if ((flags & TIMER_ABSTIME) != 0) {
				getnanotime(&now);
				timespecsub(&pt->pt_time.it_value, &now,
				    &pt->pt_time.it_value);
				if (!timespecisset(&pt->pt_time.it_value) ||
				    pt->pt_time.it_value.tv_sec < 0) {
					pt->pt_time.it_value.tv_sec = 0;
					pt->pt_time.it_value.tv_nsec = 1;
				}
			}
		}
	}

	timer_settime(pt);
	mutex_spin_exit(&timer_lock);

	if (ovalue)
		*ovalue = oval;

	return (0);
}

/* Return the time remaining until a POSIX timer fires. */
int
sys___timer_gettime50(struct lwp *l,
    const struct sys___timer_gettime50_args *uap, register_t *retval)
{
	/* {
		syscallarg(timer_t) timerid;
		syscallarg(struct itimerspec *) value;
	} */
	struct itimerspec its;
	int error;

	if ((error = dotimer_gettime(SCARG(uap, timerid), l->l_proc,
	    &its)) != 0)
		return error;

	return copyout(&its, SCARG(uap, value), sizeof(its));
}

int
dotimer_gettime(int timerid, struct proc *p, struct itimerspec *its)
{
	struct ptimer *pt;
	struct ptimers *pts;

	pts = p->p_timers;
	if (pts == NULL || timerid < 2 || timerid >= TIMER_MAX)
		return (EINVAL);
	mutex_spin_enter(&timer_lock);
	if ((pt = pts->pts_timers[timerid]) == NULL) {
		mutex_spin_exit(&timer_lock);
		return (EINVAL);
	}
	timer_gettime(pt, its);
	mutex_spin_exit(&timer_lock);

	return 0;
}

/*
 * Return the number of times a periodic timer expired while a
 * notification was already pending. The counter is reset when
 * a timer expires and a notification can be posted.
 */
int
sys_timer_getoverrun(struct lwp *l, const struct sys_timer_getoverrun_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(timer_t) timerid;
	} */
	struct proc *p = l->l_proc;
	struct ptimers *pts;
	int timerid;
	struct ptimer *pt;

	timerid = SCARG(uap, timerid);

	pts = p->p_timers;
	if (pts == NULL || timerid < 2 || timerid >= TIMER_MAX)
		return (EINVAL);
	mutex_spin_enter(&timer_lock);
	if ((pt = pts->pts_timers[timerid]) == NULL) {
		mutex_spin_exit(&timer_lock);
		return (EINVAL);
	}
	*retval = pt->pt_poverruns;
	mutex_spin_exit(&timer_lock);

	return (0);
}

#ifdef KERN_SA
/* Glue function that triggers an upcall; called from userret(). */
void
timerupcall(struct lwp *l)
{
	struct ptimers *pt = l->l_proc->p_timers;
	struct proc *p = l->l_proc;
	unsigned int i, fired, done;

	KDASSERT(l->l_proc->p_sa);
	/* Bail out if we do not own the virtual processor */
	if (l->l_savp->savp_lwp != l)
		return;

	mutex_enter(p->p_lock);

	fired = pt->pts_fired;
	done = 0;
	while ((i = ffs(fired)) != 0) {
		siginfo_t *si;
		int mask = 1 << --i;
		int f;

		f = ~l->l_pflag & LP_SA_NOBLOCK;
		l->l_pflag |= LP_SA_NOBLOCK;
		si = siginfo_alloc(PR_WAITOK);
		si->_info = pt->pts_timers[i]->pt_info.ksi_info;
		if (sa_upcall(l, SA_UPCALL_SIGEV | SA_UPCALL_DEFER, NULL, l,
		    sizeof(*si), si, siginfo_free) != 0) {
			siginfo_free(si);
			/* XXX What do we do here?? */
		} else
			done |= mask;
		fired &= ~mask;
		l->l_pflag ^= f;
	}
	pt->pts_fired &= ~done;
	if (pt->pts_fired == 0)
		l->l_proc->p_timerpend = 0;

	mutex_exit(p->p_lock);
}
#endif /* KERN_SA */

/*
 * Real interval timer expired:
 * send process whose timer expired an alarm signal.
 * If time is not set up to reload, then just return.
 * Else compute next time timer should go off which is > current time.
 * This is where delay in processing this timeout causes multiple
 * SIGALRM calls to be compressed into one.
 */
void
realtimerexpire(void *arg)
{
	uint64_t last_val, next_val, interval, now_ns;
	struct timespec now, next;
	struct ptimer *pt;
	int backwards;

	pt = arg;

	mutex_spin_enter(&timer_lock);
	itimerfire(pt);

	if (!timespecisset(&pt->pt_time.it_interval)) {
		timespecclear(&pt->pt_time.it_value);
		mutex_spin_exit(&timer_lock);
		return;
	}

	getnanotime(&now);
	backwards = (timespeccmp(&pt->pt_time.it_value, &now, >));
	timespecadd(&pt->pt_time.it_value, &pt->pt_time.it_interval, &next);
	/* Handle the easy case of non-overflown timers first. */
	if (!backwards && timespeccmp(&next, &now, >)) {
		pt->pt_time.it_value = next;
	} else {
		now_ns = timespec2ns(&now);
		last_val = timespec2ns(&pt->pt_time.it_value);
		interval = timespec2ns(&pt->pt_time.it_interval);

		next_val = now_ns +
		    (now_ns - last_val + interval - 1) % interval;

		if (backwards)
			next_val += interval;
		else
			pt->pt_overruns += (now_ns - last_val) / interval;

		pt->pt_time.it_value.tv_sec = next_val / 1000000000;
		pt->pt_time.it_value.tv_nsec = next_val % 1000000000;
	}

	/*
	 * Don't need to check tshzto() return value here.
	 * callout_reset() does it for us.
	 */
	callout_reset(&pt->pt_ch, tshzto(&pt->pt_time.it_value),
	    realtimerexpire, pt);
	mutex_spin_exit(&timer_lock);
}

/* BSD routine to get the value of an interval timer. */
/* ARGSUSED */
int
sys___getitimer50(struct lwp *l, const struct sys___getitimer50_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(int) which;
		syscallarg(struct itimerval *) itv;
	} */
	struct proc *p = l->l_proc;
	struct itimerval aitv;
	int error;

	error = dogetitimer(p, SCARG(uap, which), &aitv);
	if (error)
		return error;
	return (copyout(&aitv, SCARG(uap, itv), sizeof(struct itimerval)));
}

int
dogetitimer(struct proc *p, int which, struct itimerval *itvp)
{
	struct ptimers *pts;
	struct ptimer *pt;
	struct itimerspec its;

	if ((u_int)which > ITIMER_MONOTONIC)
		return (EINVAL);

	mutex_spin_enter(&timer_lock);
	pts = p->p_timers;
	if (pts == NULL || (pt = pts->pts_timers[which]) == NULL) {
		timerclear(&itvp->it_value);
		timerclear(&itvp->it_interval);
	} else {
		timer_gettime(pt, &its);
		TIMESPEC_TO_TIMEVAL(&itvp->it_value, &its.it_value);
		TIMESPEC_TO_TIMEVAL(&itvp->it_interval, &its.it_interval);
	}
	mutex_spin_exit(&timer_lock);

	return 0;
}

/* BSD routine to set/arm an interval timer. */
/* ARGSUSED */
int
sys___setitimer50(struct lwp *l, const struct sys___setitimer50_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(int) which;
		syscallarg(const struct itimerval *) itv;
		syscallarg(struct itimerval *) oitv;
	} */
	struct proc *p = l->l_proc;
	int which = SCARG(uap, which);
	struct sys___getitimer50_args getargs;
	const struct itimerval *itvp;
	struct itimerval aitv;
	int error;

	if ((u_int)which > ITIMER_MONOTONIC)
		return (EINVAL);
	itvp = SCARG(uap, itv);
	if (itvp &&
	    (error = copyin(itvp, &aitv, sizeof(struct itimerval))) != 0)
		return (error);
	if (SCARG(uap, oitv) != NULL) {
		SCARG(&getargs, which) = which;
		SCARG(&getargs, itv) = SCARG(uap, oitv);
		if ((error = sys___getitimer50(l, &getargs, retval)) != 0)
			return (error);
	}
	if (itvp == 0)
		return (0);

	return dosetitimer(p, which, &aitv);
}

int
dosetitimer(struct proc *p, int which, struct itimerval *itvp)
{
	struct timespec now;
	struct ptimers *pts;
	struct ptimer *pt, *spare;

	KASSERT((u_int)which <= CLOCK_MONOTONIC);
	if (itimerfix(&itvp->it_value) || itimerfix(&itvp->it_interval))
		return (EINVAL);

	/*
	 * Don't bother allocating data structures if the process just
	 * wants to clear the timer.
	 */
	spare = NULL;
	pts = p->p_timers;
 retry:
	if (!timerisset(&itvp->it_value) && (pts == NULL ||
	    pts->pts_timers[which] == NULL))
		return (0);
	if (pts == NULL)
		pts = timers_alloc(p);
	mutex_spin_enter(&timer_lock);
	pt = pts->pts_timers[which];
	if (pt == NULL) {
		if (spare == NULL) {
			mutex_spin_exit(&timer_lock);
			spare = pool_get(&ptimer_pool, PR_WAITOK);
			goto retry;
		}
		pt = spare;
		spare = NULL;
		pt->pt_ev.sigev_notify = SIGEV_SIGNAL;
		pt->pt_ev.sigev_value.sival_int = which;
		pt->pt_overruns = 0;
		pt->pt_proc = p;
		pt->pt_type = which;
		pt->pt_entry = which;
		pt->pt_queued = false;
		if (!CLOCK_VIRTUAL_P(pt->pt_type))
			callout_init(&pt->pt_ch, CALLOUT_MPSAFE);
		else
			pt->pt_active = 0;

		switch (which) {
		case ITIMER_REAL:
		case ITIMER_MONOTONIC:
			pt->pt_ev.sigev_signo = SIGALRM;
			break;
		case ITIMER_VIRTUAL:
			pt->pt_ev.sigev_signo = SIGVTALRM;
			break;
		case ITIMER_PROF:
			pt->pt_ev.sigev_signo = SIGPROF;
			break;
		}
		pts->pts_timers[which] = pt;
	}

	TIMEVAL_TO_TIMESPEC(&itvp->it_value, &pt->pt_time.it_value);
	TIMEVAL_TO_TIMESPEC(&itvp->it_interval, &pt->pt_time.it_interval);

	if (timespecisset(&pt->pt_time.it_value)) {
		/* Convert to absolute time */
		/* XXX need to wrap in splclock for timecounters case? */
		switch (which) {
		case ITIMER_REAL:
			getnanotime(&now);
			timespecadd(&pt->pt_time.it_value, &now,
			    &pt->pt_time.it_value);
			break;
		case ITIMER_MONOTONIC:
			getnanouptime(&now);
			timespecadd(&pt->pt_time.it_value, &now,
			    &pt->pt_time.it_value);
			break;
		default:
			break;
		}
	}
	timer_settime(pt);
	mutex_spin_exit(&timer_lock);
	if (spare != NULL)
		pool_put(&ptimer_pool, spare);

	return (0);
}

/* Utility routines to manage the array of pointers to timers. */
struct ptimers *
timers_alloc(struct proc *p)
{
	struct ptimers *pts;
	int i;

	pts = pool_get(&ptimers_pool, PR_WAITOK);
	LIST_INIT(&pts->pts_virtual);
	LIST_INIT(&pts->pts_prof);
	for (i = 0; i < TIMER_MAX; i++)
		pts->pts_timers[i] = NULL;
	pts->pts_fired = 0;
	mutex_spin_enter(&timer_lock);
	if (p->p_timers == NULL) {
		p->p_timers = pts;
		mutex_spin_exit(&timer_lock);
		return pts;
	}
	mutex_spin_exit(&timer_lock);
	pool_put(&ptimers_pool, pts);
	return p->p_timers;
}

/*
 * Clean up the per-process timers. If "which" is set to TIMERS_ALL,
 * then clean up all timers and free all the data structures. If
 * "which" is set to TIMERS_POSIX, only clean up the timers allocated
 * by timer_create(), not the BSD setitimer() timers, and only free the
 * structure if none of those remain.
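 *
 * Note that itimerfree() drops timer_lock, so the cleanup loop below
 * re-acquires the lock after each timer it frees.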
 */
void
timers_free(struct proc *p, int which)
{
	struct ptimers *pts;
	struct ptimer *ptn;
	struct timespec ts;
	int i;

	if (p->p_timers == NULL)
		return;

	pts = p->p_timers;
	mutex_spin_enter(&timer_lock);
	if (which == TIMERS_ALL) {
		p->p_timers = NULL;
		i = 0;
	} else {
		timespecclear(&ts);
		for (ptn = LIST_FIRST(&pts->pts_virtual);
		    ptn && ptn != pts->pts_timers[ITIMER_VIRTUAL];
		    ptn = LIST_NEXT(ptn, pt_list)) {
			KASSERT(ptn->pt_type == CLOCK_VIRTUAL);
			timespecadd(&ts, &ptn->pt_time.it_value, &ts);
		}
		LIST_FIRST(&pts->pts_virtual) = NULL;
		if (ptn) {
			KASSERT(ptn->pt_type == CLOCK_VIRTUAL);
			timespecadd(&ts, &ptn->pt_time.it_value,
			    &ptn->pt_time.it_value);
			LIST_INSERT_HEAD(&pts->pts_virtual, ptn, pt_list);
		}
		timespecclear(&ts);
		for (ptn = LIST_FIRST(&pts->pts_prof);
		    ptn && ptn != pts->pts_timers[ITIMER_PROF];
		    ptn = LIST_NEXT(ptn, pt_list)) {
			KASSERT(ptn->pt_type == CLOCK_PROF);
			timespecadd(&ts, &ptn->pt_time.it_value, &ts);
		}
		LIST_FIRST(&pts->pts_prof) = NULL;
		if (ptn) {
			KASSERT(ptn->pt_type == CLOCK_PROF);
			timespecadd(&ts, &ptn->pt_time.it_value,
			    &ptn->pt_time.it_value);
			LIST_INSERT_HEAD(&pts->pts_prof, ptn, pt_list);
		}
		i = 3;
	}
	for ( ; i < TIMER_MAX; i++) {
		if (pts->pts_timers[i] != NULL) {
			itimerfree(pts, i);
			mutex_spin_enter(&timer_lock);
		}
	}
	if (pts->pts_timers[0] == NULL && pts->pts_timers[1] == NULL &&
	    pts->pts_timers[2] == NULL) {
		p->p_timers = NULL;
		mutex_spin_exit(&timer_lock);
		pool_put(&ptimers_pool, pts);
	} else
		mutex_spin_exit(&timer_lock);
}

static void
itimerfree(struct ptimers *pts, int index)
{
	struct ptimer *pt;

	KASSERT(mutex_owned(&timer_lock));

	pt = pts->pts_timers[index];
	pts->pts_timers[index] = NULL;
	if (!CLOCK_VIRTUAL_P(pt->pt_type))
		callout_halt(&pt->pt_ch, &timer_lock);
	if (pt->pt_queued)
		TAILQ_REMOVE(&timer_queue, pt, pt_chain);
	mutex_spin_exit(&timer_lock);
	if (!CLOCK_VIRTUAL_P(pt->pt_type))
		callout_destroy(&pt->pt_ch);
	pool_put(&ptimer_pool, pt);
}

/*
 * Decrement an interval timer by a specified number
 * of nanoseconds, which must be less than a second,
 * i.e. < 1000000000. If the timer expires, then reload
 * it. In this case, carry over (nsec - old value) to
 * reduce the value reloaded into the timer so that
 * the timer does not drift. This routine assumes
 * that it is called in a context where the timers
 * on which it is operating cannot change in value.
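 *
 * As an illustration (assuming hz = 100, so timer_tick() passes
 * nsec = 10000000): if only 400 ns remain in .it_value, the timer has
 * expired, and the 9999600 ns overshoot (nsec - old value) is
 * subtracted from the freshly reloaded .it_interval so that later
 * expirations stay on schedule.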
 */
static int
itimerdecr(struct ptimer *pt, int nsec)
{
	struct itimerspec *itp;

	KASSERT(mutex_owned(&timer_lock));
	KASSERT(CLOCK_VIRTUAL_P(pt->pt_type));

	itp = &pt->pt_time;
	if (itp->it_value.tv_nsec < nsec) {
		if (itp->it_value.tv_sec == 0) {
			/* expired, and already in next interval */
			nsec -= itp->it_value.tv_nsec;
			goto expire;
		}
		itp->it_value.tv_nsec += 1000000000;
		itp->it_value.tv_sec--;
	}
	itp->it_value.tv_nsec -= nsec;
	nsec = 0;
	if (timespecisset(&itp->it_value))
		return (1);
	/* expired, exactly at end of interval */
expire:
	if (timespecisset(&itp->it_interval)) {
		itp->it_value = itp->it_interval;
		itp->it_value.tv_nsec -= nsec;
		if (itp->it_value.tv_nsec < 0) {
			itp->it_value.tv_nsec += 1000000000;
			itp->it_value.tv_sec--;
		}
		timer_settime(pt);
	} else
		itp->it_value.tv_nsec = 0;	/* sec is already 0 */
	return (0);
}

static void
itimerfire(struct ptimer *pt)
{

	KASSERT(mutex_owned(&timer_lock));

	/*
	 * XXX Can overrun, but we don't do signal queueing yet, anyway.
	 * XXX Relying on the clock interrupt is stupid.
	 */
	if ((pt->pt_ev.sigev_notify == SIGEV_SA && pt->pt_proc->p_sa == NULL) ||
	    (pt->pt_ev.sigev_notify != SIGEV_SIGNAL &&
	    pt->pt_ev.sigev_notify != SIGEV_SA) || pt->pt_queued)
		return;
	TAILQ_INSERT_TAIL(&timer_queue, pt, pt_chain);
	pt->pt_queued = true;
	softint_schedule(timer_sih);
}

void
timer_tick(lwp_t *l, bool user)
{
	struct ptimers *pts;
	struct ptimer *pt;
	proc_t *p;

	p = l->l_proc;
	if (p->p_timers == NULL)
		return;

	mutex_spin_enter(&timer_lock);
	if ((pts = l->l_proc->p_timers) != NULL) {
		/*
		 * Run current process's virtual and profile time, as needed.
		 */
		if (user && (pt = LIST_FIRST(&pts->pts_virtual)) != NULL)
			if (itimerdecr(pt, tick * 1000) == 0)
				itimerfire(pt);
		if ((pt = LIST_FIRST(&pts->pts_prof)) != NULL)
			if (itimerdecr(pt, tick * 1000) == 0)
				itimerfire(pt);
	}
	mutex_spin_exit(&timer_lock);
}

#ifdef KERN_SA
/*
 * timer_sa_intr:
 *
 * SIGEV_SA handling for timer_intr(). We are called (and return)
 * with the timer lock held. We know that the process had SA enabled
 * when this timer was enqueued. As timer_intr() is a soft interrupt
 * handler, SA should still be enabled by the time we get here.
 */
static void
timer_sa_intr(struct ptimer *pt, proc_t *p)
{
	unsigned int i;
	struct sadata *sa;
	struct sadata_vp *vp;

	/* Cause the process to generate an upcall when it returns. */
	if (!p->p_timerpend) {
		/*
		 * XXX stop signals can be processed inside tsleep,
		 * which can be inside sa_yield's inner loop, which
		 * makes testing for sa_idle alone insufficient to
		 * determine if we really should call setrunnable.
		 */
		pt->pt_poverruns = pt->pt_overruns;
		pt->pt_overruns = 0;
		i = 1 << pt->pt_entry;
		p->p_timers->pts_fired = i;
		p->p_timerpend = 1;

		sa = p->p_sa;
		mutex_enter(&sa->sa_mutex);
		SLIST_FOREACH(vp, &sa->sa_vps, savp_next) {
			struct lwp *vp_lwp = vp->savp_lwp;
			lwp_lock(vp_lwp);
			lwp_need_userret(vp_lwp);
			if (vp_lwp->l_flag & LW_SA_IDLE) {
				vp_lwp->l_flag &= ~LW_SA_IDLE;
				lwp_unsleep(vp_lwp, true);
				break;
			}
			lwp_unlock(vp_lwp);
		}
		mutex_exit(&sa->sa_mutex);
	} else {
		i = 1 << pt->pt_entry;
		if ((p->p_timers->pts_fired & i) == 0) {
			pt->pt_poverruns = pt->pt_overruns;
			pt->pt_overruns = 0;
			p->p_timers->pts_fired |= i;
		} else
			pt->pt_overruns++;
	}
}
#endif /* KERN_SA */

static void
timer_intr(void *cookie)
{
	ksiginfo_t ksi;
	struct ptimer *pt;
	proc_t *p;

	mutex_enter(proc_lock);
	mutex_spin_enter(&timer_lock);
	while ((pt = TAILQ_FIRST(&timer_queue)) != NULL) {
		TAILQ_REMOVE(&timer_queue, pt, pt_chain);
		KASSERT(pt->pt_queued);
		pt->pt_queued = false;

		if (pt->pt_proc->p_timers == NULL) {
			/* Process is dying. */
			continue;
		}
		p = pt->pt_proc;
#ifdef KERN_SA
		if (pt->pt_ev.sigev_notify == SIGEV_SA) {
			timer_sa_intr(pt, p);
			continue;
		}
#endif /* KERN_SA */
		if (pt->pt_ev.sigev_notify != SIGEV_SIGNAL)
			continue;
		if (sigismember(&p->p_sigpend.sp_set, pt->pt_ev.sigev_signo)) {
			pt->pt_overruns++;
			continue;
		}

		KSI_INIT(&ksi);
		ksi.ksi_signo = pt->pt_ev.sigev_signo;
		ksi.ksi_code = SI_TIMER;
		ksi.ksi_value = pt->pt_ev.sigev_value;
		pt->pt_poverruns = pt->pt_overruns;
		pt->pt_overruns = 0;
		mutex_spin_exit(&timer_lock);
		kpsignal(p, &ksi, NULL);
		mutex_spin_enter(&timer_lock);
	}
	mutex_spin_exit(&timer_lock);
	mutex_exit(proc_lock);
}

/*
 * Check if the time will wrap if set to ts.
 *
 * ts - timespec describing the new time
 * delta - the delta between the current time and ts
 */
bool
time_wraps(struct timespec *ts, struct timespec *delta)
{

	/*
	 * Don't allow the time to be set forward so far it
	 * will wrap and become negative, thus allowing an
	 * attacker to bypass the next check below. The
	 * cutoff is 1 year before rollover occurs, so even
	 * if the attacker uses adjtime(2) to move the time
	 * past the cutoff, it will take a very long time
	 * to get to the wrap point.
	 */
	if ((ts->tv_sec > LLONG_MAX - 365*24*60*60) ||
	    (delta->tv_sec < 0 || delta->tv_nsec < 0))
		return true;

	return false;
}