kern_time.c revision 1.174
/*	$NetBSD: kern_time.c,v 1.174 2012/03/22 17:46:07 dholland Exp $	*/

/*-
 * Copyright (c) 2000, 2004, 2005, 2007, 2008, 2009 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Christopher G. Demetriou, and by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_time.c	8.4 (Berkeley) 5/26/95
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: kern_time.c,v 1.174 2012/03/22 17:46:07 dholland Exp $");

#include <sys/param.h>
#include <sys/resourcevar.h>
#include <sys/kernel.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <sys/signalvar.h>
#include <sys/syslog.h>
#include <sys/timetc.h>
#include <sys/timex.h>
#include <sys/kauth.h>
#include <sys/mount.h>
#include <sys/syscallargs.h>
#include <sys/cpu.h>

static void	timer_intr(void *);
static void	itimerfire(struct ptimer *);
static void	itimerfree(struct ptimers *, int);

kmutex_t	timer_lock;

static void	*timer_sih;
static TAILQ_HEAD(, ptimer) timer_queue;

struct pool ptimer_pool, ptimers_pool;

#define	CLOCK_VIRTUAL_P(clockid)	\
	((clockid) == CLOCK_VIRTUAL || (clockid) == CLOCK_PROF)

CTASSERT(ITIMER_REAL == CLOCK_REALTIME);
CTASSERT(ITIMER_VIRTUAL == CLOCK_VIRTUAL);
CTASSERT(ITIMER_PROF == CLOCK_PROF);
CTASSERT(ITIMER_MONOTONIC == CLOCK_MONOTONIC);

/*
 * Initialize timekeeping.
 */
void
time_init(void)
{

	pool_init(&ptimer_pool, sizeof(struct ptimer), 0, 0, 0, "ptimerpl",
	    &pool_allocator_nointr, IPL_NONE);
	pool_init(&ptimers_pool, sizeof(struct ptimers), 0, 0, 0, "ptimerspl",
	    &pool_allocator_nointr, IPL_NONE);
}

void
time_init2(void)
{

	TAILQ_INIT(&timer_queue);
	mutex_init(&timer_lock, MUTEX_DEFAULT, IPL_SCHED);
	timer_sih = softint_establish(SOFTINT_CLOCK | SOFTINT_MPSAFE,
	    timer_intr, NULL);
}

/* Time of day and interval timer support.
 *
 * These routines provide the kernel entry points to get and set
 * the time-of-day and per-process interval timers.  Subroutines
 * here provide support for adding and subtracting timeval structures
 * and decrementing interval timers, optionally reloading the interval
 * timers when they expire.
 */

/* This function is used by clock_settime and settimeofday */
static int
settime1(struct proc *p, const struct timespec *ts, bool check_kauth)
{
	struct timespec delta, now;
	int s;

	/* WHAT DO WE DO ABOUT PENDING REAL-TIME TIMEOUTS??? */
	s = splclock();
	nanotime(&now);
	timespecsub(ts, &now, &delta);

	if (check_kauth && kauth_authorize_system(kauth_cred_get(),
	    KAUTH_SYSTEM_TIME, KAUTH_REQ_SYSTEM_TIME_SYSTEM, __UNCONST(ts),
	    &delta, KAUTH_ARG(check_kauth ? false : true)) != 0) {
		splx(s);
		return (EPERM);
	}

#ifdef notyet
	if ((delta.tv_sec < 86400) && securelevel > 0) { /* XXX elad - notyet */
		splx(s);
		return (EPERM);
	}
#endif

	tc_setclock(ts);

	timespecadd(&boottime, &delta, &boottime);

	resettodr();
	splx(s);

	return (0);
}

int
settime(struct proc *p, struct timespec *ts)
{
	return (settime1(p, ts, true));
}
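
/*
 * Illustrative note (editorial addition, not in the original source; the
 * numbers are hypothetical): settime1() applies the new wall-clock time as
 * a step.  If nanotime() currently reads 1000.0 s and the requested ts is
 * 1005.0 s, delta is +5.0 s; tc_setclock() jumps the realtime clock forward
 * by that amount and boottime is advanced by the same delta, so
 * boottime + uptime continues to equal the new wall-clock time.
 */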

/* ARGSUSED */
int
sys___clock_gettime50(struct lwp *l,
    const struct sys___clock_gettime50_args *uap, register_t *retval)
{
	/* {
		syscallarg(clockid_t) clock_id;
		syscallarg(struct timespec *) tp;
	} */
	int error;
	struct timespec ats;

	error = clock_gettime1(SCARG(uap, clock_id), &ats);
	if (error != 0)
		return error;

	return copyout(&ats, SCARG(uap, tp), sizeof(ats));
}

int
clock_gettime1(clockid_t clock_id, struct timespec *ts)
{

	switch (clock_id) {
	case CLOCK_REALTIME:
		nanotime(ts);
		break;
	case CLOCK_MONOTONIC:
		nanouptime(ts);
		break;
	default:
		return EINVAL;
	}

	return 0;
}

/* ARGSUSED */
int
sys___clock_settime50(struct lwp *l,
    const struct sys___clock_settime50_args *uap, register_t *retval)
{
	/* {
		syscallarg(clockid_t) clock_id;
		syscallarg(const struct timespec *) tp;
	} */
	int error;
	struct timespec ats;

	if ((error = copyin(SCARG(uap, tp), &ats, sizeof(ats))) != 0)
		return error;

	return clock_settime1(l->l_proc, SCARG(uap, clock_id), &ats, true);
}


int
clock_settime1(struct proc *p, clockid_t clock_id, const struct timespec *tp,
    bool check_kauth)
{
	int error;

	switch (clock_id) {
	case CLOCK_REALTIME:
		if ((error = settime1(p, tp, check_kauth)) != 0)
			return (error);
		break;
	case CLOCK_MONOTONIC:
		return (EINVAL);	/* read-only clock */
	default:
		return (EINVAL);
	}

	return 0;
}

int
sys___clock_getres50(struct lwp *l, const struct sys___clock_getres50_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(clockid_t) clock_id;
		syscallarg(struct timespec *) tp;
	} */
	struct timespec ts;
	int error = 0;

	if ((error = clock_getres1(SCARG(uap, clock_id), &ts)) != 0)
		return error;

	if (SCARG(uap, tp))
		error = copyout(&ts, SCARG(uap, tp), sizeof(ts));

	return error;
}

int
clock_getres1(clockid_t clock_id, struct timespec *ts)
{

	switch (clock_id) {
	case CLOCK_REALTIME:
	case CLOCK_MONOTONIC:
		ts->tv_sec = 0;
		if (tc_getfrequency() > 1000000000)
			ts->tv_nsec = 1;
		else
			ts->tv_nsec = 1000000000 / tc_getfrequency();
		break;
	default:
		return EINVAL;
	}

	return 0;
}
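
/*
 * Illustrative note (editorial addition, not in the original source): the
 * resolution reported above is simply the period of the active timecounter.
 * With a hypothetical 1 kHz timecounter, tc_getfrequency() == 1000 and the
 * reported resolution is 1000000000 / 1000 = 1000000 ns (1 ms); any counter
 * running at 1 GHz or faster is reported as 1 ns.
 */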

/* ARGSUSED */
int
sys___nanosleep50(struct lwp *l, const struct sys___nanosleep50_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(struct timespec *) rqtp;
		syscallarg(struct timespec *) rmtp;
	} */
	struct timespec rmt, rqt;
	int error, error1;

	error = copyin(SCARG(uap, rqtp), &rqt, sizeof(struct timespec));
	if (error)
		return (error);

	error = nanosleep1(l, &rqt, SCARG(uap, rmtp) ? &rmt : NULL);
	if (SCARG(uap, rmtp) == NULL || (error != 0 && error != EINTR))
		return error;

	error1 = copyout(&rmt, SCARG(uap, rmtp), sizeof(rmt));
	return error1 ? error1 : error;
}

int
nanosleep1(struct lwp *l, struct timespec *rqt, struct timespec *rmt)
{
	struct timespec rmtstart;
	int error, timo;

	if ((error = itimespecfix(rqt)) != 0)
		return error;

	timo = tstohz(rqt);
	/*
	 * Avoid inadvertently sleeping forever
	 */
	if (timo == 0)
		timo = 1;
	getnanouptime(&rmtstart);
again:
	error = kpause("nanoslp", true, timo, NULL);
	if (rmt != NULL || error == 0) {
		struct timespec rmtend;
		struct timespec t0;
		struct timespec *t;

		getnanouptime(&rmtend);
		t = (rmt != NULL) ? rmt : &t0;
		timespecsub(&rmtend, &rmtstart, t);
		timespecsub(rqt, t, t);
		if (t->tv_sec < 0)
			timespecclear(t);
		if (error == 0) {
			timo = tstohz(t);
			if (timo > 0)
				goto again;
		}
	}

	if (error == ERESTART)
		error = EINTR;
	if (error == EWOULDBLOCK)
		error = 0;

	return error;
}
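
/*
 * Illustrative userland sketch (editorial addition, not part of this file):
 * a caller that wants to sleep for the full interval typically restarts
 * nanosleep(2) with the remaining time whenever a signal interrupts it:
 *
 *	struct timespec req = { 2, 0 }, rem;
 *
 *	while (nanosleep(&req, &rem) == -1 && errno == EINTR)
 *		req = rem;	// resume with what is left
 */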

/* ARGSUSED */
int
sys___gettimeofday50(struct lwp *l, const struct sys___gettimeofday50_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(struct timeval *) tp;
		syscallarg(void *) tzp;		really "struct timezone *";
	} */
	struct timeval atv;
	int error = 0;
	struct timezone tzfake;

	if (SCARG(uap, tp)) {
		microtime(&atv);
		error = copyout(&atv, SCARG(uap, tp), sizeof(atv));
		if (error)
			return (error);
	}
	if (SCARG(uap, tzp)) {
		/*
		 * NetBSD has no kernel notion of time zone, so we just
		 * fake up a timezone struct and return it if demanded.
		 */
		tzfake.tz_minuteswest = 0;
		tzfake.tz_dsttime = 0;
		error = copyout(&tzfake, SCARG(uap, tzp), sizeof(tzfake));
	}
	return (error);
}

/* ARGSUSED */
int
sys___settimeofday50(struct lwp *l, const struct sys___settimeofday50_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(const struct timeval *) tv;
		syscallarg(const void *) tzp; really "const struct timezone *";
	} */

	return settimeofday1(SCARG(uap, tv), true, SCARG(uap, tzp), l, true);
}

int
settimeofday1(const struct timeval *utv, bool userspace,
    const void *utzp, struct lwp *l, bool check_kauth)
{
	struct timeval atv;
	struct timespec ts;
	int error;

	/* Verify all parameters before changing time. */

	/*
	 * NetBSD has no kernel notion of time zone, and only an
	 * obsolete program would try to set it, so we log a warning.
	 */
	if (utzp)
		log(LOG_WARNING, "pid %d attempted to set the "
		    "(obsolete) kernel time zone\n", l->l_proc->p_pid);

	if (utv == NULL)
		return 0;

	if (userspace) {
		if ((error = copyin(utv, &atv, sizeof(atv))) != 0)
			return error;
		utv = &atv;
	}

	TIMEVAL_TO_TIMESPEC(utv, &ts);
	return settime1(l->l_proc, &ts, check_kauth);
}

int	time_adjusted;			/* set if an adjustment is made */

/* ARGSUSED */
int
sys___adjtime50(struct lwp *l, const struct sys___adjtime50_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(const struct timeval *) delta;
		syscallarg(struct timeval *) olddelta;
	} */
	int error = 0;
	struct timeval atv, oldatv;

	if ((error = kauth_authorize_system(l->l_cred, KAUTH_SYSTEM_TIME,
	    KAUTH_REQ_SYSTEM_TIME_ADJTIME, NULL, NULL, NULL)) != 0)
		return error;

	if (SCARG(uap, delta)) {
		error = copyin(SCARG(uap, delta), &atv,
		    sizeof(*SCARG(uap, delta)));
		if (error)
			return (error);
	}
	adjtime1(SCARG(uap, delta) ? &atv : NULL,
	    SCARG(uap, olddelta) ? &oldatv : NULL, l->l_proc);
	if (SCARG(uap, olddelta))
		error = copyout(&oldatv, SCARG(uap, olddelta),
		    sizeof(*SCARG(uap, olddelta)));
	return error;
}

void
adjtime1(const struct timeval *delta, struct timeval *olddelta, struct proc *p)
{
	extern int64_t time_adjtime;  /* in kern_ntptime.c */

	if (olddelta) {
		mutex_spin_enter(&timecounter_lock);
		olddelta->tv_sec = time_adjtime / 1000000;
		olddelta->tv_usec = time_adjtime % 1000000;
		if (olddelta->tv_usec < 0) {
			olddelta->tv_usec += 1000000;
			olddelta->tv_sec--;
		}
		mutex_spin_exit(&timecounter_lock);
	}

	if (delta) {
		mutex_spin_enter(&timecounter_lock);
		time_adjtime = delta->tv_sec * 1000000 + delta->tv_usec;

		if (time_adjtime) {
			/* We need to save the system time during shutdown */
			time_adjusted |= 1;
		}
		mutex_spin_exit(&timecounter_lock);
	}
}
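
/*
 * Illustrative note (editorial addition, not in the original source; the
 * delta is hypothetical): adjtime1() only records the requested offset.
 * A delta of { 1, 500000 } (1.5 s) becomes
 * time_adjtime = 1 * 1000000 + 500000 = 1500000 microseconds, which the
 * NTP code in kern_ntptime.c then slews out gradually rather than
 * stepping the clock.
 */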

/*
 * Interval timer support. Both the BSD getitimer() family and the POSIX
 * timer_*() family of routines are supported.
 *
 * All timers are kept in an array pointed to by p_timers, which is
 * allocated on demand - many processes don't use timers at all. The
 * first four elements in this array are reserved for the BSD timers:
 * element 0 is ITIMER_REAL, element 1 is ITIMER_VIRTUAL, element
 * 2 is ITIMER_PROF, and element 3 is ITIMER_MONOTONIC. The rest may be
 * allocated by the timer_create() syscall.
 *
 * Realtime timers are kept in the ptimer structure as an absolute
 * time; virtual time timers are kept as a linked list of deltas.
 * Virtual time timers are processed in the hardclock() routine of
 * kern_clock.c.  The real time timer is processed by a callout
 * routine, called from the softclock() routine.  Since a callout may
 * be delayed in real time due to interrupt processing in the system,
 * it is possible for the real time timeout routine (realtimerexpire,
 * given below), to be delayed in real time past when it is supposed
 * to occur.  It does not suffice, therefore, to reload the real timer
 * .it_value from the real time timers .it_interval.  Rather, we
 * compute the next time in absolute time the timer should go off.
 */

/* Allocate a POSIX realtime timer. */
int
sys_timer_create(struct lwp *l, const struct sys_timer_create_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(clockid_t) clock_id;
		syscallarg(struct sigevent *) evp;
		syscallarg(timer_t *) timerid;
	} */

	return timer_create1(SCARG(uap, timerid), SCARG(uap, clock_id),
	    SCARG(uap, evp), copyin, l);
}

int
timer_create1(timer_t *tid, clockid_t id, struct sigevent *evp,
    copyin_t fetch_event, struct lwp *l)
{
	int error;
	timer_t timerid;
	struct ptimers *pts;
	struct ptimer *pt;
	struct proc *p;

	p = l->l_proc;

	if ((u_int)id > CLOCK_MONOTONIC)
		return (EINVAL);

	if ((pts = p->p_timers) == NULL)
		pts = timers_alloc(p);

	pt = pool_get(&ptimer_pool, PR_WAITOK);
	if (evp != NULL) {
		if (((error =
		    (*fetch_event)(evp, &pt->pt_ev, sizeof(pt->pt_ev))) != 0) ||
		    ((pt->pt_ev.sigev_notify < SIGEV_NONE) ||
			(pt->pt_ev.sigev_notify > SIGEV_SA)) ||
		    (pt->pt_ev.sigev_notify == SIGEV_SIGNAL &&
			(pt->pt_ev.sigev_signo <= 0 ||
			 pt->pt_ev.sigev_signo >= NSIG))) {
			pool_put(&ptimer_pool, pt);
			return (error ? error : EINVAL);
		}
	}

	/* Find a free timer slot, skipping those reserved for setitimer(). */
	mutex_spin_enter(&timer_lock);
	for (timerid = 3; timerid < TIMER_MAX; timerid++)
		if (pts->pts_timers[timerid] == NULL)
			break;
	if (timerid == TIMER_MAX) {
		mutex_spin_exit(&timer_lock);
		pool_put(&ptimer_pool, pt);
		return EAGAIN;
	}
	if (evp == NULL) {
		pt->pt_ev.sigev_notify = SIGEV_SIGNAL;
		switch (id) {
		case CLOCK_REALTIME:
		case CLOCK_MONOTONIC:
			pt->pt_ev.sigev_signo = SIGALRM;
			break;
		case CLOCK_VIRTUAL:
			pt->pt_ev.sigev_signo = SIGVTALRM;
			break;
		case CLOCK_PROF:
			pt->pt_ev.sigev_signo = SIGPROF;
			break;
		}
		pt->pt_ev.sigev_value.sival_int = timerid;
	}
	pt->pt_info.ksi_signo = pt->pt_ev.sigev_signo;
	pt->pt_info.ksi_errno = 0;
	pt->pt_info.ksi_code = 0;
	pt->pt_info.ksi_pid = p->p_pid;
	pt->pt_info.ksi_uid = kauth_cred_getuid(l->l_cred);
	pt->pt_info.ksi_value = pt->pt_ev.sigev_value;
	pt->pt_type = id;
	pt->pt_proc = p;
	pt->pt_overruns = 0;
	pt->pt_poverruns = 0;
	pt->pt_entry = timerid;
	pt->pt_queued = false;
	timespecclear(&pt->pt_time.it_value);
	if (!CLOCK_VIRTUAL_P(id))
		callout_init(&pt->pt_ch, CALLOUT_MPSAFE);
	else
		pt->pt_active = 0;

	pts->pts_timers[timerid] = pt;
	mutex_spin_exit(&timer_lock);

	return copyout(&timerid, tid, sizeof(timerid));
}
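
/*
 * Illustrative userland sketch (editorial addition, not part of this file;
 * the values are hypothetical): a process reaches timer_create1() through
 * the POSIX API roughly as follows.
 *
 *	struct sigevent ev;
 *	timer_t tid;
 *	struct itimerspec its = { { 1, 0 }, { 1, 0 } };	// fire every 1 s
 *
 *	memset(&ev, 0, sizeof(ev));
 *	ev.sigev_notify = SIGEV_SIGNAL;
 *	ev.sigev_signo = SIGALRM;
 *	timer_create(CLOCK_MONOTONIC, &ev, &tid);
 *	timer_settime(tid, 0, &its, NULL);
 */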

/* Delete a POSIX realtime timer */
int
sys_timer_delete(struct lwp *l, const struct sys_timer_delete_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(timer_t) timerid;
	} */
	struct proc *p = l->l_proc;
	timer_t timerid;
	struct ptimers *pts;
	struct ptimer *pt, *ptn;

	timerid = SCARG(uap, timerid);
	pts = p->p_timers;

	if (pts == NULL || timerid < 2 || timerid >= TIMER_MAX)
		return (EINVAL);

	mutex_spin_enter(&timer_lock);
	if ((pt = pts->pts_timers[timerid]) == NULL) {
		mutex_spin_exit(&timer_lock);
		return (EINVAL);
	}
	if (CLOCK_VIRTUAL_P(pt->pt_type)) {
		if (pt->pt_active) {
			ptn = LIST_NEXT(pt, pt_list);
			LIST_REMOVE(pt, pt_list);
			for ( ; ptn; ptn = LIST_NEXT(ptn, pt_list))
				timespecadd(&pt->pt_time.it_value,
				    &ptn->pt_time.it_value,
				    &ptn->pt_time.it_value);
			pt->pt_active = 0;
		}
	}
	itimerfree(pts, timerid);

	return (0);
}

/*
 * Set up the given timer. The value in pt->pt_time.it_value is taken
 * to be an absolute time for CLOCK_REALTIME/CLOCK_MONOTONIC timers and
 * a relative time for CLOCK_VIRTUAL/CLOCK_PROF timers.
 */
void
timer_settime(struct ptimer *pt)
{
	struct ptimer *ptn, *pptn;
	struct ptlist *ptl;

	KASSERT(mutex_owned(&timer_lock));

	if (!CLOCK_VIRTUAL_P(pt->pt_type)) {
		callout_halt(&pt->pt_ch, &timer_lock);
		if (timespecisset(&pt->pt_time.it_value)) {
			/*
			 * Don't need to check tshzto() return value, here.
			 * callout_reset() does it for us.
			 */
			callout_reset(&pt->pt_ch,
			    pt->pt_type == CLOCK_MONOTONIC ?
			    tshztoup(&pt->pt_time.it_value) :
			    tshzto(&pt->pt_time.it_value),
			    realtimerexpire, pt);
		}
	} else {
		if (pt->pt_active) {
			ptn = LIST_NEXT(pt, pt_list);
			LIST_REMOVE(pt, pt_list);
			for ( ; ptn; ptn = LIST_NEXT(ptn, pt_list))
				timespecadd(&pt->pt_time.it_value,
				    &ptn->pt_time.it_value,
				    &ptn->pt_time.it_value);
		}
		if (timespecisset(&pt->pt_time.it_value)) {
			if (pt->pt_type == CLOCK_VIRTUAL)
				ptl = &pt->pt_proc->p_timers->pts_virtual;
			else
				ptl = &pt->pt_proc->p_timers->pts_prof;

			for (ptn = LIST_FIRST(ptl), pptn = NULL;
			     ptn && timespeccmp(&pt->pt_time.it_value,
				 &ptn->pt_time.it_value, >);
			     pptn = ptn, ptn = LIST_NEXT(ptn, pt_list))
				timespecsub(&pt->pt_time.it_value,
				    &ptn->pt_time.it_value,
				    &pt->pt_time.it_value);

			if (pptn)
				LIST_INSERT_AFTER(pptn, pt, pt_list);
			else
				LIST_INSERT_HEAD(ptl, pt, pt_list);

			for ( ; ptn ; ptn = LIST_NEXT(ptn, pt_list))
				timespecsub(&ptn->pt_time.it_value,
				    &pt->pt_time.it_value,
				    &ptn->pt_time.it_value);

			pt->pt_active = 1;
		} else
			pt->pt_active = 0;
	}
}

void
timer_gettime(struct ptimer *pt, struct itimerspec *aits)
{
	struct timespec now;
	struct ptimer *ptn;

	KASSERT(mutex_owned(&timer_lock));

	*aits = pt->pt_time;
	if (!CLOCK_VIRTUAL_P(pt->pt_type)) {
		/*
		 * Convert from absolute to relative time in .it_value
		 * part of real time timer.  If time for real time
		 * timer has passed return 0, else return difference
		 * between current time and time for the timer to go
		 * off.
		 */
		if (timespecisset(&aits->it_value)) {
			if (pt->pt_type == CLOCK_REALTIME) {
				getnanotime(&now);
			} else { /* CLOCK_MONOTONIC */
				getnanouptime(&now);
			}
			if (timespeccmp(&aits->it_value, &now, <))
				timespecclear(&aits->it_value);
			else
				timespecsub(&aits->it_value, &now,
				    &aits->it_value);
		}
	} else if (pt->pt_active) {
		if (pt->pt_type == CLOCK_VIRTUAL)
			ptn = LIST_FIRST(&pt->pt_proc->p_timers->pts_virtual);
		else
			ptn = LIST_FIRST(&pt->pt_proc->p_timers->pts_prof);
		for ( ; ptn && ptn != pt; ptn = LIST_NEXT(ptn, pt_list))
			timespecadd(&aits->it_value,
			    &ptn->pt_time.it_value, &aits->it_value);
		KASSERT(ptn != NULL); /* pt should be findable on the list */
	} else
		timespecclear(&aits->it_value);
}
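
/*
 * Illustrative note (editorial addition, not in the original source; the
 * values are hypothetical): the virtual/profiling timer lists store deltas,
 * not absolute values.  If three CLOCK_VIRTUAL timers are due in 2 s, 3 s
 * and 5 s of process virtual time, the list holds 2, 1 and 2;
 * timer_settime() subtracts earlier entries while walking to the insertion
 * point, and timer_gettime() adds them back up to recover the full
 * remaining time.
 */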

/* Set and arm a POSIX realtime timer */
int
sys___timer_settime50(struct lwp *l,
    const struct sys___timer_settime50_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(timer_t) timerid;
		syscallarg(int) flags;
		syscallarg(const struct itimerspec *) value;
		syscallarg(struct itimerspec *) ovalue;
	} */
	int error;
	struct itimerspec value, ovalue, *ovp = NULL;

	if ((error = copyin(SCARG(uap, value), &value,
	    sizeof(struct itimerspec))) != 0)
		return (error);

	if (SCARG(uap, ovalue))
		ovp = &ovalue;

	if ((error = dotimer_settime(SCARG(uap, timerid), &value, ovp,
	    SCARG(uap, flags), l->l_proc)) != 0)
		return error;

	if (ovp)
		return copyout(&ovalue, SCARG(uap, ovalue),
		    sizeof(struct itimerspec));
	return 0;
}

int
dotimer_settime(int timerid, struct itimerspec *value,
    struct itimerspec *ovalue, int flags, struct proc *p)
{
	struct timespec now;
	struct itimerspec val, oval;
	struct ptimers *pts;
	struct ptimer *pt;
	int error;

	pts = p->p_timers;

	if (pts == NULL || timerid < 2 || timerid >= TIMER_MAX)
		return EINVAL;
	val = *value;
	if ((error = itimespecfix(&val.it_value)) != 0 ||
	    (error = itimespecfix(&val.it_interval)) != 0)
		return error;

	mutex_spin_enter(&timer_lock);
	if ((pt = pts->pts_timers[timerid]) == NULL) {
		mutex_spin_exit(&timer_lock);
		return EINVAL;
	}

	oval = pt->pt_time;
	pt->pt_time = val;

	/*
	 * If we've been passed a relative time for a realtime timer,
	 * convert it to absolute; if an absolute time for a virtual
	 * timer, convert it to relative and make sure we don't set it
	 * to zero, which would cancel the timer, or let it go
	 * negative, which would confuse the comparison tests.
	 */
	if (timespecisset(&pt->pt_time.it_value)) {
		if (!CLOCK_VIRTUAL_P(pt->pt_type)) {
			if ((flags & TIMER_ABSTIME) == 0) {
				if (pt->pt_type == CLOCK_REALTIME) {
					getnanotime(&now);
				} else { /* CLOCK_MONOTONIC */
					getnanouptime(&now);
				}
				timespecadd(&pt->pt_time.it_value, &now,
				    &pt->pt_time.it_value);
			}
		} else {
			if ((flags & TIMER_ABSTIME) != 0) {
				getnanotime(&now);
				timespecsub(&pt->pt_time.it_value, &now,
				    &pt->pt_time.it_value);
				if (!timespecisset(&pt->pt_time.it_value) ||
				    pt->pt_time.it_value.tv_sec < 0) {
					pt->pt_time.it_value.tv_sec = 0;
					pt->pt_time.it_value.tv_nsec = 1;
				}
			}
		}
	}

	timer_settime(pt);
	mutex_spin_exit(&timer_lock);

	if (ovalue)
		*ovalue = oval;

	return (0);
}
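
/*
 * Illustrative note (editorial addition, not in the original source; the
 * times are hypothetical): with a relative it_value of 2.0 s on a
 * CLOCK_MONOTONIC timer and the current uptime at 100.0 s, the conversion
 * above stores 102.0 s as the absolute expiry.  Conversely, an absolute
 * time handed to a CLOCK_VIRTUAL timer is turned back into a relative one
 * and clamped to at least 1 ns, so a value in the past does not read as a
 * disarmed timer.
 */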

/* Return the time remaining until a POSIX timer fires. */
int
sys___timer_gettime50(struct lwp *l,
    const struct sys___timer_gettime50_args *uap, register_t *retval)
{
	/* {
		syscallarg(timer_t) timerid;
		syscallarg(struct itimerspec *) value;
	} */
	struct itimerspec its;
	int error;

	if ((error = dotimer_gettime(SCARG(uap, timerid), l->l_proc,
	    &its)) != 0)
		return error;

	return copyout(&its, SCARG(uap, value), sizeof(its));
}

int
dotimer_gettime(int timerid, struct proc *p, struct itimerspec *its)
{
	struct ptimer *pt;
	struct ptimers *pts;

	pts = p->p_timers;
	if (pts == NULL || timerid < 2 || timerid >= TIMER_MAX)
		return (EINVAL);
	mutex_spin_enter(&timer_lock);
	if ((pt = pts->pts_timers[timerid]) == NULL) {
		mutex_spin_exit(&timer_lock);
		return (EINVAL);
	}
	timer_gettime(pt, its);
	mutex_spin_exit(&timer_lock);

	return 0;
}

/*
 * Return the count of the number of times a periodic timer expired
 * while a notification was already pending. The counter is reset when
 * a timer expires and a notification can be posted.
 */
int
sys_timer_getoverrun(struct lwp *l, const struct sys_timer_getoverrun_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(timer_t) timerid;
	} */
	struct proc *p = l->l_proc;
	struct ptimers *pts;
	int timerid;
	struct ptimer *pt;

	timerid = SCARG(uap, timerid);

	pts = p->p_timers;
	if (pts == NULL || timerid < 2 || timerid >= TIMER_MAX)
		return (EINVAL);
	mutex_spin_enter(&timer_lock);
	if ((pt = pts->pts_timers[timerid]) == NULL) {
		mutex_spin_exit(&timer_lock);
		return (EINVAL);
	}
	*retval = pt->pt_poverruns;
	mutex_spin_exit(&timer_lock);

	return (0);
}

/*
 * Real interval timer expired:
 * send process whose timer expired an alarm signal.
 * If time is not set up to reload, then just return.
 * Else compute next time timer should go off which is > current time.
 * This is where delay in processing this timeout causes multiple
 * SIGALRM calls to be compressed into one.
 */
void
realtimerexpire(void *arg)
{
	uint64_t last_val, next_val, interval, now_ns;
	struct timespec now, next;
	struct ptimer *pt;
	int backwards;

	pt = arg;

	mutex_spin_enter(&timer_lock);
	itimerfire(pt);

	if (!timespecisset(&pt->pt_time.it_interval)) {
		timespecclear(&pt->pt_time.it_value);
		mutex_spin_exit(&timer_lock);
		return;
	}

	if (pt->pt_type == CLOCK_MONOTONIC) {
		getnanouptime(&now);
	} else {
		getnanotime(&now);
	}
	backwards = (timespeccmp(&pt->pt_time.it_value, &now, >));
	timespecadd(&pt->pt_time.it_value, &pt->pt_time.it_interval, &next);
	/* Handle the easy case of non-overflown timers first. */
	if (!backwards && timespeccmp(&next, &now, >)) {
		pt->pt_time.it_value = next;
	} else {
		now_ns = timespec2ns(&now);
		last_val = timespec2ns(&pt->pt_time.it_value);
		interval = timespec2ns(&pt->pt_time.it_interval);

		next_val = now_ns +
		    (now_ns - last_val + interval - 1) % interval;

		if (backwards)
			next_val += interval;
		else
			pt->pt_overruns += (now_ns - last_val) / interval;

		pt->pt_time.it_value.tv_sec = next_val / 1000000000;
		pt->pt_time.it_value.tv_nsec = next_val % 1000000000;
	}

	/*
	 * Don't need to check tshzto() return value, here.
	 * callout_reset() does it for us.
	 */
	callout_reset(&pt->pt_ch, pt->pt_type == CLOCK_MONOTONIC ?
	    tshztoup(&pt->pt_time.it_value) : tshzto(&pt->pt_time.it_value),
	    realtimerexpire, pt);
	mutex_spin_exit(&timer_lock);
}
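
/*
 * Illustrative note (editorial addition, not in the original source; the
 * figures are hypothetical): if a 1 s periodic timer is serviced 2.5 s late
 * because the callout was held off, the slow path above charges the two
 * missed periods to pt_overruns ((now_ns - last_val) / interval == 2) and
 * computes a fresh absolute it_value at most one interval past the current
 * time, rather than stepping it_interval forward one period at a time.
 */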

/* BSD routine to get the value of an interval timer. */
/* ARGSUSED */
int
sys___getitimer50(struct lwp *l, const struct sys___getitimer50_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(int) which;
		syscallarg(struct itimerval *) itv;
	} */
	struct proc *p = l->l_proc;
	struct itimerval aitv;
	int error;

	error = dogetitimer(p, SCARG(uap, which), &aitv);
	if (error)
		return error;
	return (copyout(&aitv, SCARG(uap, itv), sizeof(struct itimerval)));
}

int
dogetitimer(struct proc *p, int which, struct itimerval *itvp)
{
	struct ptimers *pts;
	struct ptimer *pt;
	struct itimerspec its;

	if ((u_int)which > ITIMER_MONOTONIC)
		return (EINVAL);

	mutex_spin_enter(&timer_lock);
	pts = p->p_timers;
	if (pts == NULL || (pt = pts->pts_timers[which]) == NULL) {
		timerclear(&itvp->it_value);
		timerclear(&itvp->it_interval);
	} else {
		timer_gettime(pt, &its);
		TIMESPEC_TO_TIMEVAL(&itvp->it_value, &its.it_value);
		TIMESPEC_TO_TIMEVAL(&itvp->it_interval, &its.it_interval);
	}
	mutex_spin_exit(&timer_lock);

	return 0;
}

/* BSD routine to set/arm an interval timer. */
/* ARGSUSED */
int
sys___setitimer50(struct lwp *l, const struct sys___setitimer50_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(int) which;
		syscallarg(const struct itimerval *) itv;
		syscallarg(struct itimerval *) oitv;
	} */
	struct proc *p = l->l_proc;
	int which = SCARG(uap, which);
	struct sys___getitimer50_args getargs;
	const struct itimerval *itvp;
	struct itimerval aitv;
	int error;

	if ((u_int)which > ITIMER_MONOTONIC)
		return (EINVAL);
	itvp = SCARG(uap, itv);
	if (itvp &&
	    (error = copyin(itvp, &aitv, sizeof(struct itimerval))) != 0)
		return (error);
	if (SCARG(uap, oitv) != NULL) {
		SCARG(&getargs, which) = which;
		SCARG(&getargs, itv) = SCARG(uap, oitv);
		if ((error = sys___getitimer50(l, &getargs, retval)) != 0)
			return (error);
	}
	if (itvp == 0)
		return (0);

	return dosetitimer(p, which, &aitv);
}

int
dosetitimer(struct proc *p, int which, struct itimerval *itvp)
{
	struct timespec now;
	struct ptimers *pts;
	struct ptimer *pt, *spare;

	KASSERT((u_int)which <= CLOCK_MONOTONIC);
	if (itimerfix(&itvp->it_value) || itimerfix(&itvp->it_interval))
		return (EINVAL);

	/*
	 * Don't bother allocating data structures if the process just
	 * wants to clear the timer.
	 */
	spare = NULL;
	pts = p->p_timers;
 retry:
	if (!timerisset(&itvp->it_value) && (pts == NULL ||
	    pts->pts_timers[which] == NULL))
		return (0);
	if (pts == NULL)
		pts = timers_alloc(p);
	mutex_spin_enter(&timer_lock);
	pt = pts->pts_timers[which];
	if (pt == NULL) {
		if (spare == NULL) {
			mutex_spin_exit(&timer_lock);
			spare = pool_get(&ptimer_pool, PR_WAITOK);
			goto retry;
		}
		pt = spare;
		spare = NULL;
		pt->pt_ev.sigev_notify = SIGEV_SIGNAL;
		pt->pt_ev.sigev_value.sival_int = which;
		pt->pt_overruns = 0;
		pt->pt_proc = p;
		pt->pt_type = which;
		pt->pt_entry = which;
		pt->pt_queued = false;
		if (pt->pt_type == CLOCK_REALTIME)
			callout_init(&pt->pt_ch, CALLOUT_MPSAFE);
		else
			pt->pt_active = 0;

		switch (which) {
		case ITIMER_REAL:
		case ITIMER_MONOTONIC:
			pt->pt_ev.sigev_signo = SIGALRM;
			break;
		case ITIMER_VIRTUAL:
			pt->pt_ev.sigev_signo = SIGVTALRM;
			break;
		case ITIMER_PROF:
			pt->pt_ev.sigev_signo = SIGPROF;
			break;
		}
		pts->pts_timers[which] = pt;
	}

	TIMEVAL_TO_TIMESPEC(&itvp->it_value, &pt->pt_time.it_value);
	TIMEVAL_TO_TIMESPEC(&itvp->it_interval, &pt->pt_time.it_interval);

	if (timespecisset(&pt->pt_time.it_value)) {
		/* Convert to absolute time */
		/* XXX need to wrap in splclock for timecounters case? */
		switch (which) {
		case ITIMER_REAL:
			getnanotime(&now);
			timespecadd(&pt->pt_time.it_value, &now,
			    &pt->pt_time.it_value);
			break;
		case ITIMER_MONOTONIC:
			getnanouptime(&now);
			timespecadd(&pt->pt_time.it_value, &now,
			    &pt->pt_time.it_value);
			break;
		default:
			break;
		}
	}
	timer_settime(pt);
	mutex_spin_exit(&timer_lock);
	if (spare != NULL)
		pool_put(&ptimer_pool, spare);

	return (0);
}
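
/*
 * Illustrative userland sketch (editorial addition, not part of this file;
 * the interval values are hypothetical): the BSD interval timer path above
 * is what a call such as the following ends up in.
 *
 *	struct itimerval itv;
 *
 *	itv.it_value.tv_sec = 0;
 *	itv.it_value.tv_usec = 500000;		// first fire in 0.5 s
 *	itv.it_interval = itv.it_value;		// then every 0.5 s
 *	setitimer(ITIMER_REAL, &itv, NULL);	// delivers SIGALRM
 */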

/* Utility routines to manage the array of pointers to timers. */
struct ptimers *
timers_alloc(struct proc *p)
{
	struct ptimers *pts;
	int i;

	pts = pool_get(&ptimers_pool, PR_WAITOK);
	LIST_INIT(&pts->pts_virtual);
	LIST_INIT(&pts->pts_prof);
	for (i = 0; i < TIMER_MAX; i++)
		pts->pts_timers[i] = NULL;
	pts->pts_fired = 0;
	mutex_spin_enter(&timer_lock);
	if (p->p_timers == NULL) {
		p->p_timers = pts;
		mutex_spin_exit(&timer_lock);
		return pts;
	}
	mutex_spin_exit(&timer_lock);
	pool_put(&ptimers_pool, pts);
	return p->p_timers;
}

/*
 * Clean up the per-process timers. If "which" is set to TIMERS_ALL,
 * then clean up all timers and free all the data structures. If
 * "which" is set to TIMERS_POSIX, only clean up the timers allocated
 * by timer_create(), not the BSD setitimer() timers, and only free the
 * structure if none of those remain.
 */
void
timers_free(struct proc *p, int which)
{
	struct ptimers *pts;
	struct ptimer *ptn;
	struct timespec ts;
	int i;

	if (p->p_timers == NULL)
		return;

	pts = p->p_timers;
	mutex_spin_enter(&timer_lock);
	if (which == TIMERS_ALL) {
		p->p_timers = NULL;
		i = 0;
	} else {
		timespecclear(&ts);
		for (ptn = LIST_FIRST(&pts->pts_virtual);
		     ptn && ptn != pts->pts_timers[ITIMER_VIRTUAL];
		     ptn = LIST_NEXT(ptn, pt_list)) {
			KASSERT(ptn->pt_type == CLOCK_VIRTUAL);
			timespecadd(&ts, &ptn->pt_time.it_value, &ts);
		}
		LIST_FIRST(&pts->pts_virtual) = NULL;
		if (ptn) {
			KASSERT(ptn->pt_type == CLOCK_VIRTUAL);
			timespecadd(&ts, &ptn->pt_time.it_value,
			    &ptn->pt_time.it_value);
			LIST_INSERT_HEAD(&pts->pts_virtual, ptn, pt_list);
		}
		timespecclear(&ts);
		for (ptn = LIST_FIRST(&pts->pts_prof);
		     ptn && ptn != pts->pts_timers[ITIMER_PROF];
		     ptn = LIST_NEXT(ptn, pt_list)) {
			KASSERT(ptn->pt_type == CLOCK_PROF);
			timespecadd(&ts, &ptn->pt_time.it_value, &ts);
		}
		LIST_FIRST(&pts->pts_prof) = NULL;
		if (ptn) {
			KASSERT(ptn->pt_type == CLOCK_PROF);
			timespecadd(&ts, &ptn->pt_time.it_value,
			    &ptn->pt_time.it_value);
			LIST_INSERT_HEAD(&pts->pts_prof, ptn, pt_list);
		}
		i = 3;
	}
	for ( ; i < TIMER_MAX; i++) {
		if (pts->pts_timers[i] != NULL) {
			itimerfree(pts, i);
			mutex_spin_enter(&timer_lock);
		}
	}
	if (pts->pts_timers[0] == NULL && pts->pts_timers[1] == NULL &&
	    pts->pts_timers[2] == NULL) {
		p->p_timers = NULL;
		mutex_spin_exit(&timer_lock);
		pool_put(&ptimers_pool, pts);
	} else
		mutex_spin_exit(&timer_lock);
}

static void
itimerfree(struct ptimers *pts, int index)
{
	struct ptimer *pt;

	KASSERT(mutex_owned(&timer_lock));

	pt = pts->pts_timers[index];
	pts->pts_timers[index] = NULL;
	if (!CLOCK_VIRTUAL_P(pt->pt_type))
		callout_halt(&pt->pt_ch, &timer_lock);
	if (pt->pt_queued)
		TAILQ_REMOVE(&timer_queue, pt, pt_chain);
	mutex_spin_exit(&timer_lock);
	if (!CLOCK_VIRTUAL_P(pt->pt_type))
		callout_destroy(&pt->pt_ch);
	pool_put(&ptimer_pool, pt);
}

/*
 * Decrement an interval timer by a specified number
 * of nanoseconds, which must be less than a second,
 * i.e. < 1000000000.  If the timer expires, then reload
 * it.  In this case, carry over (nsec - old value) to
 * reduce the value reloaded into the timer so that
 * the timer does not drift.  This routine assumes
 * that it is called in a context where the timers
 * on which it is operating cannot change in value.
 */
static int
itimerdecr(struct ptimer *pt, int nsec)
{
	struct itimerspec *itp;

	KASSERT(mutex_owned(&timer_lock));
	KASSERT(CLOCK_VIRTUAL_P(pt->pt_type));

	itp = &pt->pt_time;
	if (itp->it_value.tv_nsec < nsec) {
		if (itp->it_value.tv_sec == 0) {
			/* expired, and already in next interval */
			nsec -= itp->it_value.tv_nsec;
			goto expire;
		}
		itp->it_value.tv_nsec += 1000000000;
		itp->it_value.tv_sec--;
	}
	itp->it_value.tv_nsec -= nsec;
	nsec = 0;
	if (timespecisset(&itp->it_value))
		return (1);
	/* expired, exactly at end of interval */
expire:
	if (timespecisset(&itp->it_interval)) {
		itp->it_value = itp->it_interval;
		itp->it_value.tv_nsec -= nsec;
		if (itp->it_value.tv_nsec < 0) {
			itp->it_value.tv_nsec += 1000000000;
			itp->it_value.tv_sec--;
		}
		timer_settime(pt);
	} else
		itp->it_value.tv_nsec = 0;	/* sec is already 0 */
	return (0);
}
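
/*
 * Illustrative note (editorial addition, not in the original source; hz is
 * hypothetical): with hz == 100, each tick decrements a virtual timer by
 * tick * 1000 = 10000000 ns.  A timer with 4 ms left therefore expires with
 * a 6 ms overshoot; if its interval is 50 ms, the reload above becomes
 * 50 ms - 6 ms = 44 ms, so repeated expirations do not drift.
 */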

static void
itimerfire(struct ptimer *pt)
{

	KASSERT(mutex_owned(&timer_lock));

	/*
	 * XXX Can overrun, but we don't do signal queueing yet, anyway.
	 * XXX Relying on the clock interrupt is stupid.
	 */
	if (pt->pt_ev.sigev_notify != SIGEV_SIGNAL || pt->pt_queued) {
		return;
	}
	TAILQ_INSERT_TAIL(&timer_queue, pt, pt_chain);
	pt->pt_queued = true;
	softint_schedule(timer_sih);
}

void
timer_tick(lwp_t *l, bool user)
{
	struct ptimers *pts;
	struct ptimer *pt;
	proc_t *p;

	p = l->l_proc;
	if (p->p_timers == NULL)
		return;

	mutex_spin_enter(&timer_lock);
	if ((pts = l->l_proc->p_timers) != NULL) {
		/*
		 * Run current process's virtual and profile time, as needed.
		 */
		if (user && (pt = LIST_FIRST(&pts->pts_virtual)) != NULL)
			if (itimerdecr(pt, tick * 1000) == 0)
				itimerfire(pt);
		if ((pt = LIST_FIRST(&pts->pts_prof)) != NULL)
			if (itimerdecr(pt, tick * 1000) == 0)
				itimerfire(pt);
	}
	mutex_spin_exit(&timer_lock);
}

static void
timer_intr(void *cookie)
{
	ksiginfo_t ksi;
	struct ptimer *pt;
	proc_t *p;

	mutex_enter(proc_lock);
	mutex_spin_enter(&timer_lock);
	while ((pt = TAILQ_FIRST(&timer_queue)) != NULL) {
		TAILQ_REMOVE(&timer_queue, pt, pt_chain);
		KASSERT(pt->pt_queued);
		pt->pt_queued = false;

		if (pt->pt_proc->p_timers == NULL) {
			/* Process is dying. */
			continue;
		}
		p = pt->pt_proc;
		if (pt->pt_ev.sigev_notify != SIGEV_SIGNAL) {
			continue;
		}
		if (sigismember(&p->p_sigpend.sp_set, pt->pt_ev.sigev_signo)) {
			pt->pt_overruns++;
			continue;
		}

		KSI_INIT(&ksi);
		ksi.ksi_signo = pt->pt_ev.sigev_signo;
		ksi.ksi_code = SI_TIMER;
		ksi.ksi_value = pt->pt_ev.sigev_value;
		pt->pt_poverruns = pt->pt_overruns;
		pt->pt_overruns = 0;
		mutex_spin_exit(&timer_lock);
		kpsignal(p, &ksi, NULL);
		mutex_spin_enter(&timer_lock);
	}
	mutex_spin_exit(&timer_lock);
	mutex_exit(proc_lock);
}

/*
 * Check if the time will wrap if set to ts.
 *
 * ts - timespec describing the new time
 * delta - the delta between the current time and ts
 */
bool
time_wraps(struct timespec *ts, struct timespec *delta)
{

	/*
	 * Don't allow the time to be set forward so far it
	 * will wrap and become negative, thus allowing an
	 * attacker to bypass the next check below.  The
	 * cutoff is 1 year before rollover occurs, so even
	 * if the attacker uses adjtime(2) to move the time
	 * past the cutoff, it will take a very long time
	 * to get to the wrap point.
	 */
	if ((ts->tv_sec > LLONG_MAX - 365*24*60*60) ||
	    (delta->tv_sec < 0 || delta->tv_nsec < 0))
		return true;

	return false;
}
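
/*
 * Illustrative note (editorial addition, not in the original source): the
 * cutoff above is LLONG_MAX - 365*24*60*60, that is, one year's worth of
 * seconds (31536000) below the point where a 64-bit time_t would overflow;
 * a requested time within that final year, or any negative delta, is
 * treated as a wrap and rejected.
 */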