/*
 * Copyright (c) 2000-2008 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 */

#include <mach/mach_types.h>

#include <kern/lock.h>
#include <kern/spl.h>
#include <kern/sched_prim.h>
#include <kern/thread.h>
#include <kern/clock.h>
#include <kern/host_notify.h>

#include <IOKit/IOPlatformExpert.h>

#include <machine/commpage.h>

#include <mach/mach_traps.h>
#include <mach/mach_time.h>

uint32_t	hz_tick_interval = 1;


decl_simple_lock_data(,clock_lock)

#define clock_lock()	\
	simple_lock(&clock_lock)

#define clock_unlock()	\
	simple_unlock(&clock_lock)

#define clock_lock_init()	\
	simple_lock_init(&clock_lock, 0)


/*
 *	Time of day (calendar) variables.
 *
 *	Algorithm:
 *
 *	TOD <- (seconds + epoch, fraction) <- CONV(current absolute time + offset)
 *
 *	where CONV converts absolute time units into seconds and a fraction.
 */
static struct clock_calend {
	uint64_t	epoch;
	uint64_t	offset;

	int32_t		adjdelta;	/* Nanosecond time delta for this adjustment period */
	uint64_t	adjstart;	/* Absolute time value for start of this adjustment period */
	uint32_t	adjoffset;	/* Absolute time offset for this adjustment period as absolute value */
} clock_calend;

#if	CONFIG_DTRACE

/*
 * Unlocked calendar flipflop; this is used to track a clock_calend such
 * that we can safely access a snapshot of a valid clock_calend structure
 * without needing to take any locks to do it.
 *
 * The trick is to use a generation count and set the low bit when it is
 * being updated/read; by doing this, we guarantee, through use of the
 * hw_atomic functions, that the generation is incremented when the bit
 * is cleared atomically (by using a 1 bit add).
 */
static struct unlocked_clock_calend {
	struct clock_calend	calend;		/* copy of calendar */
	uint32_t		gen;		/* generation count */
} flipflop[2];

static void clock_track_calend_nowait(void);

#endif
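
/*
 * Illustrative sketch (not compiled): how the epoch/offset pair is turned
 * into a calendar value, following the TOD algorithm above with the
 * locking and slew handling stripped out.  The function name is invented
 * for the example.
 */
#if 0
static void
example_calendar_read(clock_sec_t *secs, clock_usec_t *microsecs)
{
	uint64_t now = mach_absolute_time();

	/* CONV(current absolute time + offset)... */
	absolutetime_to_microtime(now + clock_calend.offset, secs, microsecs);

	/* ...then add the seconds epoch. */
	*secs += (clock_sec_t)clock_calend.epoch;
}
#endif
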
/*
 *	Calendar adjustment variables and values.
 */
#define calend_adjperiod	(NSEC_PER_SEC / 100)	/* adjustment period, ns */
#define calend_adjskew		(40 * NSEC_PER_USEC)	/* "standard" skew, ns / period */
#define calend_adjbig		(NSEC_PER_SEC)		/* use 10x skew above adjbig ns */

static int64_t		calend_adjtotal;	/* Nanosecond remaining total adjustment */
static uint64_t		calend_adjdeadline;	/* Absolute time value for next adjustment period */
static uint32_t		calend_adjinterval;	/* Absolute time interval of adjustment period */

static timer_call_data_t	calend_adjcall;
static uint32_t			calend_adjactive;

static uint32_t	calend_set_adjustment(
			long		*secs,
			int		*microsecs);

static void	calend_adjust_call(void);
static uint32_t	calend_adjust(void);

static thread_call_data_t	calend_wakecall;

extern	void	IOKitResetTime(void);

void _clock_delay_until_deadline(uint64_t	interval,
				 uint64_t	deadline);

static uint64_t		clock_boottime;		/* Seconds boottime epoch */

#define TIME_ADD(rsecs, secs, rfrac, frac, unit)	\
MACRO_BEGIN						\
	if (((rfrac) += (frac)) >= (unit)) {		\
		(rfrac) -= (unit);			\
		(rsecs) += 1;				\
	}						\
	(rsecs) += (secs);				\
MACRO_END

#define TIME_SUB(rsecs, secs, rfrac, frac, unit)	\
MACRO_BEGIN						\
	if ((int)((rfrac) -= (frac)) < 0) {		\
		(rfrac) += (unit);			\
		(rsecs) -= 1;				\
	}						\
	(rsecs) -= (secs);				\
MACRO_END

/*
 *	clock_config:
 *
 *	Called once at boot to configure the clock subsystem.
 */
void
clock_config(void)
{
	clock_lock_init();

	timer_call_setup(&calend_adjcall, (timer_call_func_t)calend_adjust_call, NULL);
	thread_call_setup(&calend_wakecall, (thread_call_func_t)IOKitResetTime, NULL);

	clock_oldconfig();
}

/*
 *	clock_init:
 *
 *	Called on a processor each time it is started.
 */
void
clock_init(void)
{
	clock_oldinit();
}

/*
 *	clock_timebase_init:
 *
 *	Called by machine dependent code
 *	to initialize areas dependent on the
 *	timebase value.  May be called multiple
 *	times during start up.
 */
void
clock_timebase_init(void)
{
	uint64_t	abstime;

	nanoseconds_to_absolutetime(calend_adjperiod, &abstime);
	calend_adjinterval = (uint32_t)abstime;

	nanoseconds_to_absolutetime(NSEC_PER_SEC / 100, &abstime);
	hz_tick_interval = (uint32_t)abstime;

	sched_timebase_init();
}

/*
 *	mach_timebase_info_trap:
 *
 *	User trap returns timebase constant.
 */
kern_return_t
mach_timebase_info_trap(
	struct mach_timebase_info_trap_args *args)
{
	mach_vm_address_t		out_info_addr = args->info;
	mach_timebase_info_data_t	info;

	clock_timebase_info(&info);

	copyout((void *)&info, out_info_addr, sizeof (info));

	return (KERN_SUCCESS);
}

/*
 *	Calendar routines.
 */

/*
 *	clock_get_calendar_microtime:
 *
 *	Returns the current calendar value,
 *	microseconds as the fraction.
 */
void
clock_get_calendar_microtime(
	clock_sec_t		*secs,
	clock_usec_t		*microsecs)
{
	clock_get_calendar_absolute_and_microtime(secs, microsecs, NULL);
}
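
/*
 * Illustrative sketch (user-space, not compiled here): the constant
 * returned by mach_timebase_info_trap() above is the numer/denom ratio
 * callers use to convert mach_absolute_time() deltas to nanoseconds.
 * The helper name is invented for the example.
 */
#if 0
#include <mach/mach_time.h>

static uint64_t
example_abs_to_ns(uint64_t delta)
{
	static mach_timebase_info_data_t tb;

	if (tb.denom == 0)
		(void)mach_timebase_info(&tb);

	/* Can overflow for very large deltas; fine for a sketch. */
	return (delta * tb.numer / tb.denom);
}
#endif
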
/*
 *	clock_get_calendar_absolute_and_microtime:
 *
 *	Returns the current calendar value,
 *	microseconds as the fraction. Also
 *	returns mach_absolute_time if abstime
 *	is not NULL.
 */
void
clock_get_calendar_absolute_and_microtime(
	clock_sec_t		*secs,
	clock_usec_t		*microsecs,
	uint64_t		*abstime)
{
	uint64_t	now;
	spl_t		s;

	s = splclock();
	clock_lock();

	now = mach_absolute_time();
	if (abstime)
		*abstime = now;

	if (clock_calend.adjdelta < 0) {
		uint32_t	t32;

		/*
		 * Since offset is decremented during a negative adjustment,
		 * ensure that time increases monotonically without going
		 * temporarily backwards.
		 * If the delta has not yet passed, now is set to the start
		 * of the current adjustment period; otherwise, we're between
		 * the expiry of the delta and the next call to calend_adjust(),
		 * and we offset accordingly.
		 */
		if (now > clock_calend.adjstart) {
			t32 = (uint32_t)(now - clock_calend.adjstart);

			if (t32 > clock_calend.adjoffset)
				now -= clock_calend.adjoffset;
			else
				now = clock_calend.adjstart;
		}
	}

	now += clock_calend.offset;

	absolutetime_to_microtime(now, secs, microsecs);

	*secs += (clock_sec_t)clock_calend.epoch;

	clock_unlock();
	splx(s);
}

/*
 *	clock_get_calendar_nanotime:
 *
 *	Returns the current calendar value,
 *	nanoseconds as the fraction.
 *
 *	Since we do not have an interface to
 *	set the calendar with resolution greater
 *	than a microsecond, we honor that here.
 */
void
clock_get_calendar_nanotime(
	clock_sec_t		*secs,
	clock_nsec_t		*nanosecs)
{
	uint64_t	now;
	spl_t		s;

	s = splclock();
	clock_lock();

	now = mach_absolute_time();

	if (clock_calend.adjdelta < 0) {
		uint32_t	t32;

		if (now > clock_calend.adjstart) {
			t32 = (uint32_t)(now - clock_calend.adjstart);

			if (t32 > clock_calend.adjoffset)
				now -= clock_calend.adjoffset;
			else
				now = clock_calend.adjstart;
		}
	}

	now += clock_calend.offset;

	absolutetime_to_microtime(now, secs, nanosecs);

	*nanosecs *= NSEC_PER_USEC;

	*secs += (clock_sec_t)clock_calend.epoch;

	clock_unlock();
	splx(s);
}

/*
 *	clock_gettimeofday:
 *
 *	Kernel interface for commpage implementation of
 *	gettimeofday() syscall.
 *
 *	Returns the current calendar value, and updates the
 *	commpage info as appropriate.  Because most calls to
 *	gettimeofday() are handled in user mode by the commpage,
 *	this routine should be used infrequently.
 */
void
clock_gettimeofday(
	clock_sec_t	*secs,
	clock_usec_t	*microsecs)
{
	uint64_t	now;
	spl_t		s;

	s = splclock();
	clock_lock();

	now = mach_absolute_time();

	if (clock_calend.adjdelta >= 0) {
		clock_gettimeofday_set_commpage(now, clock_calend.epoch, clock_calend.offset, secs, microsecs);
	}
	else {
		uint32_t	t32;

		if (now > clock_calend.adjstart) {
			t32 = (uint32_t)(now - clock_calend.adjstart);

			if (t32 > clock_calend.adjoffset)
				now -= clock_calend.adjoffset;
			else
				now = clock_calend.adjstart;
		}

		now += clock_calend.offset;

		absolutetime_to_microtime(now, secs, microsecs);

		*secs += (clock_sec_t)clock_calend.epoch;
	}

	clock_unlock();
	splx(s);
}
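
/*
 * Illustrative sketch (not compiled): the clamp the readers above apply
 * during a negative slew, extracted into one place.  Backing "now" up by
 * at most adjoffset, and never past adjstart, keeps reported time
 * monotonic while the offset is being decremented.  The function name is
 * invented for the example.
 */
#if 0
static uint64_t
example_negative_slew_clamp(uint64_t now)
{
	if (clock_calend.adjdelta < 0 && now > clock_calend.adjstart) {
		uint32_t t32 = (uint32_t)(now - clock_calend.adjstart);

		if (t32 > clock_calend.adjoffset)
			now -= clock_calend.adjoffset;	/* delta has expired */
		else
			now = clock_calend.adjstart;	/* still inside the period */
	}
	return (now);
}
#endif
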
/*
 *	clock_set_calendar_microtime:
 *
 *	Sets the current calendar value by
 *	recalculating the epoch and offset
 *	from the system clock.
 *
 *	Also adjusts the boottime to keep the
 *	value consistent, writes the new
 *	calendar value to the platform clock,
 *	and sends calendar change notifications.
 */
void
clock_set_calendar_microtime(
	clock_sec_t		secs,
	clock_usec_t		microsecs)
{
	clock_sec_t		sys;
	clock_usec_t		microsys;
	clock_sec_t		newsecs;
	spl_t			s;

	/* Round to the nearest whole second for the platform (RTC) clock. */
	newsecs = (microsecs < USEC_PER_SEC / 2)? secs: secs + 1;

	s = splclock();
	clock_lock();

	commpage_disable_timestamp();

	/*
	 *	Calculate the new calendar epoch based on
	 *	the new value and the system clock.
	 */
	clock_get_system_microtime(&sys, &microsys);
	TIME_SUB(secs, sys, microsecs, microsys, USEC_PER_SEC);

	/*
	 *	Adjust the boottime based on the delta.
	 */
	clock_boottime += secs - clock_calend.epoch;

	/*
	 *	Set the new calendar epoch.
	 */
	clock_calend.epoch = secs;

	nanoseconds_to_absolutetime((uint64_t)microsecs * NSEC_PER_USEC, &clock_calend.offset);

	/*
	 *	Cancel any adjustment in progress.
	 */
	calend_adjtotal = clock_calend.adjdelta = 0;

	clock_unlock();

	/*
	 *	Set the new value for the platform clock.
	 */
	PESetGMTTimeOfDay(newsecs);

	splx(s);

	/*
	 *	Send host notifications.
	 */
	host_notify_calendar_change();

#if CONFIG_DTRACE
	clock_track_calend_nowait();
#endif
}

/*
 *	clock_initialize_calendar:
 *
 *	Set the calendar and related clocks
 *	from the platform clock at boot or
 *	wake event.
 *
 *	Also sends host notifications.
 */
void
clock_initialize_calendar(void)
{
	clock_sec_t		sys, secs = PEGetGMTTimeOfDay();
	clock_usec_t		microsys, microsecs = 0;
	spl_t			s;

	s = splclock();
	clock_lock();

	commpage_disable_timestamp();

	if ((long)secs >= (long)clock_boottime) {
		/*
		 *	Initialize the boot time based on the platform clock.
		 */
		if (clock_boottime == 0)
			clock_boottime = secs;

		/*
		 *	Calculate the new calendar epoch based on
		 *	the platform clock and the system clock.
		 */
		clock_get_system_microtime(&sys, &microsys);
		TIME_SUB(secs, sys, microsecs, microsys, USEC_PER_SEC);

		/*
		 *	Set the new calendar epoch.
		 */
		clock_calend.epoch = secs;

		nanoseconds_to_absolutetime((uint64_t)microsecs * NSEC_PER_USEC, &clock_calend.offset);

		/*
		 *	Cancel any adjustment in progress.
		 */
		calend_adjtotal = clock_calend.adjdelta = 0;
	}

	clock_unlock();
	splx(s);

	/*
	 *	Send host notifications.
	 */
	host_notify_calendar_change();

#if CONFIG_DTRACE
	clock_track_calend_nowait();
#endif
}

/*
 *	clock_get_boottime_nanotime:
 *
 *	Return the boottime, used by sysctl.
 */
void
clock_get_boottime_nanotime(
	clock_sec_t		*secs,
	clock_nsec_t		*nanosecs)
{
	spl_t	s;

	s = splclock();
	clock_lock();

	*secs = (clock_sec_t)clock_boottime;
	*nanosecs = 0;

	clock_unlock();
	splx(s);
}
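
/*
 * Illustrative sketch (not compiled): TIME_SUB() with concrete values, as
 * used above to subtract system uptime from the wall-clock value.  The
 * fraction column borrows from the seconds column exactly like decimal
 * subtraction: 10.200000s - 3.700000s underflows the fraction, so
 * USEC_PER_SEC is added back and one second is borrowed, giving 6.500000s.
 */
#if 0
static void
example_time_sub(void)
{
	clock_sec_t	rsecs = 10;
	clock_usec_t	rfrac = 200000;

	TIME_SUB(rsecs, 3, rfrac, 700000, USEC_PER_SEC);
	/* Now rsecs == 6 and rfrac == 500000. */
}
#endif
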
/*
 *	clock_adjtime:
 *
 *	Interface to adjtime() syscall.
 *
 *	Calculates adjustment variables and
 *	initiates adjustment.
 */
void
clock_adjtime(
	long		*secs,
	int		*microsecs)
{
	uint32_t	interval;
	spl_t		s;

	s = splclock();
	clock_lock();

	interval = calend_set_adjustment(secs, microsecs);
	if (interval != 0) {
		calend_adjdeadline = mach_absolute_time() + interval;
		if (!timer_call_enter(&calend_adjcall, calend_adjdeadline, TIMER_CALL_SYS_CRITICAL))
			calend_adjactive++;
	}
	else
	if (timer_call_cancel(&calend_adjcall))
		calend_adjactive--;

	clock_unlock();
	splx(s);
}
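
/*
 * Illustrative arithmetic (not compiled): the constants above imply the
 * slew rates calend_set_adjustment() below selects.  calend_adjskew
 * (40 us) applied every calend_adjperiod (10 ms) corrects 4 ms of error
 * per second of real time; totals beyond calend_adjbig (1 s) slew at 10x
 * that rate.  So a +100 ms adjtime() correction takes 100 ms / 40 us =
 * 2500 periods, roughly 25 seconds.  The helper below is a first-order
 * estimate with an invented name; the real code re-caps the delta as the
 * remaining total shrinks.
 */
#if 0
static uint64_t
example_periods_to_finish(int64_t total_ns)
{
	int64_t	mag = (total_ns < 0)? -total_ns: total_ns;
	int64_t	delta = calend_adjskew;

	if (mag > (int64_t)calend_adjbig)
		delta *= 10;

	return ((mag + delta - 1) / delta);	/* periods, rounded up */
}
#endif
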
static uint32_t
calend_set_adjustment(
	long		*secs,
	int		*microsecs)
{
	uint64_t	now, t64;
	int64_t		total, ototal;
	uint32_t	interval = 0;

	/*
	 * Compute the total adjustment time in nanoseconds.
	 */
	total = ((int64_t)*secs * (int64_t)NSEC_PER_SEC) + (*microsecs * (int64_t)NSEC_PER_USEC);

	/*
	 * Disable commpage gettimeofday().
	 */
	commpage_disable_timestamp();

	/*
	 * Get current absolute time.
	 */
	now = mach_absolute_time();

	/*
	 * Save the old adjustment total for later return.
	 */
	ototal = calend_adjtotal;

	/*
	 * Is a new correction specified?
	 */
	if (total != 0) {
		/*
		 * Set delta to the standard, small, adjustment skew.
		 */
		int32_t		delta = calend_adjskew;

		if (total > 0) {
			/*
			 * Positive adjustment. If greater than the preset 'big'
			 * threshold, slew at a faster rate, capping if necessary.
			 */
			if (total > (int64_t) calend_adjbig)
				delta *= 10;
			if (delta > total)
				delta = (int32_t)total;

			/*
			 * Convert the delta back from ns to absolute time and store in adjoffset.
			 */
			nanoseconds_to_absolutetime((uint64_t)delta, &t64);
			clock_calend.adjoffset = (uint32_t)t64;
		}
		else {
			/*
			 * Negative adjustment; therefore, negate the delta. If
			 * greater than the preset 'big' threshold, slew at a faster
			 * rate, capping if necessary.
			 */
			if (total < (int64_t) -calend_adjbig)
				delta *= 10;
			delta = -delta;
			if (delta < total)
				delta = (int32_t)total;

			/*
			 * Save the current absolute time. Subsequent time operations occurring
			 * during this negative correction can make use of this value to ensure
			 * that time increases monotonically.
			 */
			clock_calend.adjstart = now;

			/*
			 * Convert the delta back from ns to absolute time and store in adjoffset.
			 */
			nanoseconds_to_absolutetime((uint64_t)-delta, &t64);
			clock_calend.adjoffset = (uint32_t)t64;
		}

		/*
		 * Store the total adjustment time in ns.
		 */
		calend_adjtotal = total;

		/*
		 * Store the delta for this adjustment period in ns.
		 */
		clock_calend.adjdelta = delta;

		/*
		 * Set the interval in absolute time for later return.
		 */
		interval = calend_adjinterval;
	}
	else {
		/*
		 * No change; clear any prior adjustment.
		 */
		calend_adjtotal = clock_calend.adjdelta = 0;
	}

	/*
	 * If a prior correction was in progress, return the
	 * remaining uncorrected time from it.
	 */
	if (ototal != 0) {
		*secs = (long)(ototal / (long)NSEC_PER_SEC);
		*microsecs = (int)((ototal % (int)NSEC_PER_SEC) / (int)NSEC_PER_USEC);
	}
	else
		*secs = *microsecs = 0;

#if CONFIG_DTRACE
	clock_track_calend_nowait();
#endif

	return (interval);
}

static void
calend_adjust_call(void)
{
	uint32_t	interval;
	spl_t		s;

	s = splclock();
	clock_lock();

	if (--calend_adjactive == 0) {
		interval = calend_adjust();
		if (interval != 0) {
			clock_deadline_for_periodic_event(interval, mach_absolute_time(), &calend_adjdeadline);

			if (!timer_call_enter(&calend_adjcall, calend_adjdeadline, TIMER_CALL_SYS_CRITICAL))
				calend_adjactive++;
		}
	}

	clock_unlock();
	splx(s);
}

static uint32_t
calend_adjust(void)
{
	uint64_t	now, t64;
	int32_t		delta;
	uint32_t	interval = 0;

	commpage_disable_timestamp();

	now = mach_absolute_time();

	delta = clock_calend.adjdelta;

	if (delta > 0) {
		clock_calend.offset += clock_calend.adjoffset;

		calend_adjtotal -= delta;
		if (delta > calend_adjtotal) {
			clock_calend.adjdelta = delta = (int32_t)calend_adjtotal;

			nanoseconds_to_absolutetime((uint64_t)delta, &t64);
			clock_calend.adjoffset = (uint32_t)t64;
		}
	}
	else
	if (delta < 0) {
		clock_calend.offset -= clock_calend.adjoffset;

		calend_adjtotal -= delta;
		if (delta < calend_adjtotal) {
			clock_calend.adjdelta = delta = (int32_t)calend_adjtotal;

			nanoseconds_to_absolutetime((uint64_t)-delta, &t64);
			clock_calend.adjoffset = (uint32_t)t64;
		}

		if (clock_calend.adjdelta != 0)
			clock_calend.adjstart = now;
	}

	if (clock_calend.adjdelta != 0)
		interval = calend_adjinterval;

#if CONFIG_DTRACE
	clock_track_calend_nowait();
#endif

	return (interval);
}

/*
 *	clock_wakeup_calendar:
 *
 *	Interface to power management, used
 *	to initiate the reset of the calendar
 *	on wake from sleep event.
 */
void
clock_wakeup_calendar(void)
{
	thread_call_enter(&calend_wakecall);
}

/*
 *	Wait / delay routines.
 */
static void
mach_wait_until_continue(
	__unused void	*parameter,
	wait_result_t	wresult)
{
	thread_syscall_return((wresult == THREAD_INTERRUPTED)? KERN_ABORTED: KERN_SUCCESS);
	/*NOTREACHED*/
}

/*
 * mach_wait_until_trap: Suspend execution of calling thread until the specified time has passed
 *
 * Parameters:    args->deadline		Absolute time to wait until, in
 *						mach_absolute_time() units (a deadline,
 *						not a duration)
 *
 * Returns:        0			Success
 *                !0			Not success
 *
 */
kern_return_t
mach_wait_until_trap(
	struct mach_wait_until_trap_args	*args)
{
	uint64_t	deadline = args->deadline;
	wait_result_t	wresult;

	wresult = assert_wait_deadline_with_leeway((event_t)mach_wait_until_trap, THREAD_ABORTSAFE,
						   TIMEOUT_URGENCY_USER_NORMAL, deadline, 0);
	if (wresult == THREAD_WAITING)
		wresult = thread_block(mach_wait_until_continue);

	return ((wresult == THREAD_INTERRUPTED)? KERN_ABORTED: KERN_SUCCESS);
}

void
clock_delay_until(
	uint64_t	deadline)
{
	uint64_t	now = mach_absolute_time();

	if (now >= deadline)
		return;

	_clock_delay_until_deadline(deadline - now, deadline);
}
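
/*
 * Illustrative sketch (user-space, not compiled here): mach_wait_until()
 * is the user-level entry that lands in mach_wait_until_trap() above.
 * Note the argument is an absolute deadline, not a duration.  The helper
 * name is invented for the example.
 */
#if 0
#include <mach/mach_time.h>

static void
example_sleep_ms(uint64_t ms)
{
	mach_timebase_info_data_t	tb;
	uint64_t			delta;

	mach_timebase_info(&tb);
	delta = ms * 1000000ULL * tb.denom / tb.numer;	/* ms -> abs units */

	(void)mach_wait_until(mach_absolute_time() + delta);
}
#endif
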
/*
 * Preserve the original precise interval that the client
 * requested for comparison to the spin threshold.
 */
void
_clock_delay_until_deadline(
	uint64_t	interval,
	uint64_t	deadline)
{

	if (interval == 0)
		return;

	if (	ml_delay_should_spin(interval)		||
		get_preemption_level() != 0		||
		ml_get_interrupts_enabled() == FALSE	) {
		machine_delay_until(interval, deadline);
	} else {
		assert_wait_deadline((event_t)clock_delay_until, THREAD_UNINT, deadline);

		thread_block(THREAD_CONTINUE_NULL);
	}
}


void
delay_for_interval(
	uint32_t	interval,
	uint32_t	scale_factor)
{
	uint64_t	abstime;

	clock_interval_to_absolutetime_interval(interval, scale_factor, &abstime);

	_clock_delay_until_deadline(abstime, mach_absolute_time() + abstime);
}

void
delay(
	int	usec)
{
	delay_for_interval((usec < 0)? -usec: usec, NSEC_PER_USEC);
}

/*
 *	Miscellaneous routines.
 */
void
clock_interval_to_deadline(
	uint32_t	interval,
	uint32_t	scale_factor,
	uint64_t	*result)
{
	uint64_t	abstime;

	clock_interval_to_absolutetime_interval(interval, scale_factor, &abstime);

	*result = mach_absolute_time() + abstime;
}

void
clock_absolutetime_interval_to_deadline(
	uint64_t	abstime,
	uint64_t	*result)
{
	*result = mach_absolute_time() + abstime;
}

void
clock_get_uptime(
	uint64_t	*result)
{
	*result = mach_absolute_time();
}

void
clock_deadline_for_periodic_event(
	uint64_t	interval,
	uint64_t	abstime,
	uint64_t	*deadline)
{
	assert(interval != 0);

	*deadline += interval;

	if (*deadline <= abstime) {
		*deadline = abstime + interval;
		abstime = mach_absolute_time();

		if (*deadline <= abstime)
			*deadline = abstime + interval;
	}
}
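
/*
 * Illustrative sketch (not compiled): the intended use of
 * clock_deadline_for_periodic_event() above.  Advancing the deadline by a
 * fixed interval keeps a periodic timer phase-aligned, with no drift from
 * handler latency, while the catch-up path prevents a burst of
 * back-to-back expirations after a long stall.  The function name is
 * invented for the example.
 */
#if 0
static void
example_periodic_rearm(timer_call_t call, uint64_t interval, uint64_t *deadline)
{
	clock_deadline_for_periodic_event(interval, mach_absolute_time(), deadline);
	(void)timer_call_enter(call, *deadline, TIMER_CALL_SYS_CRITICAL);
}
#endif
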
#if	CONFIG_DTRACE

/*
 * clock_get_calendar_nanotime_nowait
 *
 * Description:	Non-blocking version of clock_get_calendar_nanotime()
 *
 * Notes:	This function operates by separately tracking calendar time
 *		updates using a two element structure to copy the calendar
 *		state, which may be asynchronously modified.  It utilizes
 *		barrier instructions in the tracking process and in the local
 *		stable snapshot process in order to ensure that a consistent
 *		snapshot is used to perform the calculation.
 */
void
clock_get_calendar_nanotime_nowait(
	clock_sec_t		*secs,
	clock_nsec_t		*nanosecs)
{
	int i = 0;
	uint64_t	now;
	struct unlocked_clock_calend stable;

	for (;;) {
		stable = flipflop[i];		/* take snapshot */

		/*
		 * Use a barrier instruction to ensure atomicity.  We AND
		 * off the "in progress" bit to get the current generation
		 * count.
		 */
		(void)hw_atomic_and(&stable.gen, ~(uint32_t)1);

		/*
		 * If an update _is_ in progress, the generation count will be
		 * off by one, if it _was_ in progress, it will be off by two,
		 * and if we caught it at a good time, it will be equal (and
		 * our snapshot is therefore stable).
		 */
		if (flipflop[i].gen == stable.gen)
			break;

		/* Switch to the other element of the flipflop, and try again. */
		i ^= 1;
	}

	now = mach_absolute_time();

	if (stable.calend.adjdelta < 0) {
		uint32_t	t32;

		if (now > stable.calend.adjstart) {
			t32 = (uint32_t)(now - stable.calend.adjstart);

			if (t32 > stable.calend.adjoffset)
				now -= stable.calend.adjoffset;
			else
				now = stable.calend.adjstart;
		}
	}

	now += stable.calend.offset;

	absolutetime_to_microtime(now, secs, nanosecs);
	*nanosecs *= NSEC_PER_USEC;

	*secs += (clock_sec_t)stable.calend.epoch;
}

static void
clock_track_calend_nowait(void)
{
	int i;

	for (i = 0; i < 2; i++) {
		struct clock_calend tmp = clock_calend;

		/*
		 * Set the low bit of the generation count; since we use a
		 * barrier instruction to do this, we are guaranteed that this
		 * will flag an update in progress to an async caller trying
		 * to examine the contents.
		 */
		(void)hw_atomic_or(&flipflop[i].gen, 1);

		flipflop[i].calend = tmp;

		/*
		 * Increment the generation count to clear the low bit to
		 * signal completion.  If a caller compares the generation
		 * count after taking a copy while in progress, the count
		 * will be off by two.
		 */
		(void)hw_atomic_add(&flipflop[i].gen, 1);
	}
}

#endif	/* CONFIG_DTRACE */
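
/*
 * Illustrative sketch (not compiled): the generation-count handshake used
 * by the flipflop above, reduced to its core.  The writer makes gen odd
 * while copying and even (and larger) when done, so a reader's snapshot
 * is stable iff the live gen still equals the snapshot's gen with the
 * in-progress bit masked off.  The function name is invented for the
 * example.
 */
#if 0
static boolean_t
example_snapshot_is_stable(struct unlocked_clock_calend *live,
			   struct unlocked_clock_calend *snap)
{
	(void)hw_atomic_and(&snap->gen, ~(uint32_t)1);	/* mask the busy bit */

	return (live->gen == snap->gen);
}
#endif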