/*
 *  linux/kernel/hrtimer.c
 *
 *  Copyright(C) 2005-2006, Thomas Gleixner <tglx@linutronix.de>
 *  Copyright(C) 2005-2007, Red Hat, Inc., Ingo Molnar
 *  Copyright(C) 2006-2007  Timesys Corp., Thomas Gleixner
 *
 *  High-resolution kernel timers
 *
 *  In contrast to the low-resolution timeout API implemented in
 *  kernel/timer.c, hrtimers provide finer resolution and accuracy
 *  depending on system configuration and capabilities.
 *
 *  These timers are currently used for:
 *   - itimers
 *   - POSIX timers
 *   - nanosleep
 *   - precise in-kernel timing
 *
 *  Started by: Thomas Gleixner and Ingo Molnar
 *
 *  Credits:
 *	based on kernel/timer.c
 *
 *	Help, testing, suggestions, bugfixes, improvements were
 *	provided by:
 *
 *	George Anzinger, Andrew Morton, Steven Rostedt, Roman Zippel
 *	et al.
 *
 *  For licensing details see kernel-base/COPYING
 */

#include <linux/cpu.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/hrtimer.h>
#include <linux/notifier.h>
#include <linux/syscalls.h>
#include <linux/kallsyms.h>
#include <linux/interrupt.h>
#include <linux/tick.h>
#include <linux/seq_file.h>
#include <linux/err.h>
#include <linux/debugobjects.h>
#include <linux/sched.h>
#include <linux/timer.h>

#include <asm/uaccess.h>

#include <trace/events/timer.h>

/*
 * The timer bases:
 *
 * Note: If we want to add new timer bases, we have to skip the two
 * clock ids captured by the cpu-timers. We do this by holding empty
 * entries rather than doing math adjustment of the clock ids.
 * This ensures that we capture erroneous accesses to these clock ids
 * rather than moving them into the range of valid clock id's.
 */
DEFINE_PER_CPU(struct hrtimer_cpu_base, hrtimer_bases) =
{

	.clock_base =
	{
		{
			.index = CLOCK_REALTIME,
			.get_time = &ktime_get_real,
			.resolution = KTIME_LOW_RES,
		},
		{
			.index = CLOCK_MONOTONIC,
			.get_time = &ktime_get,
			.resolution = KTIME_LOW_RES,
		},
	}
};

/*
 * Get the coarse grained time at the softirq based on xtime and
 * wall_to_monotonic.
 */
static void hrtimer_get_softirq_time(struct hrtimer_cpu_base *base)
{
	ktime_t xtim, tomono;
	struct timespec xts, tom;
	unsigned long seq;

	do {
		seq = read_seqbegin(&xtime_lock);
		xts = __current_kernel_time();
		tom = __get_wall_to_monotonic();
	} while (read_seqretry(&xtime_lock, seq));

	xtim = timespec_to_ktime(xts);
	tomono = timespec_to_ktime(tom);
	base->clock_base[CLOCK_REALTIME].softirq_time = xtim;
	base->clock_base[CLOCK_MONOTONIC].softirq_time =
		ktime_add(xtim, tomono);
}

/*
 * Functions and macros which are different for UP/SMP systems are kept in a
 * single place
 */
#ifdef CONFIG_SMP

/*
 * We are using hashed locking: holding per_cpu(hrtimer_bases)[n].lock
 * means that all timers which are tied to this base via timer->base are
 * locked, and the base itself is locked too.
 *
 * So __run_timers/migrate_timers can safely modify all timers which could
 * be found on the lists/queues.
 *
 * When the timer's base is locked, and the timer removed from list, it is
 * possible to set timer->base = NULL and drop the lock: the timer remains
 * locked.
 */
static
struct hrtimer_clock_base *lock_hrtimer_base(const struct hrtimer *timer,
					     unsigned long *flags)
{
	struct hrtimer_clock_base *base;

	for (;;) {
		base = timer->base;
		if (likely(base != NULL)) {
			raw_spin_lock_irqsave(&base->cpu_base->lock, *flags);
			if (likely(base == timer->base))
				return base;
			/* The timer has migrated to another CPU: */
			raw_spin_unlock_irqrestore(&base->cpu_base->lock, *flags);
		}
		cpu_relax();
	}
}


/*
 * Get the preferred target CPU for NOHZ
 */
static int hrtimer_get_target(int this_cpu, int pinned)
{
#ifdef CONFIG_NO_HZ
	if (!pinned && get_sysctl_timer_migration() && idle_cpu(this_cpu))
		return get_nohz_timer_target();
#endif
	return this_cpu;
}

/*
 * With HIGHRES=y we do not migrate the timer when it is expiring
 * before the next event on the target cpu because we cannot reprogram
 * the target cpu hardware and we would cause it to fire late.
 *
 * Called with cpu_base->lock of target cpu held.
 */
static int
hrtimer_check_target(struct hrtimer *timer, struct hrtimer_clock_base *new_base)
{
#ifdef CONFIG_HIGH_RES_TIMERS
	ktime_t expires;

	if (!new_base->cpu_base->hres_active)
		return 0;

	expires = ktime_sub(hrtimer_get_expires(timer), new_base->offset);
	return expires.tv64 <= new_base->cpu_base->expires_next.tv64;
#else
	return 0;
#endif
}

/*
 * Switch the timer base to the current CPU when possible.
 */
static inline struct hrtimer_clock_base *
switch_hrtimer_base(struct hrtimer *timer, struct hrtimer_clock_base *base,
		    int pinned)
{
	struct hrtimer_clock_base *new_base;
	struct hrtimer_cpu_base *new_cpu_base;
	int this_cpu = smp_processor_id();
	int cpu = hrtimer_get_target(this_cpu, pinned);

again:
	new_cpu_base = &per_cpu(hrtimer_bases, cpu);
	new_base = &new_cpu_base->clock_base[base->index];

	if (base != new_base) {
		/*
		 * We are trying to move timer to new_base.
		 * However we can't change timer's base while it is running,
		 * so we keep it on the same CPU. No hassle vs. reprogramming
		 * the event source in the high resolution case. The softirq
		 * code will take care of this when the timer function has
		 * completed. There is no conflict as we hold the lock until
		 * the timer is enqueued.
		 */
		if (unlikely(hrtimer_callback_running(timer)))
			return base;

		/* See the comment in lock_timer_base() */
		timer->base = NULL;
		raw_spin_unlock(&base->cpu_base->lock);
		raw_spin_lock(&new_base->cpu_base->lock);

		if (cpu != this_cpu && hrtimer_check_target(timer, new_base)) {
			cpu = this_cpu;
			raw_spin_unlock(&new_base->cpu_base->lock);
			raw_spin_lock(&base->cpu_base->lock);
			timer->base = base;
			goto again;
		}
		timer->base = new_base;
	}
	return new_base;
}

#else /* CONFIG_SMP */

static inline struct hrtimer_clock_base *
lock_hrtimer_base(const struct hrtimer *timer, unsigned long *flags)
{
	struct hrtimer_clock_base *base = timer->base;

	raw_spin_lock_irqsave(&base->cpu_base->lock, *flags);

	return base;
}

# define switch_hrtimer_base(t, b, p)	(b)

#endif /* !CONFIG_SMP */

/*
 * Functions for the union type storage format of ktime_t which are
 * too large for inlining:
 */
#if BITS_PER_LONG < 64
# ifndef CONFIG_KTIME_SCALAR
/**
 * ktime_add_ns - Add a scalar nanoseconds value to a ktime_t variable
 * @kt:		addend
 * @nsec:	the scalar nsec value to add
 *
 * Returns the sum of kt and nsec in ktime_t format
 */
ktime_t ktime_add_ns(const ktime_t kt, u64 nsec)
{
	ktime_t tmp;

	if (likely(nsec < NSEC_PER_SEC)) {
		tmp.tv64 = nsec;
	} else {
		unsigned long rem = do_div(nsec, NSEC_PER_SEC);

		tmp = ktime_set((long)nsec, rem);
	}

	return ktime_add(kt, tmp);
}

EXPORT_SYMBOL_GPL(ktime_add_ns);

/**
 * ktime_sub_ns - Subtract a scalar nanoseconds value from a ktime_t variable
 * @kt:		minuend
 * @nsec:	the scalar nsec value to subtract
 *
 * Returns the subtraction of @nsec from @kt in ktime_t format
 */
ktime_t ktime_sub_ns(const ktime_t kt, u64 nsec)
{
	ktime_t tmp;

	if (likely(nsec < NSEC_PER_SEC)) {
		tmp.tv64 = nsec;
	} else {
		unsigned long rem = do_div(nsec, NSEC_PER_SEC);

		tmp = ktime_set((long)nsec, rem);
	}

	return ktime_sub(kt, tmp);
}

EXPORT_SYMBOL_GPL(ktime_sub_ns);
# endif /* !CONFIG_KTIME_SCALAR */

/*
 * Divide a ktime value by a nanosecond value
 */
u64 ktime_divns(const ktime_t kt, s64 div)
{
	u64 dclc;
	int sft = 0;

	dclc = ktime_to_ns(kt);
	/* Make sure the divisor is less than 2^32: */
	while (div >> 32) {
		sft++;
		div >>= 1;
	}
	dclc >>= sft;
	do_div(dclc, (unsigned long) div);

	return dclc;
}
#endif /* BITS_PER_LONG < 64 */

/*
 * Add two ktime values and do a safety check for overflow:
 */
ktime_t ktime_add_safe(const ktime_t lhs, const ktime_t rhs)
{
	ktime_t res = ktime_add(lhs, rhs);

	/*
	 * We use KTIME_SEC_MAX here, the maximum timeout which we can
	 * return to user space in a timespec:
	 */
	if (res.tv64 < 0 || res.tv64 < lhs.tv64 || res.tv64 < rhs.tv64)
		res = ktime_set(KTIME_SEC_MAX, 0);

	return res;
}

EXPORT_SYMBOL_GPL(ktime_add_safe);

#ifdef CONFIG_DEBUG_OBJECTS_TIMERS

static struct debug_obj_descr hrtimer_debug_descr;

/*
 * fixup_init is called when:
 * - an active object is initialized
 */
static int hrtimer_fixup_init(void *addr, enum debug_obj_state state)
{
	struct hrtimer *timer = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		hrtimer_cancel(timer);
		debug_object_init(timer, &hrtimer_debug_descr);
		return 1;
	default:
		return 0;
	}
}

/*
 * fixup_activate is called when:
 * - an active object is activated
 * - an unknown object is activated (might be a statically initialized object)
 */
static int hrtimer_fixup_activate(void *addr, enum debug_obj_state state)
{
	switch (state) {

	case ODEBUG_STATE_NOTAVAILABLE:
		WARN_ON_ONCE(1);
		return 0;

	case ODEBUG_STATE_ACTIVE:
		WARN_ON(1);

	default:
		return 0;
	}
}

/*
 * fixup_free is called when:
 * - an active object is freed
 */
static int hrtimer_fixup_free(void *addr, enum debug_obj_state state)
{
	struct hrtimer *timer = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		hrtimer_cancel(timer);
		debug_object_free(timer, &hrtimer_debug_descr);
		return 1;
	default:
		return 0;
	}
}

static struct debug_obj_descr hrtimer_debug_descr = {
	.name		= "hrtimer",
	.fixup_init	= hrtimer_fixup_init,
	.fixup_activate	= hrtimer_fixup_activate,
	.fixup_free	= hrtimer_fixup_free,
};

static inline void debug_hrtimer_init(struct hrtimer *timer)
{
	debug_object_init(timer, &hrtimer_debug_descr);
}

static inline void debug_hrtimer_activate(struct hrtimer *timer)
{
	debug_object_activate(timer, &hrtimer_debug_descr);
}

static inline void debug_hrtimer_deactivate(struct hrtimer *timer)
{
	debug_object_deactivate(timer, &hrtimer_debug_descr);
}

static inline void debug_hrtimer_free(struct hrtimer *timer)
{
	debug_object_free(timer, &hrtimer_debug_descr);
}

static void __hrtimer_init(struct hrtimer *timer, clockid_t clock_id,
			   enum hrtimer_mode mode);

void hrtimer_init_on_stack(struct hrtimer *timer, clockid_t clock_id,
			   enum hrtimer_mode mode)
{
	debug_object_init_on_stack(timer, &hrtimer_debug_descr);
	__hrtimer_init(timer, clock_id, mode);
}
EXPORT_SYMBOL_GPL(hrtimer_init_on_stack);

void destroy_hrtimer_on_stack(struct hrtimer *timer)
{
	debug_object_free(timer, &hrtimer_debug_descr);
}

#else
static inline void debug_hrtimer_init(struct hrtimer *timer) { }
static inline void debug_hrtimer_activate(struct hrtimer *timer) { }
static inline void debug_hrtimer_deactivate(struct hrtimer *timer) { }
#endif

static inline void
debug_init(struct hrtimer *timer, clockid_t clockid,
	   enum hrtimer_mode mode)
{
	debug_hrtimer_init(timer);
	trace_hrtimer_init(timer, clockid, mode);
}

static inline void debug_activate(struct hrtimer *timer)
{
	debug_hrtimer_activate(timer);
	trace_hrtimer_start(timer);
}

static inline void debug_deactivate(struct hrtimer *timer)
{
	debug_hrtimer_deactivate(timer);
	trace_hrtimer_cancel(timer);
}
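
/*
 * Note: hrtimers that live on the stack must be set up with
 * hrtimer_init_on_stack() and torn down with destroy_hrtimer_on_stack(),
 * so that CONFIG_DEBUG_OBJECTS_TIMERS can track the object before its
 * storage vanishes. Plain hrtimer_init() is meant for timers in static
 * or heap storage.
 */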

/* High resolution timer related functions */
#ifdef CONFIG_HIGH_RES_TIMERS

/*
 * High resolution timer enabled ?
 */
static int hrtimer_hres_enabled __read_mostly = 1;

/*
 * Enable / Disable high resolution mode
 */
static int __init setup_hrtimer_hres(char *str)
{
	if (!strcmp(str, "off"))
		hrtimer_hres_enabled = 0;
	else if (!strcmp(str, "on"))
		hrtimer_hres_enabled = 1;
	else
		return 0;
	return 1;
}

__setup("highres=", setup_hrtimer_hres);

/*
 * hrtimer_high_res_enabled - query, if the highres mode is enabled
 */
static inline int hrtimer_is_hres_enabled(void)
{
	return hrtimer_hres_enabled;
}

/*
 * Is the high resolution mode active ?
 */
static inline int hrtimer_hres_active(void)
{
	return __get_cpu_var(hrtimer_bases).hres_active;
}

/*
 * Reprogram the event source with checking both queues for the
 * next event
 * Called with interrupts disabled and base->lock held
 */
static void
hrtimer_force_reprogram(struct hrtimer_cpu_base *cpu_base, int skip_equal)
{
	int i;
	struct hrtimer_clock_base *base = cpu_base->clock_base;
	ktime_t expires, expires_next;

	expires_next.tv64 = KTIME_MAX;

	for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++, base++) {
		struct hrtimer *timer;

		if (!base->first)
			continue;
		timer = rb_entry(base->first, struct hrtimer, node);
		expires = ktime_sub(hrtimer_get_expires(timer), base->offset);
		/*
		 * clock_was_set() has changed base->offset so the
		 * result might be negative. Fix it up to prevent a
		 * false positive in clockevents_program_event()
		 */
		if (expires.tv64 < 0)
			expires.tv64 = 0;
		if (expires.tv64 < expires_next.tv64)
			expires_next = expires;
	}

	if (skip_equal && expires_next.tv64 == cpu_base->expires_next.tv64)
		return;

	cpu_base->expires_next.tv64 = expires_next.tv64;

	if (cpu_base->expires_next.tv64 != KTIME_MAX)
		tick_program_event(cpu_base->expires_next, 1);
}

/*
 * Shared reprogramming for clock_realtime and clock_monotonic
 *
 * When a timer is enqueued and expires earlier than the already enqueued
 * timers, we have to check, whether it expires earlier than the timer for
 * which the clock event device was armed.
 *
 * Called with interrupts disabled and base->cpu_base.lock held
 */
static int hrtimer_reprogram(struct hrtimer *timer,
			     struct hrtimer_clock_base *base)
{
	struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);
	ktime_t expires = ktime_sub(hrtimer_get_expires(timer), base->offset);
	int res;

	WARN_ON_ONCE(hrtimer_get_expires_tv64(timer) < 0);

	/*
	 * When the callback is running, we do not reprogram the clock event
	 * device. The timer callback is either running on a different CPU or
	 * the callback is executed in the hrtimer_interrupt context. The
	 * reprogramming is handled either by the softirq, which called the
	 * callback or at the end of the hrtimer_interrupt.
	 */
	if (hrtimer_callback_running(timer))
		return 0;

	/*
	 * CLOCK_REALTIME timer might be requested with an absolute
	 * expiry time which is less than base->offset. Nothing wrong
	 * about that, just avoid calling into the tick code, which
	 * now has objections against negative expiry values.
	 */
	if (expires.tv64 < 0)
		return -ETIME;

	if (expires.tv64 >= cpu_base->expires_next.tv64)
		return 0;

	/*
	 * If a hang was detected in the last timer interrupt then we
	 * do not schedule a timer which is earlier than the expiry
	 * which we enforced in the hang detection. We want the system
	 * to make progress.
	 */
	if (cpu_base->hang_detected)
		return 0;

	/*
	 * Clockevents returns -ETIME, when the event was in the past.
	 */
	res = tick_program_event(expires, 0);
	if (!IS_ERR_VALUE(res))
		cpu_base->expires_next = expires;
	return res;
}


/*
 * Retrigger next event is called after clock was set
 *
 * Called with interrupts disabled via on_each_cpu()
 */
static void retrigger_next_event(void *arg)
{
	struct hrtimer_cpu_base *base;
	struct timespec realtime_offset, wtm;
	unsigned long seq;

	if (!hrtimer_hres_active())
		return;

	do {
		seq = read_seqbegin(&xtime_lock);
		wtm = __get_wall_to_monotonic();
	} while (read_seqretry(&xtime_lock, seq));
	set_normalized_timespec(&realtime_offset, -wtm.tv_sec, -wtm.tv_nsec);

	base = &__get_cpu_var(hrtimer_bases);

	/* Adjust CLOCK_REALTIME offset */
	raw_spin_lock(&base->lock);
	base->clock_base[CLOCK_REALTIME].offset =
		timespec_to_ktime(realtime_offset);

	hrtimer_force_reprogram(base, 0);
	raw_spin_unlock(&base->lock);
}

/*
 * Clock realtime was set
 *
 * Change the offset of the realtime clock vs. the monotonic
 * clock.
 *
 * We might have to reprogram the high resolution timer interrupt. On
 * SMP we call the architecture specific code to retrigger _all_ high
 * resolution timer interrupts. On UP we just disable interrupts and
 * call the high resolution interrupt code.
 */
void clock_was_set(void)
{
	/* Retrigger the CPU local events everywhere */
	on_each_cpu(retrigger_next_event, NULL, 1);
}

/*
 * During resume we might have to reprogram the high resolution timer
 * interrupt (on the local CPU):
 */
void hres_timers_resume(void)
{
	WARN_ONCE(!irqs_disabled(),
		  KERN_INFO "hres_timers_resume() called with IRQs enabled!");

	retrigger_next_event(NULL);
}

/*
 * Initialize the high resolution related parts of cpu_base
 */
static inline void hrtimer_init_hres(struct hrtimer_cpu_base *base)
{
	base->expires_next.tv64 = KTIME_MAX;
	base->hres_active = 0;
}

/*
 * Initialize the high resolution related parts of a hrtimer
 */
static inline void hrtimer_init_timer_hres(struct hrtimer *timer)
{
}


/*
 * When High resolution timers are active, try to reprogram. Note, that in case
 * the state has HRTIMER_STATE_CALLBACK set, no reprogramming and no expiry
 * check happens. The timer gets enqueued into the rbtree. The reprogramming
 * and expiry check is done in the hrtimer_interrupt or in the softirq.
 */
static inline int hrtimer_enqueue_reprogram(struct hrtimer *timer,
					    struct hrtimer_clock_base *base,
					    int wakeup)
{
	if (base->cpu_base->hres_active && hrtimer_reprogram(timer, base)) {
		if (wakeup) {
			raw_spin_unlock(&base->cpu_base->lock);
			raise_softirq_irqoff(HRTIMER_SOFTIRQ);
			raw_spin_lock(&base->cpu_base->lock);
		} else
			__raise_softirq_irqoff(HRTIMER_SOFTIRQ);

		return 1;
	}

	return 0;
}

/*
 * Switch to high resolution mode
 */
static int hrtimer_switch_to_hres(void)
{
	int cpu = smp_processor_id();
	struct hrtimer_cpu_base *base = &per_cpu(hrtimer_bases, cpu);
	unsigned long flags;

	if (base->hres_active)
		return 1;

	local_irq_save(flags);

	if (tick_init_highres()) {
		local_irq_restore(flags);
		printk(KERN_WARNING "Could not switch to high resolution "
				    "mode on CPU %d\n", cpu);
		return 0;
	}
	base->hres_active = 1;
	base->clock_base[CLOCK_REALTIME].resolution = KTIME_HIGH_RES;
	base->clock_base[CLOCK_MONOTONIC].resolution = KTIME_HIGH_RES;

	tick_setup_sched_timer();

	/* "Retrigger" the interrupt to get things going */
	retrigger_next_event(NULL);
	local_irq_restore(flags);
	return 1;
}

#else

static inline int hrtimer_hres_active(void) { return 0; }
static inline int hrtimer_is_hres_enabled(void) { return 0; }
static inline int hrtimer_switch_to_hres(void) { return 0; }
static inline void
hrtimer_force_reprogram(struct hrtimer_cpu_base *base, int skip_equal) { }
static inline int hrtimer_enqueue_reprogram(struct hrtimer *timer,
					    struct hrtimer_clock_base *base,
					    int wakeup)
{
	return 0;
}
static inline void hrtimer_init_hres(struct hrtimer_cpu_base *base) { }
static inline void hrtimer_init_timer_hres(struct hrtimer *timer) { }

#endif /* CONFIG_HIGH_RES_TIMERS */

static inline void timer_stats_hrtimer_set_start_info(struct hrtimer *timer)
{
#ifdef CONFIG_TIMER_STATS
	if (timer->start_site)
		return;
	timer->start_site = __builtin_return_address(0);
	memcpy(timer->start_comm, current->comm, TASK_COMM_LEN);
	timer->start_pid = current->pid;
#endif
}

static inline void timer_stats_hrtimer_clear_start_info(struct hrtimer *timer)
{
#ifdef CONFIG_TIMER_STATS
	timer->start_site = NULL;
#endif
}

static inline void timer_stats_account_hrtimer(struct hrtimer *timer)
{
#ifdef CONFIG_TIMER_STATS
	if (likely(!timer_stats_active))
		return;
	timer_stats_update_stats(timer, timer->start_pid, timer->start_site,
				 timer->function, timer->start_comm, 0);
#endif
}

/*
 * Counterpart to lock_hrtimer_base above:
 */
static inline
void unlock_hrtimer_base(const struct hrtimer *timer, unsigned long *flags)
{
	raw_spin_unlock_irqrestore(&timer->base->cpu_base->lock, *flags);
}

/**
 * hrtimer_forward - forward the timer expiry
 * @timer:	hrtimer to forward
 * @now:	forward past this time
 * @interval:	the interval to forward
 *
 * Forward the timer expiry so it will expire in the future.
 * Returns the number of overruns.
 */
u64 hrtimer_forward(struct hrtimer *timer, ktime_t now, ktime_t interval)
{
	u64 orun = 1;
	ktime_t delta;

	delta = ktime_sub(now, hrtimer_get_expires(timer));

	if (delta.tv64 < 0)
		return 0;

	if (interval.tv64 < timer->base->resolution.tv64)
		interval.tv64 = timer->base->resolution.tv64;

	if (unlikely(delta.tv64 >= interval.tv64)) {
		s64 incr = ktime_to_ns(interval);

		orun = ktime_divns(delta, incr);
		hrtimer_add_expires_ns(timer, incr * orun);
		if (hrtimer_get_expires_tv64(timer) > now.tv64)
			return orun;
		/*
		 * This (and the ktime_add() below) is the
		 * correction for exact:
		 */
		orun++;
	}
	hrtimer_add_expires(timer, interval);

	return orun;
}
EXPORT_SYMBOL_GPL(hrtimer_forward);
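
/*
 * Illustrative sketch (not used by this file): a periodic timer callback
 * typically advances its own expiry with hrtimer_forward_now() and asks to
 * be restarted. The callback name and PERIOD_NS below are placeholders.
 *
 *	static enum hrtimer_restart my_periodic_cb(struct hrtimer *t)
 *	{
 *		hrtimer_forward_now(t, ktime_set(0, PERIOD_NS));
 *		return HRTIMER_RESTART;
 *	}
 */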

/*
 * enqueue_hrtimer - internal function to (re)start a timer
 *
 * The timer is inserted in expiry order. Insertion into the
 * red black tree is O(log(n)). Must hold the base lock.
 *
 * Returns 1 when the new timer is the leftmost timer in the tree.
 */
static int enqueue_hrtimer(struct hrtimer *timer,
			   struct hrtimer_clock_base *base)
{
	struct rb_node **link = &base->active.rb_node;
	struct rb_node *parent = NULL;
	struct hrtimer *entry;
	int leftmost = 1;

	debug_activate(timer);

	/*
	 * Find the right place in the rbtree:
	 */
	while (*link) {
		parent = *link;
		entry = rb_entry(parent, struct hrtimer, node);
		/*
		 * We don't care about collisions. Nodes with
		 * the same expiry time stay together.
		 */
		if (hrtimer_get_expires_tv64(timer) <
				hrtimer_get_expires_tv64(entry)) {
			link = &(*link)->rb_left;
		} else {
			link = &(*link)->rb_right;
			leftmost = 0;
		}
	}

	/*
	 * Insert the timer to the rbtree and check whether it
	 * replaces the first pending timer
	 */
	if (leftmost)
		base->first = &timer->node;

	rb_link_node(&timer->node, parent, link);
	rb_insert_color(&timer->node, &base->active);
	/*
	 * HRTIMER_STATE_ENQUEUED is or'ed to the current state to preserve the
	 * state of a possibly running callback.
	 */
	timer->state |= HRTIMER_STATE_ENQUEUED;

	return leftmost;
}

/*
 * __remove_hrtimer - internal function to remove a timer
 *
 * Caller must hold the base lock.
 *
 * High resolution timer mode reprograms the clock event device when the
 * timer is the one which expires next. The caller can disable this by setting
 * reprogram to zero. This is useful, when the context does a reprogramming
 * anyway (e.g. timer interrupt)
 */
static void __remove_hrtimer(struct hrtimer *timer,
			     struct hrtimer_clock_base *base,
			     unsigned long newstate, int reprogram)
{
	if (!(timer->state & HRTIMER_STATE_ENQUEUED))
		goto out;

	/*
	 * Remove the timer from the rbtree and replace the first
	 * entry pointer if necessary.
	 */
	if (base->first == &timer->node) {
		base->first = rb_next(&timer->node);
#ifdef CONFIG_HIGH_RES_TIMERS
		/* Reprogram the clock event device, if enabled */
		if (reprogram && hrtimer_hres_active()) {
			ktime_t expires;

			expires = ktime_sub(hrtimer_get_expires(timer),
					    base->offset);
			if (base->cpu_base->expires_next.tv64 == expires.tv64)
				hrtimer_force_reprogram(base->cpu_base, 1);
		}
#endif
	}
	rb_erase(&timer->node, &base->active);
out:
	timer->state = newstate;
}

/*
 * remove hrtimer, called with base lock held
 */
static inline int
remove_hrtimer(struct hrtimer *timer, struct hrtimer_clock_base *base)
{
	if (hrtimer_is_queued(timer)) {
		unsigned long state;
		int reprogram;

		/*
		 * Remove the timer and force reprogramming when high
		 * resolution mode is active and the timer is on the current
		 * CPU. If we remove a timer on another CPU, reprogramming is
		 * skipped. The interrupt event on this CPU is fired and
		 * reprogramming happens in the interrupt handler. This is a
		 * rare case and less expensive than a smp call.
		 */
		debug_deactivate(timer);
		timer_stats_hrtimer_clear_start_info(timer);
		reprogram = base->cpu_base == &__get_cpu_var(hrtimer_bases);
		/*
		 * We must preserve the CALLBACK state flag here,
		 * otherwise we could move the timer base in
		 * switch_hrtimer_base.
		 */
		state = timer->state & HRTIMER_STATE_CALLBACK;
		__remove_hrtimer(timer, base, state, reprogram);
		return 1;
	}
	return 0;
}

int __hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
		unsigned long delta_ns, const enum hrtimer_mode mode,
		int wakeup)
{
	struct hrtimer_clock_base *base, *new_base;
	unsigned long flags;
	int ret, leftmost;

	base = lock_hrtimer_base(timer, &flags);

	/* Remove an active timer from the queue: */
	ret = remove_hrtimer(timer, base);

	/* Switch the timer base, if necessary: */
	new_base = switch_hrtimer_base(timer, base, mode & HRTIMER_MODE_PINNED);

	if (mode & HRTIMER_MODE_REL) {
		tim = ktime_add_safe(tim, new_base->get_time());
		/*
		 * CONFIG_TIME_LOW_RES is a temporary way for architectures
		 * to signal that they simply return xtime in
		 * do_gettimeoffset(). In this case we want to round up by
		 * resolution when starting a relative timer, to avoid short
		 * timeouts. This will go away with the GTOD framework.
		 */
#ifdef CONFIG_TIME_LOW_RES
		tim = ktime_add_safe(tim, base->resolution);
#endif
	}

	hrtimer_set_expires_range_ns(timer, tim, delta_ns);

	timer_stats_hrtimer_set_start_info(timer);

	leftmost = enqueue_hrtimer(timer, new_base);

	if (leftmost && new_base->cpu_base == &__get_cpu_var(hrtimer_bases))
		hrtimer_enqueue_reprogram(timer, new_base, wakeup);

	unlock_hrtimer_base(timer, &flags);

	return ret;
}

/**
 * hrtimer_start_range_ns - (re)start an hrtimer on the current CPU
 * @timer:	the timer to be added
 * @tim:	expiry time
 * @delta_ns:	"slack" range for the timer
 * @mode:	expiry mode: absolute (HRTIMER_MODE_ABS) or relative (HRTIMER_MODE_REL)
 *
 * Returns:
 *  0 on success
 *  1 when the timer was active
 */
int hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
		unsigned long delta_ns, const enum hrtimer_mode mode)
{
	return __hrtimer_start_range_ns(timer, tim, delta_ns, mode, 1);
}
EXPORT_SYMBOL_GPL(hrtimer_start_range_ns);

/**
 * hrtimer_start - (re)start an hrtimer on the current CPU
 * @timer:	the timer to be added
 * @tim:	expiry time
 * @mode:	expiry mode: absolute (HRTIMER_MODE_ABS) or relative (HRTIMER_MODE_REL)
 *
 * Returns:
 *  0 on success
 *  1 when the timer was active
 */
int
hrtimer_start(struct hrtimer *timer, ktime_t tim, const enum hrtimer_mode mode)
{
	return __hrtimer_start_range_ns(timer, tim, 0, mode, 1);
}
EXPORT_SYMBOL_GPL(hrtimer_start);


/**
 * hrtimer_try_to_cancel - try to deactivate a timer
 * @timer:	hrtimer to stop
 *
 * Returns:
 *  0 when the timer was not active
 *  1 when the timer was active
 * -1 when the timer is currently executing the callback function and
 *    cannot be stopped
 */
int hrtimer_try_to_cancel(struct hrtimer *timer)
{
	struct hrtimer_clock_base *base;
	unsigned long flags;
	int ret = -1;

	base = lock_hrtimer_base(timer, &flags);

	if (!hrtimer_callback_running(timer))
		ret = remove_hrtimer(timer, base);

	unlock_hrtimer_base(timer, &flags);

	return ret;

}
EXPORT_SYMBOL_GPL(hrtimer_try_to_cancel);

/**
 * hrtimer_cancel - cancel a timer and wait for the handler to finish.
 * @timer:	the timer to be cancelled
 *
 * Returns:
 *  0 when the timer was not active
 *  1 when the timer was active
 */
int hrtimer_cancel(struct hrtimer *timer)
{
	for (;;) {
		int ret = hrtimer_try_to_cancel(timer);

		if (ret >= 0)
			return ret;
		cpu_relax();
	}
}
EXPORT_SYMBOL_GPL(hrtimer_cancel);

/**
 * hrtimer_get_remaining - get remaining time for the timer
 * @timer:	the timer to read
 */
ktime_t hrtimer_get_remaining(const struct hrtimer *timer)
{
	unsigned long flags;
	ktime_t rem;

	lock_hrtimer_base(timer, &flags);
	rem = hrtimer_expires_remaining(timer);
	unlock_hrtimer_base(timer, &flags);

	return rem;
}
EXPORT_SYMBOL_GPL(hrtimer_get_remaining);
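
/*
 * Illustrative sketch (not used by this file): the usual client sequence
 * for the API above. my_timer, my_periodic_cb and PERIOD_NS are
 * placeholders.
 *
 *	static struct hrtimer my_timer;
 *
 *	hrtimer_init(&my_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
 *	my_timer.function = my_periodic_cb;
 *	hrtimer_start(&my_timer, ktime_set(0, PERIOD_NS), HRTIMER_MODE_REL);
 *	...
 *	hrtimer_cancel(&my_timer);
 */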

#ifdef CONFIG_NO_HZ
/**
 * hrtimer_get_next_event - get the time until next expiry event
 *
 * Returns the delta to the next expiry event or KTIME_MAX if no timer
 * is pending.
 */
ktime_t hrtimer_get_next_event(void)
{
	struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);
	struct hrtimer_clock_base *base = cpu_base->clock_base;
	ktime_t delta, mindelta = { .tv64 = KTIME_MAX };
	unsigned long flags;
	int i;

	raw_spin_lock_irqsave(&cpu_base->lock, flags);

	if (!hrtimer_hres_active()) {
		for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++, base++) {
			struct hrtimer *timer;

			if (!base->first)
				continue;

			timer = rb_entry(base->first, struct hrtimer, node);
			delta.tv64 = hrtimer_get_expires_tv64(timer);
			delta = ktime_sub(delta, base->get_time());
			if (delta.tv64 < mindelta.tv64)
				mindelta.tv64 = delta.tv64;
		}
	}

	raw_spin_unlock_irqrestore(&cpu_base->lock, flags);

	if (mindelta.tv64 < 0)
		mindelta.tv64 = 0;
	return mindelta;
}
#endif

static void __hrtimer_init(struct hrtimer *timer, clockid_t clock_id,
			   enum hrtimer_mode mode)
{
	struct hrtimer_cpu_base *cpu_base;

	memset(timer, 0, sizeof(struct hrtimer));

	cpu_base = &__raw_get_cpu_var(hrtimer_bases);

	if (clock_id == CLOCK_REALTIME && mode != HRTIMER_MODE_ABS)
		clock_id = CLOCK_MONOTONIC;

	timer->base = &cpu_base->clock_base[clock_id];
	hrtimer_init_timer_hres(timer);

#ifdef CONFIG_TIMER_STATS
	timer->start_site = NULL;
	timer->start_pid = -1;
	memset(timer->start_comm, 0, TASK_COMM_LEN);
#endif
}

/**
 * hrtimer_init - initialize a timer to the given clock
 * @timer:	the timer to be initialized
 * @clock_id:	the clock to be used
 * @mode:	timer mode abs/rel
 */
void hrtimer_init(struct hrtimer *timer, clockid_t clock_id,
		  enum hrtimer_mode mode)
{
	debug_init(timer, clock_id, mode);
	__hrtimer_init(timer, clock_id, mode);
}
EXPORT_SYMBOL_GPL(hrtimer_init);

/**
 * hrtimer_get_res - get the timer resolution for a clock
 * @which_clock: which clock to query
 * @tp:		 pointer to timespec variable to store the resolution
 *
 * Store the resolution of the clock selected by @which_clock in the
 * variable pointed to by @tp.
 */
int hrtimer_get_res(const clockid_t which_clock, struct timespec *tp)
{
	struct hrtimer_cpu_base *cpu_base;

	cpu_base = &__raw_get_cpu_var(hrtimer_bases);
	*tp = ktime_to_timespec(cpu_base->clock_base[which_clock].resolution);

	return 0;
}
EXPORT_SYMBOL_GPL(hrtimer_get_res);

static void __run_hrtimer(struct hrtimer *timer, ktime_t *now)
{
	struct hrtimer_clock_base *base = timer->base;
	struct hrtimer_cpu_base *cpu_base = base->cpu_base;
	enum hrtimer_restart (*fn)(struct hrtimer *);
	int restart;

	WARN_ON(!irqs_disabled());

	debug_deactivate(timer);
	__remove_hrtimer(timer, base, HRTIMER_STATE_CALLBACK, 0);
	timer_stats_account_hrtimer(timer);
	fn = timer->function;

	/*
	 * Because we run timers from hardirq context, there is no chance
	 * they get migrated to another cpu, therefore it's safe to unlock
	 * the timer base.
	 */
	raw_spin_unlock(&cpu_base->lock);
	trace_hrtimer_expire_entry(timer, now);
	restart = fn(timer);
	trace_hrtimer_expire_exit(timer);
	raw_spin_lock(&cpu_base->lock);

	/*
	 * Note: We clear the CALLBACK bit after enqueue_hrtimer and
	 * we do not reprogram the event hardware. Happens either in
	 * hrtimer_start_range_ns() or in hrtimer_interrupt()
	 */
	if (restart != HRTIMER_NORESTART) {
		BUG_ON(timer->state != HRTIMER_STATE_CALLBACK);
		enqueue_hrtimer(timer, base);
	}

	WARN_ON_ONCE(!(timer->state & HRTIMER_STATE_CALLBACK));

	timer->state &= ~HRTIMER_STATE_CALLBACK;
}

#ifdef CONFIG_HIGH_RES_TIMERS

/*
 * High resolution timer interrupt
 * Called with interrupts disabled
 */
void hrtimer_interrupt(struct clock_event_device *dev)
{
	struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);
	struct hrtimer_clock_base *base;
	ktime_t expires_next, now, entry_time, delta;
	int i, retries = 0;

	BUG_ON(!cpu_base->hres_active);
	cpu_base->nr_events++;
	dev->next_event.tv64 = KTIME_MAX;

	entry_time = now = ktime_get();
retry:
	expires_next.tv64 = KTIME_MAX;

	raw_spin_lock(&cpu_base->lock);
	/*
	 * We set expires_next to KTIME_MAX here with cpu_base->lock
	 * held to prevent that a timer is enqueued in our queue via
	 * the migration code. This does not affect enqueueing of
	 * timers which run their callback and need to be requeued on
	 * this CPU.
	 */
	cpu_base->expires_next.tv64 = KTIME_MAX;

	base = cpu_base->clock_base;

	for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) {
		ktime_t basenow;
		struct rb_node *node;

		basenow = ktime_add(now, base->offset);

		while ((node = base->first)) {
			struct hrtimer *timer;

			timer = rb_entry(node, struct hrtimer, node);

			/*
			 * The immediate goal for using the softexpires is
			 * minimizing wakeups, not running timers at the
			 * earliest interrupt after their soft expiration.
			 * This allows us to avoid using a Priority Search
			 * Tree, which can answer a stabbing query for
			 * overlapping intervals and instead use the simple
			 * BST we already have.
			 * We don't add extra wakeups by delaying timers that
			 * are right-of a not yet expired timer, because that
			 * timer will have to trigger a wakeup anyway.
			 */

			if (basenow.tv64 < hrtimer_get_softexpires_tv64(timer)) {
				ktime_t expires;

				expires = ktime_sub(hrtimer_get_expires(timer),
						    base->offset);
				if (expires.tv64 < expires_next.tv64)
					expires_next = expires;
				break;
			}

			__run_hrtimer(timer, &basenow);
		}
		base++;
	}

	/*
	 * Store the new expiry value so the migration code can verify
	 * against it.
	 */
	cpu_base->expires_next = expires_next;
	raw_spin_unlock(&cpu_base->lock);

	/* Reprogramming necessary ? */
	if (expires_next.tv64 == KTIME_MAX ||
	    !tick_program_event(expires_next, 0)) {
		cpu_base->hang_detected = 0;
		return;
	}

	/*
	 * The next timer was already expired due to:
	 * - tracing
	 * - long lasting callbacks
	 * - being scheduled away when running in a VM
	 *
	 * We need to prevent that we loop forever in the hrtimer
	 * interrupt routine. We give it 3 attempts to avoid
	 * overreacting on some spurious event.
	 */
	now = ktime_get();
	cpu_base->nr_retries++;
	if (++retries < 3)
		goto retry;
	/*
	 * Give the system a chance to do something else than looping
	 * here. We stored the entry time, so we know exactly how long
	 * we spent here. We schedule the next event this amount of
	 * time away.
	 */
	cpu_base->nr_hangs++;
	cpu_base->hang_detected = 1;
	delta = ktime_sub(now, entry_time);
	if (delta.tv64 > cpu_base->max_hang_time.tv64)
		cpu_base->max_hang_time = delta;
	/*
	 * Limit it to a sensible value as we enforce a longer
	 * delay. Give the CPU at least 100ms to catch up.
	 */
	if (delta.tv64 > 100 * NSEC_PER_MSEC)
		expires_next = ktime_add_ns(now, 100 * NSEC_PER_MSEC);
	else
		expires_next = ktime_add(now, delta);
	tick_program_event(expires_next, 1);
	printk_once(KERN_WARNING "hrtimer: interrupt took %llu ns\n",
		    ktime_to_ns(delta));
}

/*
 * local version of hrtimer_peek_ahead_timers() called with interrupts
 * disabled.
 */
static void __hrtimer_peek_ahead_timers(void)
{
	struct tick_device *td;

	if (!hrtimer_hres_active())
		return;

	td = &__get_cpu_var(tick_cpu_device);
	if (td && td->evtdev)
		hrtimer_interrupt(td->evtdev);
}

/**
 * hrtimer_peek_ahead_timers -- run soft-expired timers now
 *
 * hrtimer_peek_ahead_timers will peek at the timer queue of
 * the current cpu and check if there are any timers for which
 * the soft expires time has passed. If any such timers exist,
 * they are run immediately and then removed from the timer queue.
 *
 */
void hrtimer_peek_ahead_timers(void)
{
	unsigned long flags;

	local_irq_save(flags);
	__hrtimer_peek_ahead_timers();
	local_irq_restore(flags);
}

static void run_hrtimer_softirq(struct softirq_action *h)
{
	hrtimer_peek_ahead_timers();
}

#else /* CONFIG_HIGH_RES_TIMERS */

static inline void __hrtimer_peek_ahead_timers(void) { }

#endif /* !CONFIG_HIGH_RES_TIMERS */

/*
 * Called from timer softirq every jiffy, expire hrtimers:
 *
 * For HRT it's the fallback code to run the softirq in the timer
 * softirq context in case the hrtimer initialization failed or has
 * not been done yet.
 */
void hrtimer_run_pending(void)
{
	if (hrtimer_hres_active())
		return;

	/*
	 * This _is_ ugly: We have to check in the softirq context,
	 * whether we can switch to highres and / or nohz mode. The
	 * clocksource switch happens in the timer interrupt with
	 * xtime_lock held. Notification from there only sets the
	 * check bit in the tick_oneshot code, otherwise we might
	 * deadlock vs. xtime_lock.
	 */
	if (tick_check_oneshot_change(!hrtimer_is_hres_enabled()))
		hrtimer_switch_to_hres();
}

/*
 * Called from hardirq context every jiffy
 */
void hrtimer_run_queues(void)
{
	struct rb_node *node;
	struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);
	struct hrtimer_clock_base *base;
	int index, gettime = 1;

	if (hrtimer_hres_active())
		return;

	for (index = 0; index < HRTIMER_MAX_CLOCK_BASES; index++) {
		base = &cpu_base->clock_base[index];

		if (!base->first)
			continue;

		if (gettime) {
			hrtimer_get_softirq_time(cpu_base);
			gettime = 0;
		}

		raw_spin_lock(&cpu_base->lock);

		while ((node = base->first)) {
			struct hrtimer *timer;

			timer = rb_entry(node, struct hrtimer, node);
			if (base->softirq_time.tv64 <=
					hrtimer_get_expires_tv64(timer))
				break;

			__run_hrtimer(timer, &base->softirq_time);
		}
		raw_spin_unlock(&cpu_base->lock);
	}
}

/*
 * Sleep related functions:
 */
static enum hrtimer_restart hrtimer_wakeup(struct hrtimer *timer)
{
	struct hrtimer_sleeper *t =
		container_of(timer, struct hrtimer_sleeper, timer);
	struct task_struct *task = t->task;

	t->task = NULL;
	if (task)
		wake_up_process(task);

	return HRTIMER_NORESTART;
}

void hrtimer_init_sleeper(struct hrtimer_sleeper *sl, struct task_struct *task)
{
	sl->timer.function = hrtimer_wakeup;
	sl->task = task;
}
EXPORT_SYMBOL_GPL(hrtimer_init_sleeper);

static int __sched do_nanosleep(struct hrtimer_sleeper *t, enum hrtimer_mode mode)
{
	hrtimer_init_sleeper(t, current);

	do {
		set_current_state(TASK_INTERRUPTIBLE);
		hrtimer_start_expires(&t->timer, mode);
		if (!hrtimer_active(&t->timer))
			t->task = NULL;

		if (likely(t->task))
			schedule();

		hrtimer_cancel(&t->timer);
		mode = HRTIMER_MODE_ABS;

	} while (t->task && !signal_pending(current));

	__set_current_state(TASK_RUNNING);

	return t->task == NULL;
}

static int update_rmtp(struct hrtimer *timer, struct timespec __user *rmtp)
{
	struct timespec rmt;
	ktime_t rem;

	rem = hrtimer_expires_remaining(timer);
	if (rem.tv64 <= 0)
		return 0;
	rmt = ktime_to_timespec(rem);

	if (copy_to_user(rmtp, &rmt, sizeof(*rmtp)))
		return -EFAULT;

	return 1;
}
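
/*
 * Note on the restart flow implemented below: hrtimer_nanosleep() records
 * the absolute expiry in the restart block when a relative sleep is
 * interrupted by a signal and returns -ERESTART_RESTARTBLOCK, so that
 * hrtimer_nanosleep_restart() can resume the sleep against the original
 * absolute expiry instead of restarting the full relative timeout.
 */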

long __sched hrtimer_nanosleep_restart(struct restart_block *restart)
{
	struct hrtimer_sleeper t;
	struct timespec __user  *rmtp;
	int ret = 0;

	hrtimer_init_on_stack(&t.timer, restart->nanosleep.index,
				HRTIMER_MODE_ABS);
	hrtimer_set_expires_tv64(&t.timer, restart->nanosleep.expires);

	if (do_nanosleep(&t, HRTIMER_MODE_ABS))
		goto out;

	rmtp = restart->nanosleep.rmtp;
	if (rmtp) {
		ret = update_rmtp(&t.timer, rmtp);
		if (ret <= 0)
			goto out;
	}

	/* The other values in restart are already filled in */
	ret = -ERESTART_RESTARTBLOCK;
out:
	destroy_hrtimer_on_stack(&t.timer);
	return ret;
}

long hrtimer_nanosleep(struct timespec *rqtp, struct timespec __user *rmtp,
		       const enum hrtimer_mode mode, const clockid_t clockid)
{
	struct restart_block *restart;
	struct hrtimer_sleeper t;
	int ret = 0;
	unsigned long slack;

	slack = current->timer_slack_ns;
	if (rt_task(current))
		slack = 0;

	hrtimer_init_on_stack(&t.timer, clockid, mode);
	hrtimer_set_expires_range_ns(&t.timer, timespec_to_ktime(*rqtp), slack);
	if (do_nanosleep(&t, mode))
		goto out;

	/* Absolute timers do not update the rmtp value and restart: */
	if (mode == HRTIMER_MODE_ABS) {
		ret = -ERESTARTNOHAND;
		goto out;
	}

	if (rmtp) {
		ret = update_rmtp(&t.timer, rmtp);
		if (ret <= 0)
			goto out;
	}

	restart = &current_thread_info()->restart_block;
	restart->fn = hrtimer_nanosleep_restart;
	restart->nanosleep.index = t.timer.base->index;
	restart->nanosleep.rmtp = rmtp;
	restart->nanosleep.expires = hrtimer_get_expires_tv64(&t.timer);

	ret = -ERESTART_RESTARTBLOCK;
out:
	destroy_hrtimer_on_stack(&t.timer);
	return ret;
}

SYSCALL_DEFINE2(nanosleep, struct timespec __user *, rqtp,
		struct timespec __user *, rmtp)
{
	struct timespec tu;

	if (copy_from_user(&tu, rqtp, sizeof(tu)))
		return -EFAULT;

	if (!timespec_valid(&tu))
		return -EINVAL;

	return hrtimer_nanosleep(&tu, rmtp, HRTIMER_MODE_REL, CLOCK_MONOTONIC);
}

/*
 * Functions related to boot-time initialization:
 */
static void __cpuinit init_hrtimers_cpu(int cpu)
{
	struct hrtimer_cpu_base *cpu_base = &per_cpu(hrtimer_bases, cpu);
	int i;

	raw_spin_lock_init(&cpu_base->lock);

	for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++)
		cpu_base->clock_base[i].cpu_base = cpu_base;

	hrtimer_init_hres(cpu_base);
}

#ifdef CONFIG_HOTPLUG_CPU

static void migrate_hrtimer_list(struct hrtimer_clock_base *old_base,
				struct hrtimer_clock_base *new_base)
{
	struct hrtimer *timer;
	struct rb_node *node;

	while ((node = rb_first(&old_base->active))) {
		timer = rb_entry(node, struct hrtimer, node);
		BUG_ON(hrtimer_callback_running(timer));
		debug_deactivate(timer);

		/*
		 * Mark it as STATE_MIGRATE not INACTIVE otherwise the
		 * timer could be seen as !active and just vanish away
		 * under us on another CPU
		 */
		__remove_hrtimer(timer, old_base, HRTIMER_STATE_MIGRATE, 0);
		timer->base = new_base;
		/*
		 * Enqueue the timers on the new cpu. This does not
		 * reprogram the event device in case the timer
		 * expires before the earliest on this CPU, but we run
		 * hrtimer_interrupt after we migrated everything to
		 * sort out already expired timers and reprogram the
		 * event device.
		 */
		enqueue_hrtimer(timer, new_base);

		/* Clear the migration state bit */
		timer->state &= ~HRTIMER_STATE_MIGRATE;
	}
}

static void migrate_hrtimers(int scpu)
{
	struct hrtimer_cpu_base *old_base, *new_base;
	int i;

	BUG_ON(cpu_online(scpu));
	tick_cancel_sched_timer(scpu);

	local_irq_disable();
	old_base = &per_cpu(hrtimer_bases, scpu);
	new_base = &__get_cpu_var(hrtimer_bases);
	/*
	 * The caller is globally serialized and nobody else
	 * takes two locks at once, deadlock is not possible.
	 */
	raw_spin_lock(&new_base->lock);
	raw_spin_lock_nested(&old_base->lock, SINGLE_DEPTH_NESTING);

	for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) {
		migrate_hrtimer_list(&old_base->clock_base[i],
				     &new_base->clock_base[i]);
	}

	raw_spin_unlock(&old_base->lock);
	raw_spin_unlock(&new_base->lock);

	/* Check, if we got expired work to do */
	__hrtimer_peek_ahead_timers();
	local_irq_enable();
}

#endif /* CONFIG_HOTPLUG_CPU */

static int __cpuinit hrtimer_cpu_notify(struct notifier_block *self,
					unsigned long action, void *hcpu)
{
	int scpu = (long)hcpu;

	switch (action) {

	case CPU_UP_PREPARE:
	case CPU_UP_PREPARE_FROZEN:
		init_hrtimers_cpu(scpu);
		break;

#ifdef CONFIG_HOTPLUG_CPU
	case CPU_DYING:
	case CPU_DYING_FROZEN:
		clockevents_notify(CLOCK_EVT_NOTIFY_CPU_DYING, &scpu);
		break;
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
	{
		clockevents_notify(CLOCK_EVT_NOTIFY_CPU_DEAD, &scpu);
		migrate_hrtimers(scpu);
		break;
	}
#endif

	default:
		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block __cpuinitdata hrtimers_nb = {
	.notifier_call = hrtimer_cpu_notify,
};

void __init hrtimers_init(void)
{
	hrtimer_cpu_notify(&hrtimers_nb, (unsigned long)CPU_UP_PREPARE,
			   (void *)(long)smp_processor_id());
	register_cpu_notifier(&hrtimers_nb);
#ifdef CONFIG_HIGH_RES_TIMERS
	open_softirq(HRTIMER_SOFTIRQ, run_hrtimer_softirq);
#endif
}

/**
 * schedule_hrtimeout_range_clock - sleep until timeout
 * @expires:	timeout value (ktime_t)
 * @delta:	slack in expires timeout (ktime_t)
 * @mode:	timer mode, HRTIMER_MODE_ABS or HRTIMER_MODE_REL
 * @clock:	timer clock, CLOCK_MONOTONIC or CLOCK_REALTIME
 */
int __sched
schedule_hrtimeout_range_clock(ktime_t *expires, unsigned long delta,
			       const enum hrtimer_mode mode, int clock)
{
	struct hrtimer_sleeper t;

	/*
	 * Optimize when a zero timeout value is given. It does not
	 * matter whether this is an absolute or a relative time.
	 */
	if (expires && !expires->tv64) {
		__set_current_state(TASK_RUNNING);
		return 0;
	}

	/*
	 * A NULL parameter means "infinite"
	 */
	if (!expires) {
		schedule();
		__set_current_state(TASK_RUNNING);
		return -EINTR;
	}

	hrtimer_init_on_stack(&t.timer, clock, mode);
	hrtimer_set_expires_range_ns(&t.timer, *expires, delta);

	hrtimer_init_sleeper(&t, current);

	hrtimer_start_expires(&t.timer, mode);
	if (!hrtimer_active(&t.timer))
		t.task = NULL;

	if (likely(t.task))
		schedule();

	hrtimer_cancel(&t.timer);
	destroy_hrtimer_on_stack(&t.timer);

	__set_current_state(TASK_RUNNING);

	return !t.task ? 0 : -EINTR;
}

/**
 * schedule_hrtimeout_range - sleep until timeout
 * @expires:	timeout value (ktime_t)
 * @delta:	slack in expires timeout (ktime_t)
 * @mode:	timer mode, HRTIMER_MODE_ABS or HRTIMER_MODE_REL
 *
 * Make the current task sleep until the given expiry time has
 * elapsed. The routine will return immediately unless
 * the current task state has been set (see set_current_state()).
 *
 * The @delta argument gives the kernel the freedom to schedule the
 * actual wakeup to a time that is both power and performance friendly.
 * The kernel gives the normal best effort behavior for "@expires + @delta",
 * but may decide to fire the timer earlier, though no earlier than @expires.
 *
 * You can set the task state as follows -
 *
 * %TASK_UNINTERRUPTIBLE - at least @expires time is guaranteed to
 * pass before the routine returns.
 *
 * %TASK_INTERRUPTIBLE - the routine may return early if a signal is
 * delivered to the current task.
 *
 * The current task state is guaranteed to be TASK_RUNNING when this
 * routine returns.
 *
 * Returns 0 when the timer has expired otherwise -EINTR
 */
int __sched schedule_hrtimeout_range(ktime_t *expires, unsigned long delta,
				     const enum hrtimer_mode mode)
{
	return schedule_hrtimeout_range_clock(expires, delta, mode,
					      CLOCK_MONOTONIC);
}
EXPORT_SYMBOL_GPL(schedule_hrtimeout_range);
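
/*
 * Illustrative sketch (not used by this file): callers set the task state
 * first and then sleep with a slack value of their choosing. timeout,
 * slack_ns and ret are placeholders.
 *
 *	ktime_t timeout = ktime_set(0, 500 * NSEC_PER_USEC);
 *
 *	set_current_state(TASK_INTERRUPTIBLE);
 *	ret = schedule_hrtimeout_range(&timeout, slack_ns, HRTIMER_MODE_REL);
 */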

/**
 * schedule_hrtimeout - sleep until timeout
 * @expires:	timeout value (ktime_t)
 * @mode:	timer mode, HRTIMER_MODE_ABS or HRTIMER_MODE_REL
 *
 * Make the current task sleep until the given expiry time has
 * elapsed. The routine will return immediately unless
 * the current task state has been set (see set_current_state()).
 *
 * You can set the task state as follows -
 *
 * %TASK_UNINTERRUPTIBLE - at least @expires time is guaranteed to
 * pass before the routine returns.
 *
 * %TASK_INTERRUPTIBLE - the routine may return early if a signal is
 * delivered to the current task.
 *
 * The current task state is guaranteed to be TASK_RUNNING when this
 * routine returns.
 *
 * Returns 0 when the timer has expired otherwise -EINTR
 */
int __sched schedule_hrtimeout(ktime_t *expires,
			       const enum hrtimer_mode mode)
{
	return schedule_hrtimeout_range(expires, 0, mode);
}
EXPORT_SYMBOL_GPL(schedule_hrtimeout);