/*
 *  arch/s390/kernel/vtime.c
 *    Virtual cpu timer based timer functions.
 *
 *  S390 version
 *    Copyright (C) 2004 IBM Deutschland Entwicklung GmbH, IBM Corporation
 *    Author(s): Jan Glauber <jan.glauber@de.ibm.com>
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/time.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/smp.h>
#include <linux/types.h>
#include <linux/timex.h>
#include <linux/notifier.h>
#include <linux/kernel_stat.h>
#include <linux/rcupdate.h>
#include <linux/posix-timers.h>
#include <linux/cpu.h>

#include <asm/s390_ext.h>
#include <asm/timer.h>
#include <asm/irq_regs.h>
#include <asm/cputime.h>

static DEFINE_PER_CPU(struct vtimer_queue, virt_cpu_timer);

DEFINE_PER_CPU(struct s390_idle_data, s390_idle);

static inline __u64 get_vtimer(void)
{
        __u64 timer;

        asm volatile("STPT %0" : "=m" (timer));
        return timer;
}

static inline void set_vtimer(__u64 expires)
{
        __u64 timer;

        asm volatile("  STPT %0\n"      /* Store current cpu timer value */
                     "  SPT %1"         /* Set new value immediately afterwards */
                     : "=m" (timer) : "m" (expires));
        S390_lowcore.system_timer += S390_lowcore.last_update_timer - timer;
        S390_lowcore.last_update_timer = expires;
}

/*
 * Update process times based on virtual cpu times stored by entry.S
 * to the lowcore fields user_timer, system_timer & steal_timer.
 */
static void do_account_vtime(struct task_struct *tsk, int hardirq_offset)
{
        struct thread_info *ti = task_thread_info(tsk);
        __u64 timer, clock, user, system, steal;

        timer = S390_lowcore.last_update_timer;
        clock = S390_lowcore.last_update_clock;
        asm volatile("  STPT %0\n"      /* Store current cpu timer value */
                     "  STCK %1"        /* Store current tod clock value */
                     : "=m" (S390_lowcore.last_update_timer),
                       "=m" (S390_lowcore.last_update_clock));
        S390_lowcore.system_timer += timer - S390_lowcore.last_update_timer;
        S390_lowcore.steal_timer += S390_lowcore.last_update_clock - clock;

        user = S390_lowcore.user_timer - ti->user_timer;
        S390_lowcore.steal_timer -= user;
        ti->user_timer = S390_lowcore.user_timer;
        account_user_time(tsk, user, user);

        system = S390_lowcore.system_timer - ti->system_timer;
        S390_lowcore.steal_timer -= system;
        ti->system_timer = S390_lowcore.system_timer;
        account_system_time(tsk, hardirq_offset, system, system);

        steal = S390_lowcore.steal_timer;
        if ((s64) steal > 0) {
                S390_lowcore.steal_timer = 0;
                account_steal_time(steal);
        }
}

void account_vtime(struct task_struct *prev, struct task_struct *next)
{
        struct thread_info *ti;

        do_account_vtime(prev, 0);
        ti = task_thread_info(prev);
        ti->user_timer = S390_lowcore.user_timer;
        ti->system_timer = S390_lowcore.system_timer;
        ti = task_thread_info(next);
        S390_lowcore.user_timer = ti->user_timer;
        S390_lowcore.system_timer = ti->system_timer;
}

void account_process_tick(struct task_struct *tsk, int user_tick)
{
        do_account_vtime(tsk, HARDIRQ_OFFSET);
}
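
/*
 * Worked example for the accounting above (added for clarity, values
 * are made up): the s390 CPU timer counts *down*, so the time consumed
 * since the last update is last_update_timer minus the current value.
 * If last_update_timer was 1000 and STPT now stores 400, then 600
 * timer units of system time have elapsed; set_vtimer() adds those 600
 * units to S390_lowcore.system_timer before reprogramming the timer.
 */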

/*
 * Update the per-task system time based on the virtual cpu time
 * stored by entry.S in the lowcore fields. Called on interrupt
 * entry and exit.
 */
void account_system_vtime(struct task_struct *tsk)
{
        struct thread_info *ti = task_thread_info(tsk);
        __u64 timer, system;

        timer = S390_lowcore.last_update_timer;
        S390_lowcore.last_update_timer = get_vtimer();
        S390_lowcore.system_timer += timer - S390_lowcore.last_update_timer;

        system = S390_lowcore.system_timer - ti->system_timer;
        S390_lowcore.steal_timer -= system;
        ti->system_timer = S390_lowcore.system_timer;
        account_system_time(tsk, 0, system, system);
}
EXPORT_SYMBOL_GPL(account_system_vtime);

void vtime_start_cpu(__u64 int_clock, __u64 enter_timer)
{
        struct s390_idle_data *idle = &__get_cpu_var(s390_idle);
        struct vtimer_queue *vq = &__get_cpu_var(virt_cpu_timer);
        __u64 idle_time, expires;

        if (idle->idle_enter == 0ULL)
                return;

        /* Account time spent with enabled wait psw loaded as idle time. */
        idle_time = int_clock - idle->idle_enter;
        account_idle_time(idle_time);
        S390_lowcore.steal_timer +=
                idle->idle_enter - S390_lowcore.last_update_clock;
        S390_lowcore.last_update_clock = int_clock;

        /* Account system time spent going idle. */
        S390_lowcore.system_timer += S390_lowcore.last_update_timer - vq->idle;
        S390_lowcore.last_update_timer = enter_timer;

        /* Restart vtime CPU timer */
        if (vq->do_spt) {
                /* Program old expire value but first save progress. */
                expires = vq->idle - enter_timer;
                expires += get_vtimer();
                set_vtimer(expires);
        } else {
                /* Don't account the CPU timer delta while the cpu was idle. */
                vq->elapsed -= vq->idle - enter_timer;
        }

        idle->sequence++;
        smp_wmb();
        idle->idle_time += idle_time;
        idle->idle_enter = 0ULL;
        idle->idle_count++;
        smp_wmb();
        idle->sequence++;
}
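
/*
 * Note on the sequence counter above (comment added for clarity):
 * writers bump idle->sequence to an odd value before updating the idle
 * fields and back to an even value afterwards, a hand-rolled seqcount.
 * Readers such as s390_get_idle_time() below retry until they observe
 * an even, unchanged sequence, which yields a consistent snapshot of
 * idle_enter/idle_time without taking a lock.
 */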

void vtime_stop_cpu(void)
{
        struct s390_idle_data *idle = &__get_cpu_var(s390_idle);
        struct vtimer_queue *vq = &__get_cpu_var(virt_cpu_timer);
        psw_t psw;

        /* Wait for external, I/O or machine check interrupt. */
        psw.mask = psw_kernel_bits | PSW_MASK_WAIT | PSW_MASK_IO | PSW_MASK_EXT;

        idle->nohz_delay = 0;

        /* Check if the CPU timer needs to be reprogrammed. */
        if (vq->do_spt) {
                __u64 vmax = VTIMER_MAX_SLICE;
                /*
                 * The inline assembly is equivalent to
                 *      vq->idle = get_cpu_timer();
                 *      set_cpu_timer(VTIMER_MAX_SLICE);
                 *      idle->idle_enter = get_clock();
                 *      __load_psw_mask(psw_kernel_bits | PSW_MASK_WAIT |
                 *                      PSW_MASK_IO | PSW_MASK_EXT);
                 * The difference is that the inline assembly makes sure that
                 * the last three instructions are stpt, stck and lpsw in that
                 * order. This is done to increase the precision.
                 */
                asm volatile(
#ifndef CONFIG_64BIT
                        " basr 1,0\n"
                        "0: ahi 1,1f-0b\n"
                        " st 1,4(%2)\n"
#else /* CONFIG_64BIT */
                        " larl 1,1f\n"
                        " stg 1,8(%2)\n"
#endif /* CONFIG_64BIT */
                        " stpt 0(%4)\n"
                        " spt 0(%5)\n"
                        " stck 0(%3)\n"
#ifndef CONFIG_64BIT
                        " lpsw 0(%2)\n"
#else /* CONFIG_64BIT */
                        " lpswe 0(%2)\n"
#endif /* CONFIG_64BIT */
                        "1:"
                        : "=m" (idle->idle_enter), "=m" (vq->idle)
                        : "a" (&psw), "a" (&idle->idle_enter),
                          "a" (&vq->idle), "a" (&vmax), "m" (vmax), "m" (psw)
                        : "memory", "cc", "1");
        } else {
                /*
                 * The inline assembly is equivalent to
                 *      vq->idle = get_cpu_timer();
                 *      idle->idle_enter = get_clock();
                 *      __load_psw_mask(psw_kernel_bits | PSW_MASK_WAIT |
                 *                      PSW_MASK_IO | PSW_MASK_EXT);
                 * The difference is that the inline assembly makes sure that
                 * the last three instructions are stpt, stck and lpsw in that
                 * order. This is done to increase the precision.
                 */
                asm volatile(
#ifndef CONFIG_64BIT
                        " basr 1,0\n"
                        "0: ahi 1,1f-0b\n"
                        " st 1,4(%2)\n"
#else /* CONFIG_64BIT */
                        " larl 1,1f\n"
                        " stg 1,8(%2)\n"
#endif /* CONFIG_64BIT */
                        " stpt 0(%4)\n"
                        " stck 0(%3)\n"
#ifndef CONFIG_64BIT
                        " lpsw 0(%2)\n"
#else /* CONFIG_64BIT */
                        " lpswe 0(%2)\n"
#endif /* CONFIG_64BIT */
                        "1:"
                        : "=m" (idle->idle_enter), "=m" (vq->idle)
                        : "a" (&psw), "a" (&idle->idle_enter),
                          "a" (&vq->idle), "m" (psw)
                        : "memory", "cc", "1");
        }
}

cputime64_t s390_get_idle_time(int cpu)
{
        struct s390_idle_data *idle;
        unsigned long long now, idle_time, idle_enter;
        unsigned int sequence;

        idle = &per_cpu(s390_idle, cpu);

        now = get_clock();
repeat:
        sequence = idle->sequence;
        smp_rmb();
        if (sequence & 1)
                goto repeat;
        idle_time = 0;
        idle_enter = idle->idle_enter;
        if (idle_enter != 0ULL && idle_enter < now)
                idle_time = now - idle_enter;
        smp_rmb();
        if (idle->sequence != sequence)
                goto repeat;
        return idle_time;
}
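
/*
 * Usage sketch (illustrative only, not part of the original file): a
 * caller such as a /proc or sysfs show routine could take a lock-free
 * snapshot of a cpu's current idle stretch. The helper name below is
 * made up; the >> 12 relies on the TOD/CPU timer format where 4096
 * clock units correspond to one microsecond.
 */
#if 0
static unsigned long long report_idle_usecs(int cpu)
{
        /* snapshot is consistent thanks to the sequence counter */
        return (unsigned long long) s390_get_idle_time(cpu) >> 12;
}
#endif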

/*
 * Sorted add to a list. The list is searched linearly until the first
 * element with a larger expiry value is found.
 */
static void list_add_sorted(struct vtimer_list *timer, struct list_head *head)
{
        struct vtimer_list *event;

        list_for_each_entry(event, head, entry) {
                if (event->expires > timer->expires) {
                        list_add_tail(&timer->entry, &event->entry);
                        return;
                }
        }
        list_add_tail(&timer->entry, head);
}

/*
 * Run the callback functions of expired vtimer events.
 * Called from within the interrupt handler.
 */
static void do_callbacks(struct list_head *cb_list)
{
        struct vtimer_queue *vq;
        struct vtimer_list *event, *tmp;

        if (list_empty(cb_list))
                return;

        vq = &__get_cpu_var(virt_cpu_timer);

        list_for_each_entry_safe(event, tmp, cb_list, entry) {
                list_del_init(&event->entry);
                (event->function)(event->data);
                if (event->interval) {
                        /* Recharge interval timer */
                        event->expires = event->interval + vq->elapsed;
                        spin_lock(&vq->lock);
                        list_add_sorted(event, &vq->list);
                        spin_unlock(&vq->lock);
                }
        }
}

/*
 * Handler for the virtual CPU timer.
 */
static void do_cpu_timer_interrupt(__u16 error_code)
{
        struct vtimer_queue *vq;
        struct vtimer_list *event, *tmp;
        struct list_head cb_list;       /* the callback queue */
        __u64 elapsed, next;

        INIT_LIST_HEAD(&cb_list);
        vq = &__get_cpu_var(virt_cpu_timer);

        /* walk timer list, fire all expired events */
        spin_lock(&vq->lock);

        elapsed = vq->elapsed + (vq->timer - S390_lowcore.async_enter_timer);
        BUG_ON((s64) elapsed < 0);
        vq->elapsed = 0;
        list_for_each_entry_safe(event, tmp, &vq->list, entry) {
                if (event->expires < elapsed)
                        /* move expired timer to the callback queue */
                        list_move_tail(&event->entry, &cb_list);
                else
                        event->expires -= elapsed;
        }
        spin_unlock(&vq->lock);

        vq->do_spt = list_empty(&cb_list);
        do_callbacks(&cb_list);

        /* next event is first in list */
        next = VTIMER_MAX_SLICE;
        spin_lock(&vq->lock);
        if (!list_empty(&vq->list)) {
                event = list_first_entry(&vq->list, struct vtimer_list, entry);
                next = event->expires;
        } else
                vq->do_spt = 0;
        spin_unlock(&vq->lock);
        /*
         * To improve precision add the time spent by the
         * interrupt handler to the elapsed time.
         * Note: CPU timer counts down and we got an interrupt,
         * the current content is negative
         */
        elapsed = S390_lowcore.async_enter_timer - get_vtimer();
        set_vtimer(next - elapsed);
        vq->timer = next - elapsed;
        vq->elapsed = elapsed;
}
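
/*
 * Worked example for the handler above (added for clarity, made-up
 * numbers): assume two queued timers with expires = 400 and 700,
 * vq->timer = 400 and vq->elapsed = 0. The CPU timer counts down
 * through zero, so at interrupt entry async_enter_timer is slightly
 * negative, say -5, giving elapsed = 405: the first timer moves to the
 * callback queue and the second is rebased to expires = 295. If a
 * further 10 units pass inside the handler, the CPU timer is
 * reprogrammed to 295 - 10 = 285 and those 10 units are carried in
 * vq->elapsed for the next round, so no time is lost.
 */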

void init_virt_timer(struct vtimer_list *timer)
{
        timer->function = NULL;
        INIT_LIST_HEAD(&timer->entry);
}
EXPORT_SYMBOL(init_virt_timer);

static inline int vtimer_pending(struct vtimer_list *timer)
{
        return (!list_empty(&timer->entry));
}

/*
 * This function should only run on the CPU the timer is queued for.
 */
static void internal_add_vtimer(struct vtimer_list *timer)
{
        struct vtimer_queue *vq;
        unsigned long flags;
        __u64 left, expires;

        vq = &per_cpu(virt_cpu_timer, timer->cpu);
        spin_lock_irqsave(&vq->lock, flags);

        BUG_ON(timer->cpu != smp_processor_id());

        if (list_empty(&vq->list)) {
                /* First timer on this cpu, just program it. */
                list_add(&timer->entry, &vq->list);
                set_vtimer(timer->expires);
                vq->timer = timer->expires;
                vq->elapsed = 0;
        } else {
                /* Check progress of old timers. */
                expires = timer->expires;
                left = get_vtimer();
                if (likely((s64) expires < (s64) left)) {
                        /* The new timer expires before the current timer. */
                        set_vtimer(expires);
                        vq->elapsed += vq->timer - left;
                        vq->timer = expires;
                } else {
                        vq->elapsed += vq->timer - left;
                        vq->timer = left;
                }
                /* Insert new timer into per cpu list. */
                timer->expires += vq->elapsed;
                list_add_sorted(timer, &vq->list);
        }

        spin_unlock_irqrestore(&vq->lock, flags);
        /* release CPU acquired in prepare_vtimer or mod_virt_timer() */
        put_cpu();
}

static inline void prepare_vtimer(struct vtimer_list *timer)
{
        BUG_ON(!timer->function);
        BUG_ON(!timer->expires || timer->expires > VTIMER_MAX_SLICE);
        BUG_ON(vtimer_pending(timer));
        timer->cpu = get_cpu();
}

/*
 * add_virt_timer - add a one-shot virtual CPU timer
 */
void add_virt_timer(void *new)
{
        struct vtimer_list *timer;

        timer = (struct vtimer_list *)new;
        prepare_vtimer(timer);
        timer->interval = 0;
        internal_add_vtimer(timer);
}
EXPORT_SYMBOL(add_virt_timer);

/*
 * add_virt_timer_periodic - add an interval virtual CPU timer
 */
void add_virt_timer_periodic(void *new)
{
        struct vtimer_list *timer;

        timer = (struct vtimer_list *)new;
        prepare_vtimer(timer);
        timer->interval = timer->expires;
        internal_add_vtimer(timer);
}
EXPORT_SYMBOL(add_virt_timer_periodic);

int __mod_vtimer(struct vtimer_list *timer, __u64 expires, int periodic)
{
        struct vtimer_queue *vq;
        unsigned long flags;
        int cpu;

        BUG_ON(!timer->function);
        BUG_ON(!expires || expires > VTIMER_MAX_SLICE);

        if (timer->expires == expires && vtimer_pending(timer))
                return 1;

        cpu = get_cpu();
        vq = &per_cpu(virt_cpu_timer, cpu);

        /* disable interrupts before test if timer is pending */
        spin_lock_irqsave(&vq->lock, flags);

        /* if timer isn't pending add it on the current CPU */
        if (!vtimer_pending(timer)) {
                spin_unlock_irqrestore(&vq->lock, flags);

                if (periodic)
                        timer->interval = expires;
                else
                        timer->interval = 0;
                timer->expires = expires;
                timer->cpu = cpu;
                internal_add_vtimer(timer);
                return 0;
        }

        /* check if we run on the right CPU */
        BUG_ON(timer->cpu != cpu);

        list_del_init(&timer->entry);
        timer->expires = expires;
        if (periodic)
                timer->interval = expires;

        /* the timer can't expire anymore so we can release the lock */
        spin_unlock_irqrestore(&vq->lock, flags);
        internal_add_vtimer(timer);
        return 1;
}
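
/*
 * Usage sketch (illustrative only, not part of the original file):
 * setting up a periodic vtimer. The callback, data value and interval
 * below are assumptions for the example; expires/interval are given in
 * CPU timer units, where 4096 units correspond to one microsecond, so
 * 0x2000000 is roughly 8 milliseconds of virtual cpu time.
 */
#if 0
static void sample_vtimer_fn(unsigned long data)
{
        /* runs in interrupt context on the cpu that owns the timer */
}

static struct vtimer_list sample_timer;

static void sample_vtimer_setup(void)
{
        init_virt_timer(&sample_timer);
        sample_timer.function = sample_vtimer_fn;
        sample_timer.data = 0;
        sample_timer.expires = 0x2000000;       /* ~8 ms of cpu time */
        add_virt_timer_periodic(&sample_timer);
}
#endif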

/*
 * If we change a pending timer the function must be called on the CPU
 * where the timer is queued.
 *
 * returns whether it has modified a pending timer (1) or not (0)
 */
int mod_virt_timer(struct vtimer_list *timer, __u64 expires)
{
        return __mod_vtimer(timer, expires, 0);
}
EXPORT_SYMBOL(mod_virt_timer);

/*
 * If we change a pending timer the function must be called on the CPU
 * where the timer is queued.
 *
 * returns whether it has modified a pending timer (1) or not (0)
 */
int mod_virt_timer_periodic(struct vtimer_list *timer, __u64 expires)
{
        return __mod_vtimer(timer, expires, 1);
}
EXPORT_SYMBOL(mod_virt_timer_periodic);

/*
 * Delete a virtual timer.
 *
 * returns whether the deleted timer was pending (1) or not (0)
 */
int del_virt_timer(struct vtimer_list *timer)
{
        unsigned long flags;
        struct vtimer_queue *vq;

        /* check if timer is pending */
        if (!vtimer_pending(timer))
                return 0;

        vq = &per_cpu(virt_cpu_timer, timer->cpu);
        spin_lock_irqsave(&vq->lock, flags);

        /* we don't interrupt a running timer, just let it expire! */
        list_del_init(&timer->entry);

        spin_unlock_irqrestore(&vq->lock, flags);
        return 1;
}
EXPORT_SYMBOL(del_virt_timer);

/*
 * Start the virtual CPU timer on the current CPU.
 */
void init_cpu_vtimer(void)
{
        struct vtimer_queue *vq;

        /* initialize per cpu vtimer structure */
        vq = &__get_cpu_var(virt_cpu_timer);
        INIT_LIST_HEAD(&vq->list);
        spin_lock_init(&vq->lock);

        /* enable cpu timer interrupts */
        __ctl_set_bit(0, 10);
}

static int __cpuinit s390_nohz_notify(struct notifier_block *self,
                                      unsigned long action, void *hcpu)
{
        struct s390_idle_data *idle;
        long cpu = (long) hcpu;

        idle = &per_cpu(s390_idle, cpu);
        switch (action) {
        case CPU_DYING:
        case CPU_DYING_FROZEN:
                idle->nohz_delay = 0;
        default:
                break;
        }
        return NOTIFY_OK;
}

void __init vtime_init(void)
{
        /* request the cpu timer external interrupt */
        if (register_external_interrupt(0x1005, do_cpu_timer_interrupt))
                panic("Couldn't request external interrupt 0x1005");

        /* Enable cpu timer interrupts on the boot cpu. */
        init_cpu_vtimer();
        cpu_notifier(s390_nohz_notify, 0);
}
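
/*
 * Follow-up to the usage sketch above (illustrative only): a pending
 * vtimer can be retargeted or removed at runtime. mod_virt_timer() on
 * a pending timer must run on the cpu that owns it; del_virt_timer()
 * may run anywhere, since it only unlinks the entry under the per-cpu
 * queue lock. The interval value below is again an assumption.
 */
#if 0
static void sample_vtimer_teardown(void)
{
        /* halve the period to ~4 ms, then remove the timer entirely */
        mod_virt_timer_periodic(&sample_timer, 0x1000000);
        del_virt_timer(&sample_timer);
}
#endif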