/* Modified by Broadcom Corp. Portions Copyright (c) Broadcom Corp, 2012. */
/*
 * linux/arch/arm/kernel/smp.c
 *
 * Copyright (C) 2002 ARM Limited, All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/cache.h>
#include <linux/profile.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/cpu.h>
#include <linux/smp.h>
#include <linux/seq_file.h>
#include <linux/irq.h>
#include <linux/percpu.h>
#include <linux/clockchips.h>

#include <asm/atomic.h>
#include <asm/cacheflush.h>
#include <asm/cpu.h>
#include <asm/cputype.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/processor.h>
#include <asm/tlbflush.h>
#include <asm/ptrace.h>
#include <asm/localtimer.h>
#include <asm/smp_plat.h>

#ifdef CONFIG_BCM47XX
extern void soc_watchdog(void);
#endif

/*
 * As of 2.5, kernels no longer have an init_tasks structure, so we
 * need some other way of telling a new secondary core where to place
 * its SVC stack.
 */
struct secondary_data secondary_data;

/*
 * Structures for inter-processor calls:
 * a collection of single-bit IPI messages.
 */
struct ipi_data {
        spinlock_t lock;
        unsigned long ipi_count;
        unsigned long bits;
};

static DEFINE_PER_CPU(struct ipi_data, ipi_data) = {
        .lock   = SPIN_LOCK_UNLOCKED,
};

enum ipi_msg_type {
        IPI_TIMER,
        IPI_RESCHEDULE,
        IPI_CALL_FUNC,
        IPI_CALL_FUNC_SINGLE,
        IPI_CPU_STOP,
};
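/*
 * Bring a secondary CPU into the system.  In outline (the precise
 * handshake is platform specific, hidden behind boot_secondary()):
 *
 *  1. Make sure the CPU has an idle thread to run.
 *  2. Build temporary page tables: our "standard" init_mm tables
 *     plus a 1:1 section mapping of the kernel, so the secondary
 *     can enable its MMU while still running from physical
 *     addresses.
 *  3. Publish the idle stack and pgdir in secondary_data and kick
 *     the CPU.
 *  4. Wait for it to mark itself online, then tear the temporary
 *     mapping back down.
 */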
int __cpuinit __cpu_up(unsigned int cpu)
{
        struct cpuinfo_arm *ci = &per_cpu(cpu_data, cpu);
        struct task_struct *idle = ci->idle;
        pgd_t *pgd;
        pmd_t *pmd;
        int ret;

        /*
         * Spawn a new process manually, if not already done.
         * Grab a pointer to its task struct so we can mess with it.
         */
        if (!idle) {
                idle = fork_idle(cpu);
                if (IS_ERR(idle)) {
                        printk(KERN_ERR "CPU%u: fork() failed\n", cpu);
                        return PTR_ERR(idle);
                }
                ci->idle = idle;
        } else {
                /*
                 * Since this idle thread is being re-used, call
                 * init_idle() to reinitialize the thread structure.
                 */
                init_idle(idle, cpu);
        }

        /*
         * Allocate initial page tables to allow the new CPU to
         * enable the MMU safely.  This essentially means a set
         * of our "standard" page tables, with the addition of
         * a 1:1 mapping for the physical address of the kernel.
         */
        pgd = pgd_alloc(&init_mm);
        pmd = pmd_offset(pgd + pgd_index(PHYS_OFFSET), PHYS_OFFSET);
        *pmd = __pmd((PHYS_OFFSET & PGDIR_MASK) |
                     PMD_TYPE_SECT | PMD_SECT_AP_WRITE);
        flush_pmd_entry(pmd);
        outer_clean_range(__pa(pmd), __pa(pmd + 1));

        /*
         * We need to tell the secondary core where to find
         * its stack and the page tables.
         */
        secondary_data.stack = task_stack_page(idle) + THREAD_START_SP;
        secondary_data.pgdir = virt_to_phys(pgd);
        __cpuc_flush_dcache_area(&secondary_data, sizeof(secondary_data));
        outer_clean_range(__pa(&secondary_data), __pa(&secondary_data + 1));

        /*
         * Now bring the CPU into our world.
         */
        ret = boot_secondary(cpu, idle);
        if (ret == 0) {
                /*
                 * The timeout is a fixed number of jiffies: on a slow
                 * processor HZ is low, so the wait is correspondingly
                 * longer, as required.
                 */
                unsigned long timeout = 128;

                /*
                 * CPU was successfully started, wait for it
                 * to come online or time out.
                 */
                timeout += jiffies;
                while (time_before(jiffies, timeout)) {
                        if (cpu_online(cpu))
                                break;

                        udelay(10);
                        barrier();
                }

                if (!cpu_online(cpu))
                        ret = -EIO;
        }

        secondary_data.stack = NULL;
        secondary_data.pgdir = 0;

        *pmd = __pmd(0);
        clean_pmd_entry(pmd);
        pgd_free(&init_mm, pgd);

        if (ret)
                printk(KERN_CRIT "CPU%u: processor failed to boot\n", cpu);

        return ret;
}

#ifdef CONFIG_HOTPLUG_CPU
/*
 * __cpu_disable runs on the processor to be shut down.
 */
int __cpu_disable(void)
{
        unsigned int cpu = smp_processor_id();
        struct task_struct *p;
        int ret;

        ret = platform_cpu_disable(cpu);
        if (ret)
                return ret;

        /*
         * Take this CPU offline.  Once we clear this, we can't return,
         * and we must not schedule until we're ready to give up the cpu.
         */
        set_cpu_online(cpu, false);

        /*
         * OK - migrate IRQs away from this CPU.
         */
        migrate_irqs();

        /*
         * Stop the local timer for this CPU.
         */
        local_timer_stop();

        /*
         * Flush user cache and TLB mappings, and then remove this CPU
         * from the vm mask set of all processes.
         */
        flush_cache_all();
        local_flush_tlb_all();

        read_lock(&tasklist_lock);
        for_each_process(p) {
                if (p->mm)
                        cpumask_clear_cpu(cpu, mm_cpumask(p->mm));
        }
        read_unlock(&tasklist_lock);

        return 0;
}

/*
 * Called on the thread which is asking for a CPU to be shut down;
 * waits until shutdown has completed, or times out.
 */
void __cpu_die(unsigned int cpu)
{
        if (!platform_cpu_kill(cpu))
                printk(KERN_ERR "CPU%u: unable to kill\n", cpu);
}
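/*
 * A rough map of the hotplug teardown (summarising the code above and
 * below): __cpu_disable() runs on the dying CPU to detach it from
 * interrupts, timers and mm cpumasks; __cpu_die() runs on the CPU
 * that requested the shutdown and waits for the platform to confirm
 * the kill; cpu_die() below is the dying CPU's final stop, reached
 * from its idle thread.
 */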
/*
 * Called from the idle thread for the CPU which has been shut down.
 *
 * Note that we disable IRQs here, but do not re-enable them
 * before returning to the caller.  This is also the behaviour
 * of the other hotplug-cpu capable cores, so presumably coming
 * out of idle fixes this.
 */
void __ref cpu_die(void)
{
        unsigned int cpu = smp_processor_id();

        local_irq_disable();
        idle_task_exit();

        /*
         * Actual CPU shutdown procedure is at least platform (if not
         * CPU) specific.
         */
        platform_cpu_die(cpu);

        /*
         * Do not return to the idle loop - jump back to the secondary
         * cpu initialisation.  There's some initialisation which needs
         * to be repeated to undo the effects of taking the CPU offline.
         * (THREAD_SIZE - 8 matches THREAD_START_SP, the same initial
         * stack pointer that __cpu_up() published in secondary_data.)
         */
        __asm__("mov    sp, %0\n"
        "       b       secondary_start_kernel"
                :
                : "r" (task_stack_page(current) + THREAD_SIZE - 8));
}
#endif /* CONFIG_HOTPLUG_CPU */

/*
 * This is the secondary CPU boot entry.  We're using this CPU's
 * idle thread stack, but a set of temporary page tables.
 */
asmlinkage void __cpuinit secondary_start_kernel(void)
{
        struct mm_struct *mm = &init_mm;
        unsigned int cpu = smp_processor_id();

        printk(KERN_INFO "CPU%u: Booted secondary processor\n", cpu);

        /*
         * All kernel threads share the same mm context; grab a
         * reference and switch to it.
         */
        atomic_inc(&mm->mm_users);
        atomic_inc(&mm->mm_count);
        current->active_mm = mm;
        cpumask_set_cpu(cpu, mm_cpumask(mm));
        cpu_switch_mm(mm->pgd, mm);
        enter_lazy_tlb(mm, current);
        local_flush_tlb_all();

        cpu_init();
        preempt_disable();

        /*
         * Give the platform a chance to do its own initialisation.
         */
        platform_secondary_init(cpu);

        /*
         * Notify the rest of the kernel, then enable local interrupts.
         */
        notify_cpu_starting(cpu);
        local_irq_enable();
        local_fiq_enable();

        /*
         * Set up the percpu timer for this CPU.
         */
        percpu_timer_setup();

        calibrate_delay();

        smp_store_cpu_info(cpu);

        /*
         * OK, now it's safe to let the boot CPU continue.
         */
        set_cpu_online(cpu, true);

        /*
         * OK, it's off to the idle thread for us.
         */
        cpu_idle();
}

/*
 * Called by both boot and secondaries to move global data into
 * per-processor storage.
 */
void __cpuinit smp_store_cpu_info(unsigned int cpuid)
{
        struct cpuinfo_arm *cpu_info = &per_cpu(cpu_data, cpuid);

        cpu_info->loops_per_jiffy = loops_per_jiffy;
}

void __init smp_cpus_done(unsigned int max_cpus)
{
        int cpu;
        unsigned long bogosum = 0;

        for_each_online_cpu(cpu)
                bogosum += per_cpu(cpu_data, cpu).loops_per_jiffy;

        printk(KERN_INFO "SMP: Total of %d processors activated "
               "(%lu.%02lu BogoMIPS).\n",
               num_online_cpus(),
               bogosum / (500000/HZ),
               (bogosum / (5000/HZ)) % 100);
}

void __init smp_prepare_boot_cpu(void)
{
        unsigned int cpu = smp_processor_id();

        per_cpu(cpu_data, cpu).idle = current;
}
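/*
 * IPI messages are single bits in the destination CPU's
 * ipi_data.bits, so repeated messages of one type coalesce into a
 * single cross-call.  For illustration: sending IPI_TIMER and then
 * IPI_RESCHEDULE to a CPU that has not yet taken the interrupt
 * leaves bits == (1 << IPI_TIMER) | (1 << IPI_RESCHEDULE), and one
 * pass through do_IPI() handles both.
 */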
static void send_ipi_message(const struct cpumask *mask, enum ipi_msg_type msg)
{
        unsigned long flags;
        unsigned int cpu;

        local_irq_save(flags);

        for_each_cpu(cpu, mask) {
                struct ipi_data *ipi = &per_cpu(ipi_data, cpu);

                spin_lock(&ipi->lock);
                ipi->bits |= 1 << msg;
                spin_unlock(&ipi->lock);
        }

        /*
         * Call the platform specific cross-CPU call function.
         */
        smp_cross_call(mask);

        local_irq_restore(flags);
}

void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
        send_ipi_message(mask, IPI_CALL_FUNC);
}

void arch_send_call_function_single_ipi(int cpu)
{
        send_ipi_message(cpumask_of(cpu), IPI_CALL_FUNC_SINGLE);
}

void show_ipi_list(struct seq_file *p)
{
        unsigned int cpu;

        seq_puts(p, "IPI:");

        for_each_present_cpu(cpu)
                seq_printf(p, " %10lu", per_cpu(ipi_data, cpu).ipi_count);

        seq_putc(p, '\n');
}

void show_local_irqs(struct seq_file *p)
{
        unsigned int cpu;

        seq_puts(p, "LOC: ");

        for_each_present_cpu(cpu)
                seq_printf(p, "%10u ", irq_stat[cpu].local_timer_irqs);

        seq_putc(p, '\n');
}

/*
 * Timer (local or broadcast) support
 */
static DEFINE_PER_CPU(struct clock_event_device, percpu_clockevent);

static void ipi_timer(void)
{
        struct clock_event_device *evt = &__get_cpu_var(percpu_clockevent);
#ifdef CONFIG_BCM47XX
        int cpu = smp_processor_id();
#endif

        irq_enter();
        evt->event_handler(evt);
#ifdef CONFIG_BCM47XX
        /* Broadcom addition: CPU0 services the SoC watchdog from its tick. */
        if (cpu == 0)
                soc_watchdog();
#endif
        irq_exit();
}

#ifdef CONFIG_LOCAL_TIMERS
asmlinkage void __exception do_local_timer(struct pt_regs *regs)
{
        struct pt_regs *old_regs = set_irq_regs(regs);
        int cpu = smp_processor_id();

        if (local_timer_ack()) {
                irq_stat[cpu].local_timer_irqs++;
                ipi_timer();
        }

        set_irq_regs(old_regs);
}
#endif

#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
static void smp_timer_broadcast(const struct cpumask *mask)
{
        send_ipi_message(mask, IPI_TIMER);
}
#else
#define smp_timer_broadcast     NULL
#endif

#ifndef CONFIG_LOCAL_TIMERS
static void broadcast_timer_set_mode(enum clock_event_mode mode,
        struct clock_event_device *evt)
{
}

static void local_timer_setup(struct clock_event_device *evt)
{
        evt->name       = "dummy_timer";
        evt->features   = CLOCK_EVT_FEAT_ONESHOT |
                          CLOCK_EVT_FEAT_PERIODIC |
                          CLOCK_EVT_FEAT_DUMMY;
        evt->rating     = 400;
        evt->mult       = 1;
        evt->set_mode   = broadcast_timer_set_mode;

        clockevents_register_device(evt);
}
#endif

void __cpuinit percpu_timer_setup(void)
{
        unsigned int cpu = smp_processor_id();
        struct clock_event_device *evt = &per_cpu(percpu_clockevent, cpu);

        evt->cpumask = cpumask_of(cpu);
        evt->broadcast = smp_timer_broadcast;

        local_timer_setup(evt);
}

static DEFINE_SPINLOCK(stop_lock);

/*
 * ipi_cpu_stop - handle IPI from smp_send_stop()
 */
static void ipi_cpu_stop(unsigned int cpu)
{
        if (system_state == SYSTEM_BOOTING ||
            system_state == SYSTEM_RUNNING) {
                spin_lock(&stop_lock);
                printk(KERN_CRIT "CPU%u: stopping\n", cpu);
                dump_stack();
                spin_unlock(&stop_lock);
        }

        set_cpu_online(cpu, false);

        local_fiq_disable();
        local_irq_disable();

        while (1)
                cpu_relax();
}
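/*
 * A note on the message-drain idiom in do_IPI() below: "msgs & -msgs"
 * isolates the lowest set bit, and "ffz(~x)" turns that bit into its
 * index.  For illustration, msgs == 0x5 is handled in two passes:
 * bit 0 (IPI_TIMER) first, then bit 2 (IPI_CALL_FUNC).
 */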
/*
 * Main handler for inter-processor interrupts
 *
 * For ARM, the ipimask now only identifies a single
 * category of IPI (Bit 1 IPIs have been replaced by a
 * different mechanism):
 *
 *  Bit 0 - Inter-processor function call
 */
asmlinkage void __exception do_IPI(struct pt_regs *regs)
{
        unsigned int cpu = smp_processor_id();
        struct ipi_data *ipi = &per_cpu(ipi_data, cpu);
        struct pt_regs *old_regs = set_irq_regs(regs);

        ipi->ipi_count++;

        for (;;) {
                unsigned long msgs;

                spin_lock(&ipi->lock);
                msgs = ipi->bits;
                ipi->bits = 0;
                spin_unlock(&ipi->lock);

                if (!msgs)
                        break;

                do {
                        unsigned nextmsg;

                        nextmsg = msgs & -msgs;
                        msgs &= ~nextmsg;
                        nextmsg = ffz(~nextmsg);

                        switch (nextmsg) {
                        case IPI_TIMER:
                                ipi_timer();
                                break;

                        case IPI_RESCHEDULE:
                                /*
                                 * nothing more to do - everything is
                                 * done on the interrupt return path
                                 */
                                break;

                        case IPI_CALL_FUNC:
                                generic_smp_call_function_interrupt();
                                break;

                        case IPI_CALL_FUNC_SINGLE:
                                generic_smp_call_function_single_interrupt();
                                break;

                        case IPI_CPU_STOP:
                                ipi_cpu_stop(cpu);
                                break;

                        default:
                                printk(KERN_CRIT "CPU%u: Unknown IPI message 0x%x\n",
                                       cpu, nextmsg);
                                break;
                        }
                } while (msgs);
        }

        set_irq_regs(old_regs);
}

void smp_send_reschedule(int cpu)
{
        send_ipi_message(cpumask_of(cpu), IPI_RESCHEDULE);
}

void smp_send_stop(void)
{
        cpumask_t mask = cpu_online_map;
        cpu_clear(smp_processor_id(), mask);
        send_ipi_message(&mask, IPI_CPU_STOP);
}

/*
 * not supported here
 */
int setup_profiling_timer(unsigned int multiplier)
{
        return -EINVAL;
}

static void
on_each_cpu_mask(void (*func)(void *), void *info, int wait,
                const struct cpumask *mask)
{
        preempt_disable();

        smp_call_function_many(mask, func, info, wait);
        if (cpumask_test_cpu(smp_processor_id(), mask))
                func(info);

        preempt_enable();
}

/**********************************************************************/

/*
 * TLB operations
 */
struct tlb_args {
        struct vm_area_struct *ta_vma;
        unsigned long ta_start;
        unsigned long ta_end;
};

static inline void ipi_flush_tlb_all(void *ignored)
{
        local_flush_tlb_all();
}

static inline void ipi_flush_tlb_mm(void *arg)
{
        struct mm_struct *mm = (struct mm_struct *)arg;

        local_flush_tlb_mm(mm);
}

static inline void ipi_flush_tlb_page(void *arg)
{
        struct tlb_args *ta = (struct tlb_args *)arg;

        local_flush_tlb_page(ta->ta_vma, ta->ta_start);
}

static inline void ipi_flush_tlb_kernel_page(void *arg)
{
        struct tlb_args *ta = (struct tlb_args *)arg;

        local_flush_tlb_kernel_page(ta->ta_start);
}

static inline void ipi_flush_tlb_range(void *arg)
{
        struct tlb_args *ta = (struct tlb_args *)arg;

        local_flush_tlb_range(ta->ta_vma, ta->ta_start, ta->ta_end);
}

static inline void ipi_flush_tlb_kernel_range(void *arg)
{
        struct tlb_args *ta = (struct tlb_args *)arg;

        local_flush_tlb_kernel_range(ta->ta_start, ta->ta_end);
}

void flush_tlb_all(void)
{
        if (tlb_ops_need_broadcast())
                on_each_cpu(ipi_flush_tlb_all, NULL, 1);
        else
                local_flush_tlb_all();
}

void flush_tlb_mm(struct mm_struct *mm)
{
        if (tlb_ops_need_broadcast())
                on_each_cpu_mask(ipi_flush_tlb_mm, mm, 1, mm_cpumask(mm));
        else
                local_flush_tlb_mm(mm);
}

void flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
{
        if (tlb_ops_need_broadcast()) {
                struct tlb_args ta;
                ta.ta_vma = vma;
                ta.ta_start = uaddr;
                on_each_cpu_mask(ipi_flush_tlb_page, &ta, 1, mm_cpumask(vma->vm_mm));
        } else
                local_flush_tlb_page(vma, uaddr);
}
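/*
 * The kernel-address flushes below use on_each_cpu() rather than
 * on_each_cpu_mask(): kernel mappings are global, so every online
 * CPU must take part, whereas the user-address flushes above can be
 * limited to the CPUs in the mm's mm_cpumask().
 */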
void flush_tlb_kernel_page(unsigned long kaddr)
{
        if (tlb_ops_need_broadcast()) {
                struct tlb_args ta;
                ta.ta_start = kaddr;
                on_each_cpu(ipi_flush_tlb_kernel_page, &ta, 1);
        } else
                local_flush_tlb_kernel_page(kaddr);
}

void flush_tlb_range(struct vm_area_struct *vma,
                     unsigned long start, unsigned long end)
{
        if (tlb_ops_need_broadcast()) {
                struct tlb_args ta;
                ta.ta_vma = vma;
                ta.ta_start = start;
                ta.ta_end = end;
                on_each_cpu_mask(ipi_flush_tlb_range, &ta, 1, mm_cpumask(vma->vm_mm));
        } else
                local_flush_tlb_range(vma, start, end);
}

void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
        if (tlb_ops_need_broadcast()) {
                struct tlb_args ta;
                ta.ta_start = start;
                ta.ta_end = end;
                on_each_cpu(ipi_flush_tlb_kernel_range, &ta, 1);
        } else
                local_flush_tlb_kernel_range(start, end);
}