/*
 *  linux/arch/arm/kernel/smp.c
 *
 *  Copyright (C) 2002 ARM Limited, All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/cache.h>
#include <linux/profile.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/cpu.h>
#include <linux/smp.h>
#include <linux/seq_file.h>
#include <linux/irq.h>

#include <asm/atomic.h>
#include <asm/cacheflush.h>
#include <asm/cpu.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/processor.h>
#include <asm/tlbflush.h>
#include <asm/ptrace.h>

/*
 * Bitmasks of possible and online CPUs.
 * The possible bitmask indicates that the CPU is physically present.
 * The online bitmask indicates that the CPU is up and running.
 */
cpumask_t cpu_possible_map;
EXPORT_SYMBOL(cpu_possible_map);
cpumask_t cpu_online_map;
EXPORT_SYMBOL(cpu_online_map);

/*
 * As from 2.5, kernels no longer have an init_tasks structure, so we
 * need some other way of telling a new secondary core where to place
 * its SVC stack.
 */
struct secondary_data secondary_data;

/*
 * Structures for inter-processor calls - a collection of single-bit
 * IPI messages.
 */
struct ipi_data {
	spinlock_t lock;
	unsigned long ipi_count;
	unsigned long bits;
};

static DEFINE_PER_CPU(struct ipi_data, ipi_data) = {
	.lock	= SPIN_LOCK_UNLOCKED,
};

enum ipi_msg_type {
	IPI_TIMER,
	IPI_RESCHEDULE,
	IPI_CALL_FUNC,
	IPI_CPU_STOP,
};

struct smp_call_struct {
	void (*func)(void *info);
	void *info;
	int wait;
	cpumask_t pending;
	cpumask_t unfinished;
};

static struct smp_call_struct * volatile smp_call_function_data;
static DEFINE_SPINLOCK(smp_call_function_lock);

int __cpuinit __cpu_up(unsigned int cpu)
{
	struct cpuinfo_arm *ci = &per_cpu(cpu_data, cpu);
	struct task_struct *idle = ci->idle;
	pgd_t *pgd;
	pmd_t *pmd;
	int ret;

	/*
	 * Spawn a new process manually, if not already done.
	 * Grab a pointer to its task struct so we can mess with it.
	 */
	if (!idle) {
		idle = fork_idle(cpu);
		if (IS_ERR(idle)) {
			printk(KERN_ERR "CPU%u: fork() failed\n", cpu);
			return PTR_ERR(idle);
		}
		ci->idle = idle;
	}

	/*
	 * Allocate initial page tables to allow the new CPU to
	 * enable the MMU safely.  This essentially means a set
	 * of our "standard" page tables, with the addition of
	 * a 1:1 mapping for the physical address of the kernel.
	 */
	pgd = pgd_alloc(&init_mm);
	pmd = pmd_offset(pgd, PHYS_OFFSET);
	*pmd = __pmd((PHYS_OFFSET & PGDIR_MASK) |
		     PMD_TYPE_SECT | PMD_SECT_AP_WRITE);

	/*
	 * We need to tell the secondary core where to find
	 * its stack and the page tables.
	 */
	secondary_data.stack = task_stack_page(idle) + THREAD_START_SP;
	secondary_data.pgdir = virt_to_phys(pgd);
	wmb();

	/*
	 * Now bring the CPU into our world.
	 */
	ret = boot_secondary(cpu, idle);
	if (ret == 0) {
		unsigned long timeout;

		/*
		 * CPU was successfully started, wait for it
		 * to come online or time out.
		 */
		timeout = jiffies + HZ;
		while (time_before(jiffies, timeout)) {
			if (cpu_online(cpu))
				break;

			udelay(10);
			barrier();
		}

		if (!cpu_online(cpu))
			ret = -EIO;
	}

	secondary_data.stack = NULL;
	secondary_data.pgdir = 0;

	*pmd_offset(pgd, PHYS_OFFSET) = __pmd(0);
	pgd_free(pgd);

	if (ret)
		printk(KERN_CRIT "CPU%u: processor failed to boot\n", cpu);

	return ret;
}
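/*
 * Editorial sketch of the boot handshake implemented by __cpu_up()
 * above (the secondary side lives in platform/head assembly, not in
 * this file): the boot CPU publishes the new core's stack and page
 * directory through secondary_data, wakes the core via the platform's
 * boot_secondary(), then polls cpu_online().  The secondary is
 * expected to do, in effect:
 *
 *	load the page directory from secondary_data.pgdir and
 *	enable the MMU;
 *	sp = secondary_data.stack;
 *	branch to secondary_start_kernel();
 *
 * secondary_start_kernel() eventually sets the CPU's bit in
 * cpu_online_map, which releases the polling loop above.  The wmb()
 * orders the secondary_data writes ahead of the wakeup.
 */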
#ifdef CONFIG_HOTPLUG_CPU
/*
 * __cpu_disable runs on the processor to be shutdown.
 */
int __cpuexit __cpu_disable(void)
{
	unsigned int cpu = smp_processor_id();
	struct task_struct *p;
	int ret;

	ret = mach_cpu_disable(cpu);
	if (ret)
		return ret;

	/*
	 * Take this CPU offline.  Once we clear this, we can't return,
	 * and we must not schedule until we're ready to give up the cpu.
	 */
	cpu_clear(cpu, cpu_online_map);

	/*
	 * OK - migrate IRQs away from this CPU
	 */
	migrate_irqs();

	/*
	 * Stop the local timer for this CPU.
	 */
	local_timer_stop(cpu);

	/*
	 * Flush user cache and TLB mappings, and then remove this CPU
	 * from the vm mask set of all processes.
	 */
	flush_cache_all();
	local_flush_tlb_all();

	read_lock(&tasklist_lock);
	for_each_process(p) {
		if (p->mm)
			cpu_clear(cpu, p->mm->cpu_vm_mask);
	}
	read_unlock(&tasklist_lock);

	return 0;
}

/*
 * Called on the thread which is asking for a CPU to be shutdown -
 * waits until shutdown has completed, or it is timed out.
 */
void __cpuexit __cpu_die(unsigned int cpu)
{
	if (!platform_cpu_kill(cpu))
		printk(KERN_ERR "CPU%u: unable to kill\n", cpu);
}

/*
 * Called from the idle thread for the CPU which has been shutdown.
 *
 * Note that we disable IRQs here, but do not re-enable them
 * before returning to the caller.  This is also the behaviour
 * of the other hotplug-cpu capable cores, so presumably coming
 * out of idle fixes this.
 */
void __cpuexit cpu_die(void)
{
	unsigned int cpu = smp_processor_id();

	local_irq_disable();
	idle_task_exit();

	/*
	 * The actual CPU shutdown procedure is at least platform (if not
	 * CPU) specific.
	 */
	platform_cpu_die(cpu);

	/*
	 * Do not return to the idle loop - jump back to the secondary
	 * cpu initialisation.  There's some initialisation which needs
	 * to be repeated to undo the effects of taking the CPU offline.
	 */
	__asm__("mov	sp, %0\n"
	"	b	secondary_start_kernel"
		:
		: "r" (task_stack_page(current) + THREAD_SIZE - 8));
}
#endif /* CONFIG_HOTPLUG_CPU */
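/*
 * Illustrative only - a minimal platform_cpu_die() implementation
 * (hypothetical platform code, not part of this file) typically
 * parks the core in a low-power wait-for-interrupt loop, roughly:
 *
 *	void platform_cpu_die(unsigned int cpu)
 *	{
 *		flush_cache_all();
 *		while (1)
 *			cpu_do_idle();	- wait for an interrupt
 *	}
 *
 * If the platform later re-wakes the core, platform_cpu_die()
 * returns and cpu_die() above restarts it through
 * secondary_start_kernel() on a fresh stack.
 */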
/*
 * This is the secondary CPU boot entry.  We're using this CPU's
 * idle thread stack, but a set of temporary page tables.
 */
asmlinkage void __cpuinit secondary_start_kernel(void)
{
	struct mm_struct *mm = &init_mm;
	unsigned int cpu = smp_processor_id();

	printk(KERN_INFO "CPU%u: Booted secondary processor\n", cpu);

	/*
	 * All kernel threads share the same mm context; grab a
	 * reference and switch to it.
	 */
	atomic_inc(&mm->mm_users);
	atomic_inc(&mm->mm_count);
	current->active_mm = mm;
	cpu_set(cpu, mm->cpu_vm_mask);
	cpu_switch_mm(mm->pgd, mm);
	enter_lazy_tlb(mm, current);
	local_flush_tlb_all();

	cpu_init();
	preempt_disable();

	/*
	 * Give the platform a chance to do its own initialisation.
	 */
	platform_secondary_init(cpu);

	/*
	 * Enable local interrupts.
	 */
	local_irq_enable();
	local_fiq_enable();

	calibrate_delay();

	smp_store_cpu_info(cpu);

	/*
	 * OK, now it's safe to let the boot CPU continue.
	 */
	cpu_set(cpu, cpu_online_map);

	/*
	 * Set up the local timer for this CPU.
	 */
	local_timer_setup(cpu);

	/*
	 * OK, it's off to the idle thread for us.
	 */
	cpu_idle();
}

/*
 * Called by both boot and secondaries to move global data into
 * per-processor storage.
 */
void __cpuinit smp_store_cpu_info(unsigned int cpuid)
{
	struct cpuinfo_arm *cpu_info = &per_cpu(cpu_data, cpuid);

	cpu_info->loops_per_jiffy = loops_per_jiffy;
}

void __init smp_cpus_done(unsigned int max_cpus)
{
	int cpu;
	unsigned long bogosum = 0;

	for_each_online_cpu(cpu)
		bogosum += per_cpu(cpu_data, cpu).loops_per_jiffy;

	printk(KERN_INFO "SMP: Total of %d processors activated "
	       "(%lu.%02lu BogoMIPS).\n",
	       num_online_cpus(),
	       bogosum / (500000/HZ),
	       (bogosum / (5000/HZ)) % 100);
}

void __init smp_prepare_boot_cpu(void)
{
	unsigned int cpu = smp_processor_id();

	per_cpu(cpu_data, cpu).idle = current;
}

static void send_ipi_message(cpumask_t callmap, enum ipi_msg_type msg)
{
	unsigned long flags;
	unsigned int cpu;

	local_irq_save(flags);

	for_each_cpu_mask(cpu, callmap) {
		struct ipi_data *ipi = &per_cpu(ipi_data, cpu);

		spin_lock(&ipi->lock);
		ipi->bits |= 1 << msg;
		spin_unlock(&ipi->lock);
	}

	/*
	 * Call the platform specific cross-CPU call function.
	 */
	smp_cross_call(callmap);

	local_irq_restore(flags);
}

/*
 * You must not call this function with interrupts disabled, from a
 * hardware interrupt handler, or from a bottom half handler.
 */
static int smp_call_function_on_cpu(void (*func)(void *info), void *info,
				    int retry, int wait, cpumask_t callmap)
{
	struct smp_call_struct data;
	unsigned long timeout;
	int ret = 0;

	data.func = func;
	data.info = info;
	data.wait = wait;

	cpu_clear(smp_processor_id(), callmap);
	if (cpus_empty(callmap))
		goto out;

	data.pending = callmap;
	if (wait)
		data.unfinished = callmap;

	/*
	 * Try to get the mutex on smp_call_function_data.
	 */
	spin_lock(&smp_call_function_lock);
	smp_call_function_data = &data;

	send_ipi_message(callmap, IPI_CALL_FUNC);

	timeout = jiffies + HZ;
	while (!cpus_empty(data.pending) && time_before(jiffies, timeout))
		barrier();

	/*
	 * Did we time out?
	 */
	if (!cpus_empty(data.pending)) {
		/*
		 * This may be causing our panic - report it.
		 */
		printk(KERN_CRIT
		       "CPU%u: smp_call_function timeout for %p(%p)\n"
		       "      callmap %lx pending %lx, %swait\n",
		       smp_processor_id(), func, info, *cpus_addr(callmap),
		       *cpus_addr(data.pending), wait ? "" : "no ");

		/*
		 * TRACE
		 */
		timeout = jiffies + (5 * HZ);
		while (!cpus_empty(data.pending) && time_before(jiffies, timeout))
			barrier();

		if (cpus_empty(data.pending))
			printk(KERN_CRIT "     RESOLVED\n");
		else
			printk(KERN_CRIT "     STILL STUCK\n");
	}

	/*
	 * Whatever happened, we're done with the data, so release it.
	 */
	smp_call_function_data = NULL;
	spin_unlock(&smp_call_function_lock);

	if (!cpus_empty(data.pending)) {
		ret = -ETIMEDOUT;
		goto out;
	}

	if (wait)
		while (!cpus_empty(data.unfinished))
			barrier();
 out:
	return ret;
}

int smp_call_function(void (*func)(void *info), void *info, int retry,
		      int wait)
{
	return smp_call_function_on_cpu(func, info, retry, wait,
					cpu_online_map);
}
EXPORT_SYMBOL_GPL(smp_call_function);
void show_ipi_list(struct seq_file *p)
{
	unsigned int cpu;

	seq_puts(p, "IPI:");

	for_each_present_cpu(cpu)
		seq_printf(p, " %10lu", per_cpu(ipi_data, cpu).ipi_count);

	seq_putc(p, '\n');
}

void show_local_irqs(struct seq_file *p)
{
	unsigned int cpu;

	seq_puts(p, "LOC: ");

	for_each_present_cpu(cpu)
		seq_printf(p, "%10u ", irq_stat[cpu].local_timer_irqs);

	seq_putc(p, '\n');
}

static void ipi_timer(void)
{
	irq_enter();
	profile_tick(CPU_PROFILING);
	update_process_times(user_mode(get_irq_regs()));
	irq_exit();
}

#ifdef CONFIG_LOCAL_TIMERS
asmlinkage void __exception do_local_timer(struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);
	int cpu = smp_processor_id();

	if (local_timer_ack()) {
		irq_stat[cpu].local_timer_irqs++;
		ipi_timer();
	}

	set_irq_regs(old_regs);
}
#endif

/*
 * ipi_call_function - handle IPI from smp_call_function()
 *
 * Note that we copy data out of the cross-call structure and then
 * let the caller know that we're here and have done with their data.
 */
static void ipi_call_function(unsigned int cpu)
{
	struct smp_call_struct *data = smp_call_function_data;
	void (*func)(void *info) = data->func;
	void *info = data->info;
	int wait = data->wait;

	cpu_clear(cpu, data->pending);

	func(info);

	if (wait)
		cpu_clear(cpu, data->unfinished);
}

static DEFINE_SPINLOCK(stop_lock);

/*
 * ipi_cpu_stop - handle IPI from smp_send_stop()
 */
static void ipi_cpu_stop(unsigned int cpu)
{
	spin_lock(&stop_lock);
	printk(KERN_CRIT "CPU%u: stopping\n", cpu);
	dump_stack();
	spin_unlock(&stop_lock);

	cpu_clear(cpu, cpu_online_map);

	local_fiq_disable();
	local_irq_disable();

	while (1)
		cpu_relax();
}

/*
 * Main handler for inter-processor interrupts
 *
 * For ARM, the ipimask now only identifies a single
 * category of IPI (Bit 1 IPIs have been replaced by a
 * different mechanism):
 *
 *  Bit 0 - Inter-processor function call
 */
asmlinkage void __exception do_IPI(struct pt_regs *regs)
{
	unsigned int cpu = smp_processor_id();
	struct ipi_data *ipi = &per_cpu(ipi_data, cpu);
	struct pt_regs *old_regs = set_irq_regs(regs);

	ipi->ipi_count++;

	for (;;) {
		unsigned long msgs;

		spin_lock(&ipi->lock);
		msgs = ipi->bits;
		ipi->bits = 0;
		spin_unlock(&ipi->lock);

		if (!msgs)
			break;

		do {
			unsigned nextmsg;

			/* isolate the lowest pending message bit... */
			nextmsg = msgs & -msgs;
			msgs &= ~nextmsg;
			/* ...and convert that bit to a message number */
			nextmsg = ffz(~nextmsg);

			switch (nextmsg) {
			case IPI_TIMER:
				ipi_timer();
				break;

			case IPI_RESCHEDULE:
				/*
				 * Nothing more to do - everything is
				 * done on the interrupt return path.
				 */
				break;

			case IPI_CALL_FUNC:
				ipi_call_function(cpu);
				break;

			case IPI_CPU_STOP:
				ipi_cpu_stop(cpu);
				break;

			default:
				printk(KERN_CRIT "CPU%u: Unknown IPI message 0x%x\n",
				       cpu, nextmsg);
				break;
			}
		} while (msgs);
	}

	set_irq_regs(old_regs);
}

void smp_send_reschedule(int cpu)
{
	send_ipi_message(cpumask_of_cpu(cpu), IPI_RESCHEDULE);
}

void smp_send_timer(void)
{
	cpumask_t mask = cpu_online_map;
	cpu_clear(smp_processor_id(), mask);
	send_ipi_message(mask, IPI_TIMER);
}

void smp_send_stop(void)
{
	cpumask_t mask = cpu_online_map;
	cpu_clear(smp_processor_id(), mask);
	send_ipi_message(mask, IPI_CPU_STOP);
}

/*
 * not supported here
 */
int __init setup_profiling_timer(unsigned int multiplier)
{
	return -EINVAL;
}

static int
on_each_cpu_mask(void (*func)(void *), void *info, int retry, int wait,
		 cpumask_t mask)
{
	int ret = 0;

	preempt_disable();

	ret = smp_call_function_on_cpu(func, info, retry, wait, mask);
	if (cpu_isset(smp_processor_id(), mask))
		func(info);

	preempt_enable();

	return ret;
}
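/*
 * Editorial note on the cross-call interface above.
 * smp_call_function() never runs func on the calling CPU - it is
 * cleared from the map - so callers wanting all-CPU coverage must
 * make the local call themselves, e.g. (illustrative only;
 * "do_flush" is a made-up example function, not part of this file):
 *
 *	static void do_flush(void *info)
 *	{
 *		local_flush_tlb_all();
 *	}
 *
 *	smp_call_function(do_flush, NULL, 1, 1);
 *	local_flush_tlb_all();	- cover the calling CPU too
 *
 * on_each_cpu_mask() above (like generic on_each_cpu()) handles that
 * case itself: it cross-calls the remote CPUs in the mask and invokes
 * func locally when the calling CPU is also in the mask.  The TLB
 * wrappers below rely on exactly this pattern.
 */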
/**********************************************************************/

/*
 * TLB operations
 */
struct tlb_args {
	struct vm_area_struct *ta_vma;
	unsigned long ta_start;
	unsigned long ta_end;
};

static inline void ipi_flush_tlb_all(void *ignored)
{
	local_flush_tlb_all();
}

static inline void ipi_flush_tlb_mm(void *arg)
{
	struct mm_struct *mm = (struct mm_struct *)arg;

	local_flush_tlb_mm(mm);
}

static inline void ipi_flush_tlb_page(void *arg)
{
	struct tlb_args *ta = (struct tlb_args *)arg;

	local_flush_tlb_page(ta->ta_vma, ta->ta_start);
}

static inline void ipi_flush_tlb_kernel_page(void *arg)
{
	struct tlb_args *ta = (struct tlb_args *)arg;

	local_flush_tlb_kernel_page(ta->ta_start);
}

static inline void ipi_flush_tlb_range(void *arg)
{
	struct tlb_args *ta = (struct tlb_args *)arg;

	local_flush_tlb_range(ta->ta_vma, ta->ta_start, ta->ta_end);
}

static inline void ipi_flush_tlb_kernel_range(void *arg)
{
	struct tlb_args *ta = (struct tlb_args *)arg;

	local_flush_tlb_kernel_range(ta->ta_start, ta->ta_end);
}

void flush_tlb_all(void)
{
	on_each_cpu(ipi_flush_tlb_all, NULL, 1, 1);
}

void flush_tlb_mm(struct mm_struct *mm)
{
	cpumask_t mask = mm->cpu_vm_mask;

	on_each_cpu_mask(ipi_flush_tlb_mm, mm, 1, 1, mask);
}

void flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
{
	cpumask_t mask = vma->vm_mm->cpu_vm_mask;
	struct tlb_args ta;

	ta.ta_vma = vma;
	ta.ta_start = uaddr;

	on_each_cpu_mask(ipi_flush_tlb_page, &ta, 1, 1, mask);
}

void flush_tlb_kernel_page(unsigned long kaddr)
{
	struct tlb_args ta;

	ta.ta_start = kaddr;

	on_each_cpu(ipi_flush_tlb_kernel_page, &ta, 1, 1);
}

void flush_tlb_range(struct vm_area_struct *vma,
		     unsigned long start, unsigned long end)
{
	cpumask_t mask = vma->vm_mm->cpu_vm_mask;
	struct tlb_args ta;

	ta.ta_vma = vma;
	ta.ta_start = start;
	ta.ta_end = end;

	on_each_cpu_mask(ipi_flush_tlb_range, &ta, 1, 1, mask);
}

void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	struct tlb_args ta;

	ta.ta_start = start;
	ta.ta_end = end;

	on_each_cpu(ipi_flush_tlb_kernel_range, &ta, 1, 1);
}
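/*
 * Illustrative fan-out (editorial note): flush_tlb_page() called on
 * CPU0 for a vma whose mm has cpu_vm_mask = {0, 2} runs
 * ipi_flush_tlb_page() directly on CPU0 and sends IPI_CALL_FUNC to
 * CPU2, whose do_IPI() handler invokes the same helper.  Passing the
 * tlb_args structure on the caller's stack is safe because
 * on_each_cpu_mask() is called with wait == 1, so it does not return
 * until every remote handler has finished with the data.
 */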