/*
 *  arch/s390/kernel/smp.c
 *
 *    Copyright IBM Corp. 1999,2007
 *    Author(s): Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com),
 *		 Martin Schwidefsky (schwidefsky@de.ibm.com)
 *		 Heiko Carstens (heiko.carstens@de.ibm.com)
 *
 *  based on other smp stuff by
 *    (c) 1995 Alan Cox, CymruNET Ltd  <alan@cymru.net>
 *    (c) 1998 Ingo Molnar
 *
 * We work with logical cpu numbering everywhere we can. The only
 * functions using the real cpu address (obtained from STAP) are the sigp
 * functions. For all other functions we use the identity mapping.
 * That means that cpu_number_map[i] == i for every cpu. cpu_number_map is
 * used e.g. to find the idle task belonging to a logical cpu. Every array
 * in the kernel is sorted by the logical cpu number and not by the
 * physical one, which causes all the confusion with __cpu_logical_map and
 * cpu_number_map on other architectures.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/kernel_stat.h>
#include <linux/delay.h>
#include <linux/cache.h>
#include <linux/interrupt.h>
#include <linux/cpu.h>
#include <linux/timex.h>
#include <linux/bootmem.h>
#include <asm/ipl.h>
#include <asm/setup.h>
#include <asm/sigp.h>
#include <asm/pgalloc.h>
#include <asm/irq.h>
#include <asm/s390_ext.h>
#include <asm/cpcmd.h>
#include <asm/tlbflush.h>
#include <asm/timer.h>
#include <asm/lowcore.h>

/*
 * An array with a pointer to the lowcore of every CPU.
 */
struct _lowcore *lowcore_ptr[NR_CPUS];
EXPORT_SYMBOL(lowcore_ptr);

cpumask_t cpu_online_map = CPU_MASK_NONE;
EXPORT_SYMBOL(cpu_online_map);

cpumask_t cpu_possible_map = CPU_MASK_NONE;
EXPORT_SYMBOL(cpu_possible_map);

static struct task_struct *current_set[NR_CPUS];

static void smp_ext_bitcall(int, ec_bit_sig);

/*
 * Structure and data for __smp_call_function_map(). This is designed to
 * minimise static memory requirements. It also looks cleaner.
 */
static DEFINE_SPINLOCK(call_lock);

struct call_data_struct {
	void (*func) (void *info);
	void *info;
	cpumask_t started;
	cpumask_t finished;
	int wait;
};

static struct call_data_struct *call_data;

/*
 * 'Call function' interrupt callback
 */
static void do_call_function(void)
{
	void (*func) (void *info) = call_data->func;
	void *info = call_data->info;
	int wait = call_data->wait;

	cpu_set(smp_processor_id(), call_data->started);
	(*func)(info);
	if (wait)
		cpu_set(smp_processor_id(), call_data->finished);
}
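/*
 * Illustrative sketch only (not part of this file's interface): a
 * function handed to the cross-call machinery below must be fast and
 * non-blocking, since every target CPU runs it from the external
 * interrupt path via do_call_function() above.  count_myself() and
 * hits are made-up names used only for this example:
 *
 *	static void count_myself(void *info)
 *	{
 *		atomic_inc((atomic_t *) info);
 *	}
 *
 *	atomic_t hits = ATOMIC_INIT(0);
 *
 *	smp_call_function(count_myself, &hits, 0, 1);
 *
 * With wait == 1 the caller spins until every target CPU has run the
 * function; afterwards hits holds the number of other online CPUs.
 */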
static void __smp_call_function_map(void (*func) (void *info), void *info,
				    int nonatomic, int wait, cpumask_t map)
{
	struct call_data_struct data;
	int cpu, local = 0;

	/*
	 * Can deadlock when interrupts are disabled or if in wrong context.
	 */
	WARN_ON(irqs_disabled() || in_irq());

	/*
	 * Check for local function call. We have to have the same call order
	 * as in on_each_cpu() because of machine_restart_smp().
	 */
	if (cpu_isset(smp_processor_id(), map)) {
		local = 1;
		cpu_clear(smp_processor_id(), map);
	}

	cpus_and(map, map, cpu_online_map);
	if (cpus_empty(map))
		goto out;

	data.func = func;
	data.info = info;
	data.started = CPU_MASK_NONE;
	data.wait = wait;
	if (wait)
		data.finished = CPU_MASK_NONE;

	spin_lock_bh(&call_lock);
	call_data = &data;

	for_each_cpu_mask(cpu, map)
		smp_ext_bitcall(cpu, ec_call_function);

	/* Wait for response */
	while (!cpus_equal(map, data.started))
		cpu_relax();

	if (wait)
		while (!cpus_equal(map, data.finished))
			cpu_relax();

	spin_unlock_bh(&call_lock);

out:
	local_irq_disable();
	if (local)
		func(info);
	local_irq_enable();
}

/*
 * smp_call_function:
 * @func: the function to run; this must be fast and non-blocking
 * @info: an arbitrary pointer to pass to the function
 * @nonatomic: unused
 * @wait: if true, wait (atomically) until function has completed on other CPUs
 *
 * Run a function on all other CPUs.
 *
 * You must not call this function with disabled interrupts, from a
 * hardware interrupt handler or from a bottom half.
 */
int smp_call_function(void (*func) (void *info), void *info, int nonatomic,
		      int wait)
{
	cpumask_t map;

	preempt_disable();
	map = cpu_online_map;
	cpu_clear(smp_processor_id(), map);
	__smp_call_function_map(func, info, nonatomic, wait, map);
	preempt_enable();
	return 0;
}
EXPORT_SYMBOL(smp_call_function);

/*
 * smp_call_function_on:
 * @func: the function to run; this must be fast and non-blocking
 * @info: an arbitrary pointer to pass to the function
 * @nonatomic: unused
 * @wait: if true, wait (atomically) until function has completed on other CPUs
 * @cpu: the CPU where func should run
 *
 * Run a function on one processor.
 *
 * You must not call this function with disabled interrupts, from a
 * hardware interrupt handler or from a bottom half.
 */
int smp_call_function_on(void (*func) (void *info), void *info, int nonatomic,
			 int wait, int cpu)
{
	cpumask_t map = CPU_MASK_NONE;

	preempt_disable();
	cpu_set(cpu, map);
	__smp_call_function_map(func, info, nonatomic, wait, map);
	preempt_enable();
	return 0;
}
EXPORT_SYMBOL(smp_call_function_on);

static void do_send_stop(void)
{
	int cpu, rc;

	/* stop all processors */
	for_each_online_cpu(cpu) {
		if (cpu == smp_processor_id())
			continue;
		do {
			rc = signal_processor(cpu, sigp_stop);
		} while (rc == sigp_busy);
	}
}

static void do_store_status(void)
{
	int cpu, rc;

	/* store status of all processors in their lowcores (real 0) */
	for_each_online_cpu(cpu) {
		if (cpu == smp_processor_id())
			continue;
		do {
			rc = signal_processor_p(
				(__u32)(unsigned long) lowcore_ptr[cpu], cpu,
				sigp_store_status_at_address);
		} while (rc == sigp_busy);
	}
}

static void do_wait_for_stop(void)
{
	int cpu;

	/* Wait for all other cpus to enter stopped state */
	for_each_online_cpu(cpu) {
		if (cpu == smp_processor_id())
			continue;
		while (!smp_cpu_not_running(cpu))
			cpu_relax();
	}
}
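/*
 * do_send_stop() and do_store_status() above both use the same sigp
 * retry idiom: a sigp order is rejected with condition code sigp_busy
 * while the target CPU is still processing a previous order, so the
 * order is simply re-issued until it is accepted.  A minimal sketch of
 * the pattern, shown here with sigp_stop (any order code works the
 * same way):
 *
 *	while (signal_processor(cpu, sigp_stop) == sigp_busy)
 *		cpu_relax();
 */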
/*
 * this function sends a 'stop' sigp to all other CPUs in the system.
 * it goes straight through.
 */
void smp_send_stop(void)
{
	/* Disable all interrupts/machine checks */
	__load_psw_mask(psw_kernel_bits & ~PSW_MASK_MCHECK);

	/* write magic number to zero page (absolute 0) */
	lowcore_ptr[smp_processor_id()]->panic_magic = __PANIC_MAGIC;

	/* stop other processors. */
	do_send_stop();

	/* wait until other processors are stopped */
	do_wait_for_stop();

	/* store status of other processors. */
	do_store_status();
}

/*
 * Reboot, halt and power_off routines for SMP.
 */
void machine_restart_smp(char *__unused)
{
	smp_send_stop();
	do_reipl();
}

void machine_halt_smp(void)
{
	smp_send_stop();
	if (MACHINE_IS_VM && strlen(vmhalt_cmd) > 0)
		__cpcmd(vmhalt_cmd, NULL, 0, NULL);
	signal_processor(smp_processor_id(), sigp_stop_and_store_status);
	for (;;);
}

void machine_power_off_smp(void)
{
	smp_send_stop();
	if (MACHINE_IS_VM && strlen(vmpoff_cmd) > 0)
		__cpcmd(vmpoff_cmd, NULL, 0, NULL);
	signal_processor(smp_processor_id(), sigp_stop_and_store_status);
	for (;;);
}

/*
 * This is the main routine where commands issued by other
 * cpus are handled.
 */

static void do_ext_call_interrupt(__u16 code)
{
	unsigned long bits;

	/*
	 * handle bit signal external calls
	 *
	 * For the ec_schedule signal we have to do nothing. All the work
	 * is done automatically when we return from the interrupt.
	 */
	bits = xchg(&S390_lowcore.ext_call_fast, 0);

	if (test_bit(ec_call_function, &bits))
		do_call_function();
}

/*
 * Send an external call sigp to another cpu and return without waiting
 * for its completion.
 */
static void smp_ext_bitcall(int cpu, ec_bit_sig sig)
{
	/*
	 * Set signaling bit in lowcore of target cpu and kick it
	 */
	set_bit(sig, (unsigned long *) &lowcore_ptr[cpu]->ext_call_fast);
	while (signal_processor(cpu, sigp_emergency_signal) == sigp_busy)
		udelay(10);
}

#ifndef CONFIG_64BIT
/*
 * this function sends a 'purge tlb' signal to another CPU.
 */
void smp_ptlb_callback(void *info)
{
	local_flush_tlb();
}

void smp_ptlb_all(void)
{
	on_each_cpu(smp_ptlb_callback, NULL, 0, 1);
}
EXPORT_SYMBOL(smp_ptlb_all);
#endif /* ! CONFIG_64BIT */
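/*
 * Illustration only: a new bit signal would be wired up by adding an
 * entry to the ec_bit_sig enum, raising it with smp_ext_bitcall(), and
 * testing for it in do_ext_call_interrupt() above.  ec_frobnicate and
 * do_frobnicate() are hypothetical names used for this sketch:
 *
 *	smp_ext_bitcall(cpu, ec_frobnicate);	(sender side)
 *
 *	if (test_bit(ec_frobnicate, &bits))	(receiver side)
 *		do_frobnicate();
 */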
/*
 * this function sends a 'reschedule' IPI to another CPU.
 * it goes straight through and wastes no time serializing
 * anything. Worst case is that we lose a reschedule ...
 */
void smp_send_reschedule(int cpu)
{
	smp_ext_bitcall(cpu, ec_schedule);
}

/*
 * parameter area for the set/clear control bit callbacks
 */
struct ec_creg_mask_parms {
	unsigned long orvals[16];
	unsigned long andvals[16];
};

/*
 * callback for setting/clearing control bits
 */
static void smp_ctl_bit_callback(void *info)
{
	struct ec_creg_mask_parms *pp = info;
	unsigned long cregs[16];
	int i;

	__ctl_store(cregs, 0, 15);
	for (i = 0; i <= 15; i++)
		cregs[i] = (cregs[i] & pp->andvals[i]) | pp->orvals[i];
	__ctl_load(cregs, 0, 15);
}

/*
 * Set a bit in a control register of all cpus
 */
void smp_ctl_set_bit(int cr, int bit)
{
	struct ec_creg_mask_parms parms;

	memset(&parms.orvals, 0, sizeof(parms.orvals));
	memset(&parms.andvals, 0xff, sizeof(parms.andvals));
	parms.orvals[cr] = 1 << bit;
	on_each_cpu(smp_ctl_bit_callback, &parms, 0, 1);
}
EXPORT_SYMBOL(smp_ctl_set_bit);

/*
 * Clear a bit in a control register of all cpus
 */
void smp_ctl_clear_bit(int cr, int bit)
{
	struct ec_creg_mask_parms parms;

	memset(&parms.orvals, 0, sizeof(parms.orvals));
	memset(&parms.andvals, 0xff, sizeof(parms.andvals));
	parms.andvals[cr] = ~(1L << bit);
	on_each_cpu(smp_ctl_bit_callback, &parms, 0, 1);
}
EXPORT_SYMBOL(smp_ctl_clear_bit);

#if defined(CONFIG_ZFCPDUMP) || defined(CONFIG_ZFCPDUMP_MODULE)

/*
 * zfcpdump_prefix_array holds prefix registers for the following scenario:
 * 64 bit zfcpdump kernel and 31 bit kernel which is to be dumped. We have to
 * save its prefix registers, since they get lost when switching from 31 bit
 * to 64 bit.
 */
unsigned int zfcpdump_prefix_array[NR_CPUS + 1] \
	__attribute__((__section__(".data")));

static void __init smp_get_save_areas(void)
{
	unsigned int cpu, cpu_num, rc;
	__u16 boot_cpu_addr;

	if (ipl_info.type != IPL_TYPE_FCP_DUMP)
		return;
	boot_cpu_addr = S390_lowcore.cpu_data.cpu_addr;
	cpu_num = 1;
	for (cpu = 0; cpu <= 65535; cpu++) {
		if ((u16) cpu == boot_cpu_addr)
			continue;
		__cpu_logical_map[1] = (__u16) cpu;
		if (signal_processor(1, sigp_sense) == sigp_not_operational)
			continue;
		if (cpu_num >= NR_CPUS) {
			printk("WARNING: Registers for cpu %i are not "
			       "saved, since dump kernel was compiled with "
			       "NR_CPUS=%i!\n", cpu_num, NR_CPUS);
			continue;
		}
		zfcpdump_save_areas[cpu_num] =
			alloc_bootmem(sizeof(union save_area));
		while (1) {
			rc = signal_processor(1, sigp_stop_and_store_status);
			if (rc != sigp_busy)
				break;
			cpu_relax();
		}
		memcpy(zfcpdump_save_areas[cpu_num],
		       (void *)(unsigned long) store_prefix() +
		       SAVE_AREA_BASE, SAVE_AREA_SIZE);
#ifdef __s390x__
		/* copy original prefix register */
		zfcpdump_save_areas[cpu_num]->s390x.pref_reg =
			zfcpdump_prefix_array[cpu_num];
#endif
		cpu_num++;
	}
}

union save_area *zfcpdump_save_areas[NR_CPUS + 1];
EXPORT_SYMBOL_GPL(zfcpdump_save_areas);

#else
#define smp_get_save_areas() do { } while (0)
#endif
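/*
 * Sketch of the probe idiom used by smp_get_save_areas() above and
 * smp_count_cpus() below: logical CPU slot 1 is temporarily repointed
 * at each possible 16-bit physical CPU address in turn, and a
 * sigp_sense order reveals whether a CPU answers at that address:
 *
 *	__cpu_logical_map[1] = (__u16) cpu;
 *	if (signal_processor(1, sigp_sense) == sigp_not_operational)
 *		continue;	(no CPU at this address)
 */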
/*
 * Let's check how many CPUs we have.
 */

static unsigned int __init smp_count_cpus(void)
{
	unsigned int cpu, num_cpus;
	__u16 boot_cpu_addr;

	/*
	 * cpu 0 is the boot cpu. See smp_prepare_boot_cpu.
	 */

	boot_cpu_addr = S390_lowcore.cpu_data.cpu_addr;
	current_thread_info()->cpu = 0;
	num_cpus = 1;
	for (cpu = 0; cpu <= 65535; cpu++) {
		if ((__u16) cpu == boot_cpu_addr)
			continue;
		__cpu_logical_map[1] = (__u16) cpu;
		if (signal_processor(1, sigp_sense) == sigp_not_operational)
			continue;
		num_cpus++;
	}

	printk("Detected %d CPUs\n", (int) num_cpus);
	printk("Boot cpu address %2X\n", boot_cpu_addr);

	return num_cpus;
}

/*
 * Activate a secondary processor.
 */
int __cpuinit start_secondary(void *cpuvoid)
{
	/* Setup the cpu */
	cpu_init();
	preempt_disable();
	/* Enable TOD clock interrupts on the secondary cpu. */
	init_cpu_timer();
#ifdef CONFIG_VIRT_TIMER
	/* Enable cpu timer interrupts on the secondary cpu. */
	init_cpu_vtimer();
#endif
	/* Enable pfault pseudo page faults on this cpu. */
	pfault_init();

	/* Mark this cpu as online */
	cpu_set(smp_processor_id(), cpu_online_map);
	/* Switch on interrupts */
	local_irq_enable();
	/* Print info about this processor */
	print_cpu_info(&S390_lowcore.cpu_data);
	/* cpu_idle will call schedule for us */
	cpu_idle();
	return 0;
}

static void __init smp_create_idle(unsigned int cpu)
{
	struct task_struct *p;

	/*
	 * don't care about the psw and regs settings since we'll never
	 * reschedule the forked task.
	 */
	p = fork_idle(cpu);
	if (IS_ERR(p))
		panic("failed fork for CPU %u: %li", cpu, PTR_ERR(p));
	current_set[cpu] = p;
}

static int cpu_stopped(int cpu)
{
	__u32 status;

	/* Check for stopped state */
	if (signal_processor_ps(&status, 0, cpu, sigp_sense) ==
	    sigp_status_stored) {
		if (status & 0x40)
			return 1;
	}
	return 0;
}

/* Upping and downing of CPUs */

int __cpu_up(unsigned int cpu)
{
	struct task_struct *idle;
	struct _lowcore *cpu_lowcore;
	struct stack_frame *sf;
	sigp_ccode ccode;
	int curr_cpu;

	for (curr_cpu = 0; curr_cpu <= 65535; curr_cpu++) {
		__cpu_logical_map[cpu] = (__u16) curr_cpu;
		if (cpu_stopped(cpu))
			break;
	}

	if (!cpu_stopped(cpu))
		return -ENODEV;

	ccode = signal_processor_p((__u32)(unsigned long)(lowcore_ptr[cpu]),
				   cpu, sigp_set_prefix);
	if (ccode) {
		printk("sigp_set_prefix failed for cpu %d "
		       "with condition code %d\n",
		       (int) cpu, (int) ccode);
		return -EIO;
	}

	idle = current_set[cpu];
	cpu_lowcore = lowcore_ptr[cpu];
	cpu_lowcore->kernel_stack = (unsigned long)
		task_stack_page(idle) + THREAD_SIZE;
	sf = (struct stack_frame *) (cpu_lowcore->kernel_stack
				     - sizeof(struct pt_regs)
				     - sizeof(struct stack_frame));
	memset(sf, 0, sizeof(struct stack_frame));
	sf->gprs[9] = (unsigned long) sf;
	cpu_lowcore->save_area[15] = (unsigned long) sf;
	__ctl_store(cpu_lowcore->cregs_save_area[0], 0, 15);
	asm volatile(
		"	stam	0,15,0(%0)"
		: : "a" (&cpu_lowcore->access_regs_save_area) : "memory");
	cpu_lowcore->percpu_offset = __per_cpu_offset[cpu];
	cpu_lowcore->current_task = (unsigned long) idle;
	cpu_lowcore->cpu_data.cpu_nr = cpu;
	eieio();

	while (signal_processor(cpu, sigp_restart) == sigp_busy)
		udelay(10);

	while (!cpu_online(cpu))
		cpu_relax();
	return 0;
}

static unsigned int __initdata additional_cpus;
static unsigned int __initdata possible_cpus;
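/*
 * Both counts come from the kernel command line, via the early_param()
 * handlers below (CONFIG_HOTPLUG_CPU only).  For example, booting with
 * "additional_cpus=2" on a machine with 4 detected CPUs yields 6
 * possible CPUs (capped at NR_CPUS), leaving headroom to hotplug two
 * more, while "possible_cpus=8" fixes the possible map at 8 CPUs
 * regardless of how many were detected.
 */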
void __init smp_setup_cpu_possible_map(void)
{
	unsigned int phy_cpus, pos_cpus, cpu;

	smp_get_save_areas();
	phy_cpus = smp_count_cpus();
	pos_cpus = min(phy_cpus + additional_cpus, (unsigned int) NR_CPUS);

	if (possible_cpus)
		pos_cpus = min(possible_cpus, (unsigned int) NR_CPUS);

	for (cpu = 0; cpu < pos_cpus; cpu++)
		cpu_set(cpu, cpu_possible_map);

	phy_cpus = min(phy_cpus, pos_cpus);

	for (cpu = 0; cpu < phy_cpus; cpu++)
		cpu_set(cpu, cpu_present_map);
}

#ifdef CONFIG_HOTPLUG_CPU

static int __init setup_additional_cpus(char *s)
{
	additional_cpus = simple_strtoul(s, NULL, 0);
	return 0;
}
early_param("additional_cpus", setup_additional_cpus);

static int __init setup_possible_cpus(char *s)
{
	possible_cpus = simple_strtoul(s, NULL, 0);
	return 0;
}
early_param("possible_cpus", setup_possible_cpus);

int __cpu_disable(void)
{
	struct ec_creg_mask_parms cr_parms;
	int cpu = smp_processor_id();

	cpu_clear(cpu, cpu_online_map);

	/* Disable pfault pseudo page faults on this cpu. */
	pfault_fini();

	memset(&cr_parms.orvals, 0, sizeof(cr_parms.orvals));
	memset(&cr_parms.andvals, 0xff, sizeof(cr_parms.andvals));

	/* disable all external interrupts */
	cr_parms.orvals[0] = 0;
	cr_parms.andvals[0] = ~(1 << 15 | 1 << 14 | 1 << 13 | 1 << 12 |
				1 << 11 | 1 << 10 | 1 << 6 | 1 << 4);
	/* disable all I/O interrupts */
	cr_parms.orvals[6] = 0;
	cr_parms.andvals[6] = ~(1 << 31 | 1 << 30 | 1 << 29 | 1 << 28 |
				1 << 27 | 1 << 26 | 1 << 25 | 1 << 24);
	/* disable most machine checks */
	cr_parms.orvals[14] = 0;
	cr_parms.andvals[14] = ~(1 << 28 | 1 << 27 | 1 << 26 |
				 1 << 25 | 1 << 24);

	smp_ctl_bit_callback(&cr_parms);

	return 0;
}

void __cpu_die(unsigned int cpu)
{
	/* Wait until target cpu is down */
	while (!smp_cpu_not_running(cpu))
		cpu_relax();
	printk("Processor %d spun down\n", cpu);
}

void cpu_die(void)
{
	idle_task_exit();
	signal_processor(smp_processor_id(), sigp_stop);
	BUG();
	for (;;);
}

#endif /* CONFIG_HOTPLUG_CPU */
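/*
 * Taken together, the three CONFIG_HOTPLUG_CPU functions above form the
 * architecture side of CPU hot-unplug: __cpu_disable() runs on the
 * dying CPU and masks its external, I/O and machine check interrupt
 * sources in the control registers, cpu_die() stops the CPU with a
 * sigp_stop addressed to itself, and __cpu_die() runs on a surviving
 * CPU and waits until the target has actually entered the stopped
 * state.
 */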
/*
 * Cycle through the processors and setup structures.
 */

void __init smp_prepare_cpus(unsigned int max_cpus)
{
	unsigned long stack;
	unsigned int cpu;
	int i;

	/* request the 0x1201 emergency signal external interrupt */
	if (register_external_interrupt(0x1201, do_ext_call_interrupt) != 0)
		panic("Couldn't request external interrupt 0x1201");
	memset(lowcore_ptr, 0, sizeof(lowcore_ptr));
	/*
	 * Initialize prefix pages and stacks for all possible cpus
	 */
	print_cpu_info(&S390_lowcore.cpu_data);

	for_each_possible_cpu(i) {
		lowcore_ptr[i] = (struct _lowcore *)
			__get_free_pages(GFP_KERNEL | GFP_DMA,
					 sizeof(void *) == 8 ? 1 : 0);
		stack = __get_free_pages(GFP_KERNEL, ASYNC_ORDER);
		if (!lowcore_ptr[i] || !stack)
			panic("smp_boot_cpus failed to allocate memory\n");

		*(lowcore_ptr[i]) = S390_lowcore;
		lowcore_ptr[i]->async_stack = stack + ASYNC_SIZE;
		stack = __get_free_pages(GFP_KERNEL, 0);
		if (!stack)
			panic("smp_boot_cpus failed to allocate memory\n");
		lowcore_ptr[i]->panic_stack = stack + PAGE_SIZE;
#ifndef CONFIG_64BIT
		if (MACHINE_HAS_IEEE) {
			lowcore_ptr[i]->extended_save_area_addr =
				(__u32) __get_free_pages(GFP_KERNEL, 0);
			if (!lowcore_ptr[i]->extended_save_area_addr)
				panic("smp_boot_cpus failed to "
				      "allocate memory\n");
		}
#endif
	}
#ifndef CONFIG_64BIT
	if (MACHINE_HAS_IEEE)
		ctl_set_bit(14, 29); /* enable extended save area */
#endif
	set_prefix((u32)(unsigned long) lowcore_ptr[smp_processor_id()]);

	for_each_possible_cpu(cpu)
		if (cpu != smp_processor_id())
			smp_create_idle(cpu);
}

void __init smp_prepare_boot_cpu(void)
{
	BUG_ON(smp_processor_id() != 0);

	cpu_set(0, cpu_online_map);
	S390_lowcore.percpu_offset = __per_cpu_offset[0];
	current_set[0] = current;
}

void __init smp_cpus_done(unsigned int max_cpus)
{
	cpu_present_map = cpu_possible_map;
}

/*
 * the frequency of the profiling timer can be changed
 * by writing a multiplier value into /proc/profile.
 *
 * usually you want to run this on all CPUs ;)
 */
int setup_profiling_timer(unsigned int multiplier)
{
	return 0;
}

static DEFINE_PER_CPU(struct cpu, cpu_devices);

static ssize_t show_capability(struct sys_device *dev, char *buf)
{
	unsigned int capability;
	int rc;

	rc = get_cpu_capability(&capability);
	if (rc)
		return rc;
	return sprintf(buf, "%u\n", capability);
}
static SYSDEV_ATTR(capability, 0444, show_capability, NULL);

static int __cpuinit smp_cpu_notify(struct notifier_block *self,
				    unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned int)(long)hcpu;
	struct cpu *c = &per_cpu(cpu_devices, cpu);
	struct sys_device *s = &c->sysdev;

	switch (action) {
	case CPU_ONLINE:
	case CPU_ONLINE_FROZEN:
		if (sysdev_create_file(s, &attr_capability))
			return NOTIFY_BAD;
		break;
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		sysdev_remove_file(s, &attr_capability);
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block __cpuinitdata smp_cpu_nb = {
	.notifier_call = smp_cpu_notify,
};

static int __init topology_init(void)
{
	int cpu;

	register_cpu_notifier(&smp_cpu_nb);

	for_each_possible_cpu(cpu) {
		struct cpu *c = &per_cpu(cpu_devices, cpu);
		struct sys_device *s = &c->sysdev;

		c->hotpluggable = 1;
		register_cpu(c, cpu);
		if (!cpu_online(cpu))
			continue;
		sysdev_create_file(s, &attr_capability);
	}
	return 0;
}
subsys_initcall(topology_init);
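/*
 * The capability attribute registered above appears in sysfs as
 * /sys/devices/system/cpu/cpuN/capability and reports the value
 * returned by get_cpu_capability(); smp_cpu_notify() keeps the file in
 * step with hotplug by creating it when a CPU comes online and
 * removing it when the CPU dies.
 */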