/*-
 * Copyright (c) 1996, by Steve Passe
 * Copyright (c) 2008, by Kip Macy
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. The name of the developer may NOT be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/i386/xen/mp_machdep.c 214631 2010-11-01 18:18:46Z jhb $");

#include "opt_apic.h"
#include "opt_cpu.h"
#include "opt_kstack_pages.h"
#include "opt_mp_watchdog.h"
#include "opt_pmap.h"
#include "opt_sched.h"
#include "opt_smp.h"

#if !defined(lint)
#if !defined(SMP)
#error How did you get here?
#endif

#ifndef DEV_APIC
#error The apic device is required for SMP, add "device apic" to your config file.
#endif
#if defined(CPU_DISABLE_CMPXCHG) && !defined(COMPILING_LINT)
#error SMP not supported with CPU_DISABLE_CMPXCHG
#endif
#endif /* not lint */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/cons.h>	/* cngetc() */
#ifdef GPROF
#include <sys/gmon.h>
#endif
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/memrange.h>
#include <sys/mutex.h>
#include <sys/pcpu.h>
#include <sys/proc.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/sysctl.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/vm_page.h>

#include <x86/apicreg.h>
#include <machine/md_var.h>
#include <machine/mp_watchdog.h>
#include <machine/pcb.h>
#include <machine/psl.h>
#include <machine/smp.h>
#include <machine/specialreg.h>
#include <machine/pcpu.h>

#include <machine/xen/xen-os.h>
#include <xen/evtchn.h>
#include <xen/xen_intr.h>
#include <xen/hypervisor.h>
#include <xen/interface/vcpu.h>

int mp_naps;			/* # of application processors */
int boot_cpu_id = -1;		/* designated BSP */

extern struct pcpu __pcpu[];

static int bootAP;
static union descriptor *bootAPgdt;

static char resched_name[NR_CPUS][15];
static char callfunc_name[NR_CPUS][15];

/* Free these after use */
void *bootstacks[MAXCPU];

struct pcb stoppcbs[MAXCPU];

/* Variables needed for SMP tlb shootdown. */
vm_offset_t smp_tlb_addr1;
vm_offset_t smp_tlb_addr2;
volatile int smp_tlb_wait;

typedef void call_data_func_t(uintptr_t, uintptr_t);

static u_int logical_cpus;
static volatile cpumask_t ipi_nmi_pending;

/* used to hold the APs until we are ready to release them */
static struct mtx ap_boot_mtx;

/* Set to 1 once we're ready to let the APs out of the pen. */
static volatile int aps_ready = 0;
/*
 * Store data from cpu_add() until later in the boot when we actually setup
 * the APs.
 */
struct cpu_info {
	int	cpu_present:1;
	int	cpu_bsp:1;
	int	cpu_disabled:1;
} static cpu_info[MAX_APIC_ID + 1];
int cpu_apic_ids[MAXCPU];
int apic_cpuids[MAX_APIC_ID + 1];

/* Holds pending bitmap based IPIs per CPU */
static volatile u_int cpu_ipi_pending[MAXCPU];

static int cpu_logical;
static int cpu_cores;

static void	assign_cpu_ids(void);
static void	set_interrupt_apic_ids(void);
int	start_all_aps(void);
static int	start_ap(int apic_id);
static void	release_aps(void *dummy);

static u_int	hyperthreading_cpus;
static cpumask_t	hyperthreading_cpus_mask;

extern void Xhypervisor_callback(void);
extern void failsafe_callback(void);
extern void pmap_lazyfix_action(void);

struct cpu_group *
cpu_topo(void)
{
	if (cpu_cores == 0)
		cpu_cores = 1;
	if (cpu_logical == 0)
		cpu_logical = 1;
	if (mp_ncpus % (cpu_cores * cpu_logical) != 0) {
		printf("WARNING: Non-uniform processors.\n");
		printf("WARNING: Using suboptimal topology.\n");
		return (smp_topo_none());
	}
	/*
	 * No multi-core or hyper-threaded.
	 */
	if (cpu_logical * cpu_cores == 1)
		return (smp_topo_none());
	/*
	 * Only HTT, no multi-core.
	 */
	if (cpu_logical > 1 && cpu_cores == 1)
		return (smp_topo_1level(CG_SHARE_L1, cpu_logical, CG_FLAG_HTT));
	/*
	 * Only multi-core, no HTT.
	 */
	if (cpu_cores > 1 && cpu_logical == 1)
		return (smp_topo_1level(CG_SHARE_NONE, cpu_cores, 0));
	/*
	 * Both HTT and multi-core.
	 */
	return (smp_topo_2level(CG_SHARE_NONE, cpu_cores,
	    CG_SHARE_L1, cpu_logical, CG_FLAG_HTT));
}
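
/*
 * Worked example (hypothetical box, not compiled): with cpu_cores = 2
 * and cpu_logical = 2, the fall-through case above builds a two-level
 * topology in which cores share nothing and the two hyperthreads in
 * each core share an L1 cache, i.e. the equivalent of:
 *
 *	return (smp_topo_2level(CG_SHARE_NONE, 2,
 *	    CG_SHARE_L1, 2, CG_FLAG_HTT));
 */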
/*
 * Calculate usable address in base memory for AP trampoline code.
 */
u_int
mp_bootaddress(u_int basemem)
{

	return (basemem);
}

void
cpu_add(u_int apic_id, char boot_cpu)
{

	if (apic_id > MAX_APIC_ID) {
		panic("SMP: APIC ID %d too high", apic_id);
		return;
	}
	KASSERT(cpu_info[apic_id].cpu_present == 0, ("CPU %d added twice",
	    apic_id));
	cpu_info[apic_id].cpu_present = 1;
	if (boot_cpu) {
		KASSERT(boot_cpu_id == -1,
		    ("CPU %d claims to be BSP, but CPU %d already is", apic_id,
		    boot_cpu_id));
		boot_cpu_id = apic_id;
		cpu_info[apic_id].cpu_bsp = 1;
	}
	if (mp_ncpus < MAXCPU)
		mp_ncpus++;
	if (bootverbose)
		printf("SMP: Added CPU %d (%s)\n", apic_id, boot_cpu ? "BSP" :
		    "AP");
}

void
cpu_mp_setmaxid(void)
{

	mp_maxid = MAXCPU - 1;
}

int
cpu_mp_probe(void)
{

	/*
	 * Always record BSP in CPU map so that the mbuf init code works
	 * correctly.
	 */
	all_cpus = 1;
	if (mp_ncpus == 0) {
		/*
		 * No CPUs were found, so this must be a UP system.  Setup
		 * the variables to represent a system with a single CPU
		 * with an id of 0.
		 */
		mp_ncpus = 1;
		return (0);
	}

	/* At least one CPU was found. */
	if (mp_ncpus == 1) {
		/*
		 * One CPU was found, so this must be a UP system with
		 * an I/O APIC.
		 */
		return (0);
	}

	/* At least two CPUs were found. */
	return (1);
}

/*
 * Initialize the IPI handlers and start up the APs.
 */
void
cpu_mp_start(void)
{
	int i;

	/* Initialize the logical ID to APIC ID table. */
	for (i = 0; i < MAXCPU; i++) {
		cpu_apic_ids[i] = -1;
		cpu_ipi_pending[i] = 0;
	}

	/* Set boot_cpu_id if needed. */
	if (boot_cpu_id == -1) {
		boot_cpu_id = PCPU_GET(apic_id);
		cpu_info[boot_cpu_id].cpu_bsp = 1;
	} else
		KASSERT(boot_cpu_id == PCPU_GET(apic_id),
		    ("BSP's APIC ID doesn't match boot_cpu_id"));
	cpu_apic_ids[0] = boot_cpu_id;
	apic_cpuids[boot_cpu_id] = 0;

	assign_cpu_ids();

	/* Start each Application Processor */
	start_all_aps();

	/* Setup the initial logical CPUs info. */
	logical_cpus = logical_cpus_mask = 0;
	if (cpu_feature & CPUID_HTT)
		logical_cpus = (cpu_procinfo & CPUID_HTT_CORES) >> 16;

	set_interrupt_apic_ids();
}

static void
iv_rendezvous(uintptr_t a, uintptr_t b)
{
	smp_rendezvous_action();
}

static void
iv_invltlb(uintptr_t a, uintptr_t b)
{
	xen_tlb_flush();
}

static void
iv_invlpg(uintptr_t a, uintptr_t b)
{
	xen_invlpg(a);
}

static void
iv_invlrng(uintptr_t a, uintptr_t b)
{
	vm_offset_t start = (vm_offset_t)a;
	vm_offset_t end = (vm_offset_t)b;

	while (start < end) {
		xen_invlpg(start);
		start += PAGE_SIZE;
	}
}

static void
iv_invlcache(uintptr_t a, uintptr_t b)
{

	wbinvd();
	atomic_add_int(&smp_tlb_wait, 1);
}

static void
iv_lazypmap(uintptr_t a, uintptr_t b)
{
	pmap_lazyfix_action();
	atomic_add_int(&smp_tlb_wait, 1);
}

/*
 * These start from "IPI offset" APIC_IPI_INTS
 */
static call_data_func_t *ipi_vectors[6] =
{
	iv_rendezvous,
	iv_invltlb,
	iv_invlpg,
	iv_invlrng,
	iv_invlcache,
	iv_lazypmap,
};
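
/*
 * Illustrative mapping (assuming the conventional i386 IPI numbering,
 * where IPI_RENDEZVOUS == APIC_IPI_INTS, IPI_INVLTLB == APIC_IPI_INTS + 1,
 * and so on): a func_id of IPI_INVLTLB selects ipi_vectors[1], i.e.
 * iv_invltlb().  smp_call_function_interrupt() below performs exactly
 * this lookup:
 *
 *	func = ipi_vectors[func_id - APIC_IPI_INTS];
 */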
/*
 * Reschedule callback.  Nothing to do, all the work is done
 * automatically when we return from the interrupt.
 */
static int
smp_reschedule_interrupt(void *unused)
{
	int cpu = PCPU_GET(cpuid);
	u_int ipi_bitmap;

	ipi_bitmap = atomic_readandclear_int(&cpu_ipi_pending[cpu]);

	if (ipi_bitmap & (1 << IPI_PREEMPT)) {
#ifdef COUNT_IPIS
		(*ipi_preempt_counts[cpu])++;
#endif
		sched_preempt(curthread);
	}

	if (ipi_bitmap & (1 << IPI_AST)) {
#ifdef COUNT_IPIS
		(*ipi_ast_counts[cpu])++;
#endif
		/* Nothing to do for AST */
	}
	return (FILTER_HANDLED);
}

struct _call_data {
	uint16_t func_id;
	uint16_t wait;
	uintptr_t arg1;
	uintptr_t arg2;
	atomic_t started;
	atomic_t finished;
};

static struct _call_data *call_data;

static int
smp_call_function_interrupt(void *unused)
{
	call_data_func_t *func;
	uintptr_t arg1 = call_data->arg1;
	uintptr_t arg2 = call_data->arg2;
	int wait = call_data->wait;
	atomic_t *started = &call_data->started;
	atomic_t *finished = &call_data->finished;

	/* We only handle function IPIs, not bitmap IPIs */
	if (call_data->func_id < APIC_IPI_INTS ||
	    call_data->func_id > IPI_BITMAP_VECTOR)
		panic("invalid function id %u", call_data->func_id);

	func = ipi_vectors[call_data->func_id - APIC_IPI_INTS];
	/*
	 * Notify initiating CPU that I've grabbed the data and am
	 * about to execute the function.
	 */
	mb();
	atomic_inc(started);
	/*
	 * At this point the info structure may be out of scope unless wait==1.
	 */
	(*func)(arg1, arg2);

	if (wait) {
		mb();
		atomic_inc(finished);
	}
	atomic_add_int(&smp_tlb_wait, 1);
	return (FILTER_HANDLED);
}
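
/*
 * Sketch of the initiator side of this handshake (simplified and
 * hypothetical; the real initiators are the TLB shootdown routines
 * later in this file):
 *
 *	struct _call_data data = { .func_id = IPI_INVLTLB, .wait = 0 };
 *	mtx_lock_spin(&smp_ipi_mtx);
 *	call_data = &data;
 *	atomic_store_rel_int(&smp_tlb_wait, 0);
 *	ipi_all_but_self(IPI_INVLTLB);
 *	while (smp_tlb_wait < mp_ncpus - 1)
 *		ia32_pause();
 *	call_data = NULL;
 *	mtx_unlock_spin(&smp_ipi_mtx);
 *
 * Each receiver bumps smp_tlb_wait once, so the initiator spins until
 * every other CPU has run the function.
 */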
/*
 * Print various information about the SMP system hardware and setup.
 */
void
cpu_mp_announce(void)
{
	int i, x;

	/* List CPUs */
	printf(" cpu0 (BSP): APIC ID: %2d\n", boot_cpu_id);
	for (i = 1, x = 0; x <= MAX_APIC_ID; x++) {
		if (!cpu_info[x].cpu_present || cpu_info[x].cpu_bsp)
			continue;
		if (cpu_info[x].cpu_disabled)
			printf(" cpu (AP): APIC ID: %2d (disabled)\n", x);
		else {
			KASSERT(i < mp_ncpus,
			    ("mp_ncpus and actual cpus are out of whack"));
			printf(" cpu%d (AP): APIC ID: %2d\n", i++, x);
		}
	}
}

static int
xen_smp_intr_init(unsigned int cpu)
{
	int rc;
	unsigned int irq;

	per_cpu(resched_irq, cpu) = per_cpu(callfunc_irq, cpu) = -1;

	sprintf(resched_name[cpu], "resched%u", cpu);
	rc = bind_ipi_to_irqhandler(RESCHEDULE_VECTOR,
	    cpu,
	    resched_name[cpu],
	    smp_reschedule_interrupt,
	    INTR_FAST|INTR_TYPE_TTY|INTR_MPSAFE, &irq);

	printf("[XEN] IPI cpu=%d irq=%d vector=RESCHEDULE_VECTOR (%d)\n",
	    cpu, irq, RESCHEDULE_VECTOR);

	per_cpu(resched_irq, cpu) = irq;

	sprintf(callfunc_name[cpu], "callfunc%u", cpu);
	rc = bind_ipi_to_irqhandler(CALL_FUNCTION_VECTOR,
	    cpu,
	    callfunc_name[cpu],
	    smp_call_function_interrupt,
	    INTR_FAST|INTR_TYPE_TTY|INTR_MPSAFE, &irq);
	if (rc < 0)
		goto fail;
	per_cpu(callfunc_irq, cpu) = irq;

	printf("[XEN] IPI cpu=%d irq=%d vector=CALL_FUNCTION_VECTOR (%d)\n",
	    cpu, irq, CALL_FUNCTION_VECTOR);

	if ((cpu != 0) && ((rc = ap_cpu_initclocks(cpu)) != 0))
		goto fail;

	return 0;

 fail:
	if (per_cpu(resched_irq, cpu) >= 0)
		unbind_from_irqhandler(per_cpu(resched_irq, cpu));
	if (per_cpu(callfunc_irq, cpu) >= 0)
		unbind_from_irqhandler(per_cpu(callfunc_irq, cpu));
	return rc;
}

static void
xen_smp_intr_init_cpus(void *unused)
{
	int i;

	for (i = 0; i < mp_ncpus; i++)
		xen_smp_intr_init(i);
}
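
/*
 * A note on the bindings above (based on how this Xen PV port routes
 * IPIs): bind_ipi_to_irqhandler() attaches each filter to a Xen event
 * channel rather than to a hardware local APIC vector, so
 * RESCHEDULE_VECTOR and CALL_FUNCTION_VECTOR name per-CPU event
 * channels here.  The failure path mirrors the setup: a failed bind
 * unwinds any channel already bound via unbind_from_irqhandler().
 */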
#define MTOPSIZE (1<<(14 + PAGE_SHIFT))

/*
 * AP CPUs call this to initialize themselves.
 */
void
init_secondary(void)
{
	vm_offset_t addr;
	int	gsel_tss;

	/* bootAP is set in start_ap() to our ID. */
	PCPU_SET(currentldt, _default_ldt);
	gsel_tss = GSEL(GPROC0_SEL, SEL_KPL);
#if 0
	gdt[bootAP * NGDT + GPROC0_SEL].sd.sd_type = SDT_SYS386TSS;
#endif
	PCPU_SET(common_tss.tss_esp0, 0); /* not used until after switch */
	PCPU_SET(common_tss.tss_ss0, GSEL(GDATA_SEL, SEL_KPL));
	PCPU_SET(common_tss.tss_ioopt, (sizeof (struct i386tss)) << 16);
#if 0
	PCPU_SET(tss_gdt, &gdt[bootAP * NGDT + GPROC0_SEL].sd);

	PCPU_SET(common_tssd, *PCPU_GET(tss_gdt));
#endif
	PCPU_SET(fsgs_gdt, &gdt[GUFS_SEL].sd);

	/*
	 * Set to a known state:
	 * Set by mpboot.s: CR0_PG, CR0_PE
	 * Set by cpu_setregs: CR0_NE, CR0_MP, CR0_TS, CR0_WP, CR0_AM
	 */
	/*
	 * signal our startup to the BSP.
	 */
	mp_naps++;

	/* Spin until the BSP releases the APs. */
	while (!aps_ready)
		ia32_pause();

	/* BSP may have changed PTD while we were waiting */
	invltlb();
	for (addr = 0; addr < NKPT * NBPDR - 1; addr += PAGE_SIZE)
		invlpg(addr);

	/* set up FPU state on the AP */
	npxinit();
#if 0
	/* set up SSE registers */
	enable_sse();
#endif
#if 0 && defined(PAE)
	/* Enable the PTE no-execute bit. */
	if ((amd_feature & AMDID_NX) != 0) {
		uint64_t msr;

		msr = rdmsr(MSR_EFER) | EFER_NXE;
		wrmsr(MSR_EFER, msr);
	}
#endif
#if 0
	/* A quick check from sanity claus */
	if (PCPU_GET(apic_id) != lapic_id()) {
		printf("SMP: cpuid = %d\n", PCPU_GET(cpuid));
		printf("SMP: actual apic_id = %d\n", lapic_id());
		printf("SMP: correct apic_id = %d\n", PCPU_GET(apic_id));
		panic("cpuid mismatch! boom!!");
	}
#endif

	/* Initialize curthread. */
	KASSERT(PCPU_GET(idlethread) != NULL, ("no idle thread"));
	PCPU_SET(curthread, PCPU_GET(idlethread));

	mtx_lock_spin(&ap_boot_mtx);
#if 0
	/* Init local apic for irq's */
	lapic_setup(1);
#endif
	smp_cpus++;

	CTR1(KTR_SMP, "SMP: AP CPU #%d Launched", PCPU_GET(cpuid));
	printf("SMP: AP CPU #%d Launched!\n", PCPU_GET(cpuid));

	/* Determine if we are a logical CPU. */
	if (logical_cpus > 1 && PCPU_GET(apic_id) % logical_cpus != 0)
		logical_cpus_mask |= PCPU_GET(cpumask);

	/* Determine if we are a hyperthread. */
	if (hyperthreading_cpus > 1 &&
	    PCPU_GET(apic_id) % hyperthreading_cpus != 0)
		hyperthreading_cpus_mask |= PCPU_GET(cpumask);

	/* Build our map of 'other' CPUs. */
	PCPU_SET(other_cpus, all_cpus & ~PCPU_GET(cpumask));
#if 0
	if (bootverbose)
		lapic_dump("AP");
#endif
	if (smp_cpus == mp_ncpus) {
		/* enable IPIs, tlb shootdown, freezes etc */
		atomic_store_rel_int(&smp_started, 1);
		smp_active = 1;	 /* historic */
	}

	mtx_unlock_spin(&ap_boot_mtx);

	/* wait until all the APs are up */
	while (smp_started == 0)
		ia32_pause();

	PCPU_SET(curthread, PCPU_GET(idlethread));
	/* enter the scheduler */
	sched_throw(NULL);

	panic("scheduler returned us to %s", __func__);
	/* NOTREACHED */
}

/*******************************************************************
 * local functions and data
 */

/*
 * We tell the I/O APIC code about all the CPUs we want to receive
 * interrupts.  If we don't want certain CPUs to receive IRQs we
 * can simply not tell the I/O APIC code about them in this function.
 * We also do not tell it about the BSP since it tells itself about
 * the BSP internally to work with UP kernels and on UP machines.
 */
static void
set_interrupt_apic_ids(void)
{
	u_int i, apic_id;

	for (i = 0; i < MAXCPU; i++) {
		apic_id = cpu_apic_ids[i];
		if (apic_id == -1)
			continue;
		if (cpu_info[apic_id].cpu_bsp)
			continue;
		if (cpu_info[apic_id].cpu_disabled)
			continue;

		/* Don't let hyperthreads service interrupts. */
		if (hyperthreading_cpus > 1 &&
		    apic_id % hyperthreading_cpus != 0)
			continue;

		intr_add_cpu(i);
	}
}

/*
 * Assign logical CPU IDs to local APICs.
 */
static void
assign_cpu_ids(void)
{
	u_int i;

	/* Check for explicitly disabled CPUs. */
	for (i = 0; i <= MAX_APIC_ID; i++) {
		if (!cpu_info[i].cpu_present || cpu_info[i].cpu_bsp)
			continue;

		/* Don't use this CPU if it has been disabled by a tunable. */
		if (resource_disabled("lapic", i)) {
			cpu_info[i].cpu_disabled = 1;
			continue;
		}
	}

	/*
	 * Assign CPU IDs to local APIC IDs and disable any CPUs
	 * beyond MAXCPU.  CPU 0 has already been assigned to the BSP,
	 * so we only have to assign IDs for APs.
	 */
	mp_ncpus = 1;
	for (i = 0; i <= MAX_APIC_ID; i++) {
		if (!cpu_info[i].cpu_present || cpu_info[i].cpu_bsp ||
		    cpu_info[i].cpu_disabled)
			continue;

		if (mp_ncpus < MAXCPU) {
			cpu_apic_ids[mp_ncpus] = i;
			apic_cpuids[i] = mp_ncpus;
			mp_ncpus++;
		} else
			cpu_info[i].cpu_disabled = 1;
	}
	KASSERT(mp_maxid >= mp_ncpus - 1,
	    ("%s: counters out of sync: max %d, count %d", __func__, mp_maxid,
	    mp_ncpus));
}

/*
 * start each AP in our list
 */
/* Lowest 1MB is already mapped: don't touch */
#define TMPMAP_START 1
int
start_all_aps(void)
{
	int x, apic_id, cpu;
	struct pcpu *pc;

	mtx_init(&ap_boot_mtx, "ap boot", NULL, MTX_SPIN);

	/* set up temporary P==V mapping for AP boot */
	/* XXX this is a hack, we should boot the AP on its own stack/PTD */

	/* start each AP */
	for (cpu = 1; cpu < mp_ncpus; cpu++) {
		apic_id = cpu_apic_ids[cpu];

		bootAP = cpu;
		bootAPgdt = gdt + (512*cpu);

		/* Get per-cpu data */
		pc = &__pcpu[bootAP];
		pcpu_init(pc, bootAP, sizeof(struct pcpu));
		dpcpu_init((void *)kmem_alloc(kernel_map, DPCPU_SIZE), bootAP);
		pc->pc_apic_id = cpu_apic_ids[bootAP];
		pc->pc_prvspace = pc;
		pc->pc_curthread = 0;

		gdt_segs[GPRIV_SEL].ssd_base = (int) pc;
		gdt_segs[GPROC0_SEL].ssd_base = (int) &pc->pc_common_tss;

		PT_SET_MA(bootAPgdt, xpmap_ptom(VTOP(bootAPgdt)) | PG_V | PG_RW);
		bzero(bootAPgdt, PAGE_SIZE);
		for (x = 0; x < NGDT; x++)
			ssdtosd(&gdt_segs[x], &bootAPgdt[x].sd);
		PT_SET_MA(bootAPgdt, vtomach(bootAPgdt) | PG_V);
#ifdef notyet

		if (HYPERVISOR_vcpu_op(VCPUOP_get_physid, cpu, &cpu_id) == 0) {
			apicid = xen_vcpu_physid_to_x86_apicid(cpu_id.phys_id);
			acpiid = xen_vcpu_physid_to_x86_acpiid(cpu_id.phys_id);
#ifdef CONFIG_ACPI
			if (acpiid != 0xff)
				x86_acpiid_to_apicid[acpiid] = apicid;
#endif
		}
#endif

		/* attempt to start the Application Processor */
		if (!start_ap(cpu)) {
			printf("AP #%d (PHY# %d) failed!\n", cpu, apic_id);
			/* better panic as the AP may be running loose */
			printf("panic y/n? [y] ");
			if (cngetc() != 'n')
				panic("bye-bye");
		}

		all_cpus |= (1 << cpu);	/* record AP in CPU map */
	}

	/* build our map of 'other' CPUs */
	PCPU_SET(other_cpus, all_cpus & ~PCPU_GET(cpumask));

	pmap_invalidate_range(kernel_pmap, 0, NKPT * NBPDR - 1);

	/* number of APs actually started */
	return mp_naps;
}

extern uint8_t *pcpu_boot_stack;
extern trap_info_t trap_table[];

static void
smp_trap_init(trap_info_t *trap_ctxt)
{
	const trap_info_t *t = trap_table;

	for (t = trap_table; t->address; t++) {
		trap_ctxt[t->vector].flags = t->flags;
		trap_ctxt[t->vector].cs = t->cs;
		trap_ctxt[t->vector].address = t->address;
	}
}

extern int nkpt;
static void
cpu_initialize_context(unsigned int cpu)
{
	/* vcpu_guest_context_t is too large to allocate on the stack.
	 * Hence we allocate statically and protect it with a lock */
	vm_page_t m[4];
	static vcpu_guest_context_t ctxt;
	vm_offset_t boot_stack;
	vm_offset_t newPTD;
	vm_paddr_t ma[NPGPTD];
	static int color;
	int i;

	/*
	 * Page 0,[0-3]	PTD
	 * Page 1, [4]	boot stack
	 * Page [5]	PDPT
	 */
	for (i = 0; i < NPGPTD + 2; i++) {
		m[i] = vm_page_alloc(NULL, color++,
		    VM_ALLOC_NORMAL | VM_ALLOC_NOOBJ | VM_ALLOC_WIRED |
		    VM_ALLOC_ZERO);

		pmap_zero_page(m[i]);
	}
	boot_stack = kmem_alloc_nofault(kernel_map, 1);
	newPTD = kmem_alloc_nofault(kernel_map, NPGPTD);
	ma[0] = xpmap_ptom(VM_PAGE_TO_PHYS(m[0]))|PG_V;

#ifdef PAE
	pmap_kenter(boot_stack, VM_PAGE_TO_PHYS(m[NPGPTD + 1]));
	for (i = 0; i < NPGPTD; i++) {
		((vm_paddr_t *)boot_stack)[i] =
		    ma[i] =
		    xpmap_ptom(VM_PAGE_TO_PHYS(m[i]))|PG_V;
	}
#endif

	/*
	 * Copy cpu0 IdlePTD to new IdlePTD - copying only
	 * kernel mappings
	 */
	pmap_qenter(newPTD, m, 4);

	memcpy((uint8_t *)newPTD + KPTDI*sizeof(vm_paddr_t),
	    (uint8_t *)PTOV(IdlePTD) + KPTDI*sizeof(vm_paddr_t),
	    nkpt*sizeof(vm_paddr_t));

	pmap_qremove(newPTD, 4);
	kmem_free(kernel_map, newPTD, 4);
	/*
	 * map actual idle stack to boot_stack
	 */
	pmap_kenter(boot_stack, VM_PAGE_TO_PHYS(m[NPGPTD]));

	xen_pgdpt_pin(xpmap_ptom(VM_PAGE_TO_PHYS(m[NPGPTD + 1])));
	vm_page_lock_queues();
	for (i = 0; i < 4; i++) {
		int pdir = (PTDPTDI + i) / NPDEPG;
		int curoffset = (PTDPTDI + i) % NPDEPG;

		xen_queue_pt_update((vm_paddr_t)
		    ((ma[pdir] & ~PG_V) + (curoffset*sizeof(vm_paddr_t))),
		    ma[i]);
	}
	PT_UPDATES_FLUSH();
	vm_page_unlock_queues();

	memset(&ctxt, 0, sizeof(ctxt));
	ctxt.flags = VGCF_IN_KERNEL;
	ctxt.user_regs.ds = GSEL(GDATA_SEL, SEL_KPL);
	ctxt.user_regs.es = GSEL(GDATA_SEL, SEL_KPL);
	ctxt.user_regs.fs = GSEL(GPRIV_SEL, SEL_KPL);
	ctxt.user_regs.gs = GSEL(GDATA_SEL, SEL_KPL);
	ctxt.user_regs.cs = GSEL(GCODE_SEL, SEL_KPL);
	ctxt.user_regs.ss = GSEL(GDATA_SEL, SEL_KPL);
	ctxt.user_regs.eip = (unsigned long)init_secondary;
	ctxt.user_regs.eflags = PSL_KERNEL | 0x1000; /* IOPL_RING1 */

	memset(&ctxt.fpu_ctxt, 0, sizeof(ctxt.fpu_ctxt));

	smp_trap_init(ctxt.trap_ctxt);

	ctxt.ldt_ents = 0;
	ctxt.gdt_frames[0] =
	    (uint32_t)((uint64_t)vtomach(bootAPgdt) >> PAGE_SHIFT);
	ctxt.gdt_ents = 512;

#ifdef __i386__
	ctxt.user_regs.esp = boot_stack + PAGE_SIZE;

	ctxt.kernel_ss = GSEL(GDATA_SEL, SEL_KPL);
	ctxt.kernel_sp = boot_stack + PAGE_SIZE;

	ctxt.event_callback_cs = GSEL(GCODE_SEL, SEL_KPL);
	ctxt.event_callback_eip = (unsigned long)Xhypervisor_callback;
	ctxt.failsafe_callback_cs = GSEL(GCODE_SEL, SEL_KPL);
	ctxt.failsafe_callback_eip = (unsigned long)failsafe_callback;

	ctxt.ctrlreg[3] = xpmap_ptom(VM_PAGE_TO_PHYS(m[NPGPTD + 1]));
#else /* __x86_64__ */
	ctxt.user_regs.esp = idle->thread.rsp0 - sizeof(struct pt_regs);
	ctxt.kernel_ss = GSEL(GDATA_SEL, SEL_KPL);
	ctxt.kernel_sp = idle->thread.rsp0;

	ctxt.event_callback_eip = (unsigned long)hypervisor_callback;
	ctxt.failsafe_callback_eip = (unsigned long)failsafe_callback;
	ctxt.syscall_callback_eip = (unsigned long)system_call;

	ctxt.ctrlreg[3] = xen_pfn_to_cr3(virt_to_mfn(init_level4_pgt));

	ctxt.gs_base_kernel = (unsigned long)(cpu_pda(cpu));
#endif

	printf("gdtpfn=%lx pdptpfn=%lx\n",
	    ctxt.gdt_frames[0],
	    ctxt.ctrlreg[3] >> PAGE_SHIFT);

	PANIC_IF(HYPERVISOR_vcpu_op(VCPUOP_initialise, cpu, &ctxt));
	DELAY(3000);
	PANIC_IF(HYPERVISOR_vcpu_op(VCPUOP_up, cpu, NULL));
}
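
/*
 * Unlike bare metal, where an AP is woken with the INIT/SIPI sequence,
 * a Xen PV guest brings a vCPU online by describing its complete
 * initial state (registers, GDT, page tables, callbacks) in the
 * vcpu_guest_context_t built above and then issuing:
 *
 *	HYPERVISOR_vcpu_op(VCPUOP_initialise, cpu, &ctxt);
 *	HYPERVISOR_vcpu_op(VCPUOP_up, cpu, NULL);
 *
 * The new vCPU starts executing at ctxt.user_regs.eip, which points at
 * init_secondary() above.
 */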
/*
 * This function starts the AP (application processor) identified
 * by the APIC ID 'physicalCpu'.  It does quite a "song and dance"
 * to accomplish this.  This is necessary because of the nuances
 * of the different hardware we might encounter.  It isn't pretty,
 * but it seems to work.
 */

int cpus;
static int
start_ap(int apic_id)
{
	int ms;

	/* used as a watchpoint to signal AP startup */
	cpus = mp_naps;

	cpu_initialize_context(apic_id);

	/* Wait up to 5 seconds for it to start. */
	for (ms = 0; ms < 5000; ms++) {
		if (mp_naps > cpus)
			return 1;	/* return SUCCESS */
		DELAY(1000);
	}
	return 0;		/* return FAILURE */
}

/*
 * Flush the TLB on all other CPUs.
 */
static void
smp_tlb_shootdown(u_int vector, vm_offset_t addr1, vm_offset_t addr2)
{
	u_int ncpu;
	struct _call_data data;

	ncpu = mp_ncpus - 1;	/* does not shootdown self */
	if (ncpu < 1)
		return;		/* no other cpus */
	if (!(read_eflags() & PSL_I))
		panic("%s: interrupts disabled", __func__);
	mtx_lock_spin(&smp_ipi_mtx);
	KASSERT(call_data == NULL, ("call_data isn't null?!"));
	call_data = &data;
	call_data->func_id = vector;
	call_data->arg1 = addr1;
	call_data->arg2 = addr2;
	atomic_store_rel_int(&smp_tlb_wait, 0);
	ipi_all_but_self(vector);
	while (smp_tlb_wait < ncpu)
		ia32_pause();
	call_data = NULL;
	mtx_unlock_spin(&smp_ipi_mtx);
}

static void
smp_targeted_tlb_shootdown(cpumask_t mask, u_int vector, vm_offset_t addr1,
    vm_offset_t addr2)
{
	int ncpu, othercpus;
	struct _call_data data;

	othercpus = mp_ncpus - 1;
	if (mask == (u_int)-1) {
		ncpu = othercpus;
		if (ncpu < 1)
			return;
	} else {
		mask &= ~PCPU_GET(cpumask);
		if (mask == 0)
			return;
		ncpu = bitcount32(mask);
		if (ncpu > othercpus) {
			/* XXX this should be a panic offence */
			printf("SMP: tlb shootdown to %d other cpus (only have %d)\n",
			    ncpu, othercpus);
			ncpu = othercpus;
		}
		/* XXX should be a panic, implied by mask == 0 above */
		if (ncpu < 1)
			return;
	}
	if (!(read_eflags() & PSL_I))
		panic("%s: interrupts disabled", __func__);
	mtx_lock_spin(&smp_ipi_mtx);
	KASSERT(call_data == NULL, ("call_data isn't null?!"));
	call_data = &data;
	call_data->func_id = vector;
	call_data->arg1 = addr1;
	call_data->arg2 = addr2;
	atomic_store_rel_int(&smp_tlb_wait, 0);
	if (mask == (u_int)-1)
		ipi_all_but_self(vector);
	else
		ipi_selected(mask, vector);
	while (smp_tlb_wait < ncpu)
		ia32_pause();
	call_data = NULL;
	mtx_unlock_spin(&smp_ipi_mtx);
}
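
/*
 * The pmap layer reaches the two shootdown routines above through the
 * wrappers below.  A typical (hypothetical) request to invalidate one
 * page on CPUs 1 and 2 would be:
 *
 *	cpumask_t mask = (1 << 1) | (1 << 2);
 *	smp_masked_invlpg(mask, addr);
 *
 * which sends IPI_INVLPG to each target and lands in iv_invlpg().
 */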
void
smp_cache_flush(void)
{

	if (smp_started)
		smp_tlb_shootdown(IPI_INVLCACHE, 0, 0);
}

void
smp_invltlb(void)
{

	if (smp_started) {
		smp_tlb_shootdown(IPI_INVLTLB, 0, 0);
	}
}

void
smp_invlpg(vm_offset_t addr)
{

	if (smp_started) {
		smp_tlb_shootdown(IPI_INVLPG, addr, 0);
	}
}

void
smp_invlpg_range(vm_offset_t addr1, vm_offset_t addr2)
{

	if (smp_started) {
		smp_tlb_shootdown(IPI_INVLRNG, addr1, addr2);
	}
}

void
smp_masked_invltlb(cpumask_t mask)
{

	if (smp_started) {
		smp_targeted_tlb_shootdown(mask, IPI_INVLTLB, 0, 0);
	}
}

void
smp_masked_invlpg(cpumask_t mask, vm_offset_t addr)
{

	if (smp_started) {
		smp_targeted_tlb_shootdown(mask, IPI_INVLPG, addr, 0);
	}
}

void
smp_masked_invlpg_range(cpumask_t mask, vm_offset_t addr1, vm_offset_t addr2)
{

	if (smp_started) {
		smp_targeted_tlb_shootdown(mask, IPI_INVLRNG, addr1, addr2);
	}
}

/*
 * send an IPI to a set of cpus.
 */
void
ipi_selected(cpumask_t cpus, u_int ipi)
{
	int cpu;
	u_int bitmap = 0;
	u_int old_pending;
	u_int new_pending;

	if (IPI_IS_BITMAPED(ipi)) {
		bitmap = 1 << ipi;
		ipi = IPI_BITMAP_VECTOR;
	}

	/*
	 * IPI_STOP_HARD maps to a NMI and the trap handler needs a bit
	 * of help in order to understand the source.  Set the mask of
	 * receiving CPUs for this purpose.
	 */
	if (ipi == IPI_STOP_HARD)
		atomic_set_int(&ipi_nmi_pending, cpus);

	CTR3(KTR_SMP, "%s: cpus: %x ipi: %x", __func__, cpus, ipi);
	while ((cpu = ffs(cpus)) != 0) {
		cpu--;
		cpus &= ~(1 << cpu);

		if (bitmap) {
			do {
				old_pending = cpu_ipi_pending[cpu];
				new_pending = old_pending | bitmap;
			} while (!atomic_cmpset_int(&cpu_ipi_pending[cpu],
			    old_pending, new_pending));
			if (!old_pending)
				ipi_pcpu(cpu, RESCHEDULE_VECTOR);
		} else {
			KASSERT(call_data != NULL, ("call_data not set"));
			ipi_pcpu(cpu, CALL_FUNCTION_VECTOR);
		}
	}
}

/*
 * send an IPI to a specific CPU.
 */
void
ipi_cpu(int cpu, u_int ipi)
{
	u_int bitmap = 0;
	u_int old_pending;
	u_int new_pending;

	if (IPI_IS_BITMAPED(ipi)) {
		bitmap = 1 << ipi;
		ipi = IPI_BITMAP_VECTOR;
	}

	/*
	 * IPI_STOP_HARD maps to a NMI and the trap handler needs a bit
	 * of help in order to understand the source.  Set the mask of
	 * receiving CPUs for this purpose.
	 */
	if (ipi == IPI_STOP_HARD)
		atomic_set_int(&ipi_nmi_pending, 1 << cpu);

	CTR3(KTR_SMP, "%s: cpu: %d ipi: %x", __func__, cpu, ipi);

	if (bitmap) {
		do {
			old_pending = cpu_ipi_pending[cpu];
			new_pending = old_pending | bitmap;
		} while (!atomic_cmpset_int(&cpu_ipi_pending[cpu],
		    old_pending, new_pending));
		if (!old_pending)
			ipi_pcpu(cpu, RESCHEDULE_VECTOR);
	} else {
		KASSERT(call_data != NULL, ("call_data not set"));
		ipi_pcpu(cpu, CALL_FUNCTION_VECTOR);
	}
}

/*
 * send an IPI to all CPUs EXCEPT myself
 */
void
ipi_all_but_self(u_int ipi)
{

	/*
	 * IPI_STOP_HARD maps to a NMI and the trap handler needs a bit
	 * of help in order to understand the source.  Set the mask of
	 * receiving CPUs for this purpose.
	 */
	if (ipi == IPI_STOP_HARD)
		atomic_set_int(&ipi_nmi_pending, PCPU_GET(other_cpus));

	CTR2(KTR_SMP, "%s: ipi: %x", __func__, ipi);
	ipi_selected(PCPU_GET(other_cpus), ipi);
}

int
ipi_nmi_handler()
{
	cpumask_t cpumask;

	/*
	 * As long as there is not a simple way to know about a NMI's
	 * source, if the bitmask for the current CPU is present in
	 * the global pending bitword an IPI_STOP_HARD has been issued
	 * and should be handled.
	 */
	cpumask = PCPU_GET(cpumask);
	if ((ipi_nmi_pending & cpumask) == 0)
		return (1);

	atomic_clear_int(&ipi_nmi_pending, cpumask);
	cpustop_handler();
	return (0);
}

/*
 * Handle an IPI_STOP by saving our current context and spinning until we
 * are resumed.
 */
void
cpustop_handler(void)
{
	int cpu = PCPU_GET(cpuid);
	int cpumask = PCPU_GET(cpumask);

	savectx(&stoppcbs[cpu]);

	/* Indicate that we are stopped */
	atomic_set_int(&stopped_cpus, cpumask);

	/* Wait for restart */
	while (!(started_cpus & cpumask))
		ia32_pause();

	atomic_clear_int(&started_cpus, cpumask);
	atomic_clear_int(&stopped_cpus, cpumask);

	if (cpu == 0 && cpustop_restartfunc != NULL) {
		cpustop_restartfunc();
		cpustop_restartfunc = NULL;
	}
}

/*
 * This is called once the rest of the system is up and running and we're
 * ready to let the APs out of the pen.
 */
static void
release_aps(void *dummy __unused)
{

	if (mp_ncpus == 1)
		return;
	atomic_store_rel_int(&aps_ready, 1);
	while (smp_started == 0)
		ia32_pause();
}
SYSINIT(start_aps, SI_SUB_SMP, SI_ORDER_FIRST, release_aps, NULL);
SYSINIT(start_ipis, SI_SUB_INTR, SI_ORDER_ANY, xen_smp_intr_init_cpus, NULL);
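
/*
 * Ordering note (assuming the standard SYSINIT subsystem ordering, in
 * which SI_SUB_INTR runs well before SI_SUB_SMP): the per-CPU IPI event
 * channels are bound by xen_smp_intr_init_cpus() before release_aps()
 * lets the APs out of the pen and into the scheduler.
 */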