166458Sdfr/*-
2145092Smarcel * Copyright (c) 2001-2005 Marcel Moolenaar
366458Sdfr * Copyright (c) 2000 Doug Rabson
466458Sdfr * All rights reserved.
566458Sdfr *
666458Sdfr * Redistribution and use in source and binary forms, with or without
766458Sdfr * modification, are permitted provided that the following conditions
866458Sdfr * are met:
966458Sdfr * 1. Redistributions of source code must retain the above copyright
1066458Sdfr *    notice, this list of conditions and the following disclaimer.
1166458Sdfr * 2. Redistributions in binary form must reproduce the above copyright
1266458Sdfr *    notice, this list of conditions and the following disclaimer in the
1366458Sdfr *    documentation and/or other materials provided with the distribution.
1466458Sdfr *
1566458Sdfr * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
1666458Sdfr * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
1766458Sdfr * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
1866458Sdfr * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
1966458Sdfr * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
2066458Sdfr * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
2166458Sdfr * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
2266458Sdfr * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
2366458Sdfr * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
2466458Sdfr * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
2566458Sdfr * SUCH DAMAGE.
2666458Sdfr */
2766458Sdfr
28145092Smarcel#include <sys/cdefs.h>
29145092Smarcel__FBSDID("$FreeBSD$");
30145092Smarcel
31118239Speter#include "opt_kstack_pages.h"
32118239Speter
3366458Sdfr#include <sys/param.h>
3466458Sdfr#include <sys/systm.h>
3566458Sdfr#include <sys/ktr.h>
3666458Sdfr#include <sys/proc.h>
37171740Smarcel#include <sys/bus.h>
38192918Srink#include <sys/kthread.h>
3966458Sdfr#include <sys/lock.h>
4066458Sdfr#include <sys/malloc.h>
4174733Sjhb#include <sys/mutex.h>
4266458Sdfr#include <sys/kernel.h>
4376440Sjhb#include <sys/pcpu.h>
44170359Smarcel#include <sys/sched.h>
4576078Sjhb#include <sys/smp.h>
4666458Sdfr#include <sys/sysctl.h>
4797443Smarcel#include <sys/uuid.h>
4866458Sdfr
4966458Sdfr#include <machine/atomic.h>
50221271Smarcel#include <machine/bootinfo.h>
51171740Smarcel#include <machine/cpu.h>
52118990Smarcel#include <machine/fpu.h>
53171740Smarcel#include <machine/intr.h>
54118990Smarcel#include <machine/mca.h>
55118990Smarcel#include <machine/md_var.h>
5685276Smarcel#include <machine/pal.h>
5792287Sdfr#include <machine/pcb.h>
5885656Smarcel#include <machine/sal.h>
5991779Sjake#include <machine/smp.h>
6066458Sdfr
61205234Smarcel#include <vm/vm.h>
62205234Smarcel#include <vm/pmap.h>
63205234Smarcel#include <vm/vm_extern.h>
64205234Smarcel#include <vm/vm_kern.h>
65205234Smarcel
/* Start of the kernel data that cpu_mp_start() maps into each AP. */
extern uint64_t bdata[];

MALLOC_DEFINE(M_SMP, "SMP", "SMP related allocations");

/* AP entry point, reached from the OS boot rendezvous code. */
void ia64_ap_startup(void);

/* Pack/unpack the 8-bit id and 8-bit eid halves of a SAPIC id. */
#define	SAPIC_ID_GET_ID(x)	((u_int)((x) >> 8) & 0xff)
#define	SAPIC_ID_GET_EID(x)	((u_int)(x) & 0xff)
#define	SAPIC_ID_SET(id, eid)	((u_int)(((id) & 0xff) << 8) | ((eid) & 0xff))

/* State used to wake and bootstrap APs. */
struct ia64_ap_state ia64_ap_state;

/* External interrupt vectors (XIVs) allocated for IPIs in cpu_mp_unleash(). */
int ia64_ipi_ast;
int ia64_ipi_hardclock;
int ia64_ipi_highfp;
int ia64_ipi_nmi;
int ia64_ipi_preempt;
int ia64_ipi_rndzvs;
int ia64_ipi_stop;
8685656Smarcel
/*
 * Return the smallest page shift, starting at 12 (4KB), whose page
 * size covers at least sz bytes.
 */
static u_int
sz2shft(uint64_t sz)
{
	uint64_t pgsz;
	u_int shft;

	for (shft = 12, pgsz = 1UL << 12; pgsz < sz; shft++)
		pgsz <<= 1;
	return (shft);
}
101221271Smarcel
/*
 * XIV handler for the AST IPI: bump the per-CPU AST counter and trace
 * the event.  No further work is done here.
 */
static u_int
ia64_ih_ast(struct thread *td, u_int xiv, struct trapframe *tf)
{

	PCPU_INC(md.stats.pcs_nasts);
	CTR1(KTR_SMP, "IPI_AST, cpuid=%d", PCPU_GET(cpuid));
	return (0);
}
110205234Smarcel
/*
 * XIV handler for the hardclock IPI: count it, trace it and forward to
 * the machine-independent hardclockintr().
 */
static u_int
ia64_ih_hardclock(struct thread *td, u_int xiv, struct trapframe *tf)
{

	PCPU_INC(md.stats.pcs_nhardclocks);
	CTR1(KTR_SMP, "IPI_HARDCLOCK, cpuid=%d", PCPU_GET(cpuid));
	hardclockintr();
	return (0);
}
120223526Smarcel
/*
 * XIV handler for the high-FP IPI: bump the per-CPU counter and call
 * ia64_highfp_save_ipi() to save the high floating-point state on this
 * CPU.
 */
static u_int
ia64_ih_highfp(struct thread *td, u_int xiv, struct trapframe *tf)
{

	PCPU_INC(md.stats.pcs_nhighfps);
	ia64_highfp_save_ipi();
	return (0);
}
129205234Smarcel
/*
 * XIV handler for the preempt IPI: count and trace it, then ask the
 * scheduler to preempt the currently running thread.
 */
static u_int
ia64_ih_preempt(struct thread *td, u_int xiv, struct trapframe *tf)
{

	PCPU_INC(md.stats.pcs_npreempts);
	CTR1(KTR_SMP, "IPI_PREEMPT, cpuid=%d", PCPU_GET(cpuid));
	sched_preempt(curthread);
	return (0);
}
139205234Smarcel
/*
 * XIV handler for the rendezvous IPI: count and trace it, then take
 * part in the pending rendezvous via smp_rendezvous_action().
 */
static u_int
ia64_ih_rndzvs(struct thread *td, u_int xiv, struct trapframe *tf)
{

	PCPU_INC(md.stats.pcs_nrdvs);
	CTR1(KTR_SMP, "IPI_RENDEZVOUS, cpuid=%d", PCPU_GET(cpuid));
	smp_rendezvous_action();
	return (0);
}
149205234Smarcel
/*
 * XIV handler for the stop IPI (also registered on the NMI vector for
 * hard stops when possible; see cpu_mp_unleash()).  Save this CPU's
 * register context, mark ourselves stopped, then spin until another
 * CPU sets our bit in started_cpus.  Both bits are cleared before
 * returning so a later stop/start cycle works.
 */
static u_int
ia64_ih_stop(struct thread *td, u_int xiv, struct trapframe *tf)
{
	u_int cpuid;

	PCPU_INC(md.stats.pcs_nstops);
	cpuid = PCPU_GET(cpuid);

	/* Save our register context into the per-CPU PCB. */
	savectx(PCPU_PTR(md.pcb));

	CPU_SET_ATOMIC(cpuid, &stopped_cpus);
	while (!CPU_ISSET(cpuid, &started_cpus))
		cpu_spinwait();
	CPU_CLR_ATOMIC(cpuid, &started_cpus);
	CPU_CLR_ATOMIC(cpuid, &stopped_cpus);
	return (0);
}
167205234Smarcel
/*
 * Describe the CPU topology to the scheduler: no sharing information
 * is exposed, so report a flat topology.
 */
struct cpu_group *
cpu_topo(void)
{

	/* Parenthesized return value per style(9), as elsewhere in this file. */
	return (smp_topo_none());
}
174176734Sjeff
/*
 * Kernel-process body (one instance created per awake CPU from
 * cpu_mp_unleash()): bind to the CPU identified by the pcpu argument,
 * perform the per-AP MCA initialization, save that CPU's machine-check
 * records and exit.
 */
static void
ia64_store_mca_state(void* arg)
{
	struct pcpu *pc = arg;
	struct thread *td = curthread;

	/*
	 * ia64_mca_save_state() is CPU-sensitive, so bind ourself to our
	 * target CPU.
	 */
	thread_lock(td);
	sched_bind(td, pc->pc_cpuid);
	thread_unlock(td);

	ia64_mca_init_ap();

	/*
	 * Get and save the CPU specific MCA records. Should we get the
	 * MCA state for each processor, or just the CMC state?
	 */
	ia64_mca_save_state(SAL_INFO_MCA);
	ia64_mca_save_state(SAL_INFO_CMC);

	/* One-shot kproc: exit rather than return. */
	kproc_exit(0);
}
200192918Srink
/*
 * C entry point for an application processor (AP), reached from the OS
 * boot rendezvous code.  Sets up region registers, the per-CPU pointer
 * and this CPU's VHPT, maps the PAL code and the gateway page, reports
 * itself awake, and then spins until cpu_mp_unleash() clears as_spin.
 * Finally it enters the scheduler via sched_throw() and never returns.
 * The as_trace markers let the BSP see how far we got if we wedge.
 */
void
ia64_ap_startup(void)
{
	uint64_t vhpt;

	ia64_ap_state.as_trace = 0x100;

	/* Initialize region registers 5-7 with PAGE_SHIFT-sized pages. */
	ia64_set_rr(IA64_RR_BASE(5), (5 << 8) | (PAGE_SHIFT << 2) | 1);
	ia64_set_rr(IA64_RR_BASE(6), (6 << 8) | (PAGE_SHIFT << 2));
	ia64_set_rr(IA64_RR_BASE(7), (7 << 8) | (PAGE_SHIFT << 2));
	ia64_srlz_d();

	/* Adopt the pcpu structure the BSP allocated for us. */
	pcpup = ia64_ap_state.as_pcpu;
	ia64_set_k4((intptr_t)pcpup);

	ia64_ap_state.as_trace = 0x108;

	/* Wire this CPU's VHPT and point the PTA at it. */
	vhpt = PCPU_GET(md.vhpt);
	map_vhpt(vhpt);
	ia64_set_pta(vhpt + (1 << 8) + (pmap_vhpt_log2size << 2) + 1);
	ia64_srlz_i();

	ia64_ap_state.as_trace = 0x110;

	/* Tell the BSP we got this far; it polls as_awake/as_delay. */
	ia64_ap_state.as_awake = 1;
	ia64_ap_state.as_delay = 0;

	map_pal_code();
	map_gateway_page();

	ia64_set_fpsr(IA64_FPSR_DEFAULT);

	/* Wait until it's time for us to be unleashed */
	while (ia64_ap_state.as_spin)
		cpu_spinwait();

	/* Initialize curthread. */
	KASSERT(PCPU_GET(idlethread) != NULL, ("no idle thread"));
	PCPU_SET(curthread, PCPU_GET(idlethread));

	/* Check in with the BSP, then wait for the MI SMP startup. */
	atomic_add_int(&ia64_ap_state.as_awake, 1);
	while (!smp_started)
		cpu_spinwait();

	CTR1(KTR_SMP, "SMP: cpu%d launched", PCPU_GET(cpuid));

	cpu_initclocks();

	/* Clear the TPR and enable external interrupts. */
	ia64_set_tpr(0);
	ia64_srlz_d();

	ia64_enable_intr();

	sched_throw(NULL);
	/* NOTREACHED */
}
25785656Smarcel
/*
 * Early SMP initialization: establish mp_ncpus and mp_maxid for the
 * machine-independent SMP code.
 */
void
cpu_mp_setmaxid(void)
{

	/*
	 * Count the number of processors in the system by walking the ACPI
	 * tables. Note that we record the actual number of processors, even
	 * if this is larger than MAXCPU. We only activate MAXCPU processors.
	 */
	mp_ncpus = ia64_count_cpus();

	/*
	 * Set the largest cpuid we're going to use. This is necessary for
	 * VM initialization.
	 */
	mp_maxid = min(mp_ncpus, MAXCPU) - 1;
}
27596442Smarcel
/*
 * Report whether we will actually start APs: requires more than one
 * processor and a valid wake-up vector.
 */
int
cpu_mp_probe(void)
{

	/*
	 * If there's only 1 processor, or we don't have a wake-up vector,
	 * we're not going to enable SMP. Note that no wake-up vector can
	 * also mean that the wake-up mechanism is not supported. In this
	 * case we can have multiple processors, but we simply can't wake
	 * them up...
	 */
	return (mp_ncpus > 1 && ia64_ipi_wakeup != 0);
}
28966458Sdfr
29066458Sdfrvoid
291221271Smarcelcpu_mp_add(u_int acpi_id, u_int id, u_int eid)
29266458Sdfr{
29388695Smarcel	struct pcpu *pc;
294194784Sjeff	void *dpcpu;
295221271Smarcel	u_int cpuid, sapic_id;
29685656Smarcel
297221271Smarcel	sapic_id = SAPIC_ID_SET(id, eid);
298221271Smarcel	cpuid = (IA64_LID_GET_SAPIC_ID(ia64_get_lid()) == sapic_id)
299221271Smarcel	    ? 0 : smp_cpus++;
30085656Smarcel
301222813Sattilio	KASSERT(!CPU_ISSET(cpuid, &all_cpus),
302221271Smarcel	    ("%s: cpu%d already in CPU map", __func__, acpi_id));
30388695Smarcel
304196268Smarcel	if (cpuid != 0) {
305178309Smarcel		pc = (struct pcpu *)malloc(sizeof(*pc), M_SMP, M_WAITOK);
306196268Smarcel		pcpu_init(pc, cpuid, sizeof(*pc));
307194784Sjeff		dpcpu = (void *)kmem_alloc(kernel_map, DPCPU_SIZE);
308196268Smarcel		dpcpu_init(dpcpu, cpuid);
30988695Smarcel	} else
31088695Smarcel		pc = pcpup;
31188695Smarcel
312221271Smarcel	pc->pc_acpi_id = acpi_id;
313221271Smarcel	pc->pc_md.lid = IA64_LID_SET_SAPIC_ID(sapic_id);
314221271Smarcel
315222813Sattilio	CPU_SET(pc->pc_cpuid, &all_cpus);
31666458Sdfr}
31766458Sdfr
31866458Sdfrvoid
31976078Sjhbcpu_mp_announce()
32066458Sdfr{
32188695Smarcel	struct pcpu *pc;
322221271Smarcel	uint32_t sapic_id;
32388695Smarcel	int i;
32485656Smarcel
32596442Smarcel	for (i = 0; i <= mp_maxid; i++) {
32688695Smarcel		pc = pcpu_find(i);
32788695Smarcel		if (pc != NULL) {
328221271Smarcel			sapic_id = IA64_LID_GET_SAPIC_ID(pc->pc_md.lid);
329196268Smarcel			printf("cpu%d: ACPI Id=%x, SAPIC Id=%x, SAPIC Eid=%x",
330221271Smarcel			    i, pc->pc_acpi_id, SAPIC_ID_GET_ID(sapic_id),
331221271Smarcel			    SAPIC_ID_GET_EID(sapic_id));
33288695Smarcel			if (i == 0)
33388695Smarcel				printf(" (BSP)\n");
33488695Smarcel			else
33588695Smarcel				printf("\n");
33688695Smarcel		}
33785656Smarcel	}
33866458Sdfr}
33966458Sdfr
34085656Smarcelvoid
34185656Smarcelcpu_mp_start()
34285656Smarcel{
343221271Smarcel	struct ia64_sal_result result;
344221271Smarcel	struct ia64_fdesc *fd;
34588695Smarcel	struct pcpu *pc;
346221271Smarcel	uintptr_t state;
347221271Smarcel	u_char *stp;
34885656Smarcel
349221271Smarcel	state = ia64_tpa((uintptr_t)&ia64_ap_state);
350221271Smarcel	fd = (struct ia64_fdesc *) os_boot_rendez;
351221271Smarcel	result = ia64_sal_entry(SAL_SET_VECTORS, SAL_OS_BOOT_RENDEZ,
352221271Smarcel	    ia64_tpa(fd->func), state, 0, 0, 0, 0);
35392287Sdfr
354221271Smarcel	ia64_ap_state.as_pgtbl_pte = PTE_PRESENT | PTE_MA_WB |
355221271Smarcel	    PTE_ACCESSED | PTE_DIRTY | PTE_PL_KERN | PTE_AR_RW |
356221271Smarcel	    (bootinfo->bi_pbvm_pgtbl & PTE_PPN_MASK);
357221271Smarcel	ia64_ap_state.as_pgtbl_itir = sz2shft(bootinfo->bi_pbvm_pgtblsz) << 2;
358221271Smarcel	ia64_ap_state.as_text_va = IA64_PBVM_BASE;
359221271Smarcel	ia64_ap_state.as_text_pte = PTE_PRESENT | PTE_MA_WB |
360221271Smarcel	    PTE_ACCESSED | PTE_DIRTY | PTE_PL_KERN | PTE_AR_RX |
361221271Smarcel	    (ia64_tpa(IA64_PBVM_BASE) & PTE_PPN_MASK);
362221271Smarcel	ia64_ap_state.as_text_itir = bootinfo->bi_text_mapped << 2;
363221271Smarcel	ia64_ap_state.as_data_va = (uintptr_t)bdata;
364221271Smarcel	ia64_ap_state.as_data_pte = PTE_PRESENT | PTE_MA_WB |
365221271Smarcel	    PTE_ACCESSED | PTE_DIRTY | PTE_PL_KERN | PTE_AR_RW |
366221271Smarcel	    (ia64_tpa((uintptr_t)bdata) & PTE_PPN_MASK);
367221271Smarcel	ia64_ap_state.as_data_itir = bootinfo->bi_data_mapped << 2;
368221271Smarcel
369221271Smarcel	/* Keep 'em spinning until we unleash them... */
370221271Smarcel	ia64_ap_state.as_spin = 1;
371221271Smarcel
372222531Snwhitehorn	STAILQ_FOREACH(pc, &cpuhead, pc_allcpu) {
373200207Smarcel		pc->pc_md.current_pmap = kernel_pmap;
374221271Smarcel		/* The BSP is obviously running already. */
375221271Smarcel		if (pc->pc_cpuid == 0) {
376221271Smarcel			pc->pc_md.awake = 1;
377221271Smarcel			continue;
378221271Smarcel		}
37986291Smarcel
380221271Smarcel		ia64_ap_state.as_pcpu = pc;
381221271Smarcel		pc->pc_md.vhpt = pmap_alloc_vhpt();
382221271Smarcel		if (pc->pc_md.vhpt == 0) {
383221271Smarcel			printf("SMP: WARNING: unable to allocate VHPT"
384221271Smarcel			    " for cpu%d", pc->pc_cpuid);
385221271Smarcel			continue;
386221271Smarcel		}
38786291Smarcel
388221271Smarcel		stp = malloc(KSTACK_PAGES * PAGE_SIZE, M_SMP, M_WAITOK);
389221271Smarcel		ia64_ap_state.as_kstack = stp;
390221271Smarcel		ia64_ap_state.as_kstack_top = stp + KSTACK_PAGES * PAGE_SIZE;
39186291Smarcel
392221271Smarcel		ia64_ap_state.as_trace = 0;
393221271Smarcel		ia64_ap_state.as_delay = 2000;
394221271Smarcel		ia64_ap_state.as_awake = 0;
39586291Smarcel
396221271Smarcel		if (bootverbose)
397221271Smarcel			printf("SMP: waking up cpu%d\n", pc->pc_cpuid);
398221271Smarcel
399221271Smarcel		/* Here she goes... */
400221271Smarcel		ipi_send(pc, ia64_ipi_wakeup);
401221271Smarcel		do {
402221271Smarcel			DELAY(1000);
403221271Smarcel		} while (--ia64_ap_state.as_delay > 0);
404221271Smarcel
405221271Smarcel		pc->pc_md.awake = ia64_ap_state.as_awake;
406221271Smarcel
407221271Smarcel		if (!ia64_ap_state.as_awake) {
408221271Smarcel			printf("SMP: WARNING: cpu%d did not wake up (code "
409221271Smarcel			    "%#lx)\n", pc->pc_cpuid,
410221271Smarcel			    ia64_ap_state.as_trace - state);
411221271Smarcel		}
41285656Smarcel	}
41385656Smarcel}
41485656Smarcel
/*
 * SYSINIT hook (SI_SUB_SMP; see the bottom of this file): allocate the
 * interrupt vectors (XIVs) used for IPIs, spawn an MCA state-saving
 * kproc for every CPU that woke up, release the APs spinning in
 * ia64_ap_startup(), wait for them all to check in, and finally bind
 * device interrupts across the CPUs.
 */
static void
cpu_mp_unleash(void *dummy)
{
	struct pcpu *pc;
	int cpus;

	/* Nothing to do on a uniprocessor. */
	if (mp_ncpus <= 1)
		return;

	/* Allocate XIVs for IPIs */
	ia64_ipi_ast = ia64_xiv_alloc(PI_DULL, IA64_XIV_IPI, ia64_ih_ast);
	ia64_ipi_hardclock = ia64_xiv_alloc(PI_REALTIME, IA64_XIV_IPI,
	    ia64_ih_hardclock);
	ia64_ipi_highfp = ia64_xiv_alloc(PI_AV, IA64_XIV_IPI, ia64_ih_highfp);
	ia64_ipi_preempt = ia64_xiv_alloc(PI_SOFT, IA64_XIV_IPI,
	    ia64_ih_preempt);
	ia64_ipi_rndzvs = ia64_xiv_alloc(PI_AV, IA64_XIV_IPI, ia64_ih_rndzvs);
	ia64_ipi_stop = ia64_xiv_alloc(PI_REALTIME, IA64_XIV_IPI, ia64_ih_stop);

	/* Reserve the NMI vector for IPI_STOP_HARD if possible */
	ia64_ipi_nmi = (ia64_xiv_reserve(2, IA64_XIV_IPI, ia64_ih_stop) != 0)
	    ? ia64_ipi_stop : 0x400;	/* DM=NMI, Vector=n/a */

	/* Count all CPUs and start an MCA kproc on each awake one. */
	cpus = 0;
	smp_cpus = 0;
	STAILQ_FOREACH(pc, &cpuhead, pc_allcpu) {
		cpus++;
		if (pc->pc_md.awake) {
			kproc_create(ia64_store_mca_state, pc, NULL, 0, 0,
			    "mca %u", pc->pc_cpuid);
			smp_cpus++;
		}
	}

	/* Let the APs fall out of their spin loops. */
	ia64_ap_state.as_awake = 1;
	ia64_ap_state.as_spin = 0;

	/*
	 * as_awake starts at 1 (counting the BSP); each unleashed AP
	 * adds one, so it converges on smp_cpus.
	 */
	while (ia64_ap_state.as_awake != smp_cpus)
		cpu_spinwait();

	if (smp_cpus != cpus || cpus != mp_ncpus) {
		printf("SMP: %d CPUs found; %d CPUs usable; %d CPUs woken\n",
		    mp_ncpus, cpus, smp_cpus);
	}

	smp_active = 1;
	smp_started = 1;

	/*
	 * Now that all CPUs are up and running, bind interrupts to each of
	 * them.
	 */
	ia64_bind_intr();
}
46985656Smarcel
47066458Sdfr/*
47166458Sdfr * send an IPI to a set of cpus.
47266458Sdfr */
47366458Sdfrvoid
474222813Sattilioipi_selected(cpuset_t cpus, int ipi)
47566458Sdfr{
47688695Smarcel	struct pcpu *pc;
47766458Sdfr
478222531Snwhitehorn	STAILQ_FOREACH(pc, &cpuhead, pc_allcpu) {
479223758Sattilio		if (CPU_ISSET(pc->pc_cpuid, &cpus))
480148807Smarcel			ipi_send(pc, ipi);
48185276Smarcel	}
48266458Sdfr}
48366458Sdfr
48466458Sdfr/*
485210939Sjhb * send an IPI to a specific CPU.
486210939Sjhb */
487210939Sjhbvoid
488210939Sjhbipi_cpu(int cpu, u_int ipi)
489210939Sjhb{
490210939Sjhb
491210939Sjhb	ipi_send(cpuid_to_pcpu[cpu], ipi);
492210939Sjhb}
493210939Sjhb
494210939Sjhb/*
49585656Smarcel * send an IPI to all CPUs EXCEPT myself.
49666458Sdfr */
49766458Sdfrvoid
49885656Smarcelipi_all_but_self(int ipi)
49966458Sdfr{
50088695Smarcel	struct pcpu *pc;
50185276Smarcel
502222531Snwhitehorn	STAILQ_FOREACH(pc, &cpuhead, pc_allcpu) {
50388695Smarcel		if (pc != pcpup)
504148807Smarcel			ipi_send(pc, ipi);
50585276Smarcel	}
50666458Sdfr}
50766458Sdfr
/*
 * Send an IPI to the specified processor by storing the XIV into the
 * target CPU's slot in the processor interrupt block (ia64_pib).  The
 * store is bracketed by ia64_mf() and ia64_mf_a() memory fences to
 * order it against surrounding memory traffic; mf.a presumably forces
 * platform acceptance of the store -- confirm against the Itanium ISA.
 */
void
ipi_send(struct pcpu *cpu, int xiv)
{
	u_int sapic_id;

	KASSERT(xiv != 0, ("ipi_send"));

	sapic_id = IA64_LID_GET_SAPIC_ID(cpu->pc_md.lid);

	ia64_mf();
	ia64_st8(&(ia64_pib->ib_ipi[sapic_id][0]), xiv);
	ia64_mf_a();
	CTR3(KTR_SMP, "ipi_send(%p, %d): cpuid=%d", cpu, xiv, PCPU_GET(cpuid));
}
52585656Smarcel
/* Unleash the APs at SI_SUB_SMP, once the rest of the kernel is up. */
SYSINIT(start_aps, SI_SUB_SMP, SI_ORDER_FIRST, cpu_mp_unleash, NULL);
527