mp_x86.c revision 75393
1/*
2 * Copyright (c) 1996, by Steve Passe
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 *    notice, this list of conditions and the following disclaimer.
10 * 2. The name of the developer may NOT be used to endorse or promote products
11 *    derived from this software without specific prior written permission.
12 *
13 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
14 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
16 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
17 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
18 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
19 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
20 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
21 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
22 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
23 * SUCH DAMAGE.
24 *
25 * $FreeBSD: head/sys/i386/i386/mp_machdep.c 75393 2001-04-10 21:34:13Z jhb $
26 */
27
28#include "opt_cpu.h"
29
30#ifdef SMP
31#include <machine/smptests.h>
32#else
33#error "the SMP option is required to build this file"
34#endif
35
36#include <sys/param.h>
37#include <sys/bus.h>
38#include <sys/systm.h>
39#include <sys/kernel.h>
40#include <sys/proc.h>
41#include <sys/sysctl.h>
42#include <sys/malloc.h>
43#include <sys/memrange.h>
44#include <sys/mutex.h>
45#include <sys/dkstat.h>
46#include <sys/cons.h>	/* cngetc() */
47
48#include <vm/vm.h>
49#include <vm/vm_param.h>
50#include <vm/pmap.h>
51#include <vm/vm_kern.h>
52#include <vm/vm_extern.h>
53#include <sys/lock.h>
54#include <vm/vm_map.h>
55#include <sys/user.h>
56#ifdef GPROF
57#include <sys/gmon.h>
58#endif
59
60#include <machine/smp.h>
61#include <machine/apic.h>
62#include <machine/atomic.h>
63#include <machine/cpufunc.h>
64#include <machine/ipl.h>
65#include <machine/mpapic.h>
66#include <machine/psl.h>
67#include <machine/segments.h>
68#include <machine/smptests.h>	/** TEST_DEFAULT_CONFIG, TEST_TEST1 */
69#include <machine/tss.h>
70#include <machine/specialreg.h>
71#include <machine/globaldata.h>
72
73#if defined(APIC_IO)
74#include <machine/md_var.h>		/* setidt() */
75#include <i386/isa/icu.h>		/* IPIs */
76#include <i386/isa/intr_machdep.h>	/* IPIs */
77#endif	/* APIC_IO */
78
79#if defined(TEST_DEFAULT_CONFIG)
80#define MPFPS_MPFB1	TEST_DEFAULT_CONFIG
81#else
82#define MPFPS_MPFB1	mpfps->mpfb1
83#endif  /* TEST_DEFAULT_CONFIG */
84
85#define WARMBOOT_TARGET		0
86#define WARMBOOT_OFF		(KERNBASE + 0x0467)
87#define WARMBOOT_SEG		(KERNBASE + 0x0469)
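/*
 * The BIOS warm-reset vector lives at 40:67 in real mode: a 16-bit offset
 * word at physical 0x467 and a 16-bit segment word at 0x469.  When the CMOS
 * shutdown status byte requests a warm start, the BIOS jumps through this
 * vector after a processor reset instead of running a full POST.
 */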
88
89#ifdef PC98
90#define BIOS_BASE		(0xe8000)
91#define BIOS_SIZE		(0x18000)
92#else
93#define BIOS_BASE		(0xf0000)
94#define BIOS_SIZE		(0x10000)
95#endif
96#define BIOS_COUNT		(BIOS_SIZE/4)
97
98#define CMOS_REG		(0x70)
99#define CMOS_DATA		(0x71)
100#define BIOS_RESET		(0x0f)
101#define BIOS_WARM		(0x0a)
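/*
 * BIOS_RESET is the CMOS shutdown status byte (register 0x0f); writing
 * BIOS_WARM (0x0a) there tells the BIOS to skip the normal POST and resume
 * through the warm-reset vector above on the next processor reset.
 */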
102
103#define PROCENTRY_FLAG_EN	0x01
104#define PROCENTRY_FLAG_BP	0x02
105#define IOAPICENTRY_FLAG_EN	0x01
106
107
108/* MP Floating Pointer Structure */
109typedef struct MPFPS {
110	char    signature[4];
111	void   *pap;
112	u_char  length;
113	u_char  spec_rev;
114	u_char  checksum;
115	u_char  mpfb1;
116	u_char  mpfb2;
117	u_char  mpfb3;
118	u_char  mpfb4;
119	u_char  mpfb5;
120}      *mpfps_t;
121
122/* MP Configuration Table Header */
123typedef struct MPCTH {
124	char    signature[4];
125	u_short base_table_length;
126	u_char  spec_rev;
127	u_char  checksum;
128	u_char  oem_id[8];
129	u_char  product_id[12];
130	void   *oem_table_pointer;
131	u_short oem_table_size;
132	u_short entry_count;
133	void   *apic_address;
134	u_short extended_table_length;
135	u_char  extended_table_checksum;
136	u_char  reserved;
137}      *mpcth_t;
138
139
140typedef struct PROCENTRY {
141	u_char  type;
142	u_char  apic_id;
143	u_char  apic_version;
144	u_char  cpu_flags;
145	u_long  cpu_signature;
146	u_long  feature_flags;
147	u_long  reserved1;
148	u_long  reserved2;
149}      *proc_entry_ptr;
150
151typedef struct BUSENTRY {
152	u_char  type;
153	u_char  bus_id;
154	char    bus_type[6];
155}      *bus_entry_ptr;
156
157typedef struct IOAPICENTRY {
158	u_char  type;
159	u_char  apic_id;
160	u_char  apic_version;
161	u_char  apic_flags;
162	void   *apic_address;
163}      *io_apic_entry_ptr;
164
165typedef struct INTENTRY {
166	u_char  type;
167	u_char  int_type;
168	u_short int_flags;
169	u_char  src_bus_id;
170	u_char  src_bus_irq;
171	u_char  dst_apic_id;
172	u_char  dst_apic_int;
173}      *int_entry_ptr;
174
175/* descriptions of MP basetable entries */
176typedef struct BASETABLE_ENTRY {
177	u_char  type;
178	u_char  length;
179	char    name[16];
180}       basetable_entry;
181
182/*
183 * This code MUST be enabled here and in mpboot.s.
184 * It traces the very early stages of AP boot by placing values in CMOS RAM.
185 * It is NORMALLY never needed, hence the primitive method of enabling it:
186 *
187#define CHECK_POINTS
188 */
189
190#if defined(CHECK_POINTS) && !defined(PC98)
191#define CHECK_READ(A)	 (outb(CMOS_REG, (A)), inb(CMOS_DATA))
192#define CHECK_WRITE(A,D) (outb(CMOS_REG, (A)), outb(CMOS_DATA, (D)))
193
194#define CHECK_INIT(D);				\
195	CHECK_WRITE(0x34, (D));			\
196	CHECK_WRITE(0x35, (D));			\
197	CHECK_WRITE(0x36, (D));			\
198	CHECK_WRITE(0x37, (D));			\
199	CHECK_WRITE(0x38, (D));			\
200	CHECK_WRITE(0x39, (D));
201
202#define CHECK_PRINT(S);				\
203	printf("%s: %d, %d, %d, %d, %d, %d\n",	\
204	   (S),					\
205	   CHECK_READ(0x34),			\
206	   CHECK_READ(0x35),			\
207	   CHECK_READ(0x36),			\
208	   CHECK_READ(0x37),			\
209	   CHECK_READ(0x38),			\
210	   CHECK_READ(0x39));
211
212#else				/* CHECK_POINTS */
213
214#define CHECK_INIT(D)
215#define CHECK_PRINT(S)
216
217#endif				/* CHECK_POINTS */
218
219/*
220 * Values to send to the POST hardware.
221 */
222#define MP_BOOTADDRESS_POST	0x10
223#define MP_PROBE_POST		0x11
224#define MPTABLE_PASS1_POST	0x12
225
226#define MP_START_POST		0x13
227#define MP_ENABLE_POST		0x14
228#define MPTABLE_PASS2_POST	0x15
229
230#define START_ALL_APS_POST	0x16
231#define INSTALL_AP_TRAMP_POST	0x17
232#define START_AP_POST		0x18
233
234#define MP_ANNOUNCE_POST	0x19
235
236/* used to hold the APs until we are ready to release them */
237struct mtx			ap_boot_mtx;
238
239/** XXX FIXME: where does this really belong, isa.h/isa.c perhaps? */
240int	current_postcode;
241
242/** XXX FIXME: what system files declare these??? */
243extern struct region_descriptor r_gdt, r_idt;
244
245int	bsp_apic_ready = 0;	/* flags usability of BSP APIC */
246int	mp_ncpus;		/* # of CPUs, including BSP */
247int	mp_naps;		/* # of Application Processors (APs) */
248int	mp_nbusses;		/* # of busses */
249int	mp_napics;		/* # of IO APICs */
250int	boot_cpu_id;		/* designated BSP */
251vm_offset_t cpu_apic_address;
252vm_offset_t io_apic_address[NAPICID];	/* NAPICID is more than enough */
253extern	int nkpt;
254
255u_int32_t cpu_apic_versions[MAXCPU];
256u_int32_t *io_apic_versions;
257
258#ifdef APIC_INTR_REORDER
259struct {
260	volatile int *location;
261	int bit;
262} apic_isrbit_location[32];
263#endif
264
265struct apic_intmapinfo	int_to_apicintpin[APIC_INTMAPSIZE];
266
267/*
268 * APIC ID logical/physical mapping structures.
269 * We oversize these to simplify boot-time config.
270 */
271int     cpu_num_to_apic_id[NAPICID];
272int     io_num_to_apic_id[NAPICID];
273int     apic_id_to_logical[NAPICID];
274
275
276/* Bitmap of all available CPUs */
277u_int	all_cpus;
278
279/* AP uses this during bootstrap.  Do not staticize.  */
280char *bootSTK;
281static int bootAP;
282
283/* Hotwire a 0->4MB V==P mapping */
284extern pt_entry_t *KPTphys;
285
286/* SMP page table page */
287extern pt_entry_t *SMPpt;
288
289struct pcb stoppcbs[MAXCPU];
290
291int smp_started;		/* has the system started? */
292int smp_active = 0;		/* are the APs allowed to run? */
293SYSCTL_INT(_machdep, OID_AUTO, smp_active, CTLFLAG_RW, &smp_active, 0, "");
294
295/* XXX maybe should be hw.ncpu */
296static int smp_cpus = 1;	/* how many CPUs are running */
297SYSCTL_INT(_machdep, OID_AUTO, smp_cpus, CTLFLAG_RD, &smp_cpus, 0, "");
298
299int invltlb_ok = 0;	/* throttle smp_invltlb() till safe */
300SYSCTL_INT(_machdep, OID_AUTO, invltlb_ok, CTLFLAG_RW, &invltlb_ok, 0, "");
301
302/* Enable forwarding of a signal to a process running on a different CPU */
303static int forward_signal_enabled = 1;
304SYSCTL_INT(_machdep, OID_AUTO, forward_signal_enabled, CTLFLAG_RW,
305	   &forward_signal_enabled, 0, "");
306
307/* Enable forwarding of roundrobin to all other cpus */
308static int forward_roundrobin_enabled = 1;
309SYSCTL_INT(_machdep, OID_AUTO, forward_roundrobin_enabled, CTLFLAG_RW,
310	   &forward_roundrobin_enabled, 0, "");
311
312
313/*
314 * Local data and functions.
315 */
316
317/* Set to 1 once we're ready to let the APs out of the pen. */
318static volatile int aps_ready = 0;
319
320static int	mp_capable;
321static u_int	boot_address;
322static u_int	base_memory;
323
324static int	picmode;		/* 0: virtual wire mode, 1: PIC mode */
325static mpfps_t	mpfps;
326static int	search_for_sig(u_int32_t target, int count);
327static void	mp_enable(u_int boot_addr);
328
329static void	mptable_pass1(void);
330static int	mptable_pass2(void);
331static void	default_mp_table(int type);
332static void	fix_mp_table(void);
333static void	setup_apic_irq_mapping(void);
334static void	init_locks(void);
335static int	start_all_aps(u_int boot_addr);
336static void	install_ap_tramp(u_int boot_addr);
337static int	start_ap(int logicalCpu, u_int boot_addr);
338void		ap_init(void);
339static int	apic_int_is_bus_type(int intr, int bus_type);
340static void	release_aps(void *dummy);
341
342/*
343 * initialize all the SMP locks
344 */
345
346/* critical region around IO APIC, apic_imen */
347struct mtx		imen_mtx;
348
349/* lock region used by kernel profiling */
350struct mtx		mcount_mtx;
351
352#ifdef USE_COMLOCK
353/* locks com (tty) data/hardware accesses: a FASTINTR() */
354struct mtx		com_mtx;
355#endif /* USE_COMLOCK */
356
357/* lock around the MP rendezvous */
358static struct mtx	smp_rv_mtx;
359
360/* only 1 CPU can panic at a time :) */
361struct mtx		panic_mtx;
362
363static void
364init_locks(void)
365{
366	/*
367	 * XXX The mcount mutex probably needs to be statically initialized,
368	 * since it will be used even in the function calls that get us to this
369	 * point.
370	 */
371	mtx_init(&mcount_mtx, "mcount", MTX_DEF);
372
373	mtx_init(&smp_rv_mtx, "smp rendezvous", MTX_SPIN);
374	mtx_init(&panic_mtx, "panic", MTX_DEF);
375
376#ifdef USE_COMLOCK
377	mtx_init(&com_mtx, "com", MTX_SPIN);
378#endif /* USE_COMLOCK */
379
380	mtx_init(&ap_boot_mtx, "ap boot", MTX_SPIN);
381}
382
383/*
384 * Calculate usable address in base memory for AP trampoline code.
385 */
386u_int
387mp_bootaddress(u_int basemem)
388{
389	POSTCODE(MP_BOOTADDRESS_POST);
390
391	base_memory = basemem * 1024;	/* convert to bytes */
392
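	/*
	 * The trampoline page must be 4K-aligned and below 1MB because the
	 * STARTUP IPI can only encode the page number of the real-mode entry
	 * point (see start_ap(): vector = boot_addr >> 12).
	 */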
393	boot_address = base_memory & ~0xfff;	/* round down to 4k boundary */
394	if ((base_memory - boot_address) < bootMP_size)
395		boot_address -= 4096;	/* not enough, lower by 4k */
396
397	return boot_address;
398}
399
400
401/*
402 * Look for an Intel MP spec table (i.e., SMP-capable hardware).
403 */
404int
405mp_probe(void)
406{
407	int     x;
408	u_long  segment;
409	u_int32_t target;
410
411	POSTCODE(MP_PROBE_POST);
412
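	/*
	 * Per MP spec 1.4 section 4, the floating pointer structure may live
	 * in the first 1K of the EBDA (whose segment the BIOS records in the
	 * BDA word at 0x40e), in the last 1K of base memory, or in the BIOS
	 * ROM between 0xf0000 and 0xfffff; check those areas in that order.
	 */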
413	/* see if EBDA exists */
414	if ((segment = (u_long) * (u_short *) (KERNBASE + 0x40e)) != 0) {
415		/* search first 1K of EBDA */
416		target = (u_int32_t) (segment << 4);
417		if ((x = search_for_sig(target, 1024 / 4)) >= 0)
418			goto found;
419	} else {
420		/* last 1K of base memory, effective 'top of base' passed in */
421		target = (u_int32_t) (base_memory - 0x400);
422		if ((x = search_for_sig(target, 1024 / 4)) >= 0)
423			goto found;
424	}
425
426	/* search the BIOS */
427	target = (u_int32_t) BIOS_BASE;
428	if ((x = search_for_sig(target, BIOS_COUNT)) >= 0)
429		goto found;
430
431	/* nothing found */
432	mpfps = (mpfps_t)0;
433	mp_capable = 0;
434	return 0;
435
436found:
437	/* calculate needed resources */
438	mpfps = (mpfps_t)x;
439	mptable_pass1();
440
441	/* flag fact that we are running multiple processors */
442	mp_capable = 1;
443	return 1;
444}
445
446
447/*
448 * Initialize the SMP hardware and the APIC and start up the APs.
449 */
450void
451mp_start(void)
452{
453	POSTCODE(MP_START_POST);
454
455	/* look for MP capable motherboard */
456	if (mp_capable)
457		mp_enable(boot_address);
458	else
459		panic("MP hardware not found!");
460}
461
462
463/*
464 * Print various information about the SMP system hardware and setup.
465 */
466void
467mp_announce(void)
468{
469	int     x;
470
471	POSTCODE(MP_ANNOUNCE_POST);
472
473	printf("FreeBSD/SMP: Multiprocessor motherboard\n");
474	printf(" cpu0 (BSP): apic id: %2d", CPU_TO_ID(0));
475	printf(", version: 0x%08x", cpu_apic_versions[0]);
476	printf(", at 0x%08x\n", cpu_apic_address);
477	for (x = 1; x <= mp_naps; ++x) {
478		printf(" cpu%d (AP):  apic id: %2d", x, CPU_TO_ID(x));
479		printf(", version: 0x%08x", cpu_apic_versions[x]);
480		printf(", at 0x%08x\n", cpu_apic_address);
481	}
482
483#if defined(APIC_IO)
484	for (x = 0; x < mp_napics; ++x) {
485		printf(" io%d (APIC): apic id: %2d", x, IO_TO_ID(x));
486		printf(", version: 0x%08x", io_apic_versions[x]);
487		printf(", at 0x%08x\n", io_apic_address[x]);
488	}
489#else
490	printf(" Warning: APIC I/O disabled\n");
491#endif	/* APIC_IO */
492}
493
494/*
495 * AP CPUs call this to sync up protected mode.
496 */
497void
498init_secondary(void)
499{
500	int	gsel_tss;
501	int	x, myid = bootAP;
502
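	/*
	 * Each AP runs on its own copy of the GDT, located at
	 * gdt[myid * NGDT], with the private-space and TSS descriptors
	 * pointing into this CPU's SMP_prvspace slot.
	 */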
503	gdt_segs[GPRIV_SEL].ssd_base = (int) &SMP_prvspace[myid];
504	gdt_segs[GPROC0_SEL].ssd_base =
505		(int) &SMP_prvspace[myid].globaldata.gd_common_tss;
506	SMP_prvspace[myid].globaldata.gd_prvspace =
507		&SMP_prvspace[myid].globaldata;
508
509	for (x = 0; x < NGDT; x++) {
510		ssdtosd(&gdt_segs[x], &gdt[myid * NGDT + x].sd);
511	}
512
513	r_gdt.rd_limit = NGDT * sizeof(gdt[0]) - 1;
514	r_gdt.rd_base = (int) &gdt[myid * NGDT];
515	lgdt(&r_gdt);			/* does magic intra-segment return */
516
517	lidt(&r_idt);
518
519	lldt(_default_ldt);
520	PCPU_SET(currentldt, _default_ldt);
521
522	gsel_tss = GSEL(GPROC0_SEL, SEL_KPL);
523	gdt[myid * NGDT + GPROC0_SEL].sd.sd_type = SDT_SYS386TSS;
524	PCPU_SET(common_tss.tss_esp0, 0); /* not used until after switch */
525	PCPU_SET(common_tss.tss_ss0, GSEL(GDATA_SEL, SEL_KPL));
526	PCPU_SET(common_tss.tss_ioopt, (sizeof (struct i386tss)) << 16);
527	PCPU_SET(tss_gdt, &gdt[myid * NGDT + GPROC0_SEL].sd);
528	PCPU_SET(common_tssd, *PCPU_GET(tss_gdt));
529	ltr(gsel_tss);
530
531	pmap_set_opt();
532}
533
534
535#if defined(APIC_IO)
536/*
537 * Final configuration of the BSP's local APIC:
538 *  - disable 'pic mode'.
539 *  - disable 'virtual wire mode'.
540 *  - enable NMI.
541 */
542void
543bsp_apic_configure(void)
544{
545	u_char		byte;
546	u_int32_t	temp;
547
548	/* leave 'pic mode' if necessary */
549	if (picmode) {
550		outb(0x22, 0x70);	/* select IMCR */
551		byte = inb(0x23);	/* current contents */
552		byte |= 0x01;		/* mask external INTR */
553		outb(0x23, byte);	/* disconnect 8259s/NMI */
554	}
555
556	/* mask lint0 (the 8259 'virtual wire' connection) */
557	temp = lapic.lvt_lint0;
558	temp |= APIC_LVT_M;		/* set the mask */
559	lapic.lvt_lint0 = temp;
560
561	/* setup lint1 to handle NMI */
562	temp = lapic.lvt_lint1;
563	temp &= ~APIC_LVT_M;		/* clear the mask */
564	lapic.lvt_lint1 = temp;
565
566	if (bootverbose)
567		apic_dump("bsp_apic_configure()");
568}
569#endif  /* APIC_IO */
570
571
572/*******************************************************************
573 * local functions and data
574 */
575
576/*
577 * start the SMP system
578 */
579static void
580mp_enable(u_int boot_addr)
581{
582	int     x;
583#if defined(APIC_IO)
584	int     apic;
585	u_int   ux;
586#endif	/* APIC_IO */
587
588	POSTCODE(MP_ENABLE_POST);
589
590	/* turn on 4MB of V == P addressing so we can get to MP table */
591	*(int *)PTD = PG_V | PG_RW | ((uintptr_t)(void *)KPTphys & PG_FRAME);
592	invltlb();
593
594	/* examine the MP table for needed info, uses physical addresses */
595	x = mptable_pass2();
596
597	*(int *)PTD = 0;
598	invltlb();
599
600	/* can't process default configs till the CPU APIC is pmapped */
601	if (x)
602		default_mp_table(x);
603
604	/* post scan cleanup */
605	fix_mp_table();
606	setup_apic_irq_mapping();
607
608#if defined(APIC_IO)
609
610	/* fill the LOGICAL io_apic_versions table */
611	for (apic = 0; apic < mp_napics; ++apic) {
612		ux = io_apic_read(apic, IOAPIC_VER);
613		io_apic_versions[apic] = ux;
614		io_apic_set_id(apic, IO_TO_ID(apic));
615	}
616
617	/* program each IO APIC in the system */
618	for (apic = 0; apic < mp_napics; ++apic)
619		if (io_apic_setup(apic) < 0)
620			panic("IO APIC setup failure");
621
622	/* install a 'Spurious INTerrupt' vector */
623	setidt(XSPURIOUSINT_OFFSET, Xspuriousint,
624	       SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
625
626	/* install an inter-CPU IPI for TLB invalidation */
627	setidt(XINVLTLB_OFFSET, Xinvltlb,
628	       SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
629
630	/* install an inter-CPU IPI for reading processor state */
631	setidt(XCPUCHECKSTATE_OFFSET, Xcpucheckstate,
632	       SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
633
634	/* install an inter-CPU IPI for all-CPU rendezvous */
635	setidt(XRENDEZVOUS_OFFSET, Xrendezvous,
636	       SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
637
638	/* install an inter-CPU IPI for forcing an additional software trap */
639	setidt(XCPUAST_OFFSET, Xcpuast,
640	       SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
641
642	/* install an inter-CPU IPI for CPU stop/restart */
643	setidt(XCPUSTOP_OFFSET, Xcpustop,
644	       SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
645
646#if defined(TEST_TEST1)
647	/* install a "fake hardware INTerrupt" vector */
648	setidt(XTEST1_OFFSET, Xtest1,
649	       SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
650#endif  /** TEST_TEST1 */
651
652#endif	/* APIC_IO */
653
654	/* initialize all SMP locks */
655	init_locks();
656
657	/* start each Application Processor */
658	start_all_aps(boot_addr);
659}
660
661
662/*
663 * look for the MP spec signature
664 */
665
666/* string defined by the Intel MP Spec as identifying the MP table */
667#define MP_SIG		0x5f504d5f	/* _MP_ */
668#define NEXT(X)		((X) += 4)
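/*
 * NEXT() advances the u_int32_t index by 4, so the "_MP_" signature is only
 * tested on 16-byte (paragraph) boundaries as the MP spec requires; 'count'
 * is the number of 32-bit words in the region being scanned.
 */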
669static int
670search_for_sig(u_int32_t target, int count)
671{
672	int     x;
673	u_int32_t *addr = (u_int32_t *) (KERNBASE + target);
674
675	for (x = 0; x < count; NEXT(x))
676		if (addr[x] == MP_SIG)
677			/* make array index a byte index */
678			return (target + (x * sizeof(u_int32_t)));
679
680	return -1;
681}
682
683
684static basetable_entry basetable_entry_types[] =
685{
686	{0, 20, "Processor"},
687	{1, 8, "Bus"},
688	{2, 8, "I/O APIC"},
689	{3, 8, "I/O INT"},
690	{4, 8, "Local INT"}
691};
692
693typedef struct BUSDATA {
694	u_char  bus_id;
695	enum busTypes bus_type;
696}       bus_datum;
697
698typedef struct INTDATA {
699	u_char  int_type;
700	u_short int_flags;
701	u_char  src_bus_id;
702	u_char  src_bus_irq;
703	u_char  dst_apic_id;
704	u_char  dst_apic_int;
705	u_char	int_vector;
706}       io_int, local_int;
707
708typedef struct BUSTYPENAME {
709	u_char  type;
710	char    name[7];
711}       bus_type_name;
712
713static bus_type_name bus_type_table[] =
714{
715	{CBUS, "CBUS"},
716	{CBUSII, "CBUSII"},
717	{EISA, "EISA"},
718	{MCA, "MCA"},
719	{UNKNOWN_BUSTYPE, "---"},
720	{ISA, "ISA"},
721	{MCA, "MCA"},
722	{UNKNOWN_BUSTYPE, "---"},
723	{UNKNOWN_BUSTYPE, "---"},
724	{UNKNOWN_BUSTYPE, "---"},
725	{UNKNOWN_BUSTYPE, "---"},
726	{UNKNOWN_BUSTYPE, "---"},
727	{PCI, "PCI"},
728	{UNKNOWN_BUSTYPE, "---"},
729	{UNKNOWN_BUSTYPE, "---"},
730	{UNKNOWN_BUSTYPE, "---"},
731	{UNKNOWN_BUSTYPE, "---"},
732	{XPRESS, "XPRESS"},
733	{UNKNOWN_BUSTYPE, "---"}
734};
735/* from MP spec v1.4, table 5-1 */
736static int default_data[7][5] =
737{
738/*   nbus, id0, type0, id1, type1 */
739	{1, 0, ISA, 255, 255},
740	{1, 0, EISA, 255, 255},
741	{1, 0, EISA, 255, 255},
742	{1, 0, MCA, 255, 255},
743	{2, 0, ISA, 1, PCI},
744	{2, 0, EISA, 1, PCI},
745	{2, 0, MCA, 1, PCI}
746};
747
748
749/* the bus data */
750static bus_datum *bus_data;
751
752/* the IO INT data, one entry per possible APIC INTerrupt */
753static io_int  *io_apic_ints;
754
755static int nintrs;
756
757static int processor_entry	__P((proc_entry_ptr entry, int cpu));
758static int bus_entry		__P((bus_entry_ptr entry, int bus));
759static int io_apic_entry	__P((io_apic_entry_ptr entry, int apic));
760static int int_entry		__P((int_entry_ptr entry, int intr));
761static int lookup_bus_type	__P((char *name));
762
763
764/*
765 * 1st pass on motherboard's Intel MP specification table.
766 *
767 * initializes:
768 *	mp_ncpus = 1
769 *
770 * determines:
771 *	cpu_apic_address (common to all CPUs)
772 *	io_apic_address[N]
773 *	mp_naps
774 *	mp_nbusses
775 *	mp_napics
776 *	nintrs
777 */
778static void
779mptable_pass1(void)
780{
781	int	x;
782	mpcth_t	cth;
783	int	totalSize;
784	void*	position;
785	int	count;
786	int	type;
787
788	POSTCODE(MPTABLE_PASS1_POST);
789
790	/* clear various tables */
791	for (x = 0; x < NAPICID; ++x) {
792		io_apic_address[x] = ~0;	/* IO APIC address table */
793	}
794
795	/* init everything to empty */
796	mp_naps = 0;
797	mp_nbusses = 0;
798	mp_napics = 0;
799	nintrs = 0;
800
801	/* check for use of 'default' configuration */
802	if (MPFPS_MPFB1 != 0) {
803		/* use default addresses */
804		cpu_apic_address = DEFAULT_APIC_BASE;
805		io_apic_address[0] = DEFAULT_IO_APIC_BASE;
806
807		/* fill in with defaults */
808		mp_naps = 2;		/* includes BSP */
809		mp_nbusses = default_data[MPFPS_MPFB1 - 1][0];
810#if defined(APIC_IO)
811		mp_napics = 1;
812		nintrs = 16;
813#endif	/* APIC_IO */
814	}
815	else {
816		if ((cth = mpfps->pap) == 0)
817			panic("MP Configuration Table Header MISSING!");
818
819		cpu_apic_address = (vm_offset_t) cth->apic_address;
820
821		/* walk the table, recording info of interest */
822		totalSize = cth->base_table_length - sizeof(struct MPCTH);
823		position = (u_char *) cth + sizeof(struct MPCTH);
824		count = cth->entry_count;
825
826		while (count--) {
827			switch (type = *(u_char *) position) {
828			case 0: /* processor_entry */
829				if (((proc_entry_ptr)position)->cpu_flags
830					& PROCENTRY_FLAG_EN)
831					++mp_naps;
832				break;
833			case 1: /* bus_entry */
834				++mp_nbusses;
835				break;
836			case 2: /* io_apic_entry */
837				if (((io_apic_entry_ptr)position)->apic_flags
838					& IOAPICENTRY_FLAG_EN)
839					io_apic_address[mp_napics++] =
840					    (vm_offset_t)((io_apic_entry_ptr)
841						position)->apic_address;
842				break;
843			case 3: /* int_entry */
844				++nintrs;
845				break;
846			case 4:	/* local int entry */
847				break;
848			default:
849				panic("mpfps Base Table HOSED!");
850				/* NOTREACHED */
851			}
852
853			totalSize -= basetable_entry_types[type].length;
854			position = (u_char *) position + basetable_entry_types[type].length;
855		}
856	}
857
858	/* qualify the numbers */
859	if (mp_naps > MAXCPU) {
860		printf("Warning: only using %d of %d available CPUs!\n",
861			MAXCPU, mp_naps);
862		mp_naps = MAXCPU;
863	}
864
865	/*
866	 * Count the BSP.
867	 * This is also used as a counter while starting the APs.
868	 */
869	mp_ncpus = 1;
870
871	--mp_naps;	/* subtract the BSP */
872}
873
874
875/*
876 * 2nd pass on motherboard's Intel MP specification table.
877 *
878 * sets:
879 *	boot_cpu_id
880 *	ID_TO_IO(N), phy APIC ID to log CPU/IO table
881 *	CPU_TO_ID(N), logical CPU to APIC ID table
882 *	IO_TO_ID(N), logical IO to APIC ID table
883 *	bus_data[N]
884 *	io_apic_ints[N]
885 */
886static int
887mptable_pass2(void)
888{
889	int     x;
890	mpcth_t cth;
891	int     totalSize;
892	void*   position;
893	int     count;
894	int     type;
895	int     apic, bus, cpu, intr;
896	int	i, j;
897	int	pgeflag;
898
899	POSTCODE(MPTABLE_PASS2_POST);
900
901	pgeflag = 0;		/* XXX - Not used under SMP yet.  */
902
903	MALLOC(io_apic_versions, u_int32_t *, sizeof(u_int32_t) * mp_napics,
904	    M_DEVBUF, M_WAITOK);
905	MALLOC(ioapic, volatile ioapic_t **, sizeof(ioapic_t *) * mp_napics,
906	    M_DEVBUF, M_WAITOK);
907	MALLOC(io_apic_ints, io_int *, sizeof(io_int) * (nintrs + 1),
908	    M_DEVBUF, M_WAITOK);
909	MALLOC(bus_data, bus_datum *, sizeof(bus_datum) * mp_nbusses,
910	    M_DEVBUF, M_WAITOK);
911
912	bzero(ioapic, sizeof(ioapic_t *) * mp_napics);
913
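	/*
	 * Map each IO APIC's register page into kernel virtual space through
	 * the SMP private page table, handing out entries from NPTEPG-2
	 * downward and reusing a slot when two IO APICs share a page frame.
	 */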
914	for (i = 0; i < mp_napics; i++) {
915		for (j = 0; j < mp_napics; j++) {
916			/* same page frame as a previous IO apic? */
917			if (((vm_offset_t)SMPpt[NPTEPG-2-j] & PG_FRAME) ==
918			    (io_apic_address[i] & PG_FRAME)) {
919				ioapic[i] = (ioapic_t *)((u_int)SMP_prvspace
920					+ (NPTEPG-2-j) * PAGE_SIZE
921					+ (io_apic_address[i] & PAGE_MASK));
922				break;
923			}
924			/* use this slot if available */
925			if (((vm_offset_t)SMPpt[NPTEPG-2-j] & PG_FRAME) == 0) {
926				SMPpt[NPTEPG-2-j] = (pt_entry_t)(PG_V | PG_RW |
927				    pgeflag | (io_apic_address[i] & PG_FRAME));
928				ioapic[i] = (ioapic_t *)((u_int)SMP_prvspace
929					+ (NPTEPG-2-j) * PAGE_SIZE
930					+ (io_apic_address[i] & PAGE_MASK));
931				break;
932			}
933		}
934	}
935
936	/* clear various tables */
937	for (x = 0; x < NAPICID; ++x) {
938		ID_TO_IO(x) = -1;	/* phy APIC ID to log CPU/IO table */
939		CPU_TO_ID(x) = -1;	/* logical CPU to APIC ID table */
940		IO_TO_ID(x) = -1;	/* logical IO to APIC ID table */
941	}
942
943	/* clear bus data table */
944	for (x = 0; x < mp_nbusses; ++x)
945		bus_data[x].bus_id = 0xff;
946
947	/* clear IO APIC INT table */
948	for (x = 0; x < (nintrs + 1); ++x) {
949		io_apic_ints[x].int_type = 0xff;
950		io_apic_ints[x].int_vector = 0xff;
951	}
952
953	/* setup the cpu/apic mapping arrays */
954	boot_cpu_id = -1;
955
956	/* record whether PIC or virtual-wire mode */
957	picmode = (mpfps->mpfb2 & 0x80) ? 1 : 0;
958
959	/* check for use of 'default' configuration */
960	if (MPFPS_MPFB1 != 0)
961		return MPFPS_MPFB1;	/* return default configuration type */
962
963	if ((cth = mpfps->pap) == 0)
964		panic("MP Configuration Table Header MISSING!");
965
966	/* walk the table, recording info of interest */
967	totalSize = cth->base_table_length - sizeof(struct MPCTH);
968	position = (u_char *) cth + sizeof(struct MPCTH);
969	count = cth->entry_count;
970	apic = bus = intr = 0;
971	cpu = 1;				/* pre-count the BSP */
972
973	while (count--) {
974		switch (type = *(u_char *) position) {
975		case 0:
976			if (processor_entry(position, cpu))
977				++cpu;
978			break;
979		case 1:
980			if (bus_entry(position, bus))
981				++bus;
982			break;
983		case 2:
984			if (io_apic_entry(position, apic))
985				++apic;
986			break;
987		case 3:
988			if (int_entry(position, intr))
989				++intr;
990			break;
991		case 4:
992			/* int_entry(position); */
993			break;
994		default:
995			panic("mpfps Base Table HOSED!");
996			/* NOTREACHED */
997		}
998
999		totalSize -= basetable_entry_types[type].length;
1000		position = (u_char *) position + basetable_entry_types[type].length;
1001	}
1002
1003	if (boot_cpu_id == -1)
1004		panic("NO BSP found!");
1005
1006	/* report the fact that it is NOT a default configuration */
1007	return 0;
1008}
1009
1010
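/*
 * Bind 'irq' to the given IO APIC/pin: record the mapping in
 * int_to_apicintpin[] and stamp every matching INT/ExtInt entry in the
 * MP table with that vector.
 */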
1011void
1012assign_apic_irq(int apic, int intpin, int irq)
1013{
1014	int x;
1015
1016	if (int_to_apicintpin[irq].ioapic != -1)
1017		panic("assign_apic_irq: inconsistent table");
1018
1019	int_to_apicintpin[irq].ioapic = apic;
1020	int_to_apicintpin[irq].int_pin = intpin;
1021	int_to_apicintpin[irq].apic_address = ioapic[apic];
1022	int_to_apicintpin[irq].redirindex = IOAPIC_REDTBL + 2 * intpin;
1023
1024	for (x = 0; x < nintrs; x++) {
1025		if ((io_apic_ints[x].int_type == 0 ||
1026		     io_apic_ints[x].int_type == 3) &&
1027		    io_apic_ints[x].int_vector == 0xff &&
1028		    io_apic_ints[x].dst_apic_id == IO_TO_ID(apic) &&
1029		    io_apic_ints[x].dst_apic_int == intpin)
1030			io_apic_ints[x].int_vector = irq;
1031	}
1032}
1033
1034void
1035revoke_apic_irq(int irq)
1036{
1037	int x;
1038	int oldapic;
1039	int oldintpin;
1040
1041	if (int_to_apicintpin[irq].ioapic == -1)
1042		panic("assign_apic_irq: inconsistent table");
1043
1044	oldapic = int_to_apicintpin[irq].ioapic;
1045	oldintpin = int_to_apicintpin[irq].int_pin;
1046
1047	int_to_apicintpin[irq].ioapic = -1;
1048	int_to_apicintpin[irq].int_pin = 0;
1049	int_to_apicintpin[irq].apic_address = NULL;
1050	int_to_apicintpin[irq].redirindex = 0;
1051
1052	for (x = 0; x < nintrs; x++) {
1053		if ((io_apic_ints[x].int_type == 0 ||
1054		     io_apic_ints[x].int_type == 3) &&
1055		    io_apic_ints[x].int_vector == 0xff &&
1056		    io_apic_ints[x].dst_apic_id == IO_TO_ID(oldapic) &&
1057		    io_apic_ints[x].dst_apic_int == oldintpin)
1058			io_apic_ints[x].int_vector = 0xff;
1059	}
1060}
1061
1062
1063static void
1064allocate_apic_irq(int intr)
1065{
1066	int apic;
1067	int intpin;
1068	int irq;
1069
1070	if (io_apic_ints[intr].int_vector != 0xff)
1071		return;		/* Interrupt handler already assigned */
1072
1073	if (io_apic_ints[intr].int_type != 0 &&
1074	    (io_apic_ints[intr].int_type != 3 ||
1075	     (io_apic_ints[intr].dst_apic_id == IO_TO_ID(0) &&
1076	      io_apic_ints[intr].dst_apic_int == 0)))
1077		return;		/* Not INT or ExtInt on != (0, 0) */
1078
1079	irq = 0;
1080	while (irq < APIC_INTMAPSIZE &&
1081	       int_to_apicintpin[irq].ioapic != -1)
1082		irq++;
1083
1084	if (irq >= APIC_INTMAPSIZE)
1085		return;		/* No free interrupt handlers */
1086
1087	apic = ID_TO_IO(io_apic_ints[intr].dst_apic_id);
1088	intpin = io_apic_ints[intr].dst_apic_int;
1089
1090	assign_apic_irq(apic, intpin, irq);
1091	io_apic_setup_intpin(apic, intpin);
1092}
1093
1094
1095static void
1096swap_apic_id(int apic, int oldid, int newid)
1097{
1098	int x;
1099	int oapic;
1100
1101
1102	if (oldid == newid)
1103		return;			/* Nothing to do */
1104
1105	printf("Changing APIC ID for IO APIC #%d from %d to %d in MP table\n",
1106	       apic, oldid, newid);
1107
1108	/* Swap physical APIC IDs in interrupt entries */
1109	for (x = 0; x < nintrs; x++) {
1110		if (io_apic_ints[x].dst_apic_id == oldid)
1111			io_apic_ints[x].dst_apic_id = newid;
1112		else if (io_apic_ints[x].dst_apic_id == newid)
1113			io_apic_ints[x].dst_apic_id = oldid;
1114	}
1115
1116	/* Swap physical APIC IDs in IO_TO_ID mappings */
1117	for (oapic = 0; oapic < mp_napics; oapic++)
1118		if (IO_TO_ID(oapic) == newid)
1119			break;
1120
1121	if (oapic < mp_napics) {
1122		printf("Changing APIC ID for IO APIC #%d from "
1123		       "%d to %d in MP table\n",
1124		       oapic, newid, oldid);
1125		IO_TO_ID(oapic) = oldid;
1126	}
1127	IO_TO_ID(apic) = newid;
1128}
1129
1130
1131static void
1132fix_id_to_io_mapping(void)
1133{
1134	int x;
1135
1136	for (x = 0; x < NAPICID; x++)
1137		ID_TO_IO(x) = -1;
1138
1139	for (x = 0; x <= mp_naps; x++)
1140		if (CPU_TO_ID(x) < NAPICID)
1141			ID_TO_IO(CPU_TO_ID(x)) = x;
1142
1143	for (x = 0; x < mp_napics; x++)
1144		if (IO_TO_ID(x) < NAPICID)
1145			ID_TO_IO(IO_TO_ID(x)) = x;
1146}
1147
1148
1149static int
1150first_free_apic_id(void)
1151{
1152	int freeid, x;
1153
1154	for (freeid = 0; freeid < NAPICID; freeid++) {
1155		for (x = 0; x <= mp_naps; x++)
1156			if (CPU_TO_ID(x) == freeid)
1157				break;
1158		if (x <= mp_naps)
1159			continue;
1160		for (x = 0; x < mp_napics; x++)
1161			if (IO_TO_ID(x) == freeid)
1162				break;
1163		if (x < mp_napics)
1164			continue;
1165		return freeid;
1166	}
1167	return freeid;
1168}
1169
1170
1171static int
1172io_apic_id_acceptable(int apic, int id)
1173{
1174	int cpu;		/* Logical CPU number */
1175	int oapic;		/* Logical IO APIC number for other IO APIC */
1176
1177	if (id >= NAPICID)
1178		return 0;	/* Out of range */
1179
1180	for (cpu = 0; cpu <= mp_naps; cpu++)
1181		if (CPU_TO_ID(cpu) == id)
1182			return 0;	/* Conflict with CPU */
1183
1184	for (oapic = 0; oapic < mp_napics && oapic < apic; oapic++)
1185		if (IO_TO_ID(oapic) == id)
1186			return 0;	/* Conflict with other APIC */
1187
1188	return 1;		/* ID is acceptable for IO APIC */
1189}
1190
1191
1192/*
1193 * fix up the parsed Intel MP specification table (bus numbering, IO APIC IDs)
1194 */
1195static void
1196fix_mp_table(void)
1197{
1198	int	x;
1199	int	id;
1200	int	bus_0 = 0;	/* Stop GCC warning */
1201	int	bus_pci = 0;	/* Stop GCC warning */
1202	int	num_pci_bus;
1203	int	apic;		/* IO APIC unit number */
1204	int     freeid;		/* Free physical APIC ID */
1205	int	physid;		/* Current physical IO APIC ID */
1206
1207	/*
1208	 * Fix mis-numbering of the PCI bus and its INT entries if the BIOS
1209	 * did it wrong.  The MP spec says that when more than 1 PCI bus
1210	 * exists the BIOS must begin with bus entries for the PCI bus and use
1211	 * actual PCI bus numbering.  This implies that when only 1 PCI bus
1212	 * exists the BIOS can choose to ignore this ordering, and indeed many
1213	 * MP motherboards do ignore it.  This causes a problem when the PCI
1214	 * sub-system makes requests of the MP sub-system based on PCI bus
1215	 * numbers.	So here we look for the situation and renumber the
1216	 * busses and associated INTs in an effort to "make it right".
1217	 */
1218
1219	/* find bus 0, PCI bus, count the number of PCI busses */
1220	for (num_pci_bus = 0, x = 0; x < mp_nbusses; ++x) {
1221		if (bus_data[x].bus_id == 0) {
1222			bus_0 = x;
1223		}
1224		if (bus_data[x].bus_type == PCI) {
1225			++num_pci_bus;
1226			bus_pci = x;
1227		}
1228	}
1229	/*
1230	 * bus_0 == slot of bus with ID of 0
1231	 * bus_pci == slot of last PCI bus encountered
1232	 */
1233
1234	/* check the 1 PCI bus case for sanity */
1235	/* if it is number 0 all is well */
1236	if (num_pci_bus == 1 &&
1237	    bus_data[bus_pci].bus_id != 0) {
1238
1239		/* mis-numbered, swap with whichever bus uses slot 0 */
1240
1241		/* swap the bus entry types */
1242		bus_data[bus_pci].bus_type = bus_data[bus_0].bus_type;
1243		bus_data[bus_0].bus_type = PCI;
1244
1245		/* swap each relevant INTerrupt entry */
1246		id = bus_data[bus_pci].bus_id;
1247		for (x = 0; x < nintrs; ++x) {
1248			if (io_apic_ints[x].src_bus_id == id) {
1249				io_apic_ints[x].src_bus_id = 0;
1250			}
1251			else if (io_apic_ints[x].src_bus_id == 0) {
1252				io_apic_ints[x].src_bus_id = id;
1253			}
1254		}
1255	}
1256
1257	/* Assign IO APIC IDs.
1258	 *
1259	 * First try the existing ID. If a conflict is detected, try
1260	 * the ID in the MP table.  If a conflict is still detected, find
1261	 * a free id.
1262	 *
1263	 * We cannot use the ID_TO_IO table before all conflicts have been
1264	 * resolved and the table has been corrected.
1265	 */
1266	for (apic = 0; apic < mp_napics; ++apic) { /* For all IO APICs */
1267
1268		/* First try to use the value set by the BIOS */
1269		physid = io_apic_get_id(apic);
1270		if (io_apic_id_acceptable(apic, physid)) {
1271			if (IO_TO_ID(apic) != physid)
1272				swap_apic_id(apic, IO_TO_ID(apic), physid);
1273			continue;
1274		}
1275
1276		/* Then check if the value in the MP table is acceptable */
1277		if (io_apic_id_acceptable(apic, IO_TO_ID(apic)))
1278			continue;
1279
1280		/* Last resort, find a free APIC ID and use it */
1281		freeid = first_free_apic_id();
1282		if (freeid >= NAPICID)
1283			panic("No free physical APIC IDs found");
1284
1285		if (io_apic_id_acceptable(apic, freeid)) {
1286			swap_apic_id(apic, IO_TO_ID(apic), freeid);
1287			continue;
1288		}
1289		panic("Free physical APIC ID not usable");
1290	}
1291	fix_id_to_io_mapping();
1292
1293	/* detect and fix broken Compaq MP table */
1294	if (apic_int_type(0, 0) == -1) {
1295		printf("APIC_IO: MP table broken: 8259->APIC entry missing!\n");
1296		io_apic_ints[nintrs].int_type = 3;	/* ExtInt */
1297		io_apic_ints[nintrs].int_vector = 0xff;	/* Unassigned */
1298		/* XXX fixme, set src bus id etc, but it doesn't seem to hurt */
1299		io_apic_ints[nintrs].dst_apic_id = IO_TO_ID(0);
1300		io_apic_ints[nintrs].dst_apic_int = 0;	/* Pin 0 */
1301		nintrs++;
1302	}
1303}
1304
1305
1306/* Assign low level interrupt handlers */
1307static void
1308setup_apic_irq_mapping(void)
1309{
1310	int	x;
1311	int	int_vector;
1312
1313	/* Clear array */
1314	for (x = 0; x < APIC_INTMAPSIZE; x++) {
1315		int_to_apicintpin[x].ioapic = -1;
1316		int_to_apicintpin[x].int_pin = 0;
1317		int_to_apicintpin[x].apic_address = NULL;
1318		int_to_apicintpin[x].redirindex = 0;
1319	}
1320
1321	/* First assign ISA/EISA interrupts */
1322	for (x = 0; x < nintrs; x++) {
1323		int_vector = io_apic_ints[x].src_bus_irq;
1324		if (int_vector < APIC_INTMAPSIZE &&
1325		    io_apic_ints[x].int_vector == 0xff &&
1326		    int_to_apicintpin[int_vector].ioapic == -1 &&
1327		    (apic_int_is_bus_type(x, ISA) ||
1328		     apic_int_is_bus_type(x, EISA)) &&
1329		    io_apic_ints[x].int_type == 0) {
1330			assign_apic_irq(ID_TO_IO(io_apic_ints[x].dst_apic_id),
1331					io_apic_ints[x].dst_apic_int,
1332					int_vector);
1333		}
1334	}
1335
1336	/* Assign ExtInt entry if no ISA/EISA interrupt 0 entry */
1337	for (x = 0; x < nintrs; x++) {
1338		if (io_apic_ints[x].dst_apic_int == 0 &&
1339		    io_apic_ints[x].dst_apic_id == IO_TO_ID(0) &&
1340		    io_apic_ints[x].int_vector == 0xff &&
1341		    int_to_apicintpin[0].ioapic == -1 &&
1342		    io_apic_ints[x].int_type == 3) {
1343			assign_apic_irq(0, 0, 0);
1344			break;
1345		}
1346	}
1347	/* PCI interrupt assignment is deferred */
1348}
1349
1350
1351static int
1352processor_entry(proc_entry_ptr entry, int cpu)
1353{
1354	/* check for usability */
1355	if (!(entry->cpu_flags & PROCENTRY_FLAG_EN))
1356		return 0;
1357
1358	if (entry->apic_id >= NAPICID)
1359		panic("CPU APIC ID out of range (0..%d)", NAPICID - 1);
1360	/* check for BSP flag */
1361	if (entry->cpu_flags & PROCENTRY_FLAG_BP) {
1362		boot_cpu_id = entry->apic_id;
1363		CPU_TO_ID(0) = entry->apic_id;
1364		ID_TO_CPU(entry->apic_id) = 0;
1365		return 0;	/* it has already been counted */
1366	}
1367
1368	/* add another AP to list, if less than max number of CPUs */
1369	else if (cpu < MAXCPU) {
1370		CPU_TO_ID(cpu) = entry->apic_id;
1371		ID_TO_CPU(entry->apic_id) = cpu;
1372		return 1;
1373	}
1374
1375	return 0;
1376}
1377
1378
1379static int
1380bus_entry(bus_entry_ptr entry, int bus)
1381{
1382	int     x;
1383	char    c, name[8];
1384
1385	/* encode the name into an index */
1386	for (x = 0; x < 6; ++x) {
1387		if ((c = entry->bus_type[x]) == ' ')
1388			break;
1389		name[x] = c;
1390	}
1391	name[x] = '\0';
1392
1393	if ((x = lookup_bus_type(name)) == UNKNOWN_BUSTYPE)
1394		panic("unknown bus type: '%s'", name);
1395
1396	bus_data[bus].bus_id = entry->bus_id;
1397	bus_data[bus].bus_type = x;
1398
1399	return 1;
1400}
1401
1402
1403static int
1404io_apic_entry(io_apic_entry_ptr entry, int apic)
1405{
1406	if (!(entry->apic_flags & IOAPICENTRY_FLAG_EN))
1407		return 0;
1408
1409	IO_TO_ID(apic) = entry->apic_id;
1410	if (entry->apic_id < NAPICID)
1411		ID_TO_IO(entry->apic_id) = apic;
1412
1413	return 1;
1414}
1415
1416
1417static int
1418lookup_bus_type(char *name)
1419{
1420	int     x;
1421
1422	for (x = 0; x < MAX_BUSTYPE; ++x)
1423		if (strcmp(bus_type_table[x].name, name) == 0)
1424			return bus_type_table[x].type;
1425
1426	return UNKNOWN_BUSTYPE;
1427}
1428
1429
1430static int
1431int_entry(int_entry_ptr entry, int intr)
1432{
1433	int apic;
1434
1435	io_apic_ints[intr].int_type = entry->int_type;
1436	io_apic_ints[intr].int_flags = entry->int_flags;
1437	io_apic_ints[intr].src_bus_id = entry->src_bus_id;
1438	io_apic_ints[intr].src_bus_irq = entry->src_bus_irq;
1439	if (entry->dst_apic_id == 255) {
1440		/* This signal goes to all IO APICs.  Select an IO APIC
1441		   with a sufficient number of interrupt pins */
1442		for (apic = 0; apic < mp_napics; apic++)
1443			if (((io_apic_read(apic, IOAPIC_VER) &
1444			      IOART_VER_MAXREDIR) >> MAXREDIRSHIFT) >=
1445			    entry->dst_apic_int)
1446				break;
1447		if (apic < mp_napics)
1448			io_apic_ints[intr].dst_apic_id = IO_TO_ID(apic);
1449		else
1450			io_apic_ints[intr].dst_apic_id = entry->dst_apic_id;
1451	} else
1452		io_apic_ints[intr].dst_apic_id = entry->dst_apic_id;
1453	io_apic_ints[intr].dst_apic_int = entry->dst_apic_int;
1454
1455	return 1;
1456}
1457
1458
1459static int
1460apic_int_is_bus_type(int intr, int bus_type)
1461{
1462	int     bus;
1463
1464	for (bus = 0; bus < mp_nbusses; ++bus)
1465		if ((bus_data[bus].bus_id == io_apic_ints[intr].src_bus_id)
1466		    && ((int) bus_data[bus].bus_type == bus_type))
1467			return 1;
1468
1469	return 0;
1470}
1471
1472
1473/*
1474 * Given a traditional ISA INT mask, return an APIC mask.
1475 */
1476u_int
1477isa_apic_mask(u_int isa_mask)
1478{
1479	int isa_irq;
1480	int apic_pin;
1481
1482#if defined(SKIP_IRQ15_REDIRECT)
1483	if (isa_mask == (1 << 15)) {
1484		printf("skipping ISA IRQ15 redirect\n");
1485		return isa_mask;
1486	}
1487#endif  /* SKIP_IRQ15_REDIRECT */
1488
1489	isa_irq = ffs(isa_mask);		/* find its bit position */
1490	if (isa_irq == 0)			/* doesn't exist */
1491		return 0;
1492	--isa_irq;				/* make it zero based */
1493
1494	apic_pin = isa_apic_irq(isa_irq);	/* look for APIC connection */
1495	if (apic_pin == -1)
1496		return 0;
1497
1498	return (1 << apic_pin);			/* convert pin# to a mask */
1499}
1500
1501
1502/*
1503 * Determine which APIC pin an ISA/EISA INT is attached to.
1504 */
1505#define INTTYPE(I)	(io_apic_ints[(I)].int_type)
1506#define INTPIN(I)	(io_apic_ints[(I)].dst_apic_int)
1507#define INTIRQ(I)	(io_apic_ints[(I)].int_vector)
1508#define INTAPIC(I)	(ID_TO_IO(io_apic_ints[(I)].dst_apic_id))
1509
1510#define SRCBUSIRQ(I)	(io_apic_ints[(I)].src_bus_irq)
1511int
1512isa_apic_irq(int isa_irq)
1513{
1514	int     intr;
1515
1516	for (intr = 0; intr < nintrs; ++intr) {		/* check each record */
1517		if (INTTYPE(intr) == 0) {		/* standard INT */
1518			if (SRCBUSIRQ(intr) == isa_irq) {
1519				if (apic_int_is_bus_type(intr, ISA) ||
1520			            apic_int_is_bus_type(intr, EISA)) {
1521					if (INTIRQ(intr) == 0xff)
1522						return -1; /* unassigned */
1523					return INTIRQ(intr);	/* found */
1524				}
1525			}
1526		}
1527	}
1528	return -1;					/* NOT found */
1529}
1530
1531
1532/*
1533 * Determine which APIC pin a PCI INT is attached to.
1534 */
1535#define SRCBUSID(I)	(io_apic_ints[(I)].src_bus_id)
1536#define SRCBUSDEVICE(I)	((io_apic_ints[(I)].src_bus_irq >> 2) & 0x1f)
1537#define SRCBUSLINE(I)	(io_apic_ints[(I)].src_bus_irq & 0x03)
1538int
1539pci_apic_irq(int pciBus, int pciDevice, int pciInt)
1540{
1541	int     intr;
1542
1543	--pciInt;					/* zero based */
1544
1545	for (intr = 0; intr < nintrs; ++intr)		/* check each record */
1546		if ((INTTYPE(intr) == 0)		/* standard INT */
1547		    && (SRCBUSID(intr) == pciBus)
1548		    && (SRCBUSDEVICE(intr) == pciDevice)
1549		    && (SRCBUSLINE(intr) == pciInt))	/* a candidate IRQ */
1550			if (apic_int_is_bus_type(intr, PCI)) {
1551				if (INTIRQ(intr) == 0xff)
1552					allocate_apic_irq(intr);
1553				if (INTIRQ(intr) == 0xff)
1554					return -1;	/* unassigned */
1555				return INTIRQ(intr);	/* exact match */
1556			}
1557
1558	return -1;					/* NOT found */
1559}
1560
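/*
 * Given an IRQ, look for another IO APIC pin wired to the same interrupt
 * source (same bus, and same PCI device/INT line or same ISA/EISA IRQ) and
 * return the IRQ assigned to that pin, or -1 if there is no other pin.
 */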
1561int
1562next_apic_irq(int irq)
1563{
1564	int intr, ointr;
1565	int bus, bustype;
1566
1567	bus = 0;
1568	bustype = 0;
1569	for (intr = 0; intr < nintrs; intr++) {
1570		if (INTIRQ(intr) != irq || INTTYPE(intr) != 0)
1571			continue;
1572		bus = SRCBUSID(intr);
1573		bustype = apic_bus_type(bus);
1574		if (bustype != ISA &&
1575		    bustype != EISA &&
1576		    bustype != PCI)
1577			continue;
1578		break;
1579	}
1580	if (intr >= nintrs) {
1581		return -1;
1582	}
1583	for (ointr = intr + 1; ointr < nintrs; ointr++) {
1584		if (INTTYPE(ointr) != 0)
1585			continue;
1586		if (bus != SRCBUSID(ointr))
1587			continue;
1588		if (bustype == PCI) {
1589			if (SRCBUSDEVICE(intr) != SRCBUSDEVICE(ointr))
1590				continue;
1591			if (SRCBUSLINE(intr) != SRCBUSLINE(ointr))
1592				continue;
1593		}
1594		if (bustype == ISA || bustype == EISA) {
1595			if (SRCBUSIRQ(intr) != SRCBUSIRQ(ointr))
1596				continue;
1597		}
1598		if (INTPIN(intr) == INTPIN(ointr))
1599			continue;
1600		break;
1601	}
1602	if (ointr >= nintrs) {
1603		return -1;
1604	}
1605	return INTIRQ(ointr);
1606}
1607#undef SRCBUSLINE
1608#undef SRCBUSDEVICE
1609#undef SRCBUSID
1610#undef SRCBUSIRQ
1611
1612#undef INTPIN
1613#undef INTIRQ
1614#undef INTAPIC
1615#undef INTTYPE
1616
1617
1618/*
1619 * Reprogram the MB chipset to NOT redirect an ISA INTerrupt.
1620 *
1621 * XXX FIXME:
1622 *  Exactly what this means is unclear at this point.  It is a solution
1623 *  for motherboards that redirect the MBIRQ0 pin.  Generically a motherboard
1624 *  could route any of the ISA INTs to upper (>15) IRQ values.  But most would
1625 *  NOT be redirected via MBIRQ0, thus "undirect()ing" them would NOT be an
1626 *  option.
1627 */
1628int
1629undirect_isa_irq(int rirq)
1630{
1631#if defined(READY)
1632	if (bootverbose)
1633	    printf("Freeing redirected ISA irq %d.\n", rirq);
1634	/** FIXME: tickle the MB redirector chip */
1635	return -1;
1636#else
1637	if (bootverbose)
1638	    printf("Freeing (NOT implemented) redirected ISA irq %d.\n", rirq);
1639	return 0;
1640#endif  /* READY */
1641}
1642
1643
1644/*
1645 * Reprogram the MB chipset to NOT redirect a PCI INTerrupt
1646 */
1647int
1648undirect_pci_irq(int rirq)
1649{
1650#if defined(READY)
1651	if (bootverbose)
1652		printf("Freeing redirected PCI irq %d.\n", rirq);
1653
1654	/** FIXME: tickle the MB redirector chip */
1655	return -1;
1656#else
1657	if (bootverbose)
1658		printf("Freeing (NOT implemented) redirected PCI irq %d.\n",
1659		       rirq);
1660	return 0;
1661#endif  /* READY */
1662}
1663
1664
1665/*
1666 * given a bus ID, return:
1667 *  the bus type if found
1668 *  -1 if NOT found
1669 */
1670int
1671apic_bus_type(int id)
1672{
1673	int     x;
1674
1675	for (x = 0; x < mp_nbusses; ++x)
1676		if (bus_data[x].bus_id == id)
1677			return bus_data[x].bus_type;
1678
1679	return -1;
1680}
1681
1682
1683/*
1684 * given a LOGICAL APIC# and pin#, return:
1685 *  the associated src bus ID if found
1686 *  -1 if NOT found
1687 */
1688int
1689apic_src_bus_id(int apic, int pin)
1690{
1691	int     x;
1692
1693	/* search each of the possible INTerrupt sources */
1694	for (x = 0; x < nintrs; ++x)
1695		if ((apic == ID_TO_IO(io_apic_ints[x].dst_apic_id)) &&
1696		    (pin == io_apic_ints[x].dst_apic_int))
1697			return (io_apic_ints[x].src_bus_id);
1698
1699	return -1;		/* NOT found */
1700}
1701
1702
1703/*
1704 * given a LOGICAL APIC# and pin#, return:
1705 *  the associated src bus IRQ if found
1706 *  -1 if NOT found
1707 */
1708int
1709apic_src_bus_irq(int apic, int pin)
1710{
1711	int     x;
1712
1713	for (x = 0; x < nintrs; x++)
1714		if ((apic == ID_TO_IO(io_apic_ints[x].dst_apic_id)) &&
1715		    (pin == io_apic_ints[x].dst_apic_int))
1716			return (io_apic_ints[x].src_bus_irq);
1717
1718	return -1;		/* NOT found */
1719}
1720
1721
1722/*
1723 * given a LOGICAL APIC# and pin#, return:
1724 *  the associated INTerrupt type if found
1725 *  -1 if NOT found
1726 */
1727int
1728apic_int_type(int apic, int pin)
1729{
1730	int     x;
1731
1732	/* search each of the possible INTerrupt sources */
1733	for (x = 0; x < nintrs; ++x)
1734		if ((apic == ID_TO_IO(io_apic_ints[x].dst_apic_id)) &&
1735		    (pin == io_apic_ints[x].dst_apic_int))
1736			return (io_apic_ints[x].int_type);
1737
1738	return -1;		/* NOT found */
1739}
1740
1741int
1742apic_irq(int apic, int pin)
1743{
1744	int x;
1745	int res;
1746
1747	for (x = 0; x < nintrs; ++x)
1748		if ((apic == ID_TO_IO(io_apic_ints[x].dst_apic_id)) &&
1749		    (pin == io_apic_ints[x].dst_apic_int)) {
1750			res = io_apic_ints[x].int_vector;
1751			if (res == 0xff)
1752				return -1;
1753			if (apic != int_to_apicintpin[res].ioapic)
1754				panic("apic_irq: inconsistent table");
1755			if (pin != int_to_apicintpin[res].int_pin)
1756				panic("apic_irq inconsistent table (2)");
1757			return res;
1758		}
1759	return -1;
1760}
1761
1762
1763/*
1764 * given a LOGICAL APIC# and pin#, return:
1765 *  the associated trigger mode if found
1766 *  -1 if NOT found
1767 */
1768int
1769apic_trigger(int apic, int pin)
1770{
1771	int     x;
1772
1773	/* search each of the possible INTerrupt sources */
1774	for (x = 0; x < nintrs; ++x)
1775		if ((apic == ID_TO_IO(io_apic_ints[x].dst_apic_id)) &&
1776		    (pin == io_apic_ints[x].dst_apic_int))
1777			return ((io_apic_ints[x].int_flags >> 2) & 0x03);
1778
1779	return -1;		/* NOT found */
1780}
1781
1782
1783/*
1784 * given a LOGICAL APIC# and pin#, return:
1785 *  the associated 'active' level if found
1786 *  -1 if NOT found
1787 */
1788int
1789apic_polarity(int apic, int pin)
1790{
1791	int     x;
1792
1793	/* search each of the possible INTerrupt sources */
1794	for (x = 0; x < nintrs; ++x)
1795		if ((apic == ID_TO_IO(io_apic_ints[x].dst_apic_id)) &&
1796		    (pin == io_apic_ints[x].dst_apic_int))
1797			return (io_apic_ints[x].int_flags & 0x03);
1798
1799	return -1;		/* NOT found */
1800}
1801
1802
1803/*
1804 * set data according to MP defaults
1805 * FIXME: probably not complete yet...
1806 */
1807static void
1808default_mp_table(int type)
1809{
1810	int     ap_cpu_id;
1811#if defined(APIC_IO)
1812	int     io_apic_id;
1813	int     pin;
1814#endif	/* APIC_IO */
1815
1816#if 0
1817	printf("  MP default config type: %d\n", type);
1818	switch (type) {
1819	case 1:
1820		printf("   bus: ISA, APIC: 82489DX\n");
1821		break;
1822	case 2:
1823		printf("   bus: EISA, APIC: 82489DX\n");
1824		break;
1825	case 3:
1826		printf("   bus: EISA, APIC: 82489DX\n");
1827		break;
1828	case 4:
1829		printf("   bus: MCA, APIC: 82489DX\n");
1830		break;
1831	case 5:
1832		printf("   bus: ISA+PCI, APIC: Integrated\n");
1833		break;
1834	case 6:
1835		printf("   bus: EISA+PCI, APIC: Integrated\n");
1836		break;
1837	case 7:
1838		printf("   bus: MCA+PCI, APIC: Integrated\n");
1839		break;
1840	default:
1841		printf("   future type\n");
1842		break;
1843		/* NOTREACHED */
1844	}
1845#endif	/* 0 */
1846
1847	boot_cpu_id = (lapic.id & APIC_ID_MASK) >> 24;
1848	ap_cpu_id = (boot_cpu_id == 0) ? 1 : 0;
1849
1850	/* BSP */
1851	CPU_TO_ID(0) = boot_cpu_id;
1852	ID_TO_CPU(boot_cpu_id) = 0;
1853
1854	/* one and only AP */
1855	CPU_TO_ID(1) = ap_cpu_id;
1856	ID_TO_CPU(ap_cpu_id) = 1;
1857
1858#if defined(APIC_IO)
1859	/* one and only IO APIC */
1860	io_apic_id = (io_apic_read(0, IOAPIC_ID) & APIC_ID_MASK) >> 24;
1861
1862	/*
1863	 * sanity check, refer to MP spec section 3.6.6, last paragraph
1864	 * necessary as some hardware isn't properly setting up the IO APIC
1865	 */
1866#if defined(REALLY_ANAL_IOAPICID_VALUE)
1867	if (io_apic_id != 2) {
1868#else
1869	if ((io_apic_id == 0) || (io_apic_id == 1) || (io_apic_id == 15)) {
1870#endif	/* REALLY_ANAL_IOAPICID_VALUE */
1871		io_apic_set_id(0, 2);
1872		io_apic_id = 2;
1873	}
1874	IO_TO_ID(0) = io_apic_id;
1875	ID_TO_IO(io_apic_id) = 0;
1876#endif	/* APIC_IO */
1877
1878	/* fill out bus entries */
1879	switch (type) {
1880	case 1:
1881	case 2:
1882	case 3:
1883	case 4:
1884	case 5:
1885	case 6:
1886	case 7:
1887		bus_data[0].bus_id = default_data[type - 1][1];
1888		bus_data[0].bus_type = default_data[type - 1][2];
1889		bus_data[1].bus_id = default_data[type - 1][3];
1890		bus_data[1].bus_type = default_data[type - 1][4];
1891		break;
1892
1893	/* case 4: case 7:		   MCA NOT supported */
1894	default:		/* illegal/reserved */
1895		panic("BAD default MP config: %d", type);
1896		/* NOTREACHED */
1897	}
1898
1899#if defined(APIC_IO)
1900	/* general cases from MP v1.4, table 5-2 */
1901	for (pin = 0; pin < 16; ++pin) {
1902		io_apic_ints[pin].int_type = 0;
1903		io_apic_ints[pin].int_flags = 0x05;	/* edge/active-hi */
1904		io_apic_ints[pin].src_bus_id = 0;
1905		io_apic_ints[pin].src_bus_irq = pin;	/* IRQ2 caught below */
1906		io_apic_ints[pin].dst_apic_id = io_apic_id;
1907		io_apic_ints[pin].dst_apic_int = pin;	/* 1-to-1 */
1908	}
1909
1910	/* special cases from MP v1.4, table 5-2 */
1911	if (type == 2) {
1912		io_apic_ints[2].int_type = 0xff;	/* N/C */
1913		io_apic_ints[13].int_type = 0xff;	/* N/C */
1914#if !defined(APIC_MIXED_MODE)
1915		/** FIXME: ??? */
1916		panic("sorry, can't support type 2 default yet");
1917#endif	/* APIC_MIXED_MODE */
1918	}
1919	else
1920		io_apic_ints[2].src_bus_irq = 0;	/* ISA IRQ0 is on APIC INT 2 */
1921
1922	if (type == 7)
1923		io_apic_ints[0].int_type = 0xff;	/* N/C */
1924	else
1925		io_apic_ints[0].int_type = 3;	/* vectored 8259 */
1926#endif	/* APIC_IO */
1927}
1928
1929
1930/*
1931 * start each AP in our list
1932 */
1933static int
1934start_all_aps(u_int boot_addr)
1935{
1936	int     x, i, pg;
1937	u_char  mpbiosreason;
1938	u_long  mpbioswarmvec;
1939	struct globaldata *gd;
1940	char *stack;
1941	uintptr_t kptbase;
1942
1943	POSTCODE(START_ALL_APS_POST);
1944
1945	/* initialize BSP's local APIC */
1946	apic_initialize();
1947	bsp_apic_ready = 1;
1948
1949	/* install the AP 1st level boot code */
1950	install_ap_tramp(boot_addr);
1951
1952
1953	/* save the current value of the warm-start vector */
1954	mpbioswarmvec = *((u_long *) WARMBOOT_OFF);
1955#ifndef PC98
1956	outb(CMOS_REG, BIOS_RESET);
1957	mpbiosreason = inb(CMOS_DATA);
1958#endif
1959
1960	/* record BSP in CPU map */
1961	all_cpus = 1;
1962
1963	/* set up temporary P==V mapping for AP boot */
1964	/* XXX this is a hack, we should boot the AP on its own stack/PTD */
1965	kptbase = (uintptr_t)(void *)KPTphys;
1966	for (x = 0; x < NKPT; x++)
1967		PTD[x] = (pd_entry_t)(PG_V | PG_RW |
1968		    ((kptbase + x * PAGE_SIZE) & PG_FRAME));
1969	invltlb();
1970
1971	/* start each AP */
1972	for (x = 1; x <= mp_naps; ++x) {
1973
1974		/* This is a bit verbose, it will go away soon.  */
1975
1976		/* first page of AP's private space */
1977		pg = x * i386_btop(sizeof(struct privatespace));
1978
1979		/* allocate a new private data page */
1980		gd = (struct globaldata *)kmem_alloc(kernel_map, PAGE_SIZE);
1981
1982		/* wire it into the private page table page */
1983		SMPpt[pg] = (pt_entry_t)(PG_V | PG_RW | vtophys(gd));
1984
1985		/* allocate and set up an idle stack data page */
1986		stack = (char *)kmem_alloc(kernel_map, UPAGES*PAGE_SIZE);
1987		for (i = 0; i < UPAGES; i++)
1988			SMPpt[pg + 1 + i] = (pt_entry_t)
1989			    (PG_V | PG_RW | vtophys(PAGE_SIZE * i + stack));
1990
1991		/* prime data page for it to use */
1992		SLIST_INSERT_HEAD(&cpuhead, gd, gd_allcpu);
1993		gd->gd_cpuid = x;
1994
1995		/* setup a vector to our boot code */
1996		*((volatile u_short *) WARMBOOT_OFF) = WARMBOOT_TARGET;
1997		*((volatile u_short *) WARMBOOT_SEG) = (boot_addr >> 4);
1998#ifndef PC98
1999		outb(CMOS_REG, BIOS_RESET);
2000		outb(CMOS_DATA, BIOS_WARM);	/* 'warm-start' */
2001#endif
2002
2003		bootSTK = &SMP_prvspace[x].idlestack[UPAGES*PAGE_SIZE];
2004		bootAP = x;
2005
2006		/* attempt to start the Application Processor */
2007		CHECK_INIT(99);	/* setup checkpoints */
2008		if (!start_ap(x, boot_addr)) {
2009			printf("AP #%d (PHY# %d) failed!\n", x, CPU_TO_ID(x));
2010			CHECK_PRINT("trace");	/* show checkpoints */
2011			/* better panic as the AP may be running loose */
2012			printf("panic y/n? [y] ");
2013			if (cngetc() != 'n')
2014				panic("bye-bye");
2015		}
2016		CHECK_PRINT("trace");		/* show checkpoints */
2017
2018		/* record its version info */
2019		cpu_apic_versions[x] = cpu_apic_versions[0];
2020
2021		all_cpus |= (1 << x);		/* record AP in CPU map */
2022	}
2023
2024	/* build our map of 'other' CPUs */
2025	PCPU_SET(other_cpus, all_cpus & ~(1 << PCPU_GET(cpuid)));
2026
2027	/* fill in our (BSP) APIC version */
2028	cpu_apic_versions[0] = lapic.version;
2029
2030	/* restore the warmstart vector */
2031	*(u_long *) WARMBOOT_OFF = mpbioswarmvec;
2032#ifndef PC98
2033	outb(CMOS_REG, BIOS_RESET);
2034	outb(CMOS_DATA, mpbiosreason);
2035#endif
2036
2037	/*
2038	 * Set up the idle context for the BSP.  Similar to above except
2039	 * that some was done by locore, some by pmap.c and some is implicit
2040	 * because the BSP is cpu#0 and the page is initially zero, and also
2041	 * because we can refer to variables by name on the BSP..
2042	 */
2043
2044	/* Allocate and setup BSP idle stack */
2045	stack = (char *)kmem_alloc(kernel_map, UPAGES * PAGE_SIZE);
2046	for (i = 0; i < UPAGES; i++)
2047		SMPpt[1 + i] = (pt_entry_t)
2048		    (PG_V | PG_RW | vtophys(PAGE_SIZE * i + stack));
2049
2050	for (x = 0; x < NKPT; x++)
2051		PTD[x] = 0;
2052	pmap_set_opt();
2053
2054	/* number of APs actually started */
2055	return mp_ncpus - 1;
2056}
2057
2058
2059/*
2060 * load the 1st level AP boot code into base memory.
2061 */
2062
2063/* targets for relocation */
2064extern void bigJump(void);
2065extern void bootCodeSeg(void);
2066extern void bootDataSeg(void);
2067extern void MPentry(void);
2068extern u_int MP_GDT;
2069extern u_int mp_gdtbase;
2070
2071static void
2072install_ap_tramp(u_int boot_addr)
2073{
2074	int     x;
2075	int     size = *(int *) ((u_long) & bootMP_size);
2076	u_char *src = (u_char *) ((u_long) bootMP);
2077	u_char *dst = (u_char *) boot_addr + KERNBASE;
2078	u_int   boot_base = (u_int) bootMP;
2079	u_int8_t *dst8;
2080	u_int16_t *dst16;
2081	u_int32_t *dst32;
2082
2083	POSTCODE(INSTALL_AP_TRAMP_POST);
2084
2085	for (x = 0; x < size; ++x)
2086		*dst++ = *src++;
2087
2088	/*
2089	 * Modify addresses in the code we just copied to base memory.  This
2090	 * unfortunately requires fairly detailed knowledge of mpboot.s, so
2091	 * changes to mpboot.s may require corresponding changes here.
2092	 */
2093
2094	/* boot code is located in KERNEL space */
2095	dst = (u_char *) boot_addr + KERNBASE;
2096
2097	/* modify the lgdt arg */
2098	dst32 = (u_int32_t *) (dst + ((u_int) & mp_gdtbase - boot_base));
2099	*dst32 = boot_addr + ((u_int) & MP_GDT - boot_base);
2100
2101	/* modify the ljmp target for MPentry() */
2102	dst32 = (u_int32_t *) (dst + ((u_int) bigJump - boot_base) + 1);
2103	*dst32 = ((u_int) MPentry - KERNBASE);
2104
2105	/* modify the target for boot code segment */
2106	dst16 = (u_int16_t *) (dst + ((u_int) bootCodeSeg - boot_base));
2107	dst8 = (u_int8_t *) (dst16 + 1);
2108	*dst16 = (u_int) boot_addr & 0xffff;
2109	*dst8 = ((u_int) boot_addr >> 16) & 0xff;
2110
2111	/* modify the target for boot data segment */
2112	dst16 = (u_int16_t *) (dst + ((u_int) bootDataSeg - boot_base));
2113	dst8 = (u_int8_t *) (dst16 + 1);
2114	*dst16 = (u_int) boot_addr & 0xffff;
2115	*dst8 = ((u_int) boot_addr >> 16) & 0xff;
2116}
2117
2118
2119/*
2120 * This function starts the AP (application processor) given by the
2121 * logical CPU number 'logical_cpu', which it first maps to the
2122 * corresponding physical APIC ID.  It does quite a "song and dance"
2123 * to accomplish this, made necessary by the nuances of the different
2124 * hardware we might encounter.  It ain't pretty, but it seems to work.
2125 */
2126static int
2127start_ap(int logical_cpu, u_int boot_addr)
2128{
2129	int     physical_cpu;
2130	int     vector;
2131	int     cpus;
2132	u_long  icr_lo, icr_hi;
2133
2134	POSTCODE(START_AP_POST);
2135
2136	/* get the PHYSICAL APIC ID# */
2137	physical_cpu = CPU_TO_ID(logical_cpu);
2138
2139	/* calculate the vector */
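	/*
	 * The STARTUP IPI vector is the 4KB page number of the real-mode
	 * trampoline: the AP begins executing at vector << 12, so
	 * boot_addr must be page aligned and below 1MB.
	 */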
2140	vector = (boot_addr >> 12) & 0xff;
2141
2142	/* used as a watchpoint to signal AP startup */
2143	cpus = mp_ncpus;
2144
2145	/*
2146	 * First we send an INIT/RESET IPI.  This INIT IPI might be acted on,
2147	 * resetting and running the target CPU; OR it might be latched (P5
2148	 * bug), leaving the CPU waiting for a STARTUP IPI; OR it might simply
2149	 * be ignored.
2150	 */
2151
2152	/* setup the address for the target AP */
2153	icr_hi = lapic.icr_hi & ~APIC_ID_MASK;
2154	icr_hi |= (physical_cpu << 24);
2155	lapic.icr_hi = icr_hi;
2156
2157	/* do an INIT IPI: assert RESET */
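	/*
	 * Clear the low ICR bits, then OR in 0xc500: INIT delivery mode
	 * (bits 8-10 = 101) with level assert (bit 14) and level trigger
	 * (bit 15).
	 */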
2158	icr_lo = lapic.icr_lo & 0xfff00000;
2159	lapic.icr_lo = icr_lo | 0x0000c500;
2160
2161	/* wait for pending status end */
2162	while (lapic.icr_lo & APIC_DELSTAT_MASK)
2163		 /* spin */ ;
2164
2165	/* do an INIT IPI: deassert RESET */
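	/* 0x8500 is the same level-triggered INIT, but with the assert
	 * bit (14) clear, i.e. deassert. */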
2166	lapic.icr_lo = icr_lo | 0x00008500;
2167
2168	/* wait for pending status end */
2169	u_sleep(10000);		/* wait ~10 ms */
2170	while (lapic.icr_lo & APIC_DELSTAT_MASK)
2171		 /* spin */ ;
2172
2173	/*
2174	 * Next we send a STARTUP IPI.  The previous INIT IPI might still be
2175	 * latched (P5 bug); this first STARTUP would then terminate
2176	 * immediately and the previously started INIT IPI would continue.  OR
2177	 * the previous INIT IPI has already run, and this STARTUP IPI will
2178	 * run.  OR the previous INIT IPI was ignored, and this STARTUP IPI
2179	 * will run.
2180	 */
2181
2182	/* do a STARTUP IPI */
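	/*
	 * 0x600 selects STARTUP delivery mode (bits 8-10 = 110); the low
	 * byte carries the page-number vector computed above.
	 */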
2183	lapic.icr_lo = icr_lo | 0x00000600 | vector;
2184	while (lapic.icr_lo & APIC_DELSTAT_MASK)
2185		 /* spin */ ;
2186	u_sleep(200);		/* wait ~200 us */
2187
2188	/*
2189	 * Finally we send a second STARTUP IPI.  It should run IF the previous
2190	 * STARTUP IPI was cancelled by a latched INIT IPI; otherwise it will
2191	 * be ignored, as only ONE STARTUP IPI is recognized after a hardware
2192	 * RESET or INIT IPI.
2193	 */
2194
2195	lapic.icr_lo = icr_lo | 0x00000600 | vector;
2196	while (lapic.icr_lo & APIC_DELSTAT_MASK)
2197		 /* spin */ ;
2198	u_sleep(200);		/* wait ~200 us */
2199
2200	/* wait for it to start */
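	/*
	 * The AP's startup code is expected to increment mp_ncpus once it
	 * is alive, so give it roughly five seconds to push the count
	 * above the snapshot taken in 'cpus' before reporting failure.
	 */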
2201	set_apic_timer(5000000);/* == 5 seconds */
2202	while (read_apic_timer())
2203		if (mp_ncpus > cpus)
2204			return 1;	/* return SUCCESS */
2205
2206	return 0;		/* return FAILURE */
2207}
2208
2209/*
2210 * Flush the TLB on all other CPU's
2211 *
2212 * XXX: Needs to handshake and wait for completion before proceeding.
2213 */
2214void
2215smp_invltlb(void)
2216{
2217#if defined(APIC_IO)
2218	if (smp_started && invltlb_ok)
2219		smp_ipi_all_but_self(IPI_INVLTLB);
2220#endif  /* APIC_IO */
2221}
2222
2223void
2224invlpg(u_int addr)
2225{
2226	__asm   __volatile("invlpg (%0)"::"r"(addr):"memory");
2227
2228	/* send a message to the other CPUs */
2229	smp_invltlb();
2230}
2231
2232void
2233invltlb(void)
2234{
2235	u_long  temp;
2236
2237	/*
2238	 * This should be implemented as load_cr3(rcr3()) when load_cr3() is
2239	 * inlined.
2240	 */
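	/* Reloading %cr3 with its current value flushes all non-global
	 * TLB entries on this CPU. */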
2241	__asm __volatile("movl %%cr3, %0; movl %0, %%cr3":"=r"(temp) :: "memory");
2242
2243	/* send a message to the other CPUs */
2244	smp_invltlb();
2245}
2246
2247
2248/*
2249 * This is called once the rest of the system is up and running and we're
2250 * ready to let the AP's out of the pen.
2251 */
2252void
2253ap_init(void)
2254{
2255	u_int	apic_id;
2256
2257	/* spin until all the AP's are ready */
2258	while (!aps_ready)
2259		/* spin */ ;
2260
2261	/*
2262	 * Set curproc to our per-cpu idleproc so that mutexes have
2263	 * something unique to lock with.
2264	 */
2265	PCPU_SET(curproc, PCPU_GET(idleproc));
2266	PCPU_SET(spinlocks, NULL);
2267
2268	/* lock against other AP's that are waking up */
2269	mtx_lock_spin(&ap_boot_mtx);
2270
2271	/* BSP may have changed PTD while we're waiting for the lock */
2272	cpu_invltlb();
2273
2274	smp_cpus++;
2275
2276#if defined(I586_CPU) && !defined(NO_F00F_HACK)
2277	lidt(&r_idt);
2278#endif
2279
2280	/* Build our map of 'other' CPUs. */
2281	PCPU_SET(other_cpus, all_cpus & ~(1 << PCPU_GET(cpuid)));
2282
2283	printf("SMP: AP CPU #%d Launched!\n", PCPU_GET(cpuid));
2284
2285	/* set up CPU registers and state */
2286	cpu_setregs();
2287
2288	/* set up FPU state on the AP */
2289	npxinit(__INITIAL_NPXCW__);
2290
2291	/* A quick check from sanity claus */
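	/*
	 * Bits 24-27 of the local APIC ID register hold our physical APIC
	 * ID; map it back to a logical CPU number and verify that it
	 * matches the cpuid the BSP assigned us.
	 */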
2292	apic_id = (apic_id_to_logical[(lapic.id & 0x0f000000) >> 24]);
2293	if (PCPU_GET(cpuid) != apic_id) {
2294		printf("SMP: cpuid = %d\n", PCPU_GET(cpuid));
2295		printf("SMP: apic_id = %d\n", apic_id);
2296		printf("PTD[MPPTDI] = %p\n", (void *)PTD[MPPTDI]);
2297		panic("cpuid mismatch! boom!!");
2298	}
2299
2300	/* Init local apic for irq's */
2301	apic_initialize();
2302
2303	/* Set memory range attributes for this CPU to match the BSP */
2304	mem_range_AP_init();
2305
2306	/*
2307	 * Activate smp_invltlb, although strictly speaking this isn't quite
2308	 * correct yet.  We should have a bitfield of CPUs willing to accept
2309	 * TLB flush IPIs, or something similar, and synchronize with them.
2310	 */
2311	if (smp_cpus == mp_ncpus) {
2312		invltlb_ok = 1;
2313		smp_started = 1; /* enable IPI's, tlb shootdown, freezes etc */
2314		smp_active = 1;	 /* historic */
2315	}
2316
2317	/* let other AP's wake up now */
2318	mtx_unlock_spin(&ap_boot_mtx);
2319
2320	/* wait until all the AP's are up */
2321	while (smp_started == 0)
2322		; /* nothing */
2323
2324	microuptime(PCPU_PTR(switchtime));
2325	PCPU_SET(switchticks, ticks);
2326
2327	/* ok, now grab sched_lock and enter the scheduler */
2328	enable_intr();
2329	mtx_lock_spin(&sched_lock);
2330	cpu_throw();	/* doesn't return */
2331
2332	panic("scheduler returned us to ap_init");
2333}
2334
2335#define CHECKSTATE_USER	0
2336#define CHECKSTATE_SYS	1
2337#define CHECKSTATE_INTR	2
2338
2339/* Do not staticize.  Used from apic_vector.s */
2340struct proc*	checkstate_curproc[MAXCPU];
2341int		checkstate_cpustate[MAXCPU];
2342u_long		checkstate_pc[MAXCPU];
2343
2344#define PC_TO_INDEX(pc, prof)				\
2345        ((int)(((u_quad_t)((pc) - (prof)->pr_off) *	\
2346            (u_quad_t)((prof)->pr_scale)) >> 16) & ~1)
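/*
 * PC_TO_INDEX mirrors the profil(2)-style scaling used by addupc_intr():
 * the offset of 'pc' into the profiled region is multiplied by the 16-bit
 * fixed-point scale factor and rounded down to an even buffer index.
 */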
2347
2348static void
2349addupc_intr_forwarded(struct proc *p, int id, int *astmap)
2350{
2351	int i;
2352	struct uprof *prof;
2353	u_long pc;
2354
2355	pc = checkstate_pc[id];
2356	prof = &p->p_stats->p_prof;
2357	if (pc >= prof->pr_off &&
2358	    (i = PC_TO_INDEX(pc, prof)) < prof->pr_size) {
2359		mtx_assert(&sched_lock, MA_OWNED);
2360		if ((p->p_sflag & PS_OWEUPC) == 0) {
2361			prof->pr_addr = pc;
2362			prof->pr_ticks = 1;
2363			p->p_sflag |= PS_OWEUPC;
2364		}
2365		*astmap |= (1 << id);
2366	}
2367}
2368
2369static void
2370forwarded_statclock(int id, int pscnt, int *astmap)
2371{
2372	struct pstats *pstats;
2373	long rss;
2374	struct rusage *ru;
2375	struct vmspace *vm;
2376	int cpustate;
2377	struct proc *p;
2378#ifdef GPROF
2379	register struct gmonparam *g;
2380	int i;
2381#endif
2382
2383	mtx_assert(&sched_lock, MA_OWNED);
2384	p = checkstate_curproc[id];
2385	cpustate = checkstate_cpustate[id];
2386
2387	/* XXX */
2388	if (p->p_ithd)
2389		cpustate = CHECKSTATE_INTR;
2390	else if (p == SMP_prvspace[id].globaldata.gd_idleproc)
2391		cpustate = CHECKSTATE_SYS;
2392
2393	switch (cpustate) {
2394	case CHECKSTATE_USER:
2395		if (p->p_sflag & PS_PROFIL)
2396			addupc_intr_forwarded(p, id, astmap);
2397		if (pscnt > 1)
2398			return;
2399		p->p_uticks++;
2400		if (p->p_nice > NZERO)
2401			cp_time[CP_NICE]++;
2402		else
2403			cp_time[CP_USER]++;
2404		break;
2405	case CHECKSTATE_SYS:
2406#ifdef GPROF
2407		/*
2408		 * Kernel statistics are just like addupc_intr, only easier.
2409		 */
2410		g = &_gmonparam;
2411		if (g->state == GMON_PROF_ON) {
2412			i = checkstate_pc[id] - g->lowpc;
2413			if (i < g->textsize) {
2414				i /= HISTFRACTION * sizeof(*g->kcount);
2415				g->kcount[i]++;
2416			}
2417		}
2418#endif
2419		if (pscnt > 1)
2420			return;
2421
2422		p->p_sticks++;
2423		if (p == SMP_prvspace[id].globaldata.gd_idleproc)
2424			cp_time[CP_IDLE]++;
2425		else
2426			cp_time[CP_SYS]++;
2427		break;
2428	case CHECKSTATE_INTR:
2429	default:
2430#ifdef GPROF
2431		/*
2432		 * Kernel statistics are just like addupc_intr, only easier.
2433		 */
2434		g = &_gmonparam;
2435		if (g->state == GMON_PROF_ON) {
2436			i = checkstate_pc[id] - g->lowpc;
2437			if (i < g->textsize) {
2438				i /= HISTFRACTION * sizeof(*g->kcount);
2439				g->kcount[i]++;
2440			}
2441		}
2442#endif
2443		if (pscnt > 1)
2444			return;
2445		KASSERT(p != NULL, ("NULL process in interrupt state"));
2446		p->p_iticks++;
2447		cp_time[CP_INTR]++;
2448	}
2449
2450	schedclock(p);
2451
2452	/* Update resource usage integrals and maximums. */
2453	if ((pstats = p->p_stats) != NULL &&
2454	    (ru = &pstats->p_ru) != NULL &&
2455	    (vm = p->p_vmspace) != NULL) {
2456		ru->ru_ixrss += pgtok(vm->vm_tsize);
2457		ru->ru_idrss += pgtok(vm->vm_dsize);
2458		ru->ru_isrss += pgtok(vm->vm_ssize);
2459		rss = pgtok(vmspace_resident_count(vm));
2460		if (ru->ru_maxrss < rss)
2461			ru->ru_maxrss = rss;
2462	}
2463}
2464
2465void
2466forward_statclock(int pscnt)
2467{
2468	int map;
2469	int id;
2470	int i;
2471
2472	/* Kludge. We don't yet have separate locks for the interrupts
2473	 * and the kernel. This means that we cannot let the other processors
2474	 * handle complex interrupts while inhibiting them from entering
2475	 * the kernel in a non-interrupt context.
2476	 *
2477	 * What we can do, without changing the locking mechanisms yet,
2478	 * is to let the other processors handle a very simple interrupt
2479	 * (which determines the processor states), and do the main
2480	 * work ourselves.
2481	 */
2482
2483	CTR1(KTR_SMP, "forward_statclock(%d)", pscnt);
2484
2485	if (!smp_started || !invltlb_ok || cold || panicstr)
2486		return;
2487
2488	/* Step 1: Probe state   (user, cpu, interrupt, spinlock, idle ) */
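	/*
	 * The IPI_CHECKSTATE handler in apic_vector.s records each target
	 * CPU's current process, state and program counter in the
	 * checkstate_* arrays above and then sets its bit in
	 * checkstate_probed_cpus, which is polled below.
	 */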
2489
2490	map = PCPU_GET(other_cpus) & ~stopped_cpus;
2491	checkstate_probed_cpus = 0;
2492	if (map != 0)
2493		smp_ipi_selected(map, IPI_CHECKSTATE);
2494
2495	i = 0;
2496	while (checkstate_probed_cpus != map) {
2497		/* spin */
2498		i++;
2499		if (i == 100000) {
2500#ifdef DIAGNOSTIC
2501			printf("forward_statclock: checkstate %x\n",
2502			       checkstate_probed_cpus);
2503#endif
2504			break;
2505		}
2506	}
2507
2508	/*
2509	 * Step 2: walk through other processors' processes, update ticks and
2510	 * profiling info.
2511	 */
2512
2513	map = 0;
2514	for (id = 0; id < mp_ncpus; id++) {
2515		if (id == PCPU_GET(cpuid))
2516			continue;
2517		if (((1 << id) & checkstate_probed_cpus) == 0)
2518			continue;
2519		forwarded_statclock(id, pscnt, &map);
2520	}
2521	if (map != 0) {
2522		checkstate_need_ast |= map;
2523		smp_ipi_selected(map, IPI_AST);
2524		i = 0;
2525		while ((checkstate_need_ast & map) != 0) {
2526			/* spin */
2527			i++;
2528			if (i > 100000) {
2529#ifdef DIAGNOSTIC
2530				printf("forward_statclock: dropped ast 0x%x\n",
2531				       checkstate_need_ast & map);
2532#endif
2533				break;
2534			}
2535		}
2536	}
2537}
2538
2539void
2540forward_hardclock(int pscnt)
2541{
2542	int map;
2543	int id;
2544	struct proc *p;
2545	struct pstats *pstats;
2546	int i;
2547
2548	/* Kludge. We don't yet have separate locks for the interrupts
2549	 * and the kernel. This means that we cannot let the other processors
2550	 * handle complex interrupts while inhibiting them from entering
2551	 * the kernel in a non-interrupt context.
2552	 *
2553	 * What we can do, without changing the locking mechanisms yet,
2554	 * is to let the other processors handle a very simple interrupt
2555	 * (which determines the processor states), and do the main
2556	 * work ourselves.
2557	 */
2558
2559	CTR1(KTR_SMP, "forward_hardclock(%d)", pscnt);
2560
2561	if (!smp_started || !invltlb_ok || cold || panicstr)
2562		return;
2563
2564	/* Step 1: Probe state   (user, cpu, interrupt, spinlock, idle) */
2565
2566	map = PCPU_GET(other_cpus) & ~stopped_cpus;
2567	checkstate_probed_cpus = 0;
2568	if (map != 0)
2569		smp_ipi_selected(map, IPI_CHECKSTATE);
2570
2571	i = 0;
2572	while (checkstate_probed_cpus != map) {
2573		/* spin */
2574		i++;
2575		if (i == 100000) {
2576#ifdef DIAGNOSTIC
2577			printf("forward_hardclock: checkstate %x\n",
2578			       checkstate_probed_cpus);
2579#endif
2580			break;
2581		}
2582	}
2583
2584	/*
2585	 * Step 2: walk through other processors' processes, update virtual
2586	 * timer and profiling timer. If stathz == 0, also update ticks and
2587	 * profiling info.
2588	 */
2589
2590	map = 0;
2591	for (id = 0; id < mp_ncpus; id++) {
2592		if (id == PCPU_GET(cpuid))
2593			continue;
2594		if (((1 << id) & checkstate_probed_cpus) == 0)
2595			continue;
2596		p = checkstate_curproc[id];
2597		if (p) {
2598			pstats = p->p_stats;
2599			if (checkstate_cpustate[id] == CHECKSTATE_USER &&
2600			    timevalisset(&pstats->p_timer[ITIMER_VIRTUAL].it_value) &&
2601			    itimerdecr(&pstats->p_timer[ITIMER_VIRTUAL], tick) == 0) {
2602				p->p_sflag |= PS_ALRMPEND;
2603				map |= (1 << id);
2604			}
2605			if (timevalisset(&pstats->p_timer[ITIMER_PROF].it_value) &&
2606			    itimerdecr(&pstats->p_timer[ITIMER_PROF], tick) == 0) {
2607				p->p_sflag |= PS_PROFPEND;
2608				map |= (1 << id);
2609			}
2610		}
2611		if (stathz == 0) {
2612			forwarded_statclock(id, pscnt, &map);
2613		}
2614	}
2615	if (map != 0) {
2616		checkstate_need_ast |= map;
2617		smp_ipi_selected(map, IPI_AST);
2618		i = 0;
2619		while ((checkstate_need_ast & map) != 0) {
2620			/* spin */
2621			i++;
2622			if (i > 100000) {
2623#ifdef DIAGNOSTIC
2624				printf("forward_hardclock: dropped ast 0x%x\n",
2625				       checkstate_need_ast & map);
2626#endif
2627				break;
2628			}
2629		}
2630	}
2631}
2632
2633void
2634forward_signal(struct proc *p)
2635{
2636	int map;
2637	int id;
2638	int i;
2639
2640	/* Kludge. We don't yet have separate locks for the interrupts
2641	 * and the kernel. This means that we cannot let the other processors
2642	 * handle complex interrupts while inhibiting them from entering
2643	 * the kernel in a non-interrupt context.
2644	 *
2645	 * What we can do, without changing the locking mechanisms yet,
2646	 * is to let the other processors handle a very simple interrupt
2647	 * (which determines the processor states), and do the main
2648	 * work ourselves.
2649	 */
2650
2651	CTR1(KTR_SMP, "forward_signal(%p)", p);
2652
2653	if (!smp_started || !invltlb_ok || cold || panicstr)
2654		return;
2655	if (!forward_signal_enabled)
2656		return;
2657	mtx_lock_spin(&sched_lock);
2658	while (1) {
2659		if (p->p_stat != SRUN) {
2660			mtx_unlock_spin(&sched_lock);
2661			return;
2662		}
2663		id = p->p_oncpu;
2664		mtx_unlock_spin(&sched_lock);
2665		if (id == 0xff)
2666			return;
2667		map = (1<<id);
2668		checkstate_need_ast |= map;
2669		smp_ipi_selected(map, IPI_AST);
2670		i = 0;
2671		while ((checkstate_need_ast & map) != 0) {
2672			/* spin */
2673			i++;
2674			if (i > 100000) {
2675#if 0
2676				printf("forward_signal: dropped ast 0x%x\n",
2677				       checkstate_need_ast & map);
2678#endif
2679				break;
2680			}
2681		}
2682		mtx_lock_spin(&sched_lock);
2683		if (id == p->p_oncpu) {
2684			mtx_unlock_spin(&sched_lock);
2685			return;
2686		}
2687	}
2688}
2689
2690void
2691forward_roundrobin(void)
2692{
2693	u_int map;
2694	int i;
2695
2696	CTR0(KTR_SMP, "forward_roundrobin()");
2697
2698	if (!smp_started || !invltlb_ok || cold || panicstr)
2699		return;
2700	if (!forward_roundrobin_enabled)
2701		return;
2702	resched_cpus |= PCPU_GET(other_cpus);
2703	map = PCPU_GET(other_cpus) & ~stopped_cpus;
2704#if 1
2705	smp_ipi_selected(map, IPI_AST);
2706#else
2707	smp_ipi_all_but_self(IPI_AST);
2708#endif
2709	i = 0;
2710	while ((checkstate_need_ast & map) != 0) {
2711		/* spin */
2712		i++;
2713		if (i > 100000) {
2714#if 0
2715			printf("forward_roundrobin: dropped ast 0x%x\n",
2716			       checkstate_need_ast & map);
2717#endif
2718			break;
2719		}
2720	}
2721}
2722
2723/*
2724 * When called, the executing CPU will send an IPI to all other CPUs
2725 *  requesting that they halt execution.
2726 *
2727 * Usually (but not necessarily) called with 'other_cpus' as its arg.
2728 *
2729 *  - Signals all CPUs in map to stop.
2730 *  - Waits for each to stop.
2731 *
2732 * Returns:
2733 *  -1: error
2734 *   0: NA
2735 *   1: ok
2736 *
2737 * XXX FIXME: this is not MP-safe, needs a lock to prevent multiple CPUs
2738 *            from executing at the same time.
2739 */
2740int
2741stop_cpus(u_int map)
2742{
2743	int count = 0;
2744
2745	if (!smp_started)
2746		return 0;
2747
2748	/* send the Xcpustop IPI to all CPUs in map */
2749	smp_ipi_selected(map, IPI_STOP);
2750
2751	while (count++ < 100000 && (stopped_cpus & map) != map)
2752		/* spin */ ;
2753
2754#ifdef DIAGNOSTIC
2755	if ((stopped_cpus & map) != map)
2756		printf("Warning: CPUs 0x%x did not stop!\n",
2757		    (~(stopped_cpus & map)) & map);
2758#endif
2759
2760	return 1;
2761}
2762
2763
2764/*
2765 * Called by a CPU to restart stopped CPUs.
2766 *
2767 * Usually (but not necessarily) called with 'stopped_cpus' as its arg.
2768 *
2769 *  - Signals all CPUs in map to restart.
2770 *  - Waits for each to restart.
2771 *
2772 * Returns:
2773 *  -1: error
2774 *   0: NA
2775 *   1: ok
2776 */
2777int
2778restart_cpus(u_int map)
2779{
2780	int count = 0;
2781
2782	if (!smp_started)
2783		return 0;
2784
2785	started_cpus = map;		/* signal other cpus to restart */
2786
2787	/* wait for each to clear its bit */
2788	while (count++ < 100000 && (stopped_cpus & map) != 0)
2789		/* spin */ ;
2790
2791#ifdef DIAGNOSTIC
2792	if ((stopped_cpus & map) != 0)
2793		printf("Warning: CPUs 0x%x did not restart!\n",
2794		    (~(stopped_cpus & map)) & map);
2795#endif
2796
2797	return 1;
2798}
2799
2800
2801#ifdef APIC_INTR_REORDER
2802/*
2803 *	Maintain mapping from softintr vector to isr bit in local apic.
2804 */
2805void
2806set_lapic_isrloc(int intr, int vector)
2807{
2808	if (intr < 0 || intr > 32)
2809		panic("set_lapic_isrloc: bad intr argument: %d",intr);
2810	if (vector < ICU_OFFSET || vector > 255)
2811		panic("set_lapic_isrloc: bad vector argument: %d",vector);
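	/*
	 * The in-service register is an array of eight 32-bit APIC
	 * registers spaced 16 bytes apart, so advance (vector >> 5) * 16
	 * bytes from isr0 and use bit (vector & 31) within that word.
	 */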
2812	apic_isrbit_location[intr].location = &lapic.isr0 + ((vector>>5)<<2);
2813	apic_isrbit_location[intr].bit = (1<<(vector & 31));
2814}
2815#endif
2816
2817/*
2818 * All-CPU rendezvous.  CPUs are signalled, all execute the setup function
2819 * (if specified), rendezvous, execute the action function (if specified),
2820 * rendezvous again, execute the teardown function (if specified), and then
2821 * resume.
2822 *
2823 * Note that the supplied external functions _must_ be reentrant and aware
2824 * that they are running in parallel and in an unknown lock context.
2825 */
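/*
 * A typical (hypothetical) use is smp_rendezvous(NULL, func, NULL, arg),
 * which simply runs func(arg) once on every CPU with the entry and exit
 * barriers still enforced around the action.
 */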
2826static void (*smp_rv_setup_func)(void *arg);
2827static void (*smp_rv_action_func)(void *arg);
2828static void (*smp_rv_teardown_func)(void *arg);
2829static void *smp_rv_func_arg;
2830static volatile int smp_rv_waiters[2];
2831
2832void
2833smp_rendezvous_action(void)
2834{
2835	/* setup function */
2836	if (smp_rv_setup_func != NULL)
2837		smp_rv_setup_func(smp_rv_func_arg);
2838	/* spin on entry rendezvous */
2839	atomic_add_int(&smp_rv_waiters[0], 1);
2840	while (smp_rv_waiters[0] < mp_ncpus)
2841		;
2842	/* action function */
2843	if (smp_rv_action_func != NULL)
2844		smp_rv_action_func(smp_rv_func_arg);
2845	/* spin on exit rendezvous */
2846	atomic_add_int(&smp_rv_waiters[1], 1);
2847	while (smp_rv_waiters[1] < mp_ncpus)
2848		;
2849	/* teardown function */
2850	if (smp_rv_teardown_func != NULL)
2851		smp_rv_teardown_func(smp_rv_func_arg);
2852}
2853
2854void
2855smp_rendezvous(void (* setup_func)(void *),
2856	       void (* action_func)(void *),
2857	       void (* teardown_func)(void *),
2858	       void *arg)
2859{
2860
2861	/* obtain rendezvous lock */
2862	mtx_lock_spin(&smp_rv_mtx);
2863
2864	/* set static function pointers */
2865	smp_rv_setup_func = setup_func;
2866	smp_rv_action_func = action_func;
2867	smp_rv_teardown_func = teardown_func;
2868	smp_rv_func_arg = arg;
2869	smp_rv_waiters[0] = 0;
2870	smp_rv_waiters[1] = 0;
2871
2872	/*
2873	 * signal the other processors, which will enter the IPI handler with interrupts off
2874	 */
2875	smp_ipi_all_but_self(IPI_RENDEZVOUS);
2876
2877	/* call executor function */
2878	smp_rendezvous_action();
2879
2880	/* release lock */
2881	mtx_unlock_spin(&smp_rv_mtx);
2882}
2883
2884/*
2885 * send an IPI to a set of cpus.
2886 */
2887void
2888smp_ipi_selected(u_int32_t cpus, u_int ipi)
2889{
2890
2891	CTR2(KTR_SMP, __func__ ": cpus: %x ipi: %x", cpus, ipi);
2892	selected_apic_ipi(cpus, ipi, APIC_DELMODE_FIXED);
2893}
2894
2895/*
2896 * send an IPI containing 'ipi' to all CPUs, including myself
2897 */
2898void
2899smp_ipi_all(u_int ipi)
2900{
2901
2902	CTR1(KTR_SMP, __func__ ": ipi: %x", ipi);
2903	apic_ipi(APIC_DEST_ALLISELF, ipi, APIC_DELMODE_FIXED);
2904}
2905
2906/*
2907 * send an IPI to all CPUs EXCEPT myself
2908 */
2909void
2910smp_ipi_all_but_self(u_int ipi)
2911{
2912
2913	CTR1(KTR_SMP, __func__ ": ipi: %x", ipi);
2914	apic_ipi(APIC_DEST_ALLESELF, ipi, APIC_DELMODE_FIXED);
2915}
2916
2917/*
2918 * send an IPI to myself
2919 */
2920void
2921smp_ipi_self(u_int ipi)
2922{
2923
2924	CTR1(KTR_SMP, __func__ ": ipi: %x", ipi);
2925	apic_ipi(APIC_DEST_SELF, ipi, APIC_DELMODE_FIXED);
2926}
2927
2928void
2929release_aps(void *dummy __unused)
2930{
2931	atomic_store_rel_int(&aps_ready, 1);
2932}
2933
2934SYSINIT(start_aps, SI_SUB_SMP, SI_ORDER_FIRST, release_aps, NULL);
2935