mp_machdep.c revision 71525
1/*
2 * Copyright (c) 1996, by Steve Passe
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 *    notice, this list of conditions and the following disclaimer.
10 * 2. The name of the developer may NOT be used to endorse or promote products
11 *    derived from this software without specific prior written permission.
12 *
13 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
14 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
16 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
17 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
18 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
19 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
20 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
21 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
22 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
23 * SUCH DAMAGE.
24 *
25 * $FreeBSD: head/sys/i386/i386/mp_machdep.c 71525 2001-01-24 09:48:52Z jhb $
26 */
27
28#include "opt_cpu.h"
29#include "opt_user_ldt.h"
30
31#ifdef SMP
32#include <machine/smptests.h>
33#else
34#error
35#endif
36
37#include <sys/param.h>
38#include <sys/bus.h>
39#include <sys/systm.h>
40#include <sys/kernel.h>
41#include <sys/proc.h>
42#include <sys/sysctl.h>
43#include <sys/malloc.h>
44#include <sys/memrange.h>
45#include <sys/mutex.h>
46#ifdef BETTER_CLOCK
47#include <sys/dkstat.h>
48#endif
49#include <sys/cons.h>	/* cngetc() */
50
51#include <vm/vm.h>
52#include <vm/vm_param.h>
53#include <vm/pmap.h>
54#include <vm/vm_kern.h>
55#include <vm/vm_extern.h>
56#ifdef BETTER_CLOCK
57#include <sys/lock.h>
58#include <vm/vm_map.h>
59#include <sys/user.h>
60#ifdef GPROF
61#include <sys/gmon.h>
62#endif
63#endif
64
65#include <machine/smp.h>
66#include <machine/apic.h>
67#include <machine/atomic.h>
68#include <machine/cpufunc.h>
69#include <machine/mpapic.h>
70#include <machine/psl.h>
71#include <machine/segments.h>
72#include <machine/smptests.h>	/** TEST_DEFAULT_CONFIG, TEST_TEST1 */
73#include <machine/tss.h>
74#include <machine/specialreg.h>
75#include <machine/globaldata.h>
76
77#if defined(APIC_IO)
78#include <machine/md_var.h>		/* setidt() */
79#include <i386/isa/icu.h>		/* IPIs */
80#include <i386/isa/intr_machdep.h>	/* IPIs */
81#endif	/* APIC_IO */
82
83#if defined(TEST_DEFAULT_CONFIG)
84#define MPFPS_MPFB1	TEST_DEFAULT_CONFIG
85#else
86#define MPFPS_MPFB1	mpfps->mpfb1
87#endif  /* TEST_DEFAULT_CONFIG */
88
89#define WARMBOOT_TARGET		0
90#define WARMBOOT_OFF		(KERNBASE + 0x0467)
91#define WARMBOOT_SEG		(KERNBASE + 0x0469)
92
93#ifdef PC98
94#define BIOS_BASE		(0xe8000)
95#define BIOS_SIZE		(0x18000)
96#else
97#define BIOS_BASE		(0xf0000)
98#define BIOS_SIZE		(0x10000)
99#endif
100#define BIOS_COUNT		(BIOS_SIZE/4)
101
102#define CMOS_REG		(0x70)
103#define CMOS_DATA		(0x71)
104#define BIOS_RESET		(0x0f)
105#define BIOS_WARM		(0x0a)
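/*
 * CMOS_REG/CMOS_DATA are the standard RTC/CMOS index and data I/O ports.
 * BIOS_RESET (0x0f) is the index of the CMOS shutdown status byte; writing
 * BIOS_WARM (0x0a) there asks the BIOS to perform a warm start on the next
 * reset by jumping through the 40:67 vector named by WARMBOOT_OFF/SEG above.
 */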
106
107#define PROCENTRY_FLAG_EN	0x01
108#define PROCENTRY_FLAG_BP	0x02
109#define IOAPICENTRY_FLAG_EN	0x01
110
111
112/* MP Floating Pointer Structure */
113typedef struct MPFPS {
114	char    signature[4];
115	void   *pap;
116	u_char  length;
117	u_char  spec_rev;
118	u_char  checksum;
119	u_char  mpfb1;
120	u_char  mpfb2;
121	u_char  mpfb3;
122	u_char  mpfb4;
123	u_char  mpfb5;
124}      *mpfps_t;
125
126/* MP Configuration Table Header */
127typedef struct MPCTH {
128	char    signature[4];
129	u_short base_table_length;
130	u_char  spec_rev;
131	u_char  checksum;
132	u_char  oem_id[8];
133	u_char  product_id[12];
134	void   *oem_table_pointer;
135	u_short oem_table_size;
136	u_short entry_count;
137	void   *apic_address;
138	u_short extended_table_length;
139	u_char  extended_table_checksum;
140	u_char  reserved;
141}      *mpcth_t;
142
143
144typedef struct PROCENTRY {
145	u_char  type;
146	u_char  apic_id;
147	u_char  apic_version;
148	u_char  cpu_flags;
149	u_long  cpu_signature;
150	u_long  feature_flags;
151	u_long  reserved1;
152	u_long  reserved2;
153}      *proc_entry_ptr;
154
155typedef struct BUSENTRY {
156	u_char  type;
157	u_char  bus_id;
158	char    bus_type[6];
159}      *bus_entry_ptr;
160
161typedef struct IOAPICENTRY {
162	u_char  type;
163	u_char  apic_id;
164	u_char  apic_version;
165	u_char  apic_flags;
166	void   *apic_address;
167}      *io_apic_entry_ptr;
168
169typedef struct INTENTRY {
170	u_char  type;
171	u_char  int_type;
172	u_short int_flags;
173	u_char  src_bus_id;
174	u_char  src_bus_irq;
175	u_char  dst_apic_id;
176	u_char  dst_apic_int;
177}      *int_entry_ptr;
178
179/* descriptions of MP basetable entries */
180typedef struct BASETABLE_ENTRY {
181	u_char  type;
182	u_char  length;
183	char    name[16];
184}       basetable_entry;
185
186/*
187 * this code MUST be enabled here and in mpboot.s.
188 * it follows the very early stages of AP boot by placing values in CMOS RAM.
189 * it NORMALLY will never be needed, hence the primitive method for enabling it.
190 *
191#define CHECK_POINTS
192 */
193
194#if defined(CHECK_POINTS) && !defined(PC98)
195#define CHECK_READ(A)	 (outb(CMOS_REG, (A)), inb(CMOS_DATA))
196#define CHECK_WRITE(A,D) (outb(CMOS_REG, (A)), outb(CMOS_DATA, (D)))
197
198#define CHECK_INIT(D);				\
199	CHECK_WRITE(0x34, (D));			\
200	CHECK_WRITE(0x35, (D));			\
201	CHECK_WRITE(0x36, (D));			\
202	CHECK_WRITE(0x37, (D));			\
203	CHECK_WRITE(0x38, (D));			\
204	CHECK_WRITE(0x39, (D));
205
206#define CHECK_PRINT(S);				\
207	printf("%s: %d, %d, %d, %d, %d, %d\n",	\
208	   (S),					\
209	   CHECK_READ(0x34),			\
210	   CHECK_READ(0x35),			\
211	   CHECK_READ(0x36),			\
212	   CHECK_READ(0x37),			\
213	   CHECK_READ(0x38),			\
214	   CHECK_READ(0x39));
215
216#else				/* CHECK_POINTS */
217
218#define CHECK_INIT(D)
219#define CHECK_PRINT(S)
220
221#endif				/* CHECK_POINTS */
222
223/*
224 * Values to send to the POST hardware.
225 */
226#define MP_BOOTADDRESS_POST	0x10
227#define MP_PROBE_POST		0x11
228#define MPTABLE_PASS1_POST	0x12
229
230#define MP_START_POST		0x13
231#define MP_ENABLE_POST		0x14
232#define MPTABLE_PASS2_POST	0x15
233
234#define START_ALL_APS_POST	0x16
235#define INSTALL_AP_TRAMP_POST	0x17
236#define START_AP_POST		0x18
237
238#define MP_ANNOUNCE_POST	0x19
239
240/* used to hold the APs until we are ready to release them */
241struct simplelock	ap_boot_lock;
242
243/** XXX FIXME: where does this really belong, isa.h/isa.c perhaps? */
244int	current_postcode;
245
246/** XXX FIXME: what system files declare these??? */
247extern struct region_descriptor r_gdt, r_idt;
248
249int	bsp_apic_ready = 0;	/* flags usability of BSP apic */
250int	mp_ncpus;		/* # of CPUs, including BSP */
251int	mp_naps;		/* # of Application processors */
252int	mp_nbusses;		/* # of busses */
253int	mp_napics;		/* # of IO APICs */
254int	boot_cpu_id;		/* designated BSP */
255vm_offset_t cpu_apic_address;
256vm_offset_t io_apic_address[NAPICID];	/* NAPICID is more than enough */
257extern	int nkpt;
258
259u_int32_t cpu_apic_versions[MAXCPU];
260u_int32_t *io_apic_versions;
261
262#ifdef APIC_INTR_REORDER
263struct {
264	volatile int *location;
265	int bit;
266} apic_isrbit_location[32];
267#endif
268
269struct apic_intmapinfo	int_to_apicintpin[APIC_INTMAPSIZE];
270
271/*
272 * APIC ID logical/physical mapping structures.
273 * We oversize these to simplify boot-time config.
274 */
275int     cpu_num_to_apic_id[NAPICID];
276int     io_num_to_apic_id[NAPICID];
277int     apic_id_to_logical[NAPICID];
278
279
280/* Bitmap of all available CPUs */
281u_int	all_cpus;
282
283/* AP uses this during bootstrap.  Do not staticize.  */
284char *bootSTK;
285static int bootAP;
286
287/* Hotwire a 0->4MB V==P mapping */
288extern pt_entry_t *KPTphys;
289
290/* SMP page table page */
291extern pt_entry_t *SMPpt;
292
293struct pcb stoppcbs[MAXCPU];
294
295int smp_started;		/* has the system started? */
296int smp_active = 0;		/* are the APs allowed to run? */
297SYSCTL_INT(_machdep, OID_AUTO, smp_active, CTLFLAG_RW, &smp_active, 0, "");
298
299/* XXX maybe should be hw.ncpu */
300static int smp_cpus = 1;	/* how many CPUs are running */
301SYSCTL_INT(_machdep, OID_AUTO, smp_cpus, CTLFLAG_RD, &smp_cpus, 0, "");
302
303int invltlb_ok = 0;	/* throttle smp_invltlb() till safe */
304SYSCTL_INT(_machdep, OID_AUTO, invltlb_ok, CTLFLAG_RW, &invltlb_ok, 0, "");
305
306/* Enable forwarding of a signal to a process running on a different CPU */
307static int forward_signal_enabled = 1;
308SYSCTL_INT(_machdep, OID_AUTO, forward_signal_enabled, CTLFLAG_RW,
309	   &forward_signal_enabled, 0, "");
310
311/* Enable forwarding of roundrobin to all other cpus */
312static int forward_roundrobin_enabled = 1;
313SYSCTL_INT(_machdep, OID_AUTO, forward_roundrobin_enabled, CTLFLAG_RW,
314	   &forward_roundrobin_enabled, 0, "");
315
316
317/*
318 * Local data and functions.
319 */
320
321static int	mp_capable;
322static u_int	boot_address;
323static u_int	base_memory;
324
325static int	picmode;		/* 0: virtual wire mode, 1: PIC mode */
326static mpfps_t	mpfps;
327static int	search_for_sig(u_int32_t target, int count);
328static void	mp_enable(u_int boot_addr);
329
330static void	mptable_pass1(void);
331static int	mptable_pass2(void);
332static void	default_mp_table(int type);
333static void	fix_mp_table(void);
334static void	setup_apic_irq_mapping(void);
335static void	init_locks(void);
336static int	start_all_aps(u_int boot_addr);
337static void	install_ap_tramp(u_int boot_addr);
338static int	start_ap(int logicalCpu, u_int boot_addr);
339void		ap_init(void);
340static int	apic_int_is_bus_type(int intr, int bus_type);
341static void	release_aps(void *dummy);
342
343/*
344 * initialize all the SMP locks
345 */
346
347/* critical region around IO APIC, apic_imen */
348struct simplelock	imen_lock;
349
350/* lock region used by kernel profiling */
351struct simplelock	mcount_lock;
352
353#ifdef USE_COMLOCK
354/* locks com (tty) data/hardware accesses: a FASTINTR() */
355struct simplelock	com_lock;
356#endif /* USE_COMLOCK */
357
358/* lock around the MP rendezvous */
359static struct simplelock smp_rv_lock;
360
361/* only 1 CPU can panic at a time :) */
362struct simplelock	panic_lock;
363
364static void
365init_locks(void)
366{
367	s_lock_init(&mcount_lock);
368
369	s_lock_init(&imen_lock);
370	s_lock_init(&smp_rv_lock);
371	s_lock_init(&panic_lock);
372
373#ifdef USE_COMLOCK
374	s_lock_init(&com_lock);
375#endif /* USE_COMLOCK */
376
377	s_lock_init(&ap_boot_lock);
378}
379
380/*
381 * Calculate usable address in base memory for AP trampoline code.
382 */
383u_int
384mp_bootaddress(u_int basemem)
385{
386	POSTCODE(MP_BOOTADDRESS_POST);
387
388	base_memory = basemem * 1024;	/* convert to bytes */
389
390	boot_address = base_memory & ~0xfff;	/* round down to 4k boundary */
391	if ((base_memory - boot_address) < bootMP_size)
392		boot_address -= 4096;	/* not enough, lower by 4k */
393
394	return boot_address;
395}
396
397
398/*
399 * Look for an Intel MP spec table (ie, SMP capable hardware).
400 */
401int
402mp_probe(void)
403{
404	int     x;
405	u_long  segment;
406	u_int32_t target;
407
408	POSTCODE(MP_PROBE_POST);
409
410	/* see if EBDA exists */
411	if ((segment = (u_long) * (u_short *) (KERNBASE + 0x40e)) != 0) {
412		/* search first 1K of EBDA */
413		target = (u_int32_t) (segment << 4);
414		if ((x = search_for_sig(target, 1024 / 4)) >= 0)
415			goto found;
416	} else {
417		/* last 1K of base memory, effective 'top of base' passed in */
418		target = (u_int32_t) (base_memory - 0x400);
419		if ((x = search_for_sig(target, 1024 / 4)) >= 0)
420			goto found;
421	}
422
423	/* search the BIOS */
424	target = (u_int32_t) BIOS_BASE;
425	if ((x = search_for_sig(target, BIOS_COUNT)) >= 0)
426		goto found;
427
428	/* nothing found */
429	mpfps = (mpfps_t)0;
430	mp_capable = 0;
431	return 0;
432
433found:
434	/* calculate needed resources */
435	mpfps = (mpfps_t)x;
436	mptable_pass1();
437
438	/* flag fact that we are running multiple processors */
439	mp_capable = 1;
440	return 1;
441}
442
443
444/*
445 * Initialize the SMP hardware and the APIC and start up the AP's.
446 */
447void
448mp_start(void)
449{
450	POSTCODE(MP_START_POST);
451
452	/* look for MP capable motherboard */
453	if (mp_capable)
454		mp_enable(boot_address);
455	else
456		panic("MP hardware not found!");
457}
458
459
460/*
461 * Print various information about the SMP system hardware and setup.
462 */
463void
464mp_announce(void)
465{
466	int     x;
467
468	POSTCODE(MP_ANNOUNCE_POST);
469
470	printf("FreeBSD/SMP: Multiprocessor motherboard\n");
471	printf(" cpu0 (BSP): apic id: %2d", CPU_TO_ID(0));
472	printf(", version: 0x%08x", cpu_apic_versions[0]);
473	printf(", at 0x%08x\n", cpu_apic_address);
474	for (x = 1; x <= mp_naps; ++x) {
475		printf(" cpu%d (AP):  apic id: %2d", x, CPU_TO_ID(x));
476		printf(", version: 0x%08x", cpu_apic_versions[x]);
477		printf(", at 0x%08x\n", cpu_apic_address);
478	}
479
480#if defined(APIC_IO)
481	for (x = 0; x < mp_napics; ++x) {
482		printf(" io%d (APIC): apic id: %2d", x, IO_TO_ID(x));
483		printf(", version: 0x%08x", io_apic_versions[x]);
484		printf(", at 0x%08x\n", io_apic_address[x]);
485	}
486#else
487	printf(" Warning: APIC I/O disabled\n");
488#endif	/* APIC_IO */
489}
490
491/*
492 * AP CPUs call this to sync up protected mode.
493 */
494void
495init_secondary(void)
496{
497	int	gsel_tss;
498	int	x, myid = bootAP;
499
500	gdt_segs[GPRIV_SEL].ssd_base = (int) &SMP_prvspace[myid];
501	gdt_segs[GPROC0_SEL].ssd_base =
502		(int) &SMP_prvspace[myid].globaldata.gd_common_tss;
503	SMP_prvspace[myid].globaldata.gd_prvspace =
504		&SMP_prvspace[myid].globaldata;
505
506	for (x = 0; x < NGDT; x++) {
507		ssdtosd(&gdt_segs[x], &gdt[myid * NGDT + x].sd);
508	}
509
510	r_gdt.rd_limit = NGDT * sizeof(gdt[0]) - 1;
511	r_gdt.rd_base = (int) &gdt[myid * NGDT];
512	lgdt(&r_gdt);			/* does magic intra-segment return */
513
514	lidt(&r_idt);
515
516	lldt(_default_ldt);
517#ifdef USER_LDT
518	PCPU_SET(currentldt, _default_ldt);
519#endif
520
521	gsel_tss = GSEL(GPROC0_SEL, SEL_KPL);
522	gdt[myid * NGDT + GPROC0_SEL].sd.sd_type = SDT_SYS386TSS;
523	PCPU_SET(common_tss.tss_esp0, 0); /* not used until after switch */
524	PCPU_SET(common_tss.tss_ss0, GSEL(GDATA_SEL, SEL_KPL));
525	PCPU_SET(common_tss.tss_ioopt, (sizeof (struct i386tss)) << 16);
526	PCPU_SET(tss_gdt, &gdt[myid * NGDT + GPROC0_SEL].sd);
527	PCPU_SET(common_tssd, *PCPU_GET(tss_gdt));
528	ltr(gsel_tss);
529
530	pmap_set_opt();
531}
532
533
534#if defined(APIC_IO)
535/*
536 * Final configuration of the BSP's local APIC:
537 *  - disable 'pic mode'.
538 *  - disable 'virtual wire mode'.
539 *  - enable NMI.
540 */
541void
542bsp_apic_configure(void)
543{
544	u_char		byte;
545	u_int32_t	temp;
546
547	/* leave 'pic mode' if necessary */
548	if (picmode) {
549		outb(0x22, 0x70);	/* select IMCR */
550		byte = inb(0x23);	/* current contents */
551		byte |= 0x01;		/* mask external INTR */
552		outb(0x23, byte);	/* disconnect 8259s/NMI */
553	}
554
555	/* mask lint0 (the 8259 'virtual wire' connection) */
556	temp = lapic.lvt_lint0;
557	temp |= APIC_LVT_M;		/* set the mask */
558	lapic.lvt_lint0 = temp;
559
560        /* setup lint1 to handle NMI */
561        temp = lapic.lvt_lint1;
562        temp &= ~APIC_LVT_M;		/* clear the mask */
563        lapic.lvt_lint1 = temp;
564
565	if (bootverbose)
566		apic_dump("bsp_apic_configure()");
567}
568#endif  /* APIC_IO */
569
570
571/*******************************************************************
572 * local functions and data
573 */
574
575/*
576 * start the SMP system
577 */
578static void
579mp_enable(u_int boot_addr)
580{
581	int     x;
582#if defined(APIC_IO)
583	int     apic;
584	u_int   ux;
585#endif	/* APIC_IO */
586
587	POSTCODE(MP_ENABLE_POST);
588
589	/* turn on 4MB of V == P addressing so we can get to MP table */
590	*(int *)PTD = PG_V | PG_RW | ((uintptr_t)(void *)KPTphys & PG_FRAME);
591	invltlb();
592
593	/* examine the MP table for needed info, uses physical addresses */
594	x = mptable_pass2();
595
596	*(int *)PTD = 0;
597	invltlb();
598
599	/* can't process default configs till the CPU APIC is pmapped */
600	if (x)
601		default_mp_table(x);
602
603	/* post scan cleanup */
604	fix_mp_table();
605	setup_apic_irq_mapping();
606
607#if defined(APIC_IO)
608
609	/* fill the LOGICAL io_apic_versions table */
610	for (apic = 0; apic < mp_napics; ++apic) {
611		ux = io_apic_read(apic, IOAPIC_VER);
612		io_apic_versions[apic] = ux;
613		io_apic_set_id(apic, IO_TO_ID(apic));
614	}
615
616	/* program each IO APIC in the system */
617	for (apic = 0; apic < mp_napics; ++apic)
618		if (io_apic_setup(apic) < 0)
619			panic("IO APIC setup failure");
620
621	/* install a 'Spurious INTerrupt' vector */
622	setidt(XSPURIOUSINT_OFFSET, Xspuriousint,
623	       SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
624
625	/* install an inter-CPU IPI for TLB invalidation */
626	setidt(XINVLTLB_OFFSET, Xinvltlb,
627	       SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
628
629#ifdef BETTER_CLOCK
630	/* install an inter-CPU IPI for reading processor state */
631	setidt(XCPUCHECKSTATE_OFFSET, Xcpucheckstate,
632	       SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
633#endif
634
635	/* install an inter-CPU IPI for all-CPU rendezvous */
636	setidt(XRENDEZVOUS_OFFSET, Xrendezvous,
637	       SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
638
639	/* install an inter-CPU IPI for forcing an additional software trap */
640	setidt(XCPUAST_OFFSET, Xcpuast,
641	       SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
642
643	/* install an inter-CPU IPI for CPU stop/restart */
644	setidt(XCPUSTOP_OFFSET, Xcpustop,
645	       SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
646
647#if defined(TEST_TEST1)
648	/* install a "fake hardware INTerrupt" vector */
649	setidt(XTEST1_OFFSET, Xtest1,
650	       SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
651#endif  /** TEST_TEST1 */
652
653#endif	/* APIC_IO */
654
655	/* initialize all SMP locks */
656	init_locks();
657
658	/* obtain the ap_boot_lock */
659	s_lock(&ap_boot_lock);
660
661	/* start each Application Processor */
662	start_all_aps(boot_addr);
663}
664
665
666/*
667 * look for the MP spec signature
668 */
669
670/* string defined by the Intel MP Spec as identifying the MP table */
671#define MP_SIG		0x5f504d5f	/* _MP_ */
672#define NEXT(X)		((X) += 4)
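/*
 * The MP spec requires the floating pointer structure to start on a
 * 16-byte boundary, so scanning the region in 4-byte steps (NEXT above)
 * cannot miss the "_MP_" signature.
 */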
673static int
674search_for_sig(u_int32_t target, int count)
675{
676	int     x;
677	u_int32_t *addr = (u_int32_t *) (KERNBASE + target);
678
679	for (x = 0; x < count; NEXT(x))
680		if (addr[x] == MP_SIG)
681			/* make array index a byte index */
682			return (target + (x * sizeof(u_int32_t)));
683
684	return -1;
685}
686
687
688static basetable_entry basetable_entry_types[] =
689{
690	{0, 20, "Processor"},
691	{1, 8, "Bus"},
692	{2, 8, "I/O APIC"},
693	{3, 8, "I/O INT"},
694	{4, 8, "Local INT"}
695};
696
697typedef struct BUSDATA {
698	u_char  bus_id;
699	enum busTypes bus_type;
700}       bus_datum;
701
702typedef struct INTDATA {
703	u_char  int_type;
704	u_short int_flags;
705	u_char  src_bus_id;
706	u_char  src_bus_irq;
707	u_char  dst_apic_id;
708	u_char  dst_apic_int;
709	u_char	int_vector;
710}       io_int, local_int;
711
712typedef struct BUSTYPENAME {
713	u_char  type;
714	char    name[7];
715}       bus_type_name;
716
717static bus_type_name bus_type_table[] =
718{
719	{CBUS, "CBUS"},
720	{CBUSII, "CBUSII"},
721	{EISA, "EISA"},
722	{MCA, "MCA"},
723	{UNKNOWN_BUSTYPE, "---"},
724	{ISA, "ISA"},
725	{MCA, "MCA"},
726	{UNKNOWN_BUSTYPE, "---"},
727	{UNKNOWN_BUSTYPE, "---"},
728	{UNKNOWN_BUSTYPE, "---"},
729	{UNKNOWN_BUSTYPE, "---"},
730	{UNKNOWN_BUSTYPE, "---"},
731	{PCI, "PCI"},
732	{UNKNOWN_BUSTYPE, "---"},
733	{UNKNOWN_BUSTYPE, "---"},
734	{UNKNOWN_BUSTYPE, "---"},
735	{UNKNOWN_BUSTYPE, "---"},
736	{XPRESS, "XPRESS"},
737	{UNKNOWN_BUSTYPE, "---"}
738};
739/* from MP spec v1.4, table 5-1 */
740static int default_data[7][5] =
741{
742/*   nbus, id0, type0, id1, type1 */
743	{1, 0, ISA, 255, 255},
744	{1, 0, EISA, 255, 255},
745	{1, 0, EISA, 255, 255},
746	{1, 0, MCA, 255, 255},
747	{2, 0, ISA, 1, PCI},
748	{2, 0, EISA, 1, PCI},
749	{2, 0, MCA, 1, PCI}
750};
751
752
753/* the bus data */
754static bus_datum *bus_data;
755
756/* the IO INT data, one entry per possible APIC INTerrupt */
757static io_int  *io_apic_ints;
758
759static int nintrs;
760
761static int processor_entry	__P((proc_entry_ptr entry, int cpu));
762static int bus_entry		__P((bus_entry_ptr entry, int bus));
763static int io_apic_entry	__P((io_apic_entry_ptr entry, int apic));
764static int int_entry		__P((int_entry_ptr entry, int intr));
765static int lookup_bus_type	__P((char *name));
766
767
768/*
769 * 1st pass on the motherboard's Intel MP specification table.
770 *
771 * initializes:
772 *	mp_ncpus = 1
773 *
774 * determines:
775 *	cpu_apic_address (common to all CPUs)
776 *	io_apic_address[N]
777 *	mp_naps
778 *	mp_nbusses
779 *	mp_napics
780 *	nintrs
781 */
782static void
783mptable_pass1(void)
784{
785	int	x;
786	mpcth_t	cth;
787	int	totalSize;
788	void*	position;
789	int	count;
790	int	type;
791
792	POSTCODE(MPTABLE_PASS1_POST);
793
794	/* clear various tables */
795	for (x = 0; x < NAPICID; ++x) {
796		io_apic_address[x] = ~0;	/* IO APIC address table */
797	}
798
799	/* init everything to empty */
800	mp_naps = 0;
801	mp_nbusses = 0;
802	mp_napics = 0;
803	nintrs = 0;
804
805	/* check for use of 'default' configuration */
806	if (MPFPS_MPFB1 != 0) {
807		/* use default addresses */
808		cpu_apic_address = DEFAULT_APIC_BASE;
809		io_apic_address[0] = DEFAULT_IO_APIC_BASE;
810
811		/* fill in with defaults */
812		mp_naps = 2;		/* includes BSP */
813		mp_nbusses = default_data[MPFPS_MPFB1 - 1][0];
814#if defined(APIC_IO)
815		mp_napics = 1;
816		nintrs = 16;
817#endif	/* APIC_IO */
818	}
819	else {
820		if ((cth = mpfps->pap) == 0)
821			panic("MP Configuration Table Header MISSING!");
822
823		cpu_apic_address = (vm_offset_t) cth->apic_address;
824
825		/* walk the table, recording info of interest */
826		totalSize = cth->base_table_length - sizeof(struct MPCTH);
827		position = (u_char *) cth + sizeof(struct MPCTH);
828		count = cth->entry_count;
829
830		while (count--) {
831			switch (type = *(u_char *) position) {
832			case 0: /* processor_entry */
833				if (((proc_entry_ptr)position)->cpu_flags
834					& PROCENTRY_FLAG_EN)
835					++mp_naps;
836				break;
837			case 1: /* bus_entry */
838				++mp_nbusses;
839				break;
840			case 2: /* io_apic_entry */
841				if (((io_apic_entry_ptr)position)->apic_flags
842					& IOAPICENTRY_FLAG_EN)
843					io_apic_address[mp_napics++] =
844					    (vm_offset_t)((io_apic_entry_ptr)
845						position)->apic_address;
846				break;
847			case 3: /* int_entry */
848				++nintrs;
849				break;
850			case 4:	/* local int entry */
851				break;
852			default:
853				panic("mpfps Base Table HOSED!");
854				/* NOTREACHED */
855			}
856
857			totalSize -= basetable_entry_types[type].length;
858			(u_char*)position += basetable_entry_types[type].length;
859		}
860	}
861
862	/* qualify the numbers */
863	if (mp_naps > MAXCPU) {
864		printf("Warning: only using %d of %d available CPUs!\n",
865			MAXCPU, mp_naps);
866		mp_naps = MAXCPU;
867	}
868
869	/*
870	 * Count the BSP.
871	 * This is also used as a counter while starting the APs.
872	 */
873	mp_ncpus = 1;
874
875	--mp_naps;	/* subtract the BSP */
876}
877
878
879/*
880 * 2nd pass on the motherboard's Intel MP specification table.
881 *
882 * sets:
883 *	boot_cpu_id
884 *	ID_TO_IO(N), phy APIC ID to log CPU/IO table
885 *	CPU_TO_ID(N), logical CPU to APIC ID table
886 *	IO_TO_ID(N), logical IO to APIC ID table
887 *	bus_data[N]
888 *	io_apic_ints[N]
889 */
890static int
891mptable_pass2(void)
892{
893	int     x;
894	mpcth_t cth;
895	int     totalSize;
896	void*   position;
897	int     count;
898	int     type;
899	int     apic, bus, cpu, intr;
900	int	i, j;
901	int	pgeflag;
902
903	POSTCODE(MPTABLE_PASS2_POST);
904
905	pgeflag = 0;		/* XXX - Not used under SMP yet.  */
906
907	MALLOC(io_apic_versions, u_int32_t *, sizeof(u_int32_t) * mp_napics,
908	    M_DEVBUF, M_WAITOK);
909	MALLOC(ioapic, volatile ioapic_t **, sizeof(ioapic_t *) * mp_napics,
910	    M_DEVBUF, M_WAITOK);
911	MALLOC(io_apic_ints, io_int *, sizeof(io_int) * (nintrs + 1),
912	    M_DEVBUF, M_WAITOK);
913	MALLOC(bus_data, bus_datum *, sizeof(bus_datum) * mp_nbusses,
914	    M_DEVBUF, M_WAITOK);
915
916	bzero(ioapic, sizeof(ioapic_t *) * mp_napics);
917
918	for (i = 0; i < mp_napics; i++) {
919		for (j = 0; j < mp_napics; j++) {
920			/* same page frame as a previous IO apic? */
921			if (((vm_offset_t)SMPpt[NPTEPG-2-j] & PG_FRAME) ==
922			    (io_apic_address[i] & PG_FRAME)) {
923				ioapic[i] = (ioapic_t *)((u_int)SMP_prvspace
924					+ (NPTEPG-2-j) * PAGE_SIZE
925					+ (io_apic_address[i] & PAGE_MASK));
926				break;
927			}
928			/* use this slot if available */
929			if (((vm_offset_t)SMPpt[NPTEPG-2-j] & PG_FRAME) == 0) {
930				SMPpt[NPTEPG-2-j] = (pt_entry_t)(PG_V | PG_RW |
931				    pgeflag | (io_apic_address[i] & PG_FRAME));
932				ioapic[i] = (ioapic_t *)((u_int)SMP_prvspace
933					+ (NPTEPG-2-j) * PAGE_SIZE
934					+ (io_apic_address[i] & PAGE_MASK));
935				break;
936			}
937		}
938	}
939
940	/* clear various tables */
941	for (x = 0; x < NAPICID; ++x) {
942		ID_TO_IO(x) = -1;	/* phy APIC ID to log CPU/IO table */
943		CPU_TO_ID(x) = -1;	/* logical CPU to APIC ID table */
944		IO_TO_ID(x) = -1;	/* logical IO to APIC ID table */
945	}
946
947	/* clear bus data table */
948	for (x = 0; x < mp_nbusses; ++x)
949		bus_data[x].bus_id = 0xff;
950
951	/* clear IO APIC INT table */
952	for (x = 0; x < (nintrs + 1); ++x) {
953		io_apic_ints[x].int_type = 0xff;
954		io_apic_ints[x].int_vector = 0xff;
955	}
956
957	/* setup the cpu/apic mapping arrays */
958	boot_cpu_id = -1;
959
960	/* record whether PIC or virtual-wire mode */
961	picmode = (mpfps->mpfb2 & 0x80) ? 1 : 0;
962
963	/* check for use of 'default' configuration */
964	if (MPFPS_MPFB1 != 0)
965		return MPFPS_MPFB1;	/* return default configuration type */
966
967	if ((cth = mpfps->pap) == 0)
968		panic("MP Configuration Table Header MISSING!");
969
970	/* walk the table, recording info of interest */
971	totalSize = cth->base_table_length - sizeof(struct MPCTH);
972	position = (u_char *) cth + sizeof(struct MPCTH);
973	count = cth->entry_count;
974	apic = bus = intr = 0;
975	cpu = 1;				/* pre-count the BSP */
976
977	while (count--) {
978		switch (type = *(u_char *) position) {
979		case 0:
980			if (processor_entry(position, cpu))
981				++cpu;
982			break;
983		case 1:
984			if (bus_entry(position, bus))
985				++bus;
986			break;
987		case 2:
988			if (io_apic_entry(position, apic))
989				++apic;
990			break;
991		case 3:
992			if (int_entry(position, intr))
993				++intr;
994			break;
995		case 4:
996			/* int_entry(position); */
997			break;
998		default:
999			panic("mpfps Base Table HOSED!");
1000			/* NOTREACHED */
1001		}
1002
1003		totalSize -= basetable_entry_types[type].length;
1004		(u_char *) position += basetable_entry_types[type].length;
1005	}
1006
1007	if (boot_cpu_id == -1)
1008		panic("NO BSP found!");
1009
1010	/* report the fact that it's NOT a default configuration */
1011	return 0;
1012}
1013
1014
1015void
1016assign_apic_irq(int apic, int intpin, int irq)
1017{
1018	int x;
1019
1020	if (int_to_apicintpin[irq].ioapic != -1)
1021		panic("assign_apic_irq: inconsistent table");
1022
1023	int_to_apicintpin[irq].ioapic = apic;
1024	int_to_apicintpin[irq].int_pin = intpin;
1025	int_to_apicintpin[irq].apic_address = ioapic[apic];
1026	int_to_apicintpin[irq].redirindex = IOAPIC_REDTBL + 2 * intpin;
1027
1028	for (x = 0; x < nintrs; x++) {
1029		if ((io_apic_ints[x].int_type == 0 ||
1030		     io_apic_ints[x].int_type == 3) &&
1031		    io_apic_ints[x].int_vector == 0xff &&
1032		    io_apic_ints[x].dst_apic_id == IO_TO_ID(apic) &&
1033		    io_apic_ints[x].dst_apic_int == intpin)
1034			io_apic_ints[x].int_vector = irq;
1035	}
1036}
1037
1038void
1039revoke_apic_irq(int irq)
1040{
1041	int x;
1042	int oldapic;
1043	int oldintpin;
1044
1045	if (int_to_apicintpin[irq].ioapic == -1)
1046		panic("assign_apic_irq: inconsistent table");
1047
1048	oldapic = int_to_apicintpin[irq].ioapic;
1049	oldintpin = int_to_apicintpin[irq].int_pin;
1050
1051	int_to_apicintpin[irq].ioapic = -1;
1052	int_to_apicintpin[irq].int_pin = 0;
1053	int_to_apicintpin[irq].apic_address = NULL;
1054	int_to_apicintpin[irq].redirindex = 0;
1055
1056	for (x = 0; x < nintrs; x++) {
1057		if ((io_apic_ints[x].int_type == 0 ||
1058		     io_apic_ints[x].int_type == 3) &&
1059		    io_apic_ints[x].int_vector == 0xff &&
1060		    io_apic_ints[x].dst_apic_id == IO_TO_ID(oldapic) &&
1061		    io_apic_ints[x].dst_apic_int == oldintpin)
1062			io_apic_ints[x].int_vector = 0xff;
1063	}
1064}
1065
1066
1067
1068static void
1069swap_apic_id(int apic, int oldid, int newid)
1070{
1071	int x;
1072	int oapic;
1073
1074
1075	if (oldid == newid)
1076		return;			/* Nothing to do */
1077
1078	printf("Changing APIC ID for IO APIC #%d from %d to %d in MP table\n",
1079	       apic, oldid, newid);
1080
1081	/* Swap physical APIC IDs in interrupt entries */
1082	for (x = 0; x < nintrs; x++) {
1083		if (io_apic_ints[x].dst_apic_id == oldid)
1084			io_apic_ints[x].dst_apic_id = newid;
1085		else if (io_apic_ints[x].dst_apic_id == newid)
1086			io_apic_ints[x].dst_apic_id = oldid;
1087	}
1088
1089	/* Swap physical APIC IDs in IO_TO_ID mappings */
1090	for (oapic = 0; oapic < mp_napics; oapic++)
1091		if (IO_TO_ID(oapic) == newid)
1092			break;
1093
1094	if (oapic < mp_napics) {
1095		printf("Changing APIC ID for IO APIC #%d from "
1096		       "%d to %d in MP table\n",
1097		       oapic, newid, oldid);
1098		IO_TO_ID(oapic) = oldid;
1099	}
1100	IO_TO_ID(apic) = newid;
1101}
1102
1103
1104static void
1105fix_id_to_io_mapping(void)
1106{
1107	int x;
1108
1109	for (x = 0; x < NAPICID; x++)
1110		ID_TO_IO(x) = -1;
1111
1112	for (x = 0; x <= mp_naps; x++)
1113		if (CPU_TO_ID(x) < NAPICID)
1114			ID_TO_IO(CPU_TO_ID(x)) = x;
1115
1116	for (x = 0; x < mp_napics; x++)
1117		if (IO_TO_ID(x) < NAPICID)
1118			ID_TO_IO(IO_TO_ID(x)) = x;
1119}
1120
1121
1122static int
1123first_free_apic_id(void)
1124{
1125	int freeid, x;
1126
1127	for (freeid = 0; freeid < NAPICID; freeid++) {
1128		for (x = 0; x <= mp_naps; x++)
1129			if (CPU_TO_ID(x) == freeid)
1130				break;
1131		if (x <= mp_naps)
1132			continue;
1133		for (x = 0; x < mp_napics; x++)
1134			if (IO_TO_ID(x) == freeid)
1135				break;
1136		if (x < mp_napics)
1137			continue;
1138		return freeid;
1139	}
1140	return freeid;
1141}
1142
1143
1144static int
1145io_apic_id_acceptable(int apic, int id)
1146{
1147	int cpu;		/* Logical CPU number */
1148	int oapic;		/* Logical IO APIC number for other IO APIC */
1149
1150	if (id >= NAPICID)
1151		return 0;	/* Out of range */
1152
1153	for (cpu = 0; cpu <= mp_naps; cpu++)
1154		if (CPU_TO_ID(cpu) == id)
1155			return 0;	/* Conflict with CPU */
1156
1157	for (oapic = 0; oapic < mp_napics && oapic < apic; oapic++)
1158		if (IO_TO_ID(oapic) == id)
1159			return 0;	/* Conflict with other APIC */
1160
1161	return 1;		/* ID is acceptable for IO APIC */
1162}
1163
1164
1165/*
1166 * check for and fix common problems in the Intel MP specification table
1167 */
1168static void
1169fix_mp_table(void)
1170{
1171	int	x;
1172	int	id;
1173	int	bus_0 = 0;	/* Stop GCC warning */
1174	int	bus_pci = 0;	/* Stop GCC warning */
1175	int	num_pci_bus;
1176	int	apic;		/* IO APIC unit number */
1177	int     freeid;		/* Free physical APIC ID */
1178	int	physid;		/* Current physical IO APIC ID */
1179
1180	/*
1181	 * Fix mis-numbering of the PCI bus and its INT entries if the BIOS
1182	 * did it wrong.  The MP spec says that when more than 1 PCI bus
1183	 * exists the BIOS must begin with bus entries for the PCI bus and use
1184	 * actual PCI bus numbering.  This implies that when only 1 PCI bus
1185	 * exists the BIOS can choose to ignore this ordering, and indeed many
1186	 * MP motherboards do ignore it.  This causes a problem when the PCI
1187	 * sub-system makes requests of the MP sub-system based on PCI bus
1188	 * numbers.	So here we look for the situation and renumber the
1189	 * busses and associated INTs in an effort to "make it right".
1190	 */
1191
1192	/* find bus 0, PCI bus, count the number of PCI busses */
1193	for (num_pci_bus = 0, x = 0; x < mp_nbusses; ++x) {
1194		if (bus_data[x].bus_id == 0) {
1195			bus_0 = x;
1196		}
1197		if (bus_data[x].bus_type == PCI) {
1198			++num_pci_bus;
1199			bus_pci = x;
1200		}
1201	}
1202	/*
1203	 * bus_0 == slot of bus with ID of 0
1204	 * bus_pci == slot of last PCI bus encountered
1205	 */
1206
1207	/* check the 1 PCI bus case for sanity */
1208	/* if it is number 0 all is well */
1209	if (num_pci_bus == 1 &&
1210	    bus_data[bus_pci].bus_id != 0) {
1211
1212		/* mis-numbered, swap with whichever bus uses slot 0 */
1213
1214		/* swap the bus entry types */
1215		bus_data[bus_pci].bus_type = bus_data[bus_0].bus_type;
1216		bus_data[bus_0].bus_type = PCI;
1217
1218		/* swap each relevant INTerrupt entry */
1219		id = bus_data[bus_pci].bus_id;
1220		for (x = 0; x < nintrs; ++x) {
1221			if (io_apic_ints[x].src_bus_id == id) {
1222				io_apic_ints[x].src_bus_id = 0;
1223			}
1224			else if (io_apic_ints[x].src_bus_id == 0) {
1225				io_apic_ints[x].src_bus_id = id;
1226			}
1227		}
1228	}
1229
1230	/* Assign IO APIC IDs.
1231	 *
1232	 * First try the existing ID. If a conflict is detected, try
1233	 * the ID in the MP table.  If a conflict is still detected, find
1234	 * a free id.
1235	 *
1236	 * We cannot use the ID_TO_IO table before all conflicts have been
1237	 * resolved and the table has been corrected.
1238	 */
1239	for (apic = 0; apic < mp_napics; ++apic) { /* For all IO APICs */
1240
1241		/* First try to use the value set by the BIOS */
1242		physid = io_apic_get_id(apic);
1243		if (io_apic_id_acceptable(apic, physid)) {
1244			if (IO_TO_ID(apic) != physid)
1245				swap_apic_id(apic, IO_TO_ID(apic), physid);
1246			continue;
1247		}
1248
1249		/* Then check if the value in the MP table is acceptable */
1250		if (io_apic_id_acceptable(apic, IO_TO_ID(apic)))
1251			continue;
1252
1253		/* Last resort, find a free APIC ID and use it */
1254		freeid = first_free_apic_id();
1255		if (freeid >= NAPICID)
1256			panic("No free physical APIC IDs found");
1257
1258		if (io_apic_id_acceptable(apic, freeid)) {
1259			swap_apic_id(apic, IO_TO_ID(apic), freeid);
1260			continue;
1261		}
1262		panic("Free physical APIC ID not usable");
1263	}
1264	fix_id_to_io_mapping();
1265
1266	/* detect and fix broken Compaq MP table */
1267	if (apic_int_type(0, 0) == -1) {
1268		printf("APIC_IO: MP table broken: 8259->APIC entry missing!\n");
1269		io_apic_ints[nintrs].int_type = 3;	/* ExtInt */
1270		io_apic_ints[nintrs].int_vector = 0xff;	/* Unassigned */
1271		/* XXX fixme, set src bus id etc, but it doesn't seem to hurt */
1272		io_apic_ints[nintrs].dst_apic_id = IO_TO_ID(0);
1273		io_apic_ints[nintrs].dst_apic_int = 0;	/* Pin 0 */
1274		nintrs++;
1275	}
1276}
1277
1278
1279/* Assign low level interrupt handlers */
1280static void
1281setup_apic_irq_mapping(void)
1282{
1283	int	x;
1284	int	int_vector;
1285
1286	/* Clear array */
1287	for (x = 0; x < APIC_INTMAPSIZE; x++) {
1288		int_to_apicintpin[x].ioapic = -1;
1289		int_to_apicintpin[x].int_pin = 0;
1290		int_to_apicintpin[x].apic_address = NULL;
1291		int_to_apicintpin[x].redirindex = 0;
1292	}
1293
1294	/* First assign ISA/EISA interrupts */
1295	for (x = 0; x < nintrs; x++) {
1296		int_vector = io_apic_ints[x].src_bus_irq;
1297		if (int_vector < APIC_INTMAPSIZE &&
1298		    io_apic_ints[x].int_vector == 0xff &&
1299		    int_to_apicintpin[int_vector].ioapic == -1 &&
1300		    (apic_int_is_bus_type(x, ISA) ||
1301		     apic_int_is_bus_type(x, EISA)) &&
1302		    io_apic_ints[x].int_type == 0) {
1303			assign_apic_irq(ID_TO_IO(io_apic_ints[x].dst_apic_id),
1304					io_apic_ints[x].dst_apic_int,
1305					int_vector);
1306		}
1307	}
1308
1309	/* Assign first set of interrupts to intpins on IOAPIC #0 */
1310	for (x = 0; x < nintrs; x++) {
1311		int_vector = io_apic_ints[x].dst_apic_int;
1312		if (int_vector < APIC_INTMAPSIZE &&
1313		    io_apic_ints[x].dst_apic_id == IO_TO_ID(0) &&
1314		    io_apic_ints[x].int_vector == 0xff &&
1315		    int_to_apicintpin[int_vector].ioapic == -1 &&
1316		    (io_apic_ints[x].int_type == 0 ||
1317		     io_apic_ints[x].int_type == 3)) {
1318			assign_apic_irq(0,
1319					io_apic_ints[x].dst_apic_int,
1320					int_vector);
1321		}
1322	}
1323	/*
1324	 * Assign interrupts for remaining intpins.
1325	 * Skip IOAPIC #0 intpin 0 if the type is ExtInt, since this indicates
1326	 * that an entry for ISA/EISA irq 0 exists, and a fallback to mixed mode
1327	 * due to 8254 interrupts not being delivered can reuse that low level
1328	 * interrupt handler.
1329	 */
1330	int_vector = 0;
1331	while (int_vector < APIC_INTMAPSIZE &&
1332	       int_to_apicintpin[int_vector].ioapic != -1)
1333		int_vector++;
1334	for (x = 0; x < nintrs && int_vector < APIC_INTMAPSIZE; x++) {
1335		if ((io_apic_ints[x].int_type == 0 ||
1336		     (io_apic_ints[x].int_type == 3 &&
1337		      (io_apic_ints[x].dst_apic_id != IO_TO_ID(0) ||
1338		       io_apic_ints[x].dst_apic_int != 0))) &&
1339		    io_apic_ints[x].int_vector == 0xff) {
1340			assign_apic_irq(ID_TO_IO(io_apic_ints[x].dst_apic_id),
1341					io_apic_ints[x].dst_apic_int,
1342					int_vector);
1343			int_vector++;
1344			while (int_vector < APIC_INTMAPSIZE &&
1345			       int_to_apicintpin[int_vector].ioapic != -1)
1346				int_vector++;
1347		}
1348	}
1349}
1350
1351
1352static int
1353processor_entry(proc_entry_ptr entry, int cpu)
1354{
1355	/* check for usability */
1356	if (!(entry->cpu_flags & PROCENTRY_FLAG_EN))
1357		return 0;
1358
1359	if(entry->apic_id >= NAPICID)
1360		panic("CPU APIC ID out of range (0..%d)", NAPICID - 1);
1361	/* check for BSP flag */
1362	if (entry->cpu_flags & PROCENTRY_FLAG_BP) {
1363		boot_cpu_id = entry->apic_id;
1364		CPU_TO_ID(0) = entry->apic_id;
1365		ID_TO_CPU(entry->apic_id) = 0;
1366		return 0;	/* it's already been counted */
1367	}
1368
1369	/* add another AP to list, if less than max number of CPUs */
1370	else if (cpu < MAXCPU) {
1371		CPU_TO_ID(cpu) = entry->apic_id;
1372		ID_TO_CPU(entry->apic_id) = cpu;
1373		return 1;
1374	}
1375
1376	return 0;
1377}
1378
1379
1380static int
1381bus_entry(bus_entry_ptr entry, int bus)
1382{
1383	int     x;
1384	char    c, name[8];
1385
1386	/* encode the name into an index */
1387	for (x = 0; x < 6; ++x) {
1388		if ((c = entry->bus_type[x]) == ' ')
1389			break;
1390		name[x] = c;
1391	}
1392	name[x] = '\0';
1393
1394	if ((x = lookup_bus_type(name)) == UNKNOWN_BUSTYPE)
1395		panic("unknown bus type: '%s'", name);
1396
1397	bus_data[bus].bus_id = entry->bus_id;
1398	bus_data[bus].bus_type = x;
1399
1400	return 1;
1401}
1402
1403
1404static int
1405io_apic_entry(io_apic_entry_ptr entry, int apic)
1406{
1407	if (!(entry->apic_flags & IOAPICENTRY_FLAG_EN))
1408		return 0;
1409
1410	IO_TO_ID(apic) = entry->apic_id;
1411	if (entry->apic_id < NAPICID)
1412		ID_TO_IO(entry->apic_id) = apic;
1413
1414	return 1;
1415}
1416
1417
1418static int
1419lookup_bus_type(char *name)
1420{
1421	int     x;
1422
1423	for (x = 0; x < MAX_BUSTYPE; ++x)
1424		if (strcmp(bus_type_table[x].name, name) == 0)
1425			return bus_type_table[x].type;
1426
1427	return UNKNOWN_BUSTYPE;
1428}
1429
1430
1431static int
1432int_entry(int_entry_ptr entry, int intr)
1433{
1434	int apic;
1435
1436	io_apic_ints[intr].int_type = entry->int_type;
1437	io_apic_ints[intr].int_flags = entry->int_flags;
1438	io_apic_ints[intr].src_bus_id = entry->src_bus_id;
1439	io_apic_ints[intr].src_bus_irq = entry->src_bus_irq;
1440	if (entry->dst_apic_id == 255) {
1441		/* This signal goes to all IO APICs.  Select an IO APIC
1442		   with a sufficient number of interrupt pins */
1443		for (apic = 0; apic < mp_napics; apic++)
1444			if (((io_apic_read(apic, IOAPIC_VER) &
1445			      IOART_VER_MAXREDIR) >> MAXREDIRSHIFT) >=
1446			    entry->dst_apic_int)
1447				break;
1448		if (apic < mp_napics)
1449			io_apic_ints[intr].dst_apic_id = IO_TO_ID(apic);
1450		else
1451			io_apic_ints[intr].dst_apic_id = entry->dst_apic_id;
1452	} else
1453		io_apic_ints[intr].dst_apic_id = entry->dst_apic_id;
1454	io_apic_ints[intr].dst_apic_int = entry->dst_apic_int;
1455
1456	return 1;
1457}
1458
1459
1460static int
1461apic_int_is_bus_type(int intr, int bus_type)
1462{
1463	int     bus;
1464
1465	for (bus = 0; bus < mp_nbusses; ++bus)
1466		if ((bus_data[bus].bus_id == io_apic_ints[intr].src_bus_id)
1467		    && ((int) bus_data[bus].bus_type == bus_type))
1468			return 1;
1469
1470	return 0;
1471}
1472
1473
1474/*
1475 * Given a traditional ISA INT mask, return an APIC mask.
1476 */
1477u_int
1478isa_apic_mask(u_int isa_mask)
1479{
1480	int isa_irq;
1481	int apic_pin;
1482
1483#if defined(SKIP_IRQ15_REDIRECT)
1484	if (isa_mask == (1 << 15)) {
1485		printf("skipping ISA IRQ15 redirect\n");
1486		return isa_mask;
1487	}
1488#endif  /* SKIP_IRQ15_REDIRECT */
1489
1490	isa_irq = ffs(isa_mask);		/* find its bit position */
1491	if (isa_irq == 0)			/* doesn't exist */
1492		return 0;
1493	--isa_irq;				/* make it zero based */
1494
1495	apic_pin = isa_apic_irq(isa_irq);	/* look for APIC connection */
1496	if (apic_pin == -1)
1497		return 0;
1498
1499	return (1 << apic_pin);			/* convert pin# to a mask */
1500}
1501
1502
1503/*
1504 * Determine which APIC pin an ISA/EISA INT is attached to.
1505 */
1506#define INTTYPE(I)	(io_apic_ints[(I)].int_type)
1507#define INTPIN(I)	(io_apic_ints[(I)].dst_apic_int)
1508#define INTIRQ(I)	(io_apic_ints[(I)].int_vector)
1509#define INTAPIC(I)	(ID_TO_IO(io_apic_ints[(I)].dst_apic_id))
1510
1511#define SRCBUSIRQ(I)	(io_apic_ints[(I)].src_bus_irq)
1512int
1513isa_apic_irq(int isa_irq)
1514{
1515	int     intr;
1516
1517	for (intr = 0; intr < nintrs; ++intr) {		/* check each record */
1518		if (INTTYPE(intr) == 0) {		/* standard INT */
1519			if (SRCBUSIRQ(intr) == isa_irq) {
1520				if (apic_int_is_bus_type(intr, ISA) ||
1521			            apic_int_is_bus_type(intr, EISA))
1522					return INTIRQ(intr);	/* found */
1523			}
1524		}
1525	}
1526	return -1;					/* NOT found */
1527}
1528
1529
1530/*
1531 * Determine which APIC pin a PCI INT is attached to.
1532 */
1533#define SRCBUSID(I)	(io_apic_ints[(I)].src_bus_id)
1534#define SRCBUSDEVICE(I)	((io_apic_ints[(I)].src_bus_irq >> 2) & 0x1f)
1535#define SRCBUSLINE(I)	(io_apic_ints[(I)].src_bus_irq & 0x03)
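/*
 * For PCI source buses the MP spec packs the interrupt source into the
 * src_bus_irq field: bits 0-1 select the INT line (INTA#..INTD#) and
 * bits 2-6 hold the PCI device number, which the two macros above decode.
 */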
1536int
1537pci_apic_irq(int pciBus, int pciDevice, int pciInt)
1538{
1539	int     intr;
1540
1541	--pciInt;					/* zero based */
1542
1543	for (intr = 0; intr < nintrs; ++intr)		/* check each record */
1544		if ((INTTYPE(intr) == 0)		/* standard INT */
1545		    && (SRCBUSID(intr) == pciBus)
1546		    && (SRCBUSDEVICE(intr) == pciDevice)
1547		    && (SRCBUSLINE(intr) == pciInt))	/* a candidate IRQ */
1548			if (apic_int_is_bus_type(intr, PCI))
1549				return INTIRQ(intr);	/* exact match */
1550
1551	return -1;					/* NOT found */
1552}
1553
1554int
1555next_apic_irq(int irq)
1556{
1557	int intr, ointr;
1558	int bus, bustype;
1559
1560	bus = 0;
1561	bustype = 0;
1562	for (intr = 0; intr < nintrs; intr++) {
1563		if (INTIRQ(intr) != irq || INTTYPE(intr) != 0)
1564			continue;
1565		bus = SRCBUSID(intr);
1566		bustype = apic_bus_type(bus);
1567		if (bustype != ISA &&
1568		    bustype != EISA &&
1569		    bustype != PCI)
1570			continue;
1571		break;
1572	}
1573	if (intr >= nintrs) {
1574		return -1;
1575	}
1576	for (ointr = intr + 1; ointr < nintrs; ointr++) {
1577		if (INTTYPE(ointr) != 0)
1578			continue;
1579		if (bus != SRCBUSID(ointr))
1580			continue;
1581		if (bustype == PCI) {
1582			if (SRCBUSDEVICE(intr) != SRCBUSDEVICE(ointr))
1583				continue;
1584			if (SRCBUSLINE(intr) != SRCBUSLINE(ointr))
1585				continue;
1586		}
1587		if (bustype == ISA || bustype == EISA) {
1588			if (SRCBUSIRQ(intr) != SRCBUSIRQ(ointr))
1589				continue;
1590		}
1591		if (INTPIN(intr) == INTPIN(ointr))
1592			continue;
1593		break;
1594	}
1595	if (ointr >= nintrs) {
1596		return -1;
1597	}
1598	return INTIRQ(ointr);
1599}
1600#undef SRCBUSLINE
1601#undef SRCBUSDEVICE
1602#undef SRCBUSID
1603#undef SRCBUSIRQ
1604
1605#undef INTPIN
1606#undef INTIRQ
1607#undef INTAPIC
1608#undef INTTYPE
1609
1610
1611/*
1612 * Reprogram the MB chipset to NOT redirect an ISA INTerrupt.
1613 *
1614 * XXX FIXME:
1615 *  Exactly what this means is unclear at this point.  It is a solution
1616 *  for motherboards that redirect the MBIRQ0 pin.  Generically a motherboard
1617 *  could route any of the ISA INTs to upper (>15) IRQ values.  But most would
1618 *  NOT be redirected via MBIRQ0, thus "undirect()ing" them would NOT be an
1619 *  option.
1620 */
1621int
1622undirect_isa_irq(int rirq)
1623{
1624#if defined(READY)
1625	if (bootverbose)
1626	    printf("Freeing redirected ISA irq %d.\n", rirq);
1627	/** FIXME: tickle the MB redirector chip */
1628	return -1;
1629#else
1630	if (bootverbose)
1631	    printf("Freeing (NOT implemented) redirected ISA irq %d.\n", rirq);
1632	return 0;
1633#endif  /* READY */
1634}
1635
1636
1637/*
1638 * Reprogram the MB chipset to NOT redirect a PCI INTerrupt
1639 */
1640int
1641undirect_pci_irq(int rirq)
1642{
1643#if defined(READY)
1644	if (bootverbose)
1645		printf("Freeing redirected PCI irq %d.\n", rirq);
1646
1647	/** FIXME: tickle the MB redirector chip */
1648	return -1;
1649#else
1650	if (bootverbose)
1651		printf("Freeing (NOT implemented) redirected PCI irq %d.\n",
1652		       rirq);
1653	return 0;
1654#endif  /* READY */
1655}
1656
1657
1658/*
1659 * given a bus ID, return:
1660 *  the bus type if found
1661 *  -1 if NOT found
1662 */
1663int
1664apic_bus_type(int id)
1665{
1666	int     x;
1667
1668	for (x = 0; x < mp_nbusses; ++x)
1669		if (bus_data[x].bus_id == id)
1670			return bus_data[x].bus_type;
1671
1672	return -1;
1673}
1674
1675
1676/*
1677 * given a LOGICAL APIC# and pin#, return:
1678 *  the associated src bus ID if found
1679 *  -1 if NOT found
1680 */
1681int
1682apic_src_bus_id(int apic, int pin)
1683{
1684	int     x;
1685
1686	/* search each of the possible INTerrupt sources */
1687	for (x = 0; x < nintrs; ++x)
1688		if ((apic == ID_TO_IO(io_apic_ints[x].dst_apic_id)) &&
1689		    (pin == io_apic_ints[x].dst_apic_int))
1690			return (io_apic_ints[x].src_bus_id);
1691
1692	return -1;		/* NOT found */
1693}
1694
1695
1696/*
1697 * given a LOGICAL APIC# and pin#, return:
1698 *  the associated src bus IRQ if found
1699 *  -1 if NOT found
1700 */
1701int
1702apic_src_bus_irq(int apic, int pin)
1703{
1704	int     x;
1705
1706	for (x = 0; x < nintrs; x++)
1707		if ((apic == ID_TO_IO(io_apic_ints[x].dst_apic_id)) &&
1708		    (pin == io_apic_ints[x].dst_apic_int))
1709			return (io_apic_ints[x].src_bus_irq);
1710
1711	return -1;		/* NOT found */
1712}
1713
1714
1715/*
1716 * given a LOGICAL APIC# and pin#, return:
1717 *  the associated INTerrupt type if found
1718 *  -1 if NOT found
1719 */
1720int
1721apic_int_type(int apic, int pin)
1722{
1723	int     x;
1724
1725	/* search each of the possible INTerrupt sources */
1726	for (x = 0; x < nintrs; ++x)
1727		if ((apic == ID_TO_IO(io_apic_ints[x].dst_apic_id)) &&
1728		    (pin == io_apic_ints[x].dst_apic_int))
1729			return (io_apic_ints[x].int_type);
1730
1731	return -1;		/* NOT found */
1732}
1733
1734int
1735apic_irq(int apic, int pin)
1736{
1737	int x;
1738	int res;
1739
1740	for (x = 0; x < nintrs; ++x)
1741		if ((apic == ID_TO_IO(io_apic_ints[x].dst_apic_id)) &&
1742		    (pin == io_apic_ints[x].dst_apic_int)) {
1743			res = io_apic_ints[x].int_vector;
1744			if (res == 0xff)
1745				return -1;
1746			if (apic != int_to_apicintpin[res].ioapic)
1747				panic("apic_irq: inconsistent table");
1748			if (pin != int_to_apicintpin[res].int_pin)
1749				panic("apic_irq inconsistent table (2)");
1750			return res;
1751		}
1752	return -1;
1753}
1754
1755
1756/*
1757 * given a LOGICAL APIC# and pin#, return:
1758 *  the associated trigger mode if found
1759 *  -1 if NOT found
1760 */
1761int
1762apic_trigger(int apic, int pin)
1763{
1764	int     x;
1765
1766	/* search each of the possible INTerrupt sources */
1767	for (x = 0; x < nintrs; ++x)
1768		if ((apic == ID_TO_IO(io_apic_ints[x].dst_apic_id)) &&
1769		    (pin == io_apic_ints[x].dst_apic_int))
1770			return ((io_apic_ints[x].int_flags >> 2) & 0x03);
1771
1772	return -1;		/* NOT found */
1773}
1774
1775
1776/*
1777 * given a LOGICAL APIC# and pin#, return:
1778 *  the associated 'active' level if found
1779 *  -1 if NOT found
1780 */
1781int
1782apic_polarity(int apic, int pin)
1783{
1784	int     x;
1785
1786	/* search each of the possible INTerrupt sources */
1787	for (x = 0; x < nintrs; ++x)
1788		if ((apic == ID_TO_IO(io_apic_ints[x].dst_apic_id)) &&
1789		    (pin == io_apic_ints[x].dst_apic_int))
1790			return (io_apic_ints[x].int_flags & 0x03);
1791
1792	return -1;		/* NOT found */
1793}
1794
1795
1796/*
1797 * set data according to MP defaults
1798 * FIXME: probably not complete yet...
1799 */
1800static void
1801default_mp_table(int type)
1802{
1803	int     ap_cpu_id;
1804#if defined(APIC_IO)
1805	int     io_apic_id;
1806	int     pin;
1807#endif	/* APIC_IO */
1808
1809#if 0
1810	printf("  MP default config type: %d\n", type);
1811	switch (type) {
1812	case 1:
1813		printf("   bus: ISA, APIC: 82489DX\n");
1814		break;
1815	case 2:
1816		printf("   bus: EISA, APIC: 82489DX\n");
1817		break;
1818	case 3:
1819		printf("   bus: EISA, APIC: 82489DX\n");
1820		break;
1821	case 4:
1822		printf("   bus: MCA, APIC: 82489DX\n");
1823		break;
1824	case 5:
1825		printf("   bus: ISA+PCI, APIC: Integrated\n");
1826		break;
1827	case 6:
1828		printf("   bus: EISA+PCI, APIC: Integrated\n");
1829		break;
1830	case 7:
1831		printf("   bus: MCA+PCI, APIC: Integrated\n");
1832		break;
1833	default:
1834		printf("   future type\n");
1835		break;
1836		/* NOTREACHED */
1837	}
1838#endif	/* 0 */
1839
1840	boot_cpu_id = (lapic.id & APIC_ID_MASK) >> 24;
1841	ap_cpu_id = (boot_cpu_id == 0) ? 1 : 0;
1842
1843	/* BSP */
1844	CPU_TO_ID(0) = boot_cpu_id;
1845	ID_TO_CPU(boot_cpu_id) = 0;
1846
1847	/* one and only AP */
1848	CPU_TO_ID(1) = ap_cpu_id;
1849	ID_TO_CPU(ap_cpu_id) = 1;
1850
1851#if defined(APIC_IO)
1852	/* one and only IO APIC */
1853	io_apic_id = (io_apic_read(0, IOAPIC_ID) & APIC_ID_MASK) >> 24;
1854
1855	/*
1856	 * sanity check, refer to MP spec section 3.6.6, last paragraph;
1857	 * necessary as some hardware isn't properly setting up the IO APIC
1858	 */
1859#if defined(REALLY_ANAL_IOAPICID_VALUE)
1860	if (io_apic_id != 2) {
1861#else
1862	if ((io_apic_id == 0) || (io_apic_id == 1) || (io_apic_id == 15)) {
1863#endif	/* REALLY_ANAL_IOAPICID_VALUE */
1864		io_apic_set_id(0, 2);
1865		io_apic_id = 2;
1866	}
1867	IO_TO_ID(0) = io_apic_id;
1868	ID_TO_IO(io_apic_id) = 0;
1869#endif	/* APIC_IO */
1870
1871	/* fill out bus entries */
1872	switch (type) {
1873	case 1:
1874	case 2:
1875	case 3:
1876	case 4:
1877	case 5:
1878	case 6:
1879	case 7:
1880		bus_data[0].bus_id = default_data[type - 1][1];
1881		bus_data[0].bus_type = default_data[type - 1][2];
1882		bus_data[1].bus_id = default_data[type - 1][3];
1883		bus_data[1].bus_type = default_data[type - 1][4];
1884		break;
1885
1886	/* case 4: case 7:		   MCA NOT supported */
1887	default:		/* illegal/reserved */
1888		panic("BAD default MP config: %d", type);
1889		/* NOTREACHED */
1890	}
1891
1892#if defined(APIC_IO)
1893	/* general cases from MP v1.4, table 5-2 */
1894	for (pin = 0; pin < 16; ++pin) {
1895		io_apic_ints[pin].int_type = 0;
1896		io_apic_ints[pin].int_flags = 0x05;	/* edge/active-hi */
1897		io_apic_ints[pin].src_bus_id = 0;
1898		io_apic_ints[pin].src_bus_irq = pin;	/* IRQ2 caught below */
1899		io_apic_ints[pin].dst_apic_id = io_apic_id;
1900		io_apic_ints[pin].dst_apic_int = pin;	/* 1-to-1 */
1901	}
1902
1903	/* special cases from MP v1.4, table 5-2 */
1904	if (type == 2) {
1905		io_apic_ints[2].int_type = 0xff;	/* N/C */
1906		io_apic_ints[13].int_type = 0xff;	/* N/C */
1907#if !defined(APIC_MIXED_MODE)
1908		/** FIXME: ??? */
1909		panic("sorry, can't support type 2 default yet");
1910#endif	/* APIC_MIXED_MODE */
1911	}
1912	else
1913		io_apic_ints[2].src_bus_irq = 0;	/* ISA IRQ0 is on APIC INT 2 */
1914
1915	if (type == 7)
1916		io_apic_ints[0].int_type = 0xff;	/* N/C */
1917	else
1918		io_apic_ints[0].int_type = 3;	/* vectored 8259 */
1919#endif	/* APIC_IO */
1920}
1921
1922
1923/*
1924 * start each AP in our list
1925 */
1926static int
1927start_all_aps(u_int boot_addr)
1928{
1929	int     x, i, pg;
1930	u_char  mpbiosreason;
1931	u_long  mpbioswarmvec;
1932	struct globaldata *gd;
1933	char *stack;
1934
1935	POSTCODE(START_ALL_APS_POST);
1936
1937	/* initialize BSP's local APIC */
1938	apic_initialize();
1939	bsp_apic_ready = 1;
1940
1941	/* install the AP 1st level boot code */
1942	install_ap_tramp(boot_addr);
1943
1944
1945	/* save the current value of the warm-start vector */
1946	mpbioswarmvec = *((u_long *) WARMBOOT_OFF);
1947#ifndef PC98
1948	outb(CMOS_REG, BIOS_RESET);
1949	mpbiosreason = inb(CMOS_DATA);
1950#endif
1951
1952	/* record BSP in CPU map */
1953	all_cpus = 1;
1954
1955	/* set up 0 -> 4MB P==V mapping for AP boot */
1956	*(int *)PTD = PG_V | PG_RW | ((uintptr_t)(void *)KPTphys & PG_FRAME);
1957	invltlb();
1958
1959	/* start each AP */
1960	for (x = 1; x <= mp_naps; ++x) {
1961
1962		/* This is a bit verbose, it will go away soon.  */
1963
1964		/* first page of AP's private space */
1965		pg = x * i386_btop(sizeof(struct privatespace));
1966
1967		/* allocate a new private data page */
1968		gd = (struct globaldata *)kmem_alloc(kernel_map, PAGE_SIZE);
1969
1970		/* wire it into the private page table page */
1971		SMPpt[pg] = (pt_entry_t)(PG_V | PG_RW | vtophys(gd));
1972
1973		/* allocate and set up an idle stack data page */
1974		stack = (char *)kmem_alloc(kernel_map, UPAGES*PAGE_SIZE);
1975		for (i = 0; i < UPAGES; i++)
1976			SMPpt[pg + 5 + i] = (pt_entry_t)
1977			    (PG_V | PG_RW | vtophys(PAGE_SIZE * i + stack));
1978
1979		SMPpt[pg + 1] = 0;		/* *prv_CMAP1 */
1980		SMPpt[pg + 2] = 0;		/* *prv_CMAP2 */
1981		SMPpt[pg + 3] = 0;		/* *prv_CMAP3 */
1982		SMPpt[pg + 4] = 0;		/* *prv_PMAP1 */
1983
1984		/* prime data page for it to use */
1985		SLIST_INSERT_HEAD(&cpuhead, gd, gd_allcpu);
1986		gd->gd_cpuid = x;
1987		gd->gd_cpu_lockid = x << 24;
1988
1989		/* setup a vector to our boot code */
1990		*((volatile u_short *) WARMBOOT_OFF) = WARMBOOT_TARGET;
1991		*((volatile u_short *) WARMBOOT_SEG) = (boot_addr >> 4);
1992#ifndef PC98
1993		outb(CMOS_REG, BIOS_RESET);
1994		outb(CMOS_DATA, BIOS_WARM);	/* 'warm-start' */
1995#endif
1996
1997		bootSTK = &SMP_prvspace[x].idlestack[UPAGES*PAGE_SIZE];
1998		bootAP = x;
1999
2000		/* attempt to start the Application Processor */
2001		CHECK_INIT(99);	/* setup checkpoints */
2002		if (!start_ap(x, boot_addr)) {
2003			printf("AP #%d (PHY# %d) failed!\n", x, CPU_TO_ID(x));
2004			CHECK_PRINT("trace");	/* show checkpoints */
2005			/* better panic as the AP may be running loose */
2006			printf("panic y/n? [y] ");
2007			if (cngetc() != 'n')
2008				panic("bye-bye");
2009		}
2010		CHECK_PRINT("trace");		/* show checkpoints */
2011
2012		/* record its version info */
2013		cpu_apic_versions[x] = cpu_apic_versions[0];
2014
2015		all_cpus |= (1 << x);		/* record AP in CPU map */
2016	}
2017
2018	/* build our map of 'other' CPUs */
2019	PCPU_SET(other_cpus, all_cpus & ~(1 << PCPU_GET(cpuid)));
2020
2021	/* fill in our (BSP) APIC version */
2022	cpu_apic_versions[0] = lapic.version;
2023
2024	/* restore the warmstart vector */
2025	*(u_long *) WARMBOOT_OFF = mpbioswarmvec;
2026#ifndef PC98
2027	outb(CMOS_REG, BIOS_RESET);
2028	outb(CMOS_DATA, mpbiosreason);
2029#endif
2030
2031	/*
2032	 * Set up the idle context for the BSP.  Similar to above except
2033	 * that some was done by locore, some by pmap.c and some is implicit
2034	 * because the BSP is cpu#0 and the page is initially zero, and also
2035	 * because we can refer to variables by name on the BSP.
2036	 */
2037
2038	/* Allocate and setup BSP idle stack */
2039	stack = (char *)kmem_alloc(kernel_map, UPAGES * PAGE_SIZE);
2040	for (i = 0; i < UPAGES; i++)
2041		SMPpt[5 + i] = (pt_entry_t)
2042		    (PG_V | PG_RW | vtophys(PAGE_SIZE * i + stack));
2043
2044	*(int *)PTD = 0;
2045	pmap_set_opt();
2046
2047	/* number of APs actually started */
2048	return mp_ncpus - 1;
2049}
2050
2051
2052/*
2053 * load the 1st level AP boot code into base memory.
2054 */
2055
2056/* targets for relocation */
2057extern void bigJump(void);
2058extern void bootCodeSeg(void);
2059extern void bootDataSeg(void);
2060extern void MPentry(void);
2061extern u_int MP_GDT;
2062extern u_int mp_gdtbase;
2063
2064static void
2065install_ap_tramp(u_int boot_addr)
2066{
2067	int     x;
2068	int     size = *(int *) ((u_long) & bootMP_size);
2069	u_char *src = (u_char *) ((u_long) bootMP);
2070	u_char *dst = (u_char *) boot_addr + KERNBASE;
2071	u_int   boot_base = (u_int) bootMP;
2072	u_int8_t *dst8;
2073	u_int16_t *dst16;
2074	u_int32_t *dst32;
2075
2076	POSTCODE(INSTALL_AP_TRAMP_POST);
2077
2078	for (x = 0; x < size; ++x)
2079		*dst++ = *src++;
2080
2081	/*
2082	 * Modify addresses in the code we just copied to base memory.  This
2083	 * unfortunately requires fairly detailed knowledge of mpboot.s;
2084	 * changes to mpboot.s may require matching changes here.
2085	 */
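	/*
	 * Four spots get patched below: the lgdt operand (mp_gdtbase), the
	 * ljmp target that enters MPentry(), and the 24-bit segment base
	 * fields of the boot code and boot data descriptors in MP_GDT.
	 */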
2086
2087	/* boot code is located in KERNEL space */
2088	dst = (u_char *) boot_addr + KERNBASE;
2089
2090	/* modify the lgdt arg */
2091	dst32 = (u_int32_t *) (dst + ((u_int) & mp_gdtbase - boot_base));
2092	*dst32 = boot_addr + ((u_int) & MP_GDT - boot_base);
2093
2094	/* modify the ljmp target for MPentry() */
2095	dst32 = (u_int32_t *) (dst + ((u_int) bigJump - boot_base) + 1);
2096	*dst32 = ((u_int) MPentry - KERNBASE);
2097
2098	/* modify the target for boot code segment */
2099	dst16 = (u_int16_t *) (dst + ((u_int) bootCodeSeg - boot_base));
2100	dst8 = (u_int8_t *) (dst16 + 1);
2101	*dst16 = (u_int) boot_addr & 0xffff;
2102	*dst8 = ((u_int) boot_addr >> 16) & 0xff;
2103
2104	/* modify the target for boot data segment */
2105	dst16 = (u_int16_t *) (dst + ((u_int) bootDataSeg - boot_base));
2106	dst8 = (u_int8_t *) (dst16 + 1);
2107	*dst16 = (u_int) boot_addr & 0xffff;
2108	*dst8 = ((u_int) boot_addr >> 16) & 0xff;
2109}
2110
2111
2112/*
2113 * This function starts the AP (application processor) identified by
2114 * the logical CPU number 'logical_cpu'; its physical APIC ID comes
2115 * from CPU_TO_ID().  It does quite a "song and dance" to accomplish
2116 * this, which is necessary because of the nuances of the different
2117 * hardware we might encounter.  It ain't pretty, but it seems to work.
2118 */
2119static int
2120start_ap(int logical_cpu, u_int boot_addr)
2121{
2122	int     physical_cpu;
2123	int     vector;
2124	int     cpus;
2125	u_long  icr_lo, icr_hi;
2126
2127	POSTCODE(START_AP_POST);
2128
2129	/* get the PHYSICAL APIC ID# */
2130	physical_cpu = CPU_TO_ID(logical_cpu);
2131
2132	/* calculate the vector */
2133	vector = (boot_addr >> 12) & 0xff;
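	/*
	 * The STARTUP IPI vector is simply the 4K page number of the boot
	 * trampoline; e.g. a boot_addr of 0x9000 gives vector 0x09, and the
	 * AP starts executing in real mode at 0900:0000 (physical 0x9000).
	 */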
2134
2135	/* used as a watchpoint to signal AP startup */
2136	cpus = mp_ncpus;
2137
2138	/*
2139	 * First we do an INIT/RESET IPI.  This INIT IPI might be acted on,
2140	 * resetting and running the target CPU; OR it might be latched (P5
2141	 * bug), leaving the CPU waiting for a STARTUP IPI; OR it might be
2142	 * ignored altogether.
2143	 */
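	/*
	 * The ICR low-word values written below decode as:
	 *	0x0000c500		INIT, level trigger, level asserted
	 *	0x00008500		INIT, level trigger, level deasserted
	 *	0x00000600 | vector	STARTUP IPI to page 'vector'
	 */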
2144
2145	/* setup the address for the target AP */
2146	icr_hi = lapic.icr_hi & ~APIC_ID_MASK;
2147	icr_hi |= (physical_cpu << 24);
2148	lapic.icr_hi = icr_hi;
2149
2150	/* do an INIT IPI: assert RESET */
2151	icr_lo = lapic.icr_lo & 0xfff00000;
2152	lapic.icr_lo = icr_lo | 0x0000c500;
2153
2154	/* wait for pending status end */
2155	while (lapic.icr_lo & APIC_DELSTAT_MASK)
2156		 /* spin */ ;
2157
2158	/* do an INIT IPI: deassert RESET */
2159	lapic.icr_lo = icr_lo | 0x00008500;
2160
2161	/* wait for pending status end */
2162	u_sleep(10000);		/* wait ~10 ms */
2163	while (lapic.icr_lo & APIC_DELSTAT_MASK)
2164		 /* spin */ ;
2165
2166	/*
2167	 * Next we do a STARTUP IPI: the previous INIT IPI might still be
2168	 * latched (P5 bug), in which case this 1st STARTUP IPI terminates
2169	 * immediately and the previously started INIT IPI continues; OR the
2170	 * previous INIT IPI has already run, and this STARTUP IPI will run;
2171	 * OR the previous INIT IPI was ignored, and this STARTUP IPI will
2172	 * run.
2173	 */
2174
2175	/* do a STARTUP IPI */
2176	lapic.icr_lo = icr_lo | 0x00000600 | vector;
2177	while (lapic.icr_lo & APIC_DELSTAT_MASK)
2178		 /* spin */ ;
2179	u_sleep(200);		/* wait ~200 us */
2180
2181	/*
2182	 * Finally we do a 2nd STARTUP IPI: it should run IF the previous
2183	 * STARTUP IPI was cancelled by a latched INIT IPI; otherwise it will
2184	 * be ignored, as only ONE STARTUP IPI is recognized after a hardware
2185	 * RESET or INIT IPI.
2186	 */
2187
2188	lapic.icr_lo = icr_lo | 0x00000600 | vector;
2189	while (lapic.icr_lo & APIC_DELSTAT_MASK)
2190		 /* spin */ ;
2191	u_sleep(200);		/* wait ~200 us */
2192
2193	/* wait for it to start */
2194	set_apic_timer(5000000);	/* == 5 seconds */
2195	while (read_apic_timer())
2196		if (mp_ncpus > cpus)
2197			return 1;	/* return SUCCESS */
2198
2199	return 0;		/* return FAILURE */
2200}
2201
2202/*
2203 * Flush the TLB on all other CPUs.
2204 *
2205 * XXX: Needs to handshake and wait for completion before proceeding.
2206 */
2207void
2208smp_invltlb(void)
2209{
2210#if defined(APIC_IO)
2211	if (smp_started && invltlb_ok)
2212		all_but_self_ipi(XINVLTLB_OFFSET);
2213#endif  /* APIC_IO */
2214}
2215
2216void
2217invlpg(u_int addr)
2218{
2219	__asm   __volatile("invlpg (%0)"::"r"(addr):"memory");
2220
2221	/* send a message to the other CPUs */
2222	smp_invltlb();
2223}
2224
2225void
2226invltlb(void)
2227{
2228	u_long  temp;
2229
2230	/*
2231	 * This should be implemented as load_cr3(rcr3()) when load_cr3() is
2232	 * inlined.
2233	 */
2234	__asm __volatile("movl %%cr3, %0; movl %0, %%cr3":"=r"(temp) :: "memory");
2235
2236	/* send a message to the other CPUs */
2237	smp_invltlb();
2238}
2239
2240
2241/*
2242 * This is called once the rest of the system is up and running and we're
2243 * ready to let the APs out of the pen.
2244 */
2245void
2246ap_init(void)
2247{
2248	u_int	apic_id;
2249
2250	/* lock against other APs that are waking up */
2251	s_lock(&ap_boot_lock);
2252
2253	/* BSP may have changed PTD while we're waiting for the lock */
2254	cpu_invltlb();
2255
2256	smp_cpus++;
2257
2258#if defined(I586_CPU) && !defined(NO_F00F_HACK)
2259	lidt(&r_idt);
2260#endif
2261
2262	/* Build our map of 'other' CPUs. */
2263	PCPU_SET(other_cpus, all_cpus & ~(1 << PCPU_GET(cpuid)));
2264
2265	printf("SMP: AP CPU #%d Launched!\n", PCPU_GET(cpuid));
2266
2267	/* set up CPU registers and state */
2268	cpu_setregs();
2269
2270	/* set up FPU state on the AP */
2271	npxinit(__INITIAL_NPXCW__);
2272
2273	/* A quick check from sanity claus */
2274	apic_id = (apic_id_to_logical[(lapic.id & 0x0f000000) >> 24]);
2275	if (PCPU_GET(cpuid) != apic_id) {
2276		printf("SMP: cpuid = %d\n", PCPU_GET(cpuid));
2277		printf("SMP: apic_id = %d\n", apic_id);
2278		printf("PTD[MPPTDI] = %p\n", (void *)PTD[MPPTDI]);
2279		panic("cpuid mismatch! boom!!");
2280	}
2281
2282	/* Init local apic for irq's */
2283	apic_initialize();
2284
2285	/* Set memory range attributes for this CPU to match the BSP */
2286	mem_range_AP_init();
2287
2288	/*
2289	 * Activate smp_invltlb, although strictly speaking this isn't
2290	 * quite correct yet.  We should have a bitfield of CPUs willing
2291	 * to accept TLB flush IPIs, or something similar, and sync them.
2292	 */
2293	if (smp_cpus == mp_ncpus) {
2294		invltlb_ok = 1;
2295		smp_started = 1; /* enable IPIs, TLB shootdown, freezes, etc. */
2296		smp_active = 1;	 /* historic */
2297	}
2298
2299	/* let other APs wake up now */
2300	s_unlock(&ap_boot_lock);
2301
2302	/* wait until all the APs are up */
2303	while (smp_started == 0)
2304		; /* nothing */
2305
2306	/*
2307	 * Set curproc to our per-cpu idleproc so that mutexes have
2308	 * something unique to lock with.
2309	 */
2310	PCPU_SET(curproc, PCPU_GET(idleproc));
2311
2312	microuptime(PCPU_PTR(switchtime));
2313	PCPU_SET(switchticks, ticks);
2314
2315	/* ok, now grab sched_lock and enter the scheduler */
2316	enable_intr();
2317	mtx_enter(&sched_lock, MTX_SPIN);
2318	cpu_throw();	/* doesn't return */
2319
2320	panic("scheduler returned us to ap_init");
2321}
2322
2323#ifdef BETTER_CLOCK
2324
2325#define CHECKSTATE_USER	0
2326#define CHECKSTATE_SYS	1
2327#define CHECKSTATE_INTR	2
2328
2329/* Do not staticize.  Used from apic_vector.s */
2330struct proc*	checkstate_curproc[MAXCPU];
2331int		checkstate_cpustate[MAXCPU];
2332u_long		checkstate_pc[MAXCPU];
2333
2334#define PC_TO_INDEX(pc, prof)				\
2335        ((int)(((u_quad_t)((pc) - (prof)->pr_off) *	\
2336            (u_quad_t)((prof)->pr_scale)) >> 16) & ~1)
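/*
 * pr_scale is a fixed-point scale factor with 16 fractional bits (see
 * profil(2)), so PC_TO_INDEX() multiplies the pc offset by it, discards the
 * fraction, and clears the low bit to yield an even byte index into the
 * profile buffer; e.g. with pr_scale 0x8000 (1/2), an offset of 0x100 maps
 * to index 0x80.
 */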
2337
2338static void
2339addupc_intr_forwarded(struct proc *p, int id, int *astmap)
2340{
2341	int i;
2342	struct uprof *prof;
2343	u_long pc;
2344
2345	pc = checkstate_pc[id];
2346	prof = &p->p_stats->p_prof;
2347	if (pc >= prof->pr_off &&
2348	    (i = PC_TO_INDEX(pc, prof)) < prof->pr_size) {
2349		mtx_assert(&sched_lock, MA_OWNED);
2350		if ((p->p_sflag & PS_OWEUPC) == 0) {
2351			prof->pr_addr = pc;
2352			prof->pr_ticks = 1;
2353			p->p_sflag |= PS_OWEUPC;
2354		}
2355		*astmap |= (1 << id);
2356	}
2357}
2358
2359static void
2360forwarded_statclock(int id, int pscnt, int *astmap)
2361{
2362	struct pstats *pstats;
2363	long rss;
2364	struct rusage *ru;
2365	struct vmspace *vm;
2366	int cpustate;
2367	struct proc *p;
2368#ifdef GPROF
2369	register struct gmonparam *g;
2370	int i;
2371#endif
2372
2373	mtx_assert(&sched_lock, MA_OWNED);
2374	p = checkstate_curproc[id];
2375	cpustate = checkstate_cpustate[id];
2376
2377	/* XXX */
2378	if (p->p_ithd)
2379		cpustate = CHECKSTATE_INTR;
2380	else if (p == SMP_prvspace[id].globaldata.gd_idleproc)
2381		cpustate = CHECKSTATE_SYS;
2382
2383	switch (cpustate) {
2384	case CHECKSTATE_USER:
2385		if (p->p_sflag & PS_PROFIL)
2386			addupc_intr_forwarded(p, id, astmap);
2387		if (pscnt > 1)
2388			return;
2389		p->p_uticks++;
2390		if (p->p_nice > NZERO)
2391			cp_time[CP_NICE]++;
2392		else
2393			cp_time[CP_USER]++;
2394		break;
2395	case CHECKSTATE_SYS:
2396#ifdef GPROF
2397		/*
2398		 * Kernel statistics are just like addupc_intr, only easier.
2399		 */
2400		g = &_gmonparam;
2401		if (g->state == GMON_PROF_ON) {
2402			i = checkstate_pc[id] - g->lowpc;
2403			if (i < g->textsize) {
2404				i /= HISTFRACTION * sizeof(*g->kcount);
2405				g->kcount[i]++;
2406			}
2407		}
2408#endif
2409		if (pscnt > 1)
2410			return;
2411
2412		p->p_sticks++;
2413		if (p == SMP_prvspace[id].globaldata.gd_idleproc)
2414			cp_time[CP_IDLE]++;
2415		else
2416			cp_time[CP_SYS]++;
2417		break;
2418	case CHECKSTATE_INTR:
2419	default:
2420#ifdef GPROF
2421		/*
2422		 * Kernel statistics are just like addupc_intr, only easier.
2423		 */
2424		g = &_gmonparam;
2425		if (g->state == GMON_PROF_ON) {
2426			i = checkstate_pc[id] - g->lowpc;
2427			if (i < g->textsize) {
2428				i /= HISTFRACTION * sizeof(*g->kcount);
2429				g->kcount[i]++;
2430			}
2431		}
2432#endif
2433		if (pscnt > 1)
2434			return;
2435		KASSERT(p != NULL, ("NULL process in interrupt state"));
2436		p->p_iticks++;
2437		cp_time[CP_INTR]++;
2438	}
2439
2440	schedclock(p);
2441
2442	/* Update resource usage integrals and maximums. */
2443	if ((pstats = p->p_stats) != NULL &&
2444	    (ru = &pstats->p_ru) != NULL &&
2445	    (vm = p->p_vmspace) != NULL) {
2446		ru->ru_ixrss += pgtok(vm->vm_tsize);
2447		ru->ru_idrss += pgtok(vm->vm_dsize);
2448		ru->ru_isrss += pgtok(vm->vm_ssize);
2449		rss = pgtok(vmspace_resident_count(vm));
2450		if (ru->ru_maxrss < rss)
2451			ru->ru_maxrss = rss;
2452	}
2453}
2454
2455void
2456forward_statclock(int pscnt)
2457{
2458	int map;
2459	int id;
2460	int i;
2461
2462	/*
2463	 * Kludge.  We don't yet have separate locks for the interrupts and
2464	 * the kernel.  This means that we cannot let the other processors
2465	 * handle complex interrupts while preventing them from entering the
2466	 * kernel in a non-interrupt context.
2467	 *
2468	 * What we can do, without changing the locking mechanisms yet, is
2469	 * let the other processors handle a very simple interrupt (which
2470	 * determines the processor states) and do the main work ourselves.
2471	 */
2472
2473	CTR1(KTR_SMP, "forward_statclock(%d)", pscnt);
2474
2475	if (!smp_started || !invltlb_ok || cold || panicstr)
2476		return;
2477
2478	/* Step 1: Probe state   (user, cpu, interrupt, spinlock, idle ) */
2479	/* Step 1: Probe state (user, cpu, interrupt, spinlock, idle) */
2480	map = PCPU_GET(other_cpus) & ~stopped_cpus ;
2481	checkstate_probed_cpus = 0;
2482	if (map != 0)
2483		selected_apic_ipi(map,
2484				  XCPUCHECKSTATE_OFFSET, APIC_DELMODE_FIXED);
2485
2486	i = 0;
2487	while (checkstate_probed_cpus != map) {
2488		/* spin */
2489		i++;
2490		if (i == 100000) {
2491#ifdef BETTER_CLOCK_DIAGNOSTIC
2492			printf("forward_statclock: checkstate %x\n",
2493			       checkstate_probed_cpus);
2494#endif
2495			break;
2496		}
2497	}
2498
2499	/*
2500	 * Step 2: walk through the other processors' processes, updating
2501	 * ticks and profiling info.
2502	 */
2503
2504	map = 0;
2505	for (id = 0; id < mp_ncpus; id++) {
2506		if (id == PCPU_GET(cpuid))
2507			continue;
2508		if (((1 << id) & checkstate_probed_cpus) == 0)
2509			continue;
2510		forwarded_statclock(id, pscnt, &map);
2511	}
2512	if (map != 0) {
2513		checkstate_need_ast |= map;
2514		selected_apic_ipi(map, XCPUAST_OFFSET, APIC_DELMODE_FIXED);
2515		i = 0;
2516		while ((checkstate_need_ast & map) != 0) {
2517			/* spin */
2518			i++;
2519			if (i > 100000) {
2520#ifdef BETTER_CLOCK_DIAGNOSTIC
2521				printf("forward_statclock: dropped ast 0x%x\n",
2522				       checkstate_need_ast & map);
2523#endif
2524				break;
2525			}
2526		}
2527	}
2528}
2529
2530void
2531forward_hardclock(int pscnt)
2532{
2533	int map;
2534	int id;
2535	struct proc *p;
2536	struct pstats *pstats;
2537	int i;
2538
2539	/*
2540	 * Kludge.  We don't yet have separate locks for the interrupts and
2541	 * the kernel.  This means that we cannot let the other processors
2542	 * handle complex interrupts while preventing them from entering the
2543	 * kernel in a non-interrupt context.
2544	 *
2545	 * What we can do, without changing the locking mechanisms yet, is
2546	 * let the other processors handle a very simple interrupt (which
2547	 * determines the processor states) and do the main work ourselves.
2548	 */
2549
2550	CTR1(KTR_SMP, "forward_hardclock(%d)", pscnt);
2551
2552	if (!smp_started || !invltlb_ok || cold || panicstr)
2553		return;
2554
2555	/* Step 1: Probe state   (user, cpu, interrupt, spinlock, idle) */
2556	/* Step 1: Probe state (user, cpu, interrupt, spinlock, idle) */
2557	map = PCPU_GET(other_cpus) & ~stopped_cpus ;
2558	checkstate_probed_cpus = 0;
2559	if (map != 0)
2560		selected_apic_ipi(map,
2561				  XCPUCHECKSTATE_OFFSET, APIC_DELMODE_FIXED);
2562
2563	i = 0;
2564	while (checkstate_probed_cpus != map) {
2565		/* spin */
2566		i++;
2567		if (i == 100000) {
2568#ifdef BETTER_CLOCK_DIAGNOSTIC
2569			printf("forward_hardclock: checkstate %x\n",
2570			       checkstate_probed_cpus);
2571#endif
2572			break;
2573		}
2574	}
2575
2576	/*
2577	 * Step 2: walk through the other processors' processes, updating the
2578	 * virtual and profiling timers.  If stathz == 0, also update ticks
2579	 * and profiling info.
2580	 */
2581
2582	map = 0;
2583	for (id = 0; id < mp_ncpus; id++) {
2584		if (id == PCPU_GET(cpuid))
2585			continue;
2586		if (((1 << id) & checkstate_probed_cpus) == 0)
2587			continue;
2588		p = checkstate_curproc[id];
2589		if (p) {
2590			pstats = p->p_stats;
2591			if (checkstate_cpustate[id] == CHECKSTATE_USER &&
2592			    timevalisset(&pstats->p_timer[ITIMER_VIRTUAL].it_value) &&
2593			    itimerdecr(&pstats->p_timer[ITIMER_VIRTUAL], tick) == 0) {
2594				p->p_sflag |= PS_ALRMPEND;
2595				map |= (1 << id);
2596			}
2597			if (timevalisset(&pstats->p_timer[ITIMER_PROF].it_value) &&
2598			    itimerdecr(&pstats->p_timer[ITIMER_PROF], tick) == 0) {
2599				p->p_sflag |= PS_PROFPEND;
2600				map |= (1 << id);
2601			}
2602		}
2603		if (stathz == 0) {
2604			forwarded_statclock(id, pscnt, &map);
2605		}
2606	}
2607	if (map != 0) {
2608		checkstate_need_ast |= map;
2609		selected_apic_ipi(map, XCPUAST_OFFSET, APIC_DELMODE_FIXED);
2610		i = 0;
2611		while ((checkstate_need_ast & map) != 0) {
2612			/* spin */
2613			i++;
2614			if (i > 100000) {
2615#ifdef BETTER_CLOCK_DIAGNOSTIC
2616				printf("forward_hardclock: dropped ast 0x%x\n",
2617				       checkstate_need_ast & map);
2618#endif
2619				break;
2620			}
2621		}
2622	}
2623}
2624
2625#endif /* BETTER_CLOCK */
2626
2627void
2628forward_signal(struct proc *p)
2629{
2630	int map;
2631	int id;
2632	int i;
2633
2634	/*
2635	 * Kludge.  We don't yet have separate locks for the interrupts and
2636	 * the kernel.  This means that we cannot let the other processors
2637	 * handle complex interrupts while preventing them from entering the
2638	 * kernel in a non-interrupt context.
2639	 *
2640	 * What we can do, without changing the locking mechanisms yet, is
2641	 * let the other processors handle a very simple interrupt (which
2642	 * determines the processor states) and do the main work ourselves.
2643	 */
2644
2645	CTR1(KTR_SMP, "forward_signal(%p)", p);
2646
2647	if (!smp_started || !invltlb_ok || cold || panicstr)
2648		return;
2649	if (!forward_signal_enabled)
2650		return;
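	/*
	 * The loop below handles migration: the process may move to another
	 * CPU between our reading p_oncpu and the AST being delivered, so we
	 * re-check p_oncpu afterwards and retry if it changed.
	 */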
2651	mtx_enter(&sched_lock, MTX_SPIN);
2652	while (1) {
2653		if (p->p_stat != SRUN) {
2654			mtx_exit(&sched_lock, MTX_SPIN);
2655			return;
2656		}
2657		id = p->p_oncpu;
2658		mtx_exit(&sched_lock, MTX_SPIN);
2659		if (id == 0xff)
2660			return;
2661		map = (1<<id);
2662		checkstate_need_ast |= map;
2663		selected_apic_ipi(map, XCPUAST_OFFSET, APIC_DELMODE_FIXED);
2664		i = 0;
2665		while ((checkstate_need_ast & map) != 0) {
2666			/* spin */
2667			i++;
2668			if (i > 100000) {
2669#if 0
2670				printf("forward_signal: dropped ast 0x%x\n",
2671				       checkstate_need_ast & map);
2672#endif
2673				break;
2674			}
2675		}
2676		mtx_enter(&sched_lock, MTX_SPIN);
2677		if (id == p->p_oncpu) {
2678			mtx_exit(&sched_lock, MTX_SPIN);
2679			return;
2680		}
2681	}
2682}
2683
2684void
2685forward_roundrobin(void)
2686{
2687	u_int map;
2688	int i;
2689
2690	CTR0(KTR_SMP, "forward_roundrobin()");
2691
2692	if (!smp_started || !invltlb_ok || cold || panicstr)
2693		return;
2694	if (!forward_roundrobin_enabled)
2695		return;
2696	resched_cpus |= PCPU_GET(other_cpus);
2697	map = PCPU_GET(other_cpus) & ~stopped_cpus ;
2698#if 1
2699	selected_apic_ipi(map, XCPUAST_OFFSET, APIC_DELMODE_FIXED);
2700#else
2701	(void) all_but_self_ipi(XCPUAST_OFFSET);
2702#endif
2703	i = 0;
2704	while ((checkstate_need_ast & map) != 0) {
2705		/* spin */
2706		i++;
2707		if (i > 100000) {
2708#if 0
2709			printf("forward_roundrobin: dropped ast 0x%x\n",
2710			       checkstate_need_ast & map);
2711#endif
2712			break;
2713		}
2714	}
2715}
2716
2717/*
2718 * When called, the executing CPU sends an IPI to all other CPUs
2719 * requesting that they halt execution.
2720 *
2721 * Usually (but not necessarily) called with 'other_cpus' as its arg.
2722 *
2723 *  - Signals all CPUs in map to stop.
2724 *  - Waits for each to stop.
2725 *
2726 * Returns:
2727 *  -1: error
2728 *   0: NA
2729 *   1: ok
2730 *
2731 * XXX FIXME: this is not MP-safe, needs a lock to prevent multiple CPUs
2732 *            from executing it at the same time.
2733 */
2734int
2735stop_cpus(u_int map)
2736{
2737	int count = 0;
2738
2739	if (!smp_started)
2740		return 0;
2741
2742	/* send the Xcpustop IPI to all CPUs in map */
2743	selected_apic_ipi(map, XCPUSTOP_OFFSET, APIC_DELMODE_FIXED);
2744
2745	while (count++ < 100000 && (stopped_cpus & map) != map)
2746		/* spin */ ;
2747
2748#ifdef DIAGNOSTIC
2749	if ((stopped_cpus & map) != map)
2750		printf("Warning: CPUs 0x%x did not stop!\n",
2751		    (~(stopped_cpus & map)) & map);
2752#endif
2753
2754	return 1;
2755}
2756
2757
2758/*
2759 * Called by a CPU to restart stopped CPUs.
2760 *
2761 * Usually (but not necessarily) called with 'stopped_cpus' as its arg.
2762 *
2763 *  - Signals all CPUs in map to restart.
2764 *  - Waits for each to restart.
2765 *
2766 * Returns:
2767 *  -1: error
2768 *   0: NA
2769 *   1: ok
2770 */
2771int
2772restart_cpus(u_int map)
2773{
2774	int count = 0;
2775
2776	if (!smp_started)
2777		return 0;
2778
2779	started_cpus = map;		/* signal other cpus to restart */
2780
2781	/* wait for each to clear its bit */
2782	while (count++ < 100000 && (stopped_cpus & map) != 0)
2783		/* spin */ ;
2784
2785#ifdef DIAGNOSTIC
2786	if ((stopped_cpus & map) != 0)
2787		printf("Warning: CPUs 0x%x did not restart!\n",
2788		    (~(stopped_cpus & map)) & map);
2789#endif
2790
2791	return 1;
2792}
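/*
 * Illustrative pairing (not part of this file): a caller typically stops
 * the other CPUs, does its critical work, and then restarts the CPUs it
 * stopped, e.g.
 *
 *	stop_cpus(PCPU_GET(other_cpus));
 *	...
 *	restart_cpus(stopped_cpus);
 */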
2793
2794
2795#ifdef APIC_INTR_REORDER
2796/*
2797 *	Maintain mapping from softintr vector to isr bit in local apic.
2798 */
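/*
 * The local APIC's 256-bit ISR is exposed as eight 32-bit registers spaced
 * 16 bytes apart; (vector >> 5) selects the register and (vector & 31) the
 * bit within it.  The '<< 2' works out because lapic.isr0 is a 32-bit
 * field, so stepping the pointer by four elements skips 16 bytes.
 */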
2799void
2800set_lapic_isrloc(int intr, int vector)
2801{
2802	if (intr < 0 || intr > 32)
2803		panic("set_lapic_isrloc: bad intr argument: %d", intr);
2804	if (vector < ICU_OFFSET || vector > 255)
2805		panic("set_lapic_isrloc: bad vector argument: %d", vector);
2806	apic_isrbit_location[intr].location = &lapic.isr0 + ((vector>>5)<<2);
2807	apic_isrbit_location[intr].bit = (1<<(vector & 31));
2808}
2809#endif
2810
2811/*
2812 * All-CPU rendezvous.  CPUs are signalled, all execute the setup function
2813 * (if specified), rendezvous, execute the action function (if specified),
2814 * rendezvous again, execute the teardown function (if specified), and then
2815 * resume.
2816 *
2817 * Note that the supplied external functions _must_ be reentrant and aware
2818 * that they are running in parallel and in an unknown lock context.
2819 */
2820static void (*smp_rv_setup_func)(void *arg);
2821static void (*smp_rv_action_func)(void *arg);
2822static void (*smp_rv_teardown_func)(void *arg);
2823static void *smp_rv_func_arg;
2824static volatile int smp_rv_waiters[2];
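/*
 * smp_rv_waiters[0] counts CPUs that have completed the setup function
 * (entry barrier); smp_rv_waiters[1] counts CPUs that have completed the
 * action function (exit barrier).
 */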
2825
2826void
2827smp_rendezvous_action(void)
2828{
2829	/* setup function */
2830	if (smp_rv_setup_func != NULL)
2831		smp_rv_setup_func(smp_rv_func_arg);
2832	/* spin on entry rendezvous */
2833	atomic_add_int(&smp_rv_waiters[0], 1);
2834	while (smp_rv_waiters[0] < mp_ncpus)
2835		;
2836	/* action function */
2837	if (smp_rv_action_func != NULL)
2838		smp_rv_action_func(smp_rv_func_arg);
2839	/* spin on exit rendezvous */
2840	atomic_add_int(&smp_rv_waiters[1], 1);
2841	while (smp_rv_waiters[1] < mp_ncpus)
2842		;
2843	/* teardown function */
2844	if (smp_rv_teardown_func != NULL)
2845		smp_rv_teardown_func(smp_rv_func_arg);
2846}
2847
2848void
2849smp_rendezvous(void (* setup_func)(void *),
2850	       void (* action_func)(void *),
2851	       void (* teardown_func)(void *),
2852	       void *arg)
2853{
2854	u_int	efl;
2855
2856	/* obtain rendezvous lock */
2857	s_lock(&smp_rv_lock);		/* XXX sleep here? NOWAIT flag? */
2858
2859	/* set static function pointers */
2860	smp_rv_setup_func = setup_func;
2861	smp_rv_action_func = action_func;
2862	smp_rv_teardown_func = teardown_func;
2863	smp_rv_func_arg = arg;
2864	smp_rv_waiters[0] = 0;
2865	smp_rv_waiters[1] = 0;
2866
2867	/* disable interrupts on this CPU, save interrupt status */
2868	efl = read_eflags();
2869	write_eflags(efl & ~PSL_I);
2870
2871	/* signal other processors, which will enter the IPI with interrupts off */
2872	all_but_self_ipi(XRENDEZVOUS_OFFSET);
2873
2874	/* call executor function */
2875	smp_rendezvous_action();
2876
2877	/* restore interrupt flag */
2878	write_eflags(efl);
2879
2880	/* release lock */
2881	s_unlock(&smp_rv_lock);
2882}
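/*
 * Illustrative usage (not part of this file): to run something on every
 * CPU in lockstep, only the action function is required; the setup and
 * teardown hooks may be NULL.  'rv_flush' here is purely hypothetical:
 *
 *	static void
 *	rv_flush(void *arg __unused)
 *	{
 *
 *		cpu_invltlb();
 *	}
 *	...
 *	smp_rendezvous(NULL, rv_flush, NULL, NULL);
 */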
2883
2884void
2885release_aps(void *dummy __unused)
2886{
2887	s_unlock(&ap_boot_lock);
2888}
2889
2890SYSINIT(start_aps, SI_SUB_SMP, SI_ORDER_FIRST, release_aps, NULL);
2891