mptable.c revision 58717
1/*
2 * Copyright (c) 1996, by Steve Passe
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 *    notice, this list of conditions and the following disclaimer.
10 * 2. The name of the developer may NOT be used to endorse or promote products
11 *    derived from this software without specific prior written permission.
12 *
13 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
14 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
16 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
17 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
18 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
19 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
20 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
21 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
22 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
23 * SUCH DAMAGE.
24 *
25 * $FreeBSD: head/sys/i386/i386/mptable.c 58717 2000-03-28 07:16:37Z dillon $
26 */
27
28#include "opt_smp.h"
29#include "opt_cpu.h"
30#include "opt_user_ldt.h"
31
32#ifdef SMP
33#include <machine/smptests.h>
34#else
35#error "mptable.c requires the SMP kernel option"
36#endif
37
38#include <sys/param.h>
39#include <sys/systm.h>
40#include <sys/kernel.h>
41#include <sys/proc.h>
42#include <sys/sysctl.h>
43#include <sys/malloc.h>
44#include <sys/memrange.h>
45#ifdef BETTER_CLOCK
46#include <sys/dkstat.h>
47#endif
48#include <sys/cons.h>	/* cngetc() */
49
50#include <vm/vm.h>
51#include <vm/vm_param.h>
52#include <vm/pmap.h>
53#include <vm/vm_kern.h>
54#include <vm/vm_extern.h>
55#ifdef BETTER_CLOCK
56#include <sys/lock.h>
57#include <vm/vm_map.h>
58#include <sys/user.h>
59#ifdef GPROF
60#include <sys/gmon.h>
61#endif
62#endif
63
64#include <machine/smp.h>
65#include <machine/apic.h>
66#include <machine/atomic.h>
67#include <machine/cpufunc.h>
68#include <machine/mpapic.h>
69#include <machine/psl.h>
70#include <machine/segments.h>
71#include <machine/smptests.h>	/** TEST_DEFAULT_CONFIG, TEST_TEST1 */
72#include <machine/tss.h>
73#include <machine/specialreg.h>
74#include <machine/globaldata.h>
75
76#if defined(APIC_IO)
77#include <machine/md_var.h>		/* setidt() */
78#include <i386/isa/icu.h>		/* IPIs */
79#include <i386/isa/intr_machdep.h>	/* IPIs */
80#endif	/* APIC_IO */
81
82#if defined(TEST_DEFAULT_CONFIG)
83#define MPFPS_MPFB1	TEST_DEFAULT_CONFIG
84#else
85#define MPFPS_MPFB1	mpfps->mpfb1
86#endif  /* TEST_DEFAULT_CONFIG */
87
88#define WARMBOOT_TARGET		0
89#define WARMBOOT_OFF		(KERNBASE + 0x0467)
90#define WARMBOOT_SEG		(KERNBASE + 0x0469)
91
92#ifdef PC98
93#define BIOS_BASE		(0xe8000)
94#define BIOS_SIZE		(0x18000)
95#else
96#define BIOS_BASE		(0xf0000)
97#define BIOS_SIZE		(0x10000)
98#endif
99#define BIOS_COUNT		(BIOS_SIZE/4)
100
101#define CMOS_REG		(0x70)
102#define CMOS_DATA		(0x71)
103#define BIOS_RESET		(0x0f)
104#define BIOS_WARM		(0x0a)
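/*
 * Warm-boot mechanics (used by start_all_aps()): writing BIOS_WARM (0x0a)
 * into CMOS shutdown-status register BIOS_RESET (0x0f) tells the BIOS to
 * skip POST after the INIT IPI resets an AP and instead jump through the
 * real-mode vector at 0040:0067, i.e. the offset word at WARMBOOT_OFF
 * (0x467) and the segment word at WARMBOOT_SEG (0x469) defined above.
 */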
105
106#define PROCENTRY_FLAG_EN	0x01
107#define PROCENTRY_FLAG_BP	0x02
108#define IOAPICENTRY_FLAG_EN	0x01
109
110
111/* MP Floating Pointer Structure */
112typedef struct MPFPS {
113	char    signature[4];
114	void   *pap;
115	u_char  length;
116	u_char  spec_rev;
117	u_char  checksum;
118	u_char  mpfb1;
119	u_char  mpfb2;
120	u_char  mpfb3;
121	u_char  mpfb4;
122	u_char  mpfb5;
123}      *mpfps_t;
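/*
 * This mirrors the MP Floating Pointer Structure of the Intel MP spec:
 * 'signature' holds the "_MP_" string, 'pap' is the physical address of
 * the MP configuration table (if any), and the feature bytes select the
 * setup: a non-zero mpfb1 names one of the default configurations (see
 * default_data[] below), while bit 7 of mpfb2 indicates PIC mode rather
 * than virtual wire mode (see the picmode test in mptable_pass2()).
 */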
124
125/* MP Configuration Table Header */
126typedef struct MPCTH {
127	char    signature[4];
128	u_short base_table_length;
129	u_char  spec_rev;
130	u_char  checksum;
131	u_char  oem_id[8];
132	u_char  product_id[12];
133	void   *oem_table_pointer;
134	u_short oem_table_size;
135	u_short entry_count;
136	void   *apic_address;
137	u_short extended_table_length;
138	u_char  extended_table_checksum;
139	u_char  reserved;
140}      *mpcth_t;
141
142
143typedef struct PROCENTRY {
144	u_char  type;
145	u_char  apic_id;
146	u_char  apic_version;
147	u_char  cpu_flags;
148	u_long  cpu_signature;
149	u_long  feature_flags;
150	u_long  reserved1;
151	u_long  reserved2;
152}      *proc_entry_ptr;
153
154typedef struct BUSENTRY {
155	u_char  type;
156	u_char  bus_id;
157	char    bus_type[6];
158}      *bus_entry_ptr;
159
160typedef struct IOAPICENTRY {
161	u_char  type;
162	u_char  apic_id;
163	u_char  apic_version;
164	u_char  apic_flags;
165	void   *apic_address;
166}      *io_apic_entry_ptr;
167
168typedef struct INTENTRY {
169	u_char  type;
170	u_char  int_type;
171	u_short int_flags;
172	u_char  src_bus_id;
173	u_char  src_bus_irq;
174	u_char  dst_apic_id;
175	u_char  dst_apic_int;
176}      *int_entry_ptr;
177
178/* descriptions of MP basetable entries */
179typedef struct BASETABLE_ENTRY {
180	u_char  type;
181	u_char  length;
182	char    name[16];
183}       basetable_entry;
184
185/*
186 * This code MUST be enabled both here and in mpboot.s.
187 * It traces the very early stages of AP boot by placing values in CMOS RAM.
188 * It is NORMALLY never needed, hence the primitive method for enabling it:
189 * uncomment the define below.
190#define CHECK_POINTS
191 */
192
193#if defined(CHECK_POINTS) && !defined(PC98)
194#define CHECK_READ(A)	 (outb(CMOS_REG, (A)), inb(CMOS_DATA))
195#define CHECK_WRITE(A,D) (outb(CMOS_REG, (A)), outb(CMOS_DATA, (D)))
196
197#define CHECK_INIT(D)				\
198	CHECK_WRITE(0x34, (D));			\
199	CHECK_WRITE(0x35, (D));			\
200	CHECK_WRITE(0x36, (D));			\
201	CHECK_WRITE(0x37, (D));			\
202	CHECK_WRITE(0x38, (D));			\
203	CHECK_WRITE(0x39, (D));
204
205#define CHECK_PRINT(S)				\
206	printf("%s: %d, %d, %d, %d, %d, %d\n",	\
207	   (S),					\
208	   CHECK_READ(0x34),			\
209	   CHECK_READ(0x35),			\
210	   CHECK_READ(0x36),			\
211	   CHECK_READ(0x37),			\
212	   CHECK_READ(0x38),			\
213	   CHECK_READ(0x39));
214
215#else				/* CHECK_POINTS */
216
217#define CHECK_INIT(D)
218#define CHECK_PRINT(S)
219
220#endif				/* CHECK_POINTS */
221
222/*
223 * Values to send to the POST hardware.
224 */
225#define MP_BOOTADDRESS_POST	0x10
226#define MP_PROBE_POST		0x11
227#define MPTABLE_PASS1_POST	0x12
228
229#define MP_START_POST		0x13
230#define MP_ENABLE_POST		0x14
231#define MPTABLE_PASS2_POST	0x15
232
233#define START_ALL_APS_POST	0x16
234#define INSTALL_AP_TRAMP_POST	0x17
235#define START_AP_POST		0x18
236
237#define MP_ANNOUNCE_POST	0x19
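/*
 * Note: POSTCODE() comes from the machine headers; these checkpoint values
 * are recorded (via current_postcode and/or, depending on options,
 * presumably the conventional POST diagnostic port) so that a hang during
 * SMP startup can be localized to the phase that last ran.
 */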
238
239
240/** XXX FIXME: where does this really belong, isa.h/isa.c perhaps? */
241int	current_postcode;
242
243/** XXX FIXME: what system files declare these??? */
244extern struct region_descriptor r_gdt, r_idt;
245
246int	bsp_apic_ready = 0;	/* flags usability of the BSP apic */
247int	mp_ncpus;		/* # of CPUs, including BSP */
248int	mp_naps;		/* # of Application processors */
249int	mp_nbusses;		/* # of busses */
250int	mp_napics;		/* # of IO APICs */
251int	boot_cpu_id;		/* designated BSP */
252vm_offset_t cpu_apic_address;
253vm_offset_t io_apic_address[NAPICID];	/* NAPICID is more than enough */
254extern	int nkpt;
255
256u_int32_t cpu_apic_versions[NCPU];
257u_int32_t io_apic_versions[NAPIC];
258
259#ifdef APIC_INTR_DIAGNOSTIC
260int apic_itrace_enter[32];
261int apic_itrace_tryisrlock[32];
262int apic_itrace_gotisrlock[32];
263int apic_itrace_active[32];
264int apic_itrace_masked[32];
265int apic_itrace_noisrlock[32];
266int apic_itrace_masked2[32];
267int apic_itrace_unmask[32];
268int apic_itrace_noforward[32];
269int apic_itrace_leave[32];
270int apic_itrace_enter2[32];
271int apic_itrace_doreti[32];
272int apic_itrace_splz[32];
273int apic_itrace_eoi[32];
274#ifdef APIC_INTR_DIAGNOSTIC_IRQ
275unsigned short apic_itrace_debugbuffer[32768];
276int apic_itrace_debugbuffer_idx;
277struct simplelock apic_itrace_debuglock;
278#endif
279#endif
280
281#ifdef APIC_INTR_REORDER
282struct {
283	volatile int *location;
284	int bit;
285} apic_isrbit_location[32];
286#endif
287
288struct apic_intmapinfo	int_to_apicintpin[APIC_INTMAPSIZE];
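/*
 * int_to_apicintpin[irq] records which IO APIC and pin deliver a given
 * IRQ: the logical APIC number, the pin number, the APIC's mapped address
 * and the index of the pin's redirection table entry.  Entries are filled
 * in by assign_apic_irq() and cleared again by revoke_apic_irq().
 */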
289
290/*
291 * APIC ID logical/physical mapping structures.
292 * We oversize these to simplify boot-time config.
293 */
294int     cpu_num_to_apic_id[NAPICID];
295int     io_num_to_apic_id[NAPICID];
296int     apic_id_to_logical[NAPICID];
297
298
299/* Bitmap of all available CPUs */
300u_int	all_cpus;
301
302/* AP uses this during bootstrap.  Do not staticize.  */
303char *bootSTK;
304static int bootAP;
305
306/* Hotwire a 0->4MB V==P mapping */
307extern pt_entry_t *KPTphys;
308
309/* SMP page table page */
310extern pt_entry_t *SMPpt;
311
312struct pcb stoppcbs[NCPU];
313
314int smp_started;		/* has the system started? */
315
316/*
317 * Local data and functions.
318 */
319
320static int	mp_capable;
321static u_int	boot_address;
322static u_int	base_memory;
323
324static int	picmode;		/* 0: virtual wire mode, 1: PIC mode */
325static mpfps_t	mpfps;
326static int	search_for_sig(u_int32_t target, int count);
327static void	mp_enable(u_int boot_addr);
328
329static int	mptable_pass1(void);
330static int	mptable_pass2(void);
331static void	default_mp_table(int type);
332static void	fix_mp_table(void);
333static void	setup_apic_irq_mapping(void);
334static void	init_locks(void);
335static int	start_all_aps(u_int boot_addr);
336static void	install_ap_tramp(u_int boot_addr);
337static int	start_ap(int logicalCpu, u_int boot_addr);
338static int	apic_int_is_bus_type(int intr, int bus_type);
339
340/*
341 * Calculate usable address in base memory for AP trampoline code.
342 */
343u_int
344mp_bootaddress(u_int basemem)
345{
346	POSTCODE(MP_BOOTADDRESS_POST);
347
348	base_memory = basemem * 1024;	/* convert to bytes */
349
350	boot_address = base_memory & ~0xfff;	/* round down to 4k boundary */
351	if ((base_memory - boot_address) < bootMP_size)
352		boot_address -= 4096;	/* not enough, lower by 4k */
353
354	return boot_address;
355}
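/*
 * Worked example of the above: with basemem = 639 (KB), base_memory is
 * 0x9fc00; rounding down gives boot_address = 0x9f000, leaving 0xc00
 * bytes for the trampoline.  If bootMP_size exceeded that, boot_address
 * would drop one more page to 0x9e000.
 */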
356
357
358/*
359 * Look for an Intel MP spec table (ie, SMP capable hardware).
360 */
361int
362mp_probe(void)
363{
364	int     x;
365	u_long  segment;
366	u_int32_t target;
367
368	POSTCODE(MP_PROBE_POST);
369
370	/* see if EBDA exists */
371	if ((segment = (u_long) * (u_short *) (KERNBASE + 0x40e)) != 0) {
372		/* search first 1K of EBDA */
373		target = (u_int32_t) (segment << 4);
374		if ((x = search_for_sig(target, 1024 / 4)) >= 0)
375			goto found;
376	} else {
377		/* last 1K of base memory, effective 'top of base' passed in */
378		target = (u_int32_t) (base_memory - 0x400);
379		if ((x = search_for_sig(target, 1024 / 4)) >= 0)
380			goto found;
381	}
382
383	/* search the BIOS */
384	target = (u_int32_t) BIOS_BASE;
385	if ((x = search_for_sig(target, BIOS_COUNT)) >= 0)
386		goto found;
387
388	/* nothing found */
389	mpfps = (mpfps_t)0;
390	mp_capable = 0;
391	return 0;
392
393found:
394	/* calculate needed resources */
395	mpfps = (mpfps_t)x;
396	if (mptable_pass1())
397		panic("you must reconfigure your kernel");
398
399	/* flag fact that we are running multiple processors */
400	mp_capable = 1;
401	return 1;
402}
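/*
 * The search order above follows the MP spec: the first 1K of the EBDA
 * (whose segment comes from the BIOS data area word at 0x40e), else the
 * last 1K of base memory, else the BIOS ROM region starting at BIOS_BASE.
 */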
403
404
405/*
406 * Startup the SMP processors.
407 */
408void
409mp_start(void)
410{
411	POSTCODE(MP_START_POST);
412
413	/* look for MP capable motherboard */
414	if (mp_capable)
415		mp_enable(boot_address);
416	else
417		panic("MP hardware not found!");
418}
419
420
421/*
422 * Print various information about the SMP system hardware and setup.
423 */
424void
425mp_announce(void)
426{
427	int     x;
428
429	POSTCODE(MP_ANNOUNCE_POST);
430
431	printf("FreeBSD/SMP: Multiprocessor motherboard\n");
432	printf(" cpu0 (BSP): apic id: %2d", CPU_TO_ID(0));
433	printf(", version: 0x%08x", cpu_apic_versions[0]);
434	printf(", at 0x%08x\n", cpu_apic_address);
435	for (x = 1; x <= mp_naps; ++x) {
436		printf(" cpu%d (AP):  apic id: %2d", x, CPU_TO_ID(x));
437		printf(", version: 0x%08x", cpu_apic_versions[x]);
438		printf(", at 0x%08x\n", cpu_apic_address);
439	}
440
441#if defined(APIC_IO)
442	for (x = 0; x < mp_napics; ++x) {
443		printf(" io%d (APIC): apic id: %2d", x, IO_TO_ID(x));
444		printf(", version: 0x%08x", io_apic_versions[x]);
445		printf(", at 0x%08x\n", io_apic_address[x]);
446	}
447#else
448	printf(" Warning: APIC I/O disabled\n");
449#endif	/* APIC_IO */
450}
451
452/*
453 * AP CPUs call this to sync up protected mode.
454 */
455void
456init_secondary(void)
457{
458	int	gsel_tss;
459	int	x, myid = bootAP;
460
461	gdt_segs[GPRIV_SEL].ssd_base = (int) &SMP_prvspace[myid];
462	gdt_segs[GPROC0_SEL].ssd_base =
463		(int) &SMP_prvspace[myid].globaldata.gd_common_tss;
464	SMP_prvspace[myid].globaldata.gd_prvspace = &SMP_prvspace[myid];
465
466	for (x = 0; x < NGDT; x++) {
467		ssdtosd(&gdt_segs[x], &gdt[myid * NGDT + x].sd);
468	}
469
470	r_gdt.rd_limit = NGDT * sizeof(gdt[0]) - 1;
471	r_gdt.rd_base = (int) &gdt[myid * NGDT];
472	lgdt(&r_gdt);			/* does magic intra-segment return */
473
474	lidt(&r_idt);
475
476	lldt(_default_ldt);
477#ifdef USER_LDT
478	currentldt = _default_ldt;
479#endif
480
481	gsel_tss = GSEL(GPROC0_SEL, SEL_KPL);
482	gdt[myid * NGDT + GPROC0_SEL].sd.sd_type = SDT_SYS386TSS;
483	common_tss.tss_esp0 = 0;	/* not used until after switch */
484	common_tss.tss_ss0 = GSEL(GDATA_SEL, SEL_KPL);
485	common_tss.tss_ioopt = (sizeof common_tss) << 16;
486	tss_gdt = &gdt[myid * NGDT + GPROC0_SEL].sd;
487	common_tssd = *tss_gdt;
488	ltr(gsel_tss);
489
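	/*
	 * The constant below, 0x8005003b, decodes to
	 * PG | AM | WP | NE | ET | TS | MP | PE: protected mode with paging,
	 * write protection and alignment checking enabled (presumably why it
	 * carries the XXX: it is hard-coded rather than derived from the BSP).
	 */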
490	load_cr0(0x8005003b);		/* XXX! */
491
492	pmap_set_opt();
493}
494
495
496#if defined(APIC_IO)
497/*
498 * Final configuration of the BSP's local APIC:
499 *  - disable 'pic mode'.
500 *  - disable 'virtual wire mode'.
501 *  - enable NMI.
502 */
503void
504bsp_apic_configure(void)
505{
506	u_char		byte;
507	u_int32_t	temp;
508
509	/* leave 'pic mode' if necessary */
510	if (picmode) {
511		outb(0x22, 0x70);	/* select IMCR */
512		byte = inb(0x23);	/* current contents */
513		byte |= 0x01;		/* mask external INTR */
514		outb(0x23, byte);	/* disconnect 8259s/NMI */
515	}
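	/*
	 * The sequence above drives the IMCR (Interrupt Mode Configuration
	 * Register): writing 0x70 to port 0x22 selects the IMCR, port 0x23
	 * accesses it, and setting bit 0 reroutes INTR/NMI from the CPU pins
	 * to the APIC (the MP spec's "symmetric I/O mode").
	 */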
516
517	/* mask lint0 (the 8259 'virtual wire' connection) */
518	temp = lapic.lvt_lint0;
519	temp |= APIC_LVT_M;		/* set the mask */
520	lapic.lvt_lint0 = temp;
521
522	/* setup lint1 to handle NMI */
523	temp = lapic.lvt_lint1;
524	temp &= ~APIC_LVT_M;		/* clear the mask */
525	lapic.lvt_lint1 = temp;
526
527	if (bootverbose)
528		apic_dump("bsp_apic_configure()");
529}
530#endif  /* APIC_IO */
531
532
533/*******************************************************************
534 * local functions and data
535 */
536
537/*
538 * start the SMP system
539 */
540static void
541mp_enable(u_int boot_addr)
542{
543	int     x;
544#if defined(APIC_IO)
545	int     apic;
546	u_int   ux;
547#endif	/* APIC_IO */
548
549	POSTCODE(MP_ENABLE_POST);
550
551	/* turn on 4MB of V == P addressing so we can get to MP table */
552	*(int *)PTD = PG_V | PG_RW | ((uintptr_t)(void *)KPTphys & PG_FRAME);
553	invltlb();
554
555	/* examine the MP table for needed info, uses physical addresses */
556	x = mptable_pass2();
557
558	*(int *)PTD = 0;
559	invltlb();
560
561	/* can't process default configs till the CPU APIC is pmapped */
562	if (x)
563		default_mp_table(x);
564
565	/* post scan cleanup */
566	fix_mp_table();
567	setup_apic_irq_mapping();
568
569#if defined(APIC_IO)
570
571	/* fill the LOGICAL io_apic_versions table */
572	for (apic = 0; apic < mp_napics; ++apic) {
573		ux = io_apic_read(apic, IOAPIC_VER);
574		io_apic_versions[apic] = ux;
575	}
576
577	/* program each IO APIC in the system */
578	for (apic = 0; apic < mp_napics; ++apic)
579		if (io_apic_setup(apic) < 0)
580			panic("IO APIC setup failure");
581
582	/* install a 'Spurious INTerrupt' vector */
583	setidt(XSPURIOUSINT_OFFSET, Xspuriousint,
584	       SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
585
586	/* install an inter-CPU IPI for TLB invalidation */
587	setidt(XINVLTLB_OFFSET, Xinvltlb,
588	       SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
589
590#ifdef BETTER_CLOCK
591	/* install an inter-CPU IPI for reading processor state */
592	setidt(XCPUCHECKSTATE_OFFSET, Xcpucheckstate,
593	       SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
594#endif
595
596	/* install an inter-CPU IPI for all-CPU rendezvous */
597	setidt(XRENDEZVOUS_OFFSET, Xrendezvous,
598	       SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
599
600	/* install an inter-CPU IPI for forcing an additional software trap */
601	setidt(XCPUAST_OFFSET, Xcpuast,
602	       SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
603
604	/* install an inter-CPU IPI for interrupt forwarding */
605	setidt(XFORWARD_IRQ_OFFSET, Xforward_irq,
606	       SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
607
608	/* install an inter-CPU IPI for CPU stop/restart */
609	setidt(XCPUSTOP_OFFSET, Xcpustop,
610	       SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
611
612#if defined(TEST_TEST1)
613	/* install a "fake hardware INTerrupt" vector */
614	setidt(XTEST1_OFFSET, Xtest1,
615	       SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
616#endif  /** TEST_TEST1 */
617
618#endif	/* APIC_IO */
619
620	/* initialize all SMP locks */
621	init_locks();
622
623	/* start each Application Processor */
624	start_all_aps(boot_addr);
625
626	/*
627	 * The init process might be started on a different CPU now,
628	 * and the boot CPU might not call prepare_usermode to get
629	 * cr0 correctly configured. Thus we initialize cr0 here.
630	 */
631	load_cr0(rcr0() | CR0_WP | CR0_AM);
632}
633
634
635/*
636 * look for the MP spec signature
637 */
638
639/* string defined by the Intel MP Spec as identifying the MP table */
640#define MP_SIG		0x5f504d5f	/* _MP_ */
641#define NEXT(X)		((X) += 4)
642static int
643search_for_sig(u_int32_t target, int count)
644{
645	int     x;
646	u_int32_t *addr = (u_int32_t *) (KERNBASE + target);
647
648	for (x = 0; x < count; NEXT(x))
649		if (addr[x] == MP_SIG)
650			/* make array index a byte index */
651			return (target + (x * sizeof(u_int32_t)));
652
653	return -1;
654}
655
656
657static basetable_entry basetable_entry_types[] =
658{
659	{0, 20, "Processor"},
660	{1, 8, "Bus"},
661	{2, 8, "I/O APIC"},
662	{3, 8, "I/O INT"},
663	{4, 8, "Local INT"}
664};
665
666typedef struct BUSDATA {
667	u_char  bus_id;
668	enum busTypes bus_type;
669}       bus_datum;
670
671typedef struct INTDATA {
672	u_char  int_type;
673	u_short int_flags;
674	u_char  src_bus_id;
675	u_char  src_bus_irq;
676	u_char  dst_apic_id;
677	u_char  dst_apic_int;
678	u_char	int_vector;
679}       io_int, local_int;
680
681typedef struct BUSTYPENAME {
682	u_char  type;
683	char    name[7];
684}       bus_type_name;
685
686static bus_type_name bus_type_table[] =
687{
688	{CBUS, "CBUS"},
689	{CBUSII, "CBUSII"},
690	{EISA, "EISA"},
691	{MCA, "MCA"},
692	{UNKNOWN_BUSTYPE, "---"},
693	{ISA, "ISA"},
694	{MCA, "MCA"},
695	{UNKNOWN_BUSTYPE, "---"},
696	{UNKNOWN_BUSTYPE, "---"},
697	{UNKNOWN_BUSTYPE, "---"},
698	{UNKNOWN_BUSTYPE, "---"},
699	{UNKNOWN_BUSTYPE, "---"},
700	{PCI, "PCI"},
701	{UNKNOWN_BUSTYPE, "---"},
702	{UNKNOWN_BUSTYPE, "---"},
703	{UNKNOWN_BUSTYPE, "---"},
704	{UNKNOWN_BUSTYPE, "---"},
705	{XPRESS, "XPRESS"},
706	{UNKNOWN_BUSTYPE, "---"}
707};
708/* from MP spec v1.4, table 5-1 */
709static int default_data[7][5] =
710{
711/*   nbus, id0, type0, id1, type1 */
712	{1, 0, ISA, 255, 255},
713	{1, 0, EISA, 255, 255},
714	{1, 0, EISA, 255, 255},
715	{1, 0, MCA, 255, 255},
716	{2, 0, ISA, 1, PCI},
717	{2, 0, EISA, 1, PCI},
718	{2, 0, MCA, 1, PCI}
719};
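/*
 * Example: a floating pointer with mpfb1 == 5 selects the fifth row above
 * (index 4): two busses, bus id 0 = ISA and bus id 1 = PCI.  The single
 * IO APIC and its 16 INT entries are then filled in by default_mp_table().
 */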
720
721
722/* the bus data */
723static bus_datum bus_data[NBUS];
724
725/* the IO INT data, one entry per possible APIC INTerrupt */
726static io_int  io_apic_ints[NINTR];
727
728static int nintrs;
729
730static int processor_entry	__P((proc_entry_ptr entry, int cpu));
731static int bus_entry		__P((bus_entry_ptr entry, int bus));
732static int io_apic_entry	__P((io_apic_entry_ptr entry, int apic));
733static int int_entry		__P((int_entry_ptr entry, int intr));
734static int lookup_bus_type	__P((char *name));
735
736
737/*
738 * 1st pass on motherboard's Intel MP specification table.
739 *
740 * initializes:
741 *	mp_ncpus = 1
742 *
743 * determines:
744 *	cpu_apic_address (common to all CPUs)
745 *	io_apic_address[N]
746 *	mp_naps
747 *	mp_nbusses
748 *	mp_napics
749 *	nintrs
750 */
751static int
752mptable_pass1(void)
753{
754	int	x;
755	mpcth_t	cth;
756	int	totalSize;
757	void*	position;
758	int	count;
759	int	type;
760	int	mustpanic;
761
762	POSTCODE(MPTABLE_PASS1_POST);
763
764	mustpanic = 0;
765
766	/* clear various tables */
767	for (x = 0; x < NAPICID; ++x) {
768		io_apic_address[x] = ~0;	/* IO APIC address table */
769	}
770
771	/* init everything to empty */
772	mp_naps = 0;
773	mp_nbusses = 0;
774	mp_napics = 0;
775	nintrs = 0;
776
777	/* check for use of 'default' configuration */
778	if (MPFPS_MPFB1 != 0) {
779		/* use default addresses */
780		cpu_apic_address = DEFAULT_APIC_BASE;
781		io_apic_address[0] = DEFAULT_IO_APIC_BASE;
782
783		/* fill in with defaults */
784		mp_naps = 2;		/* includes BSP */
785		mp_nbusses = default_data[MPFPS_MPFB1 - 1][0];
786#if defined(APIC_IO)
787		mp_napics = 1;
788		nintrs = 16;
789#endif	/* APIC_IO */
790	}
791	else {
792		if ((cth = mpfps->pap) == 0)
793			panic("MP Configuration Table Header MISSING!");
794
795		cpu_apic_address = (vm_offset_t) cth->apic_address;
796
797		/* walk the table, recording info of interest */
798		totalSize = cth->base_table_length - sizeof(struct MPCTH);
799		position = (u_char *) cth + sizeof(struct MPCTH);
800		count = cth->entry_count;
801
802		while (count--) {
803			switch (type = *(u_char *) position) {
804			case 0: /* processor_entry */
805				if (((proc_entry_ptr)position)->cpu_flags
806					& PROCENTRY_FLAG_EN)
807					++mp_naps;
808				break;
809			case 1: /* bus_entry */
810				++mp_nbusses;
811				break;
812			case 2: /* io_apic_entry */
813				if (((io_apic_entry_ptr)position)->apic_flags
814					& IOAPICENTRY_FLAG_EN)
815					io_apic_address[mp_napics++] =
816					    (vm_offset_t)((io_apic_entry_ptr)
817						position)->apic_address;
818				break;
819			case 3: /* int_entry */
820				++nintrs;
821				break;
822			case 4:	/* local_int_entry */
823				break;
824			default:
825				panic("mpfps Base Table HOSED!");
826				/* NOTREACHED */
827			}
828
829			totalSize -= basetable_entry_types[type].length;
830			position = (u_char *)position + basetable_entry_types[type].length;
831		}
832	}
833
834	/* qualify the numbers */
835	if (mp_naps > NCPU) {
836		printf("Warning: only using %d of %d available CPUs!\n",
837			NCPU, mp_naps);
838		mp_naps = NCPU;
839	}
840	if (mp_nbusses > NBUS) {
841		printf("found %d busses, increase NBUS\n", mp_nbusses);
842		mustpanic = 1;
843	}
844	if (mp_napics > NAPIC) {
845		printf("found %d apics, increase NAPIC\n", mp_napics);
846		mustpanic = 1;
847	}
848	if (nintrs > NINTR) {
849		printf("found %d intrs, increase NINTR\n", nintrs);
850		mustpanic = 1;
851	}
852
853	/*
854	 * Count the BSP.
855	 * This is also used as a counter while starting the APs.
856	 */
857	mp_ncpus = 1;
858
859	--mp_naps;	/* subtract the BSP */
860
861	return mustpanic;
862}
863
864
865/*
866 * 2nd pass on motherboard's Intel MP specification table.
867 *
868 * sets:
869 *	boot_cpu_id
870 *	ID_TO_IO(N), phy APIC ID to log CPU/IO table
871 *	CPU_TO_ID(N), logical CPU to APIC ID table
872 *	IO_TO_ID(N), logical IO to APIC ID table
873 *	bus_data[N]
874 *	io_apic_ints[N]
875 */
876static int
877mptable_pass2(void)
878{
879	int     x;
880	mpcth_t cth;
881	int     totalSize;
882	void*   position;
883	int     count;
884	int     type;
885	int     apic, bus, cpu, intr;
886
887	POSTCODE(MPTABLE_PASS2_POST);
888
889	/* clear various tables */
890	for (x = 0; x < NAPICID; ++x) {
891		ID_TO_IO(x) = -1;	/* phy APIC ID to log CPU/IO table */
892		CPU_TO_ID(x) = -1;	/* logical CPU to APIC ID table */
893		IO_TO_ID(x) = -1;	/* logical IO to APIC ID table */
894	}
895
896	/* clear bus data table */
897	for (x = 0; x < NBUS; ++x)
898		bus_data[x].bus_id = 0xff;
899
900	/* clear IO APIC INT table */
901	for (x = 0; x < NINTR; ++x) {
902		io_apic_ints[x].int_type = 0xff;
903		io_apic_ints[x].int_vector = 0xff;
904	}
905
906	/* setup the cpu/apic mapping arrays */
907	boot_cpu_id = -1;
908
909	/* record whether PIC or virtual-wire mode */
910	picmode = (mpfps->mpfb2 & 0x80) ? 1 : 0;
911
912	/* check for use of 'default' configuration */
913	if (MPFPS_MPFB1 != 0)
914		return MPFPS_MPFB1;	/* return default configuration type */
915
916	if ((cth = mpfps->pap) == 0)
917		panic("MP Configuration Table Header MISSING!");
918
919	/* walk the table, recording info of interest */
920	totalSize = cth->base_table_length - sizeof(struct MPCTH);
921	position = (u_char *) cth + sizeof(struct MPCTH);
922	count = cth->entry_count;
923	apic = bus = intr = 0;
924	cpu = 1;				/* pre-count the BSP */
925
926	while (count--) {
927		switch (type = *(u_char *) position) {
928		case 0:
929			if (processor_entry(position, cpu))
930				++cpu;
931			break;
932		case 1:
933			if (bus_entry(position, bus))
934				++bus;
935			break;
936		case 2:
937			if (io_apic_entry(position, apic))
938				++apic;
939			break;
940		case 3:
941			if (int_entry(position, intr))
942				++intr;
943			break;
944		case 4:
945			/* int_entry(position); */
946			break;
947		default:
948			panic("mpfps Base Table HOSED!");
949			/* NOTREACHED */
950		}
951
952		totalSize -= basetable_entry_types[type].length;
953		position = (u_char *)position + basetable_entry_types[type].length;
954	}
955
956	if (boot_cpu_id == -1)
957		panic("NO BSP found!");
958
959	/* report the fact that it's NOT a default configuration */
960	return 0;
961}
962
963
964void
965assign_apic_irq(int apic, int intpin, int irq)
966{
967	int x;
968
969	if (int_to_apicintpin[irq].ioapic != -1)
970		panic("assign_apic_irq: inconsistent table");
971
972	int_to_apicintpin[irq].ioapic = apic;
973	int_to_apicintpin[irq].int_pin = intpin;
974	int_to_apicintpin[irq].apic_address = ioapic[apic];
975	int_to_apicintpin[irq].redirindex = IOAPIC_REDTBL + 2 * intpin;
976
977	for (x = 0; x < nintrs; x++) {
978		if ((io_apic_ints[x].int_type == 0 ||
979		     io_apic_ints[x].int_type == 3) &&
980		    io_apic_ints[x].int_vector == 0xff &&
981		    io_apic_ints[x].dst_apic_id == IO_TO_ID(apic) &&
982		    io_apic_ints[x].dst_apic_int == intpin)
983			io_apic_ints[x].int_vector = irq;
984	}
985}
986
987void
988revoke_apic_irq(int irq)
989{
990	int x;
991	int oldapic;
992	int oldintpin;
993
994	if (int_to_apicintpin[irq].ioapic == -1)
995		panic("revoke_apic_irq: inconsistent table");
996
997	oldapic = int_to_apicintpin[irq].ioapic;
998	oldintpin = int_to_apicintpin[irq].int_pin;
999
1000	int_to_apicintpin[irq].ioapic = -1;
1001	int_to_apicintpin[irq].int_pin = 0;
1002	int_to_apicintpin[irq].apic_address = NULL;
1003	int_to_apicintpin[irq].redirindex = 0;
1004
1005	for (x = 0; x < nintrs; x++) {
1006		if ((io_apic_ints[x].int_type == 0 ||
1007		     io_apic_ints[x].int_type == 3) &&
1008		    io_apic_ints[x].int_vector == 0xff &&
1009		    io_apic_ints[x].dst_apic_id == IO_TO_ID(oldapic) &&
1010		    io_apic_ints[x].dst_apic_int == oldintpin)
1011			io_apic_ints[x].int_vector = 0xff;
1012	}
1013}
1014
1015/*
1016 * parse an Intel MP specification table
1017 */
1018static void
1019fix_mp_table(void)
1020{
1021	int	x;
1022	int	id;
1023	int	bus_0 = 0;	/* Stop GCC warning */
1024	int	bus_pci = 0;	/* Stop GCC warning */
1025	int	num_pci_bus;
1026
1027	/*
1028	 * Fix mis-numbering of the PCI bus and its INT entries if the BIOS
1029	 * did it wrong.  The MP spec says that when more than 1 PCI bus
1030	 * exists the BIOS must begin with bus entries for the PCI bus and use
1031	 * actual PCI bus numbering.  This implies that when only 1 PCI bus
1032	 * exists the BIOS can choose to ignore this ordering, and indeed many
1033	 * MP motherboards do ignore it.  This causes a problem when the PCI
1034	 * sub-system makes requests of the MP sub-system based on PCI bus
1035	 * numbers.	So here we look for the situation and renumber the
1036	 * busses and associated INTs in an effort to "make it right".
1037	 */
1038
1039	/* find bus 0, PCI bus, count the number of PCI busses */
1040	for (num_pci_bus = 0, x = 0; x < mp_nbusses; ++x) {
1041		if (bus_data[x].bus_id == 0) {
1042			bus_0 = x;
1043		}
1044		if (bus_data[x].bus_type == PCI) {
1045			++num_pci_bus;
1046			bus_pci = x;
1047		}
1048	}
1049	/*
1050	 * bus_0 == slot of bus with ID of 0
1051	 * bus_pci == slot of last PCI bus encountered
1052	 */
1053
1054	/* check the 1 PCI bus case for sanity */
1055	if (num_pci_bus == 1) {
1056
1057		/* if it is number 0 all is well */
1058		if (bus_data[bus_pci].bus_id == 0)
1059			return;
1060
1061		/* mis-numbered, swap with whichever bus uses slot 0 */
1062
1063		/* swap the bus entry types */
1064		bus_data[bus_pci].bus_type = bus_data[bus_0].bus_type;
1065		bus_data[bus_0].bus_type = PCI;
1066
1067		/* swap each relevant INTerrupt entry */
1068		id = bus_data[bus_pci].bus_id;
1069		for (x = 0; x < nintrs; ++x) {
1070			if (io_apic_ints[x].src_bus_id == id) {
1071				io_apic_ints[x].src_bus_id = 0;
1072			}
1073			else if (io_apic_ints[x].src_bus_id == 0) {
1074				io_apic_ints[x].src_bus_id = id;
1075			}
1076		}
1077	}
1078}
1079
1080
1081/* Assign low level interrupt handlers */
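/*
 * The mapping below is built in three passes: ISA/EISA INTs first keep
 * their ISA IRQ number, then the remaining pins of IO APIC #0 take an IRQ
 * equal to their pin number where that slot is still free, and finally
 * any leftover INT/ExtInt entries are packed into the lowest free IRQ
 * slots (with the IO APIC #0 ExtInt exception described further down).
 */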
1082static void
1083setup_apic_irq_mapping(void)
1084{
1085	int	x;
1086	int	int_vector;
1087
1088	/* Clear array */
1089	for (x = 0; x < APIC_INTMAPSIZE; x++) {
1090		int_to_apicintpin[x].ioapic = -1;
1091		int_to_apicintpin[x].int_pin = 0;
1092		int_to_apicintpin[x].apic_address = NULL;
1093		int_to_apicintpin[x].redirindex = 0;
1094	}
1095
1096	/* First assign ISA/EISA interrupts */
1097	for (x = 0; x < nintrs; x++) {
1098		int_vector = io_apic_ints[x].src_bus_irq;
1099		if (int_vector < APIC_INTMAPSIZE &&
1100		    io_apic_ints[x].int_vector == 0xff &&
1101		    int_to_apicintpin[int_vector].ioapic == -1 &&
1102		    (apic_int_is_bus_type(x, ISA) ||
1103		     apic_int_is_bus_type(x, EISA)) &&
1104		    io_apic_ints[x].int_type == 0) {
1105			assign_apic_irq(ID_TO_IO(io_apic_ints[x].dst_apic_id),
1106					io_apic_ints[x].dst_apic_int,
1107					int_vector);
1108		}
1109	}
1110
1111	/* Assign interrupts on first 24 intpins on IOAPIC #0 */
1112	for (x = 0; x < nintrs; x++) {
1113		int_vector = io_apic_ints[x].dst_apic_int;
1114		if (int_vector < APIC_INTMAPSIZE &&
1115		    io_apic_ints[x].dst_apic_id == IO_TO_ID(0) &&
1116		    io_apic_ints[x].int_vector == 0xff &&
1117		    int_to_apicintpin[int_vector].ioapic == -1 &&
1118		    (io_apic_ints[x].int_type == 0 ||
1119		     io_apic_ints[x].int_type == 3)) {
1120			assign_apic_irq(0,
1121					io_apic_ints[x].dst_apic_int,
1122					int_vector);
1123		}
1124	}
1125	/*
1126	 * Assign interrupts for remaining intpins.
1127	 * Skip IOAPIC #0 intpin 0 if the type is ExtInt, since this indicates
1128	 * that an entry for ISA/EISA irq 0 exists, and a fallback to mixed mode
1129	 * due to 8254 interrupts not being delivered can reuse that low-level
1130	 * interrupt handler.
1131	 */
1132	int_vector = 0;
1133	while (int_vector < APIC_INTMAPSIZE &&
1134	       int_to_apicintpin[int_vector].ioapic != -1)
1135		int_vector++;
1136	for (x = 0; x < nintrs && int_vector < APIC_INTMAPSIZE; x++) {
1137		if ((io_apic_ints[x].int_type == 0 ||
1138		     (io_apic_ints[x].int_type == 3 &&
1139		      (io_apic_ints[x].dst_apic_id != IO_TO_ID(0) ||
1140		       io_apic_ints[x].dst_apic_int != 0))) &&
1141		    io_apic_ints[x].int_vector == 0xff) {
1142			assign_apic_irq(ID_TO_IO(io_apic_ints[x].dst_apic_id),
1143					io_apic_ints[x].dst_apic_int,
1144					int_vector);
1145			int_vector++;
1146			while (int_vector < APIC_INTMAPSIZE &&
1147			       int_to_apicintpin[int_vector].ioapic != -1)
1148				int_vector++;
1149		}
1150	}
1151}
1152
1153
1154static int
1155processor_entry(proc_entry_ptr entry, int cpu)
1156{
1157	/* check for usability */
1158	if (!(entry->cpu_flags & PROCENTRY_FLAG_EN))
1159		return 0;
1160
1161	/* check for BSP flag */
1162	if (entry->cpu_flags & PROCENTRY_FLAG_BP) {
1163		boot_cpu_id = entry->apic_id;
1164		CPU_TO_ID(0) = entry->apic_id;
1165		ID_TO_CPU(entry->apic_id) = 0;
1166		return 0;	/* it's already been counted */
1167	}
1168
1169	/* add another AP to list, if less than max number of CPUs */
1170	else if (cpu < NCPU) {
1171		CPU_TO_ID(cpu) = entry->apic_id;
1172		ID_TO_CPU(entry->apic_id) = cpu;
1173		return 1;
1174	}
1175
1176	return 0;
1177}
1178
1179
1180static int
1181bus_entry(bus_entry_ptr entry, int bus)
1182{
1183	int     x;
1184	char    c, name[8];
1185
1186	/* encode the name into an index */
1187	for (x = 0; x < 6; ++x) {
1188		if ((c = entry->bus_type[x]) == ' ')
1189			break;
1190		name[x] = c;
1191	}
1192	name[x] = '\0';
1193
1194	if ((x = lookup_bus_type(name)) == UNKNOWN_BUSTYPE)
1195		panic("unknown bus type: '%s'", name);
1196
1197	bus_data[bus].bus_id = entry->bus_id;
1198	bus_data[bus].bus_type = x;
1199
1200	return 1;
1201}
1202
1203
1204static int
1205io_apic_entry(io_apic_entry_ptr entry, int apic)
1206{
1207	if (!(entry->apic_flags & IOAPICENTRY_FLAG_EN))
1208		return 0;
1209
1210	IO_TO_ID(apic) = entry->apic_id;
1211	ID_TO_IO(entry->apic_id) = apic;
1212
1213	return 1;
1214}
1215
1216
1217static int
1218lookup_bus_type(char *name)
1219{
1220	int     x;
1221
1222	for (x = 0; x < MAX_BUSTYPE; ++x)
1223		if (strcmp(bus_type_table[x].name, name) == 0)
1224			return bus_type_table[x].type;
1225
1226	return UNKNOWN_BUSTYPE;
1227}
1228
1229
1230static int
1231int_entry(int_entry_ptr entry, int intr)
1232{
1233	int apic;
1234
1235	io_apic_ints[intr].int_type = entry->int_type;
1236	io_apic_ints[intr].int_flags = entry->int_flags;
1237	io_apic_ints[intr].src_bus_id = entry->src_bus_id;
1238	io_apic_ints[intr].src_bus_irq = entry->src_bus_irq;
1239	if (entry->dst_apic_id == 255) {
1240		/* This signal goes to all IO APICs.  Select an IO APIC
1241		   with a sufficient number of interrupt pins */
1242		for (apic = 0; apic < mp_napics; apic++)
1243			if (((io_apic_read(apic, IOAPIC_VER) &
1244			      IOART_VER_MAXREDIR) >> MAXREDIRSHIFT) >=
1245			    entry->dst_apic_int)
1246				break;
1247		if (apic < mp_napics)
1248			io_apic_ints[intr].dst_apic_id = IO_TO_ID(apic);
1249		else
1250			io_apic_ints[intr].dst_apic_id = entry->dst_apic_id;
1251	} else
1252		io_apic_ints[intr].dst_apic_id = entry->dst_apic_id;
1253	io_apic_ints[intr].dst_apic_int = entry->dst_apic_int;
1254
1255	return 1;
1256}
1257
1258
1259static int
1260apic_int_is_bus_type(int intr, int bus_type)
1261{
1262	int     bus;
1263
1264	for (bus = 0; bus < mp_nbusses; ++bus)
1265		if ((bus_data[bus].bus_id == io_apic_ints[intr].src_bus_id)
1266		    && ((int) bus_data[bus].bus_type == bus_type))
1267			return 1;
1268
1269	return 0;
1270}
1271
1272
1273/*
1274 * Given a traditional ISA INT mask, return an APIC mask.
1275 */
1276u_int
1277isa_apic_mask(u_int isa_mask)
1278{
1279	int isa_irq;
1280	int apic_pin;
1281
1282#if defined(SKIP_IRQ15_REDIRECT)
1283	if (isa_mask == (1 << 15)) {
1284		printf("skipping ISA IRQ15 redirect\n");
1285		return isa_mask;
1286	}
1287#endif  /* SKIP_IRQ15_REDIRECT */
1288
1289	isa_irq = ffs(isa_mask);		/* find its bit position */
1290	if (isa_irq == 0)			/* doesn't exist */
1291		return 0;
1292	--isa_irq;				/* make it zero based */
1293
1294	apic_pin = isa_apic_irq(isa_irq);	/* look for APIC connection */
1295	if (apic_pin == -1)
1296		return 0;
1297
1298	return (1 << apic_pin);			/* convert pin# to a mask */
1299}
1300
1301
1302/*
1303 * Determine which APIC pin an ISA/EISA INT is attached to.
1304 */
1305#define INTTYPE(I)	(io_apic_ints[(I)].int_type)
1306#define INTPIN(I)	(io_apic_ints[(I)].dst_apic_int)
1307#define INTIRQ(I)	(io_apic_ints[(I)].int_vector)
1308#define INTAPIC(I)	(ID_TO_IO(io_apic_ints[(I)].dst_apic_id))
1309
1310#define SRCBUSIRQ(I)	(io_apic_ints[(I)].src_bus_irq)
1311int
1312isa_apic_irq(int isa_irq)
1313{
1314	int     intr;
1315
1316	for (intr = 0; intr < nintrs; ++intr) {		/* check each record */
1317		if (INTTYPE(intr) == 0) {		/* standard INT */
1318			if (SRCBUSIRQ(intr) == isa_irq) {
1319				if (apic_int_is_bus_type(intr, ISA) ||
1320			            apic_int_is_bus_type(intr, EISA))
1321					return INTIRQ(intr);	/* found */
1322			}
1323		}
1324	}
1325	return -1;					/* NOT found */
1326}
1327
1328
1329/*
1330 * Determine which APIC pin a PCI INT is attached to.
1331 */
1332#define SRCBUSID(I)	(io_apic_ints[(I)].src_bus_id)
1333#define SRCBUSDEVICE(I)	((io_apic_ints[(I)].src_bus_irq >> 2) & 0x1f)
1334#define SRCBUSLINE(I)	(io_apic_ints[(I)].src_bus_irq & 0x03)
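/*
 * For PCI sources the MP table packs the device number into bits 6:2 of
 * src_bus_irq and the INT line (0 = INTA# .. 3 = INTD#) into bits 1:0,
 * which is what SRCBUSDEVICE() and SRCBUSLINE() unpack.  For example,
 * device 9 routed via INTB# appears as src_bus_irq = (9 << 2) | 1 = 0x25.
 */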
1335int
1336pci_apic_irq(int pciBus, int pciDevice, int pciInt)
1337{
1338	int     intr;
1339
1340	--pciInt;					/* zero based */
1341
1342	for (intr = 0; intr < nintrs; ++intr)		/* check each record */
1343		if ((INTTYPE(intr) == 0)		/* standard INT */
1344		    && (SRCBUSID(intr) == pciBus)
1345		    && (SRCBUSDEVICE(intr) == pciDevice)
1346		    && (SRCBUSLINE(intr) == pciInt))	/* a candidate IRQ */
1347			if (apic_int_is_bus_type(intr, PCI))
1348				return INTIRQ(intr);	/* exact match */
1349
1350	return -1;					/* NOT found */
1351}
1352
1353int
1354next_apic_irq(int irq)
1355{
1356	int intr, ointr;
1357	int bus, bustype;
1358
1359	bus = 0;
1360	bustype = 0;
1361	for (intr = 0; intr < nintrs; intr++) {
1362		if (INTIRQ(intr) != irq || INTTYPE(intr) != 0)
1363			continue;
1364		bus = SRCBUSID(intr);
1365		bustype = apic_bus_type(bus);
1366		if (bustype != ISA &&
1367		    bustype != EISA &&
1368		    bustype != PCI)
1369			continue;
1370		break;
1371	}
1372	if (intr >= nintrs) {
1373		return -1;
1374	}
1375	for (ointr = intr + 1; ointr < nintrs; ointr++) {
1376		if (INTTYPE(ointr) != 0)
1377			continue;
1378		if (bus != SRCBUSID(ointr))
1379			continue;
1380		if (bustype == PCI) {
1381			if (SRCBUSDEVICE(intr) != SRCBUSDEVICE(ointr))
1382				continue;
1383			if (SRCBUSLINE(intr) != SRCBUSLINE(ointr))
1384				continue;
1385		}
1386		if (bustype == ISA || bustype == EISA) {
1387			if (SRCBUSIRQ(intr) != SRCBUSIRQ(ointr))
1388				continue;
1389		}
1390		if (INTPIN(intr) == INTPIN(ointr))
1391			continue;
1392		break;
1393	}
1394	if (ointr >= nintrs) {
1395		return -1;
1396	}
1397	return INTIRQ(ointr);
1398}
1399#undef SRCBUSLINE
1400#undef SRCBUSDEVICE
1401#undef SRCBUSID
1402#undef SRCBUSIRQ
1403
1404#undef INTPIN
1405#undef INTIRQ
1406#undef INTAPIC
1407#undef INTTYPE
1408
1409
1410/*
1411 * Reprogram the MB chipset to NOT redirect an ISA INTerrupt.
1412 *
1413 * XXX FIXME:
1414 *  Exactly what this means is unclear at this point.  It is a solution
1415 *  for motherboards that redirect the MBIRQ0 pin.  Generically a motherboard
1416 *  could route any of the ISA INTs to upper (>15) IRQ values.  But most would
1417 *  NOT be redirected via MBIRQ0, thus "undirect()ing" them would NOT be an
1418 *  option.
1419 */
1420int
1421undirect_isa_irq(int rirq)
1422{
1423#if defined(READY)
1424	if (bootverbose)
1425	    printf("Freeing redirected ISA irq %d.\n", rirq);
1426	/** FIXME: tickle the MB redirector chip */
1427	return ???;
1428#else
1429	if (bootverbose)
1430	    printf("Freeing (NOT implemented) redirected ISA irq %d.\n", rirq);
1431	return 0;
1432#endif  /* READY */
1433}
1434
1435
1436/*
1437 * Reprogram the MB chipset to NOT redirect a PCI INTerrupt
1438 */
1439int
1440undirect_pci_irq(int rirq)
1441{
1442#if defined(READY)
1443	if (bootverbose)
1444		printf("Freeing redirected PCI irq %d.\n", rirq);
1445
1446	/** FIXME: tickle the MB redirector chip */
1447	return ???;
1448#else
1449	if (bootverbose)
1450		printf("Freeing (NOT implemented) redirected PCI irq %d.\n",
1451		       rirq);
1452	return 0;
1453#endif  /* READY */
1454}
1455
1456
1457/*
1458 * given a bus ID, return:
1459 *  the bus type if found
1460 *  -1 if NOT found
1461 */
1462int
1463apic_bus_type(int id)
1464{
1465	int     x;
1466
1467	for (x = 0; x < mp_nbusses; ++x)
1468		if (bus_data[x].bus_id == id)
1469			return bus_data[x].bus_type;
1470
1471	return -1;
1472}
1473
1474
1475/*
1476 * given a LOGICAL APIC# and pin#, return:
1477 *  the associated src bus ID if found
1478 *  -1 if NOT found
1479 */
1480int
1481apic_src_bus_id(int apic, int pin)
1482{
1483	int     x;
1484
1485	/* search each of the possible INTerrupt sources */
1486	for (x = 0; x < nintrs; ++x)
1487		if ((apic == ID_TO_IO(io_apic_ints[x].dst_apic_id)) &&
1488		    (pin == io_apic_ints[x].dst_apic_int))
1489			return (io_apic_ints[x].src_bus_id);
1490
1491	return -1;		/* NOT found */
1492}
1493
1494
1495/*
1496 * given a LOGICAL APIC# and pin#, return:
1497 *  the associated src bus IRQ if found
1498 *  -1 if NOT found
1499 */
1500int
1501apic_src_bus_irq(int apic, int pin)
1502{
1503	int     x;
1504
1505	for (x = 0; x < nintrs; x++)
1506		if ((apic == ID_TO_IO(io_apic_ints[x].dst_apic_id)) &&
1507		    (pin == io_apic_ints[x].dst_apic_int))
1508			return (io_apic_ints[x].src_bus_irq);
1509
1510	return -1;		/* NOT found */
1511}
1512
1513
1514/*
1515 * given a LOGICAL APIC# and pin#, return:
1516 *  the associated INTerrupt type if found
1517 *  -1 if NOT found
1518 */
1519int
1520apic_int_type(int apic, int pin)
1521{
1522	int     x;
1523
1524	/* search each of the possible INTerrupt sources */
1525	for (x = 0; x < nintrs; ++x)
1526		if ((apic == ID_TO_IO(io_apic_ints[x].dst_apic_id)) &&
1527		    (pin == io_apic_ints[x].dst_apic_int))
1528			return (io_apic_ints[x].int_type);
1529
1530	return -1;		/* NOT found */
1531}
1532
1533int
1534apic_irq(int apic, int pin)
1535{
1536	int x;
1537	int res;
1538
1539	for (x = 0; x < nintrs; ++x)
1540		if ((apic == ID_TO_IO(io_apic_ints[x].dst_apic_id)) &&
1541		    (pin == io_apic_ints[x].dst_apic_int)) {
1542			res = io_apic_ints[x].int_vector;
1543			if (res == 0xff)
1544				return -1;
1545			if (apic != int_to_apicintpin[res].ioapic)
1546				panic("apic_irq: inconsistent table");
1547			if (pin != int_to_apicintpin[res].int_pin)
1548				panic("apic_irq inconsistent table (2)");
1549			return res;
1550		}
1551	return -1;
1552}
1553
1554
1555/*
1556 * given a LOGICAL APIC# and pin#, return:
1557 *  the associated trigger mode if found
1558 *  -1 if NOT found
1559 */
1560int
1561apic_trigger(int apic, int pin)
1562{
1563	int     x;
1564
1565	/* search each of the possible INTerrupt sources */
1566	for (x = 0; x < nintrs; ++x)
1567		if ((apic == ID_TO_IO(io_apic_ints[x].dst_apic_id)) &&
1568		    (pin == io_apic_ints[x].dst_apic_int))
1569			return ((io_apic_ints[x].int_flags >> 2) & 0x03);
1570
1571	return -1;		/* NOT found */
1572}
1573
1574
1575/*
1576 * given a LOGICAL APIC# and pin#, return:
1577 *  the associated 'active' level if found
1578 *  -1 if NOT found
1579 */
1580int
1581apic_polarity(int apic, int pin)
1582{
1583	int     x;
1584
1585	/* search each of the possible INTerrupt sources */
1586	for (x = 0; x < nintrs; ++x)
1587		if ((apic == ID_TO_IO(io_apic_ints[x].dst_apic_id)) &&
1588		    (pin == io_apic_ints[x].dst_apic_int))
1589			return (io_apic_ints[x].int_flags & 0x03);
1590
1591	return -1;		/* NOT found */
1592}
1593
1594
1595/*
1596 * set data according to MP defaults
1597 * FIXME: probably not complete yet...
1598 */
1599static void
1600default_mp_table(int type)
1601{
1602	int     ap_cpu_id;
1603#if defined(APIC_IO)
1604	u_int32_t ux;
1605	int     io_apic_id;
1606	int     pin;
1607#endif	/* APIC_IO */
1608
1609#if 0
1610	printf("  MP default config type: %d\n", type);
1611	switch (type) {
1612	case 1:
1613		printf("   bus: ISA, APIC: 82489DX\n");
1614		break;
1615	case 2:
1616		printf("   bus: EISA, APIC: 82489DX\n");
1617		break;
1618	case 3:
1619		printf("   bus: EISA, APIC: 82489DX\n");
1620		break;
1621	case 4:
1622		printf("   bus: MCA, APIC: 82489DX\n");
1623		break;
1624	case 5:
1625		printf("   bus: ISA+PCI, APIC: Integrated\n");
1626		break;
1627	case 6:
1628		printf("   bus: EISA+PCI, APIC: Integrated\n");
1629		break;
1630	case 7:
1631		printf("   bus: MCA+PCI, APIC: Integrated\n");
1632		break;
1633	default:
1634		printf("   future type\n");
1635		break;
1636		/* NOTREACHED */
1637	}
1638#endif	/* 0 */
1639
1640	boot_cpu_id = (lapic.id & APIC_ID_MASK) >> 24;
1641	ap_cpu_id = (boot_cpu_id == 0) ? 1 : 0;
1642
1643	/* BSP */
1644	CPU_TO_ID(0) = boot_cpu_id;
1645	ID_TO_CPU(boot_cpu_id) = 0;
1646
1647	/* one and only AP */
1648	CPU_TO_ID(1) = ap_cpu_id;
1649	ID_TO_CPU(ap_cpu_id) = 1;
1650
1651#if defined(APIC_IO)
1652	/* one and only IO APIC */
1653	io_apic_id = (io_apic_read(0, IOAPIC_ID) & APIC_ID_MASK) >> 24;
1654
1655	/*
1656	 * sanity check, refer to MP spec section 3.6.6, last paragraph
1657	 * necessary as some hardware isn't properly setting up the IO APIC
1658	 */
1659#if defined(REALLY_ANAL_IOAPICID_VALUE)
1660	if (io_apic_id != 2) {
1661#else
1662	if ((io_apic_id == 0) || (io_apic_id == 1) || (io_apic_id == 15)) {
1663#endif	/* REALLY_ANAL_IOAPICID_VALUE */
1664		ux = io_apic_read(0, IOAPIC_ID);	/* get current contents */
1665		ux &= ~APIC_ID_MASK;	/* clear the ID field */
1666		ux |= 0x02000000;	/* set it to '2' */
1667		io_apic_write(0, IOAPIC_ID, ux);	/* write new value */
1668		ux = io_apic_read(0, IOAPIC_ID);	/* re-read && test */
1669		if ((ux & APIC_ID_MASK) != 0x02000000)
1670			panic("can't control IO APIC ID, reg: 0x%08x", ux);
1671		io_apic_id = 2;
1672	}
1673	IO_TO_ID(0) = io_apic_id;
1674	ID_TO_IO(io_apic_id) = 0;
1675#endif	/* APIC_IO */
1676
1677	/* fill out bus entries */
1678	switch (type) {
1679	case 1:
1680	case 2:
1681	case 3:
1682	case 4:
1683	case 5:
1684	case 6:
1685	case 7:
1686		bus_data[0].bus_id = default_data[type - 1][1];
1687		bus_data[0].bus_type = default_data[type - 1][2];
1688		bus_data[1].bus_id = default_data[type - 1][3];
1689		bus_data[1].bus_type = default_data[type - 1][4];
1690		break;
1691
1692	/* case 4: case 7:		   MCA NOT supported */
1693	default:		/* illegal/reserved */
1694		panic("BAD default MP config: %d", type);
1695		/* NOTREACHED */
1696	}
1697
1698#if defined(APIC_IO)
1699	/* general cases from MP v1.4, table 5-2 */
1700	for (pin = 0; pin < 16; ++pin) {
1701		io_apic_ints[pin].int_type = 0;
1702		io_apic_ints[pin].int_flags = 0x05;	/* edge/active-hi */
1703		io_apic_ints[pin].src_bus_id = 0;
1704		io_apic_ints[pin].src_bus_irq = pin;	/* IRQ2 caught below */
1705		io_apic_ints[pin].dst_apic_id = io_apic_id;
1706		io_apic_ints[pin].dst_apic_int = pin;	/* 1-to-1 */
1707	}
1708
1709	/* special cases from MP v1.4, table 5-2 */
1710	if (type == 2) {
1711		io_apic_ints[2].int_type = 0xff;	/* N/C */
1712		io_apic_ints[13].int_type = 0xff;	/* N/C */
1713#if !defined(APIC_MIXED_MODE)
1714		/** FIXME: ??? */
1715		panic("sorry, can't support type 2 default yet");
1716#endif	/* APIC_MIXED_MODE */
1717	}
1718	else
1719		io_apic_ints[2].src_bus_irq = 0;	/* ISA IRQ0 is on APIC INT 2 */
1720
1721	if (type == 7)
1722		io_apic_ints[0].int_type = 0xff;	/* N/C */
1723	else
1724		io_apic_ints[0].int_type = 3;	/* vectored 8259 */
1725#endif	/* APIC_IO */
1726}
1727
1728
1729/*
1730 * initialize all the SMP locks
1731 */
1732
1733/* critical region around IO APIC, apic_imen */
1734struct simplelock	imen_lock;
1735
1736/* critical region around splxx(), cpl, cml, cil, ipending */
1737struct simplelock	cpl_lock;
1738
1739/* Make FAST_INTR() routines sequential */
1740struct simplelock	fast_intr_lock;
1741
1742/* critical region around INTR() routines */
1743struct simplelock	intr_lock;
1744
1745/* lock regions protected in UP kernel via cli/sti */
1746struct simplelock	mpintr_lock;
1747
1748/* lock region used by kernel profiling */
1749struct simplelock	mcount_lock;
1750
1751#ifdef USE_COMLOCK
1752/* locks com (tty) data/hardware accesses: a FASTINTR() */
1753struct simplelock	com_lock;
1754#endif /* USE_COMLOCK */
1755
1756#ifdef USE_CLOCKLOCK
1757/* lock regions around the clock hardware */
1758struct simplelock	clock_lock;
1759#endif /* USE_CLOCKLOCK */
1760
1761/* lock around the MP rendezvous */
1762static struct simplelock smp_rv_lock;
1763
1764static void
1765init_locks(void)
1766{
1767	/*
1768	 * Get the initial mp_lock with a count of 1 for the BSP.
1769	 * This uses a LOGICAL cpu ID, ie BSP == 0.
1770	 */
1771	mp_lock = 0x00000001;
1772
1773#if 0
1774	/* ISR uses its own "giant lock" */
1775	isr_lock = FREE_LOCK;
1776#endif
1777
1778#if defined(APIC_INTR_DIAGNOSTIC) && defined(APIC_INTR_DIAGNOSTIC_IRQ)
1779	s_lock_init((struct simplelock*)&apic_itrace_debuglock);
1780#endif
1781
1782	s_lock_init((struct simplelock*)&mpintr_lock);
1783
1784	s_lock_init((struct simplelock*)&mcount_lock);
1785
1786	s_lock_init((struct simplelock*)&fast_intr_lock);
1787	s_lock_init((struct simplelock*)&intr_lock);
1788	s_lock_init((struct simplelock*)&imen_lock);
1789	s_lock_init((struct simplelock*)&cpl_lock);
1790	s_lock_init(&smp_rv_lock);
1791
1792#ifdef USE_COMLOCK
1793	s_lock_init((struct simplelock*)&com_lock);
1794#endif /* USE_COMLOCK */
1795#ifdef USE_CLOCKLOCK
1796	s_lock_init((struct simplelock*)&clock_lock);
1797#endif /* USE_CLOCKLOCK */
1798}
1799
1800
1801/* Wait for all APs to be fully initialized */
1802extern int wait_ap(unsigned int);
1803
1804/*
1805 * start each AP in our list
1806 */
1807static int
1808start_all_aps(u_int boot_addr)
1809{
1810	int     x, i, pg;
1811	u_char  mpbiosreason;
1812	u_long  mpbioswarmvec;
1813	struct globaldata *gd;
1814	char *stack;
1815
1816	POSTCODE(START_ALL_APS_POST);
1817
1818	/* initialize BSP's local APIC */
1819	apic_initialize();
1820	bsp_apic_ready = 1;
1821
1822	/* install the AP 1st level boot code */
1823	install_ap_tramp(boot_addr);
1824
1825
1826	/* save the current value of the warm-start vector */
1827	mpbioswarmvec = *((u_long *) WARMBOOT_OFF);
1828#ifndef PC98
1829	outb(CMOS_REG, BIOS_RESET);
1830	mpbiosreason = inb(CMOS_DATA);
1831#endif
1832
1833	/* record BSP in CPU map */
1834	all_cpus = 1;
1835
1836	/* set up 0 -> 4MB P==V mapping for AP boot */
1837	*(int *)PTD = PG_V | PG_RW | ((uintptr_t)(void *)KPTphys & PG_FRAME);
1838	invltlb();
1839
1840	/* start each AP */
1841	for (x = 1; x <= mp_naps; ++x) {
1842
1843		/* This is a bit verbose; it will go away soon.  */
1844
1845		/* first page of AP's private space */
1846		pg = x * i386_btop(sizeof(struct privatespace));
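		/*
		 * SMPpt slots filled in below for this AP: pg + 0 is the
		 * globaldata page, pg + 1..3 the private CMAP1-3 ptes,
		 * pg + 4 the private PMAP1 pte, and pg + 5 .. pg + 5 +
		 * UPAGES - 1 the idle stack pages.
		 */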
1847
1848		/* allocate a new private data page */
1849		gd = (struct globaldata *)kmem_alloc(kernel_map, PAGE_SIZE);
1850
1851		/* wire it into the private page table page */
1852		SMPpt[pg] = (pt_entry_t)(PG_V | PG_RW | vtophys(gd));
1853
1854		/* allocate and set up an idle stack data page */
1855		stack = (char *)kmem_alloc(kernel_map, UPAGES*PAGE_SIZE);
1856		for (i = 0; i < UPAGES; i++)
1857			SMPpt[pg + 5 + i] = (pt_entry_t)
1858			    (PG_V | PG_RW | vtophys(PAGE_SIZE * i + stack));
1859
1860		SMPpt[pg + 1] = 0;		/* *prv_CMAP1 */
1861		SMPpt[pg + 2] = 0;		/* *prv_CMAP2 */
1862		SMPpt[pg + 3] = 0;		/* *prv_CMAP3 */
1863		SMPpt[pg + 4] = 0;		/* *prv_PMAP1 */
1864
1865		/* prime data page for it to use */
1866		gd->gd_cpuid = x;
1867		gd->gd_cpu_lockid = x << 24;
1868		gd->gd_prv_CMAP1 = &SMPpt[pg + 1];
1869		gd->gd_prv_CMAP2 = &SMPpt[pg + 2];
1870		gd->gd_prv_CMAP3 = &SMPpt[pg + 3];
1871		gd->gd_prv_PMAP1 = &SMPpt[pg + 4];
1872		gd->gd_prv_CADDR1 = SMP_prvspace[x].CPAGE1;
1873		gd->gd_prv_CADDR2 = SMP_prvspace[x].CPAGE2;
1874		gd->gd_prv_CADDR3 = SMP_prvspace[x].CPAGE3;
1875		gd->gd_prv_PADDR1 = (unsigned *)SMP_prvspace[x].PPAGE1;
1876
1877		/* setup a vector to our boot code */
1878		*((volatile u_short *) WARMBOOT_OFF) = WARMBOOT_TARGET;
1879		*((volatile u_short *) WARMBOOT_SEG) = (boot_addr >> 4);
1880#ifndef PC98
1881		outb(CMOS_REG, BIOS_RESET);
1882		outb(CMOS_DATA, BIOS_WARM);	/* 'warm-start' */
1883#endif
1884
1885		bootSTK = &SMP_prvspace[x].idlestack[UPAGES*PAGE_SIZE];
1886		bootAP = x;
1887
1888		/* attempt to start the Application Processor */
1889		CHECK_INIT(99);	/* setup checkpoints */
1890		if (!start_ap(x, boot_addr)) {
1891			printf("AP #%d (PHY# %d) failed!\n", x, CPU_TO_ID(x));
1892			CHECK_PRINT("trace");	/* show checkpoints */
1893			/* better panic as the AP may be running loose */
1894			printf("panic y/n? [y] ");
1895			if (cngetc() != 'n')
1896				panic("bye-bye");
1897		}
1898		CHECK_PRINT("trace");		/* show checkpoints */
1899
1900		/* record its version info */
1901		cpu_apic_versions[x] = cpu_apic_versions[0];
1902
1903		all_cpus |= (1 << x);		/* record AP in CPU map */
1904	}
1905
1906	/* build our map of 'other' CPUs */
1907	other_cpus = all_cpus & ~(1 << cpuid);
1908
1909	/* fill in our (BSP) APIC version */
1910	cpu_apic_versions[0] = lapic.version;
1911
1912	/* restore the warmstart vector */
1913	*(u_long *) WARMBOOT_OFF = mpbioswarmvec;
1914#ifndef PC98
1915	outb(CMOS_REG, BIOS_RESET);
1916	outb(CMOS_DATA, mpbiosreason);
1917#endif
1918
1919	/*
1920	 * Set up the idle context for the BSP.  Similar to above except
1921	 * that some was done by locore, some by pmap.c and some is implicit
1922	 * because the BSP is cpu#0 and the page is initially zero, and also
1923	 * because we can refer to variables by name on the BSP.
1924	 */
1925
1926	/* Allocate and setup BSP idle stack */
1927	stack = (char *)kmem_alloc(kernel_map, UPAGES * PAGE_SIZE);
1928	for (i = 0; i < UPAGES; i++)
1929		SMPpt[5 + i] = (pt_entry_t)
1930		    (PG_V | PG_RW | vtophys(PAGE_SIZE * i + stack));
1931
1932	*(int *)PTD = 0;
1933	pmap_set_opt();
1934
1935	/* number of APs actually started */
1936	return mp_ncpus - 1;
1937}
1938
1939
1940/*
1941 * load the 1st level AP boot code into base memory.
1942 */
1943
1944/* targets for relocation */
1945extern void bigJump(void);
1946extern void bootCodeSeg(void);
1947extern void bootDataSeg(void);
1948extern void MPentry(void);
1949extern u_int MP_GDT;
1950extern u_int mp_gdtbase;
1951
1952static void
1953install_ap_tramp(u_int boot_addr)
1954{
1955	int     x;
1956	int     size = *(int *) ((u_long) & bootMP_size);
1957	u_char *src = (u_char *) ((u_long) bootMP);
1958	u_char *dst = (u_char *) boot_addr + KERNBASE;
1959	u_int   boot_base = (u_int) bootMP;
1960	u_int8_t *dst8;
1961	u_int16_t *dst16;
1962	u_int32_t *dst32;
1963
1964	POSTCODE(INSTALL_AP_TRAMP_POST);
1965
1966	for (x = 0; x < size; ++x)
1967		*dst++ = *src++;
1968
1969	/*
1970	 * modify addresses in code we just moved to basemem. unfortunately we
1971	 * need fairly detailed info about mpboot.s for this to work.  changes
1972	 * to mpboot.s might require changes here.
1973	 */
1974
1975	/* boot code is located in KERNEL space */
1976	dst = (u_char *) boot_addr + KERNBASE;
1977
1978	/* modify the lgdt arg */
1979	dst32 = (u_int32_t *) (dst + ((u_int) & mp_gdtbase - boot_base));
1980	*dst32 = boot_addr + ((u_int) & MP_GDT - boot_base);
1981
1982	/* modify the ljmp target for MPentry() */
1983	dst32 = (u_int32_t *) (dst + ((u_int) bigJump - boot_base) + 1);
1984	*dst32 = ((u_int) MPentry - KERNBASE);
1985
1986	/* modify the target for boot code segment */
1987	dst16 = (u_int16_t *) (dst + ((u_int) bootCodeSeg - boot_base));
1988	dst8 = (u_int8_t *) (dst16 + 1);
1989	*dst16 = (u_int) boot_addr & 0xffff;
1990	*dst8 = ((u_int) boot_addr >> 16) & 0xff;
1991
1992	/* modify the target for boot data segment */
1993	dst16 = (u_int16_t *) (dst + ((u_int) bootDataSeg - boot_base));
1994	dst8 = (u_int8_t *) (dst16 + 1);
1995	*dst16 = (u_int) boot_addr & 0xffff;
1996	*dst8 = ((u_int) boot_addr >> 16) & 0xff;
1997}
1998
1999
2000/*
2001 * this function starts the AP (application processor) identified
2002 * by the APIC ID 'physicalCpu'.  It does quite a "song and dance"
2003 * to accomplish this.  This is necessary because of the nuances
2004 * of the different hardware we might encounter.  It ain't pretty,
2005 * but it seems to work.
2006 */
2007static int
2008start_ap(int logical_cpu, u_int boot_addr)
2009{
2010	int     physical_cpu;
2011	int     vector;
2012	int     cpus;
2013	u_long  icr_lo, icr_hi;
2014
2015	POSTCODE(START_AP_POST);
2016
2017	/* get the PHYSICAL APIC ID# */
2018	physical_cpu = CPU_TO_ID(logical_cpu);
2019
2020	/* calculate the vector */
2021	vector = (boot_addr >> 12) & 0xff;
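	/*
	 * The STARTUP IPI vector is a physical page number: the AP begins
	 * executing in real mode at (vector << 12), i.e. at boot_addr where
	 * install_ap_tramp() copied the trampoline.
	 */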
2022
2023	/* used as a watchpoint to signal AP startup */
2024	cpus = mp_ncpus;
2025
2026	/*
2027	 * First we do an INIT/RESET IPI.  This INIT IPI might be run, resetting
2028	 * and running the target CPU, OR it might be latched (P5
2029	 * bug), leaving the CPU waiting for a STARTUP IPI, OR it might simply be
2030	 * ignored.
2031	 */
2032
2033	/* setup the address for the target AP */
2034	icr_hi = lapic.icr_hi & ~APIC_ID_MASK;
2035	icr_hi |= (physical_cpu << 24);
2036	lapic.icr_hi = icr_hi;
2037
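	/*
	 * ICR low-word values used below: 0x0000c500 is a level-triggered,
	 * asserted INIT IPI (delivery mode 101), 0x00008500 is the matching
	 * de-assert, and 0x00000600 | vector is a STARTUP IPI (delivery mode
	 * 110) carrying the trampoline's page number.
	 */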
2038	/* do an INIT IPI: assert RESET */
2039	icr_lo = lapic.icr_lo & 0xfff00000;
2040	lapic.icr_lo = icr_lo | 0x0000c500;
2041
2042	/* wait for pending status end */
2043	while (lapic.icr_lo & APIC_DELSTAT_MASK)
2044		 /* spin */ ;
2045
2046	/* do an INIT IPI: deassert RESET */
2047	lapic.icr_lo = icr_lo | 0x00008500;
2048
2049	/* wait for pending status end */
2050	u_sleep(10000);		/* wait ~10mS */
2051	while (lapic.icr_lo & APIC_DELSTAT_MASK)
2052		 /* spin */ ;
2053
2054	/*
2055	 * next we do a STARTUP IPI: the previous INIT IPI might still be
2056	 * latched (P5 bug), in which case this 1st STARTUP would terminate
2057	 * immediately and the previously started INIT IPI would continue.  OR
2058	 * the previous INIT IPI has already run, and this STARTUP IPI will
2059	 * run.  OR the previous INIT IPI was ignored, and this STARTUP IPI
2060	 * will run.
2061	 */
2062
2063	/* do a STARTUP IPI */
2064	lapic.icr_lo = icr_lo | 0x00000600 | vector;
2065	while (lapic.icr_lo & APIC_DELSTAT_MASK)
2066		 /* spin */ ;
2067	u_sleep(200);		/* wait ~200us */
2068
2069	/*
2070	 * finally we do a 2nd STARTUP IPI: this 2nd STARTUP IPI should run IF
2071	 * finally we do a 2nd STARTUP IPI: it should run IF the previous
2072	 * STARTUP IPI was cancelled by a latched INIT IPI; otherwise it will
2073	 * be ignored, as only ONE STARTUP IPI is recognized after a hardware
2074	 * RESET or INIT IPI.
2075
2076	lapic.icr_lo = icr_lo | 0x00000600 | vector;
2077	while (lapic.icr_lo & APIC_DELSTAT_MASK)
2078		 /* spin */ ;
2079	u_sleep(200);		/* wait ~200us */
2080
2081	/* wait for it to start */
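	/*
	 * The AP is expected to bump mp_ncpus once it reaches kernel code;
	 * that is what the comparison against 'cpus' below is watching for.
	 */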
2082	set_apic_timer(5000000);/* == 5 seconds */
2083	while (read_apic_timer())
2084		if (mp_ncpus > cpus)
2085			return 1;	/* return SUCCESS */
2086
2087	return 0;		/* return FAILURE */
2088}
2089
2090
2091/*
2092 * Flush the TLB on all other CPUs.
2093 *
2094 * XXX: Needs to handshake and wait for completion before proceeding.
2095 */
2096void
2097smp_invltlb(void)
2098{
2099#if defined(APIC_IO)
2100	if (smp_started && invltlb_ok)
2101		all_but_self_ipi(XINVLTLB_OFFSET);
2102#endif  /* APIC_IO */
2103}
2104
2105void
2106invlpg(u_int addr)
2107{
2108	__asm   __volatile("invlpg (%0)"::"r"(addr):"memory");
2109
2110	/* send a message to the other CPUs */
2111	smp_invltlb();
2112}
2113
2114void
2115invltlb(void)
2116{
2117	u_long  temp;
2118
2119	/*
2120	 * This should be implemented as load_cr3(rcr3()) when load_cr3() is
2121	 * inlined.
2122	 */
2123	__asm __volatile("movl %%cr3, %0; movl %0, %%cr3":"=r"(temp) :: "memory");
2124
2125	/* send a message to the other CPUs */
2126	smp_invltlb();
2127}
2128
2129
2130/*
2131 * When called the executing CPU will send an IPI to all other CPUs
2132 *  requesting that they halt execution.
2133 *
2134 * Usually (but not necessarily) called with 'other_cpus' as its arg.
2135 *
2136 *  - Signals all CPUs in map to stop.
2137 *  - Waits for each to stop.
2138 *
2139 * Returns:
2140 *  -1: error
2141 *   0: NA
2142 *   1: ok
2143 *
2144 * XXX FIXME: this is not MP-safe, needs a lock to prevent multiple CPUs
2145 *            from executing at same time.
2146 */
2147int
2148stop_cpus(u_int map)
2149{
2150	if (!smp_started)
2151		return 0;
2152
2153	/* send the Xcpustop IPI to all CPUs in map */
2154	selected_apic_ipi(map, XCPUSTOP_OFFSET, APIC_DELMODE_FIXED);
2155
2156	while ((stopped_cpus & map) != map)
2157		/* spin */ ;
2158
2159	return 1;
2160}
2161
2162
2163/*
2164 * Called by a CPU to restart stopped CPUs.
2165 *
2166 * Usually (but not necessarily) called with 'stopped_cpus' as its arg.
2167 *
2168 *  - Signals all CPUs in map to restart.
2169 *  - Waits for each to restart.
2170 *
2171 * Returns:
2172 *  -1: error
2173 *   0: NA
2174 *   1: ok
2175 */
2176int
2177restart_cpus(u_int map)
2178{
2179	if (!smp_started)
2180		return 0;
2181
2182	started_cpus = map;		/* signal other cpus to restart */
2183
2184	while ((stopped_cpus & map) != 0) /* wait for each to clear its bit */
2185		/* spin */ ;
2186
2187	return 1;
2188}
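
/*
 * Illustrative only (not part of the original source): a minimal sketch of
 * how stop_cpus()/restart_cpus() are typically paired, assuming the caller
 * uses the 'other_cpus' and 'stopped_cpus' maps as described above.
 */
#if 0
static void
example_freeze_other_cpus(void)
{
	if (stop_cpus(other_cpus) == 1) {
		/* ... inspect or modify shared state while others spin ... */
		restart_cpus(stopped_cpus);	/* let them continue */
	}
}
#endif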
2189
2190int smp_active = 0;	/* are the APs allowed to run? */
2191SYSCTL_INT(_machdep, OID_AUTO, smp_active, CTLFLAG_RW, &smp_active, 0, "");
2192
2193/* XXX maybe should be hw.ncpu */
2194static int smp_cpus = 1;	/* how many CPUs are running */
2195SYSCTL_INT(_machdep, OID_AUTO, smp_cpus, CTLFLAG_RD, &smp_cpus, 0, "");
2196
2197int invltlb_ok = 0;	/* throttle smp_invltlb() till safe */
2198SYSCTL_INT(_machdep, OID_AUTO, invltlb_ok, CTLFLAG_RW, &invltlb_ok, 0, "");
2199
2200/* Warning: Do not staticize.  Used from swtch.s */
2201int do_page_zero_idle = 1; /* bzero pages for fun and profit in idleloop */
2202SYSCTL_INT(_machdep, OID_AUTO, do_page_zero_idle, CTLFLAG_RW,
2203	   &do_page_zero_idle, 0, "");
2204
2205/* Is forwarding of an interrupt to the CPU holding the ISR lock enabled? */
2206int forward_irq_enabled = 1;
2207SYSCTL_INT(_machdep, OID_AUTO, forward_irq_enabled, CTLFLAG_RW,
2208	   &forward_irq_enabled, 0, "");
2209
2210/* Enable forwarding of a signal to a process running on a different CPU */
2211static int forward_signal_enabled = 1;
2212SYSCTL_INT(_machdep, OID_AUTO, forward_signal_enabled, CTLFLAG_RW,
2213	   &forward_signal_enabled, 0, "");
2214
2215/* Enable forwarding of roundrobin to all other CPUs */
2216static int forward_roundrobin_enabled = 1;
2217SYSCTL_INT(_machdep, OID_AUTO, forward_roundrobin_enabled, CTLFLAG_RW,
2218	   &forward_roundrobin_enabled, 0, "");
2219
2220/*
2221 * This is called once the rest of the system is up and running and we're
2222 * ready to let the APs out of the pen.
2223 */
2224void ap_init(void);
2225
2226void
2227ap_init()
2228{
2229	u_int	apic_id;
2230
2231	/* BSP may have changed PTD while we're waiting for the lock */
2232	cpu_invltlb();
2233
2234	smp_cpus++;
2235
2236#if defined(I586_CPU) && !defined(NO_F00F_HACK)
2237	lidt(&r_idt);
2238#endif
2239
2240	/* Build our map of 'other' CPUs. */
2241	other_cpus = all_cpus & ~(1 << cpuid);
2242
2243	printf("SMP: AP CPU #%d Launched!\n", cpuid);
2244
2245	/* XXX FIXME: i386 specific, and redundant: Set up the FPU. */
2246	load_cr0((rcr0() & ~CR0_EM) | CR0_MP | CR0_NE | CR0_TS);
2247
2248	/* set up FPU state on the AP */
2249	npxinit(__INITIAL_NPXCW__);
2250
2251	/* A quick check from sanity claus */
2252	apic_id = (apic_id_to_logical[(lapic.id & 0x0f000000) >> 24]);
2253	if (cpuid != apic_id) {
2254		printf("SMP: cpuid = %d\n", cpuid);
2255		printf("SMP: apic_id = %d\n", apic_id);
2256		printf("PTD[MPPTDI] = %p\n", (void *)PTD[MPPTDI]);
2257		panic("cpuid mismatch! boom!!");
2258	}
2259
2260	/* Init local apic for irq's */
2261	apic_initialize();
2262
2263	/* Set memory range attributes for this CPU to match the BSP */
2264	mem_range_AP_init();
2265
2266	/*
2267	 * Activate smp_invltlb, although strictly speaking, this isn't
2268	 * quite correct yet.  We should have a bitfield for CPUs willing
2269	 * to accept TLB flush IPIs or something and sync them.
2270	 */
2271	if (smp_cpus == mp_ncpus) {
2272		invltlb_ok = 1;
2273		smp_started = 1; /* enable IPIs, TLB shootdown, freezes, etc. */
2274		smp_active = 1;	 /* historic */
2275	}
2276}
2277
2278#ifdef BETTER_CLOCK
2279
2280#define CHECKSTATE_USER	0
2281#define CHECKSTATE_SYS	1
2282#define CHECKSTATE_INTR	2
2283
2284/* Do not staticize.  Used from apic_vector.s */
2285struct proc*	checkstate_curproc[NCPU];
2286int		checkstate_cpustate[NCPU];
2287u_long		checkstate_pc[NCPU];
2288
2289extern long	cp_time[CPUSTATES];
2290
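/*
 * PC_TO_INDEX() mirrors the profiling index calculation used by
 * addupc_intr(): scale the offset of 'pc' into the profiled region by
 * pr_scale (effectively a fixed-point fraction, hence the >> 16) and clear
 * the low bit so the result stays aligned to the 2-byte profiling counters.
 */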
2291#define PC_TO_INDEX(pc, prof)				\
2292        ((int)(((u_quad_t)((pc) - (prof)->pr_off) *	\
2293            (u_quad_t)((prof)->pr_scale)) >> 16) & ~1)
2294
2295static void
2296addupc_intr_forwarded(struct proc *p, int id, int *astmap)
2297{
2298	int i;
2299	struct uprof *prof;
2300	u_long pc;
2301
2302	pc = checkstate_pc[id];
2303	prof = &p->p_stats->p_prof;
2304	if (pc >= prof->pr_off &&
2305	    (i = PC_TO_INDEX(pc, prof)) < prof->pr_size) {
2306		if ((p->p_flag & P_OWEUPC) == 0) {
2307			prof->pr_addr = pc;
2308			prof->pr_ticks = 1;
2309			p->p_flag |= P_OWEUPC;
2310		}
2311		*astmap |= (1 << id);
2312	}
2313}
2314
2315static void
2316forwarded_statclock(int id, int pscnt, int *astmap)
2317{
2318	struct pstats *pstats;
2319	long rss;
2320	struct rusage *ru;
2321	struct vmspace *vm;
2322	int cpustate;
2323	struct proc *p;
2324#ifdef GPROF
2325	register struct gmonparam *g;
2326	int i;
2327#endif
2328
2329	p = checkstate_curproc[id];
2330	cpustate = checkstate_cpustate[id];
2331
2332	switch (cpustate) {
2333	case CHECKSTATE_USER:
2334		if (p->p_flag & P_PROFIL)
2335			addupc_intr_forwarded(p, id, astmap);
2336		if (pscnt > 1)
2337			return;
2338		p->p_uticks++;
2339		if (p->p_nice > NZERO)
2340			cp_time[CP_NICE]++;
2341		else
2342			cp_time[CP_USER]++;
2343		break;
2344	case CHECKSTATE_SYS:
2345#ifdef GPROF
2346		/*
2347		 * Kernel statistics are just like addupc_intr, only easier.
2348		 */
2349		g = &_gmonparam;
2350		if (g->state == GMON_PROF_ON) {
2351			i = checkstate_pc[id] - g->lowpc;
2352			if (i < g->textsize) {
2353				i /= HISTFRACTION * sizeof(*g->kcount);
2354				g->kcount[i]++;
2355			}
2356		}
2357#endif
2358		if (pscnt > 1)
2359			return;
2360
2361		if (!p)
2362			cp_time[CP_IDLE]++;
2363		else {
2364			p->p_sticks++;
2365			cp_time[CP_SYS]++;
2366		}
2367		break;
2368	case CHECKSTATE_INTR:
2369	default:
2370#ifdef GPROF
2371		/*
2372		 * Kernel statistics are just like addupc_intr, only easier.
2373		 */
2374		g = &_gmonparam;
2375		if (g->state == GMON_PROF_ON) {
2376			i = checkstate_pc[id] - g->lowpc;
2377			if (i < g->textsize) {
2378				i /= HISTFRACTION * sizeof(*g->kcount);
2379				g->kcount[i]++;
2380			}
2381		}
2382#endif
2383		if (pscnt > 1)
2384			return;
2385		if (p)
2386			p->p_iticks++;
2387		cp_time[CP_INTR]++;
2388	}
2389	if (p != NULL) {
2390		schedclock(p);
2391
2392		/* Update resource usage integrals and maximums. */
2393		if ((pstats = p->p_stats) != NULL &&
2394		    (ru = &pstats->p_ru) != NULL &&
2395		    (vm = p->p_vmspace) != NULL) {
2396			ru->ru_ixrss += pgtok(vm->vm_tsize);
2397			ru->ru_idrss += pgtok(vm->vm_dsize);
2398			ru->ru_isrss += pgtok(vm->vm_ssize);
2399			rss = pgtok(vmspace_resident_count(vm));
2400			if (ru->ru_maxrss < rss)
2401				ru->ru_maxrss = rss;
2402		}
2403	}
2404}
2405
2406void
2407forward_statclock(int pscnt)
2408{
2409	int map;
2410	int id;
2411	int i;
2412
2413	/* Kludge. We don't yet have separate locks for the interrupts
2414	 * and the kernel. This means that we cannot let the other processors
2415	 * handle complex interrupts while inhibiting them from entering
2416	 * the kernel in a non-interrupt context.
2417	 *
2418	 * What we can do, without changing the locking mechanisms yet,
2419	 * is to let the other processors handle a very simple interrupt
2420	 * (which determines the processor states), and do the main
2421	 * work ourselves.
2422	 */
2423
2424	if (!smp_started || !invltlb_ok || cold || panicstr)
2425		return;
2426
2427	/* Step 1: Probe state (user, cpu, interrupt, spinlock, idle) */
2428
2429	map = other_cpus & ~stopped_cpus;
2430	checkstate_probed_cpus = 0;
2431	if (map != 0)
2432		selected_apic_ipi(map,
2433				  XCPUCHECKSTATE_OFFSET, APIC_DELMODE_FIXED);
2434
2435	i = 0;
2436	while (checkstate_probed_cpus != map) {
2437		/* spin */
2438		i++;
2439		if (i == 100000) {
2440#ifdef BETTER_CLOCK_DIAGNOSTIC
2441			printf("forward_statclock: checkstate %x\n",
2442			       checkstate_probed_cpus);
2443#endif
2444			break;
2445		}
2446	}
2447
2448	/*
2449	 * Step 2: walk through the other processors' processes, update ticks and
2450	 * profiling info.
2451	 */
2452
2453	map = 0;
2454	for (id = 0; id < mp_ncpus; id++) {
2455		if (id == cpuid)
2456			continue;
2457		if (((1 << id) & checkstate_probed_cpus) == 0)
2458			continue;
2459		forwarded_statclock(id, pscnt, &map);
2460	}
2461	if (map != 0) {
2462		checkstate_need_ast |= map;
2463		selected_apic_ipi(map, XCPUAST_OFFSET, APIC_DELMODE_FIXED);
2464		i = 0;
2465		while ((checkstate_need_ast & map) != 0) {
2466			/* spin */
2467			i++;
2468			if (i > 100000) {
2469#ifdef BETTER_CLOCK_DIAGNOSTIC
2470				printf("forward_statclock: dropped ast 0x%x\n",
2471				       checkstate_need_ast & map);
2472#endif
2473				break;
2474			}
2475		}
2476	}
2477}
2478
2479void
2480forward_hardclock(int pscnt)
2481{
2482	int map;
2483	int id;
2484	struct proc *p;
2485	struct pstats *pstats;
2486	int i;
2487
2488	/* Kludge. We don't yet have separate locks for the interrupts
2489	 * and the kernel. This means that we cannot let the other processors
2490	 * handle complex interrupts while inhibiting them from entering
2491	 * the kernel in a non-interrupt context.
2492	 *
2493	 * What we can do, without changing the locking mechanisms yet,
2494	 * is to let the other processors handle a very simple interrupt
2495	 * (which determines the processor states), and do the main
2496	 * work ourselves.
2497	 */
2498
2499	if (!smp_started || !invltlb_ok || cold || panicstr)
2500		return;
2501
2502	/* Step 1: Probe state   (user, cpu, interrupt, spinlock, idle) */
2503	/* Step 1: Probe state (user, cpu, interrupt, spinlock, idle) */
2504
2505	map = other_cpus & ~stopped_cpus;
2506	if (map != 0)
2507		selected_apic_ipi(map,
2508				  XCPUCHECKSTATE_OFFSET, APIC_DELMODE_FIXED);
2509
2510	i = 0;
2511	while (checkstate_probed_cpus != map) {
2512		/* spin */
2513		i++;
2514		if (i == 100000) {
2515#ifdef BETTER_CLOCK_DIAGNOSTIC
2516			printf("forward_hardclock: checkstate %x\n",
2517			       checkstate_probed_cpus);
2518#endif
2519			break;
2520		}
2521	}
2522
2523	/*
2524	 * Step 2: walk through the other processors' processes, update virtual
2525	 * timer and profiling timer. If stathz == 0, also update ticks and
2526	 * profiling info.
2527	 */
2528
2529	map = 0;
2530	for (id = 0; id < mp_ncpus; id++) {
2531		if (id == cpuid)
2532			continue;
2533		if (((1 << id) & checkstate_probed_cpus) == 0)
2534			continue;
2535		p = checkstate_curproc[id];
2536		if (p) {
2537			pstats = p->p_stats;
2538			if (checkstate_cpustate[id] == CHECKSTATE_USER &&
2539			    timevalisset(&pstats->p_timer[ITIMER_VIRTUAL].it_value) &&
2540			    itimerdecr(&pstats->p_timer[ITIMER_VIRTUAL], tick) == 0) {
2541				psignal(p, SIGVTALRM);
2542				map |= (1 << id);
2543			}
2544			if (timevalisset(&pstats->p_timer[ITIMER_PROF].it_value) &&
2545			    itimerdecr(&pstats->p_timer[ITIMER_PROF], tick) == 0) {
2546				psignal(p, SIGPROF);
2547				map |= (1 << id);
2548			}
2549		}
2550		if (stathz == 0) {
2551			forwarded_statclock(id, pscnt, &map);
2552		}
2553	}
2554	if (map != 0) {
2555		checkstate_need_ast |= map;
2556		selected_apic_ipi(map, XCPUAST_OFFSET, APIC_DELMODE_FIXED);
2557		i = 0;
2558		while ((checkstate_need_ast & map) != 0) {
2559			/* spin */
2560			i++;
2561			if (i > 100000) {
2562#ifdef BETTER_CLOCK_DIAGNOSTIC
2563				printf("forward_hardclock: dropped ast 0x%x\n",
2564				       checkstate_need_ast & map);
2565#endif
2566				break;
2567			}
2568		}
2569	}
2570}
2571
2572#endif /* BETTER_CLOCK */
2573
2574void
2575forward_signal(struct proc *p)
2576{
2577	int map;
2578	int id;
2579	int i;
2580
2581	/* Kludge. We don't yet have separate locks for the interrupts
2582	 * and the kernel. This means that we cannot let the other processors
2583	 * handle complex interrupts while inhibiting them from entering
2584	 * the kernel in a non-interrupt context.
2585	 *
2586	 * What we can do, without changing the locking mechanisms yet,
2587	 * is to let the other processors handle a very simple interrupt
2588	 * (which determines the processor states), and do the main
2589	 * work ourselves.
2590	 */
2591
2592	if (!smp_started || !invltlb_ok || cold || panicstr)
2593		return;
2594	if (!forward_signal_enabled)
2595		return;
2596	while (1) {
2597		if (p->p_stat != SRUN)
2598			return;
2599		id = p->p_oncpu;
2600		if (id == 0xff)
2601			return;
2602		map = (1<<id);
2603		checkstate_need_ast |= map;
2604		selected_apic_ipi(map, XCPUAST_OFFSET, APIC_DELMODE_FIXED);
2605		i = 0;
2606		while ((checkstate_need_ast & map) != 0) {
2607			/* spin */
2608			i++;
2609			if (i > 100000) {
2610#if 0
2611				printf("forward_signal: dropped ast 0x%x\n",
2612				       checkstate_need_ast & map);
2613#endif
2614				break;
2615			}
2616		}
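		/*
		 * If the process migrated to another CPU while we were
		 * delivering the AST, loop and retarget it; otherwise we
		 * are done.
		 */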
2617		if (id == p->p_oncpu)
2618			return;
2619	}
2620}
2621
2622void
2623forward_roundrobin(void)
2624{
2625	u_int map;
2626	int i;
2627
2628	if (!smp_started || !invltlb_ok || cold || panicstr)
2629		return;
2630	if (!forward_roundrobin_enabled)
2631		return;
2632	resched_cpus |= other_cpus;
2633	map = other_cpus & ~stopped_cpus;
2634#if 1
2635	selected_apic_ipi(map, XCPUAST_OFFSET, APIC_DELMODE_FIXED);
2636#else
2637	(void) all_but_self_ipi(XCPUAST_OFFSET);
2638#endif
2639	i = 0;
2640	while ((checkstate_need_ast & map) != 0) {
2641		/* spin */
2642		i++;
2643		if (i > 100000) {
2644#if 0
2645			printf("forward_roundrobin: dropped ast 0x%x\n",
2646			       checkstate_need_ast & map);
2647#endif
2648			break;
2649		}
2650	}
2651}
2652
2653
2654#ifdef APIC_INTR_REORDER
2655/*
2656 *	Maintain mapping from softintr vector to isr bit in local apic.
2657 */
2658void
2659set_lapic_isrloc(int intr, int vector)
2660{
2661	if (intr < 0 || intr > 32)
2662		panic("set_lapic_isrloc: bad intr argument: %d", intr);
2663	if (vector < ICU_OFFSET || vector > 255)
2664		panic("set_lapic_isrloc: bad vector argument: %d", vector);
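	/*
	 * The local APIC ISR registers are spaced 16 bytes apart and each
	 * covers 32 vectors: (vector >> 5) selects the register, and the
	 * << 2 turns that into a u_int-pointer offset (4 u_ints == 16 bytes)
	 * from &lapic.isr0.
	 */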
2665	apic_isrbit_location[intr].location = &lapic.isr0 + ((vector>>5)<<2);
2666	apic_isrbit_location[intr].bit = (1<<(vector & 31));
2667}
2668#endif
2669
2670/*
2671 * All-CPU rendezvous.  CPUs are signalled, all execute the setup function
2672 * (if specified), rendezvous, execute the action function (if specified),
2673 * rendezvous again, execute the teardown function (if specified), and then
2674 * resume.
2675 *
2676 * Note that the supplied external functions _must_ be reentrant and aware
2677 * that they are running in parallel and in an unknown lock context.
2678 */
2679static void (*smp_rv_setup_func)(void *arg);
2680static void (*smp_rv_action_func)(void *arg);
2681static void (*smp_rv_teardown_func)(void *arg);
2682static void *smp_rv_func_arg;
2683static volatile int smp_rv_waiters[2];
2684
2685void
2686smp_rendezvous_action(void)
2687{
2688	/* setup function */
2689	if (smp_rv_setup_func != NULL)
2690		smp_rv_setup_func(smp_rv_func_arg);
2691	/* spin on entry rendezvous */
2692	atomic_add_int(&smp_rv_waiters[0], 1);
2693	while (smp_rv_waiters[0] < mp_ncpus)
2694		;
2695	/* action function */
2696	if (smp_rv_action_func != NULL)
2697		smp_rv_action_func(smp_rv_func_arg);
2698	/* spin on exit rendezvous */
2699	atomic_add_int(&smp_rv_waiters[1], 1);
2700	while (smp_rv_waiters[1] < mp_ncpus)
2701		;
2702	/* teardown function */
2703	if (smp_rv_teardown_func != NULL)
2704		smp_rv_teardown_func(smp_rv_func_arg);
2705}
2706
2707void
2708smp_rendezvous(void (* setup_func)(void *),
2709	       void (* action_func)(void *),
2710	       void (* teardown_func)(void *),
2711	       void *arg)
2712{
2713	u_int	efl;
2714
2715	/* obtain rendezvous lock */
2716	s_lock(&smp_rv_lock);		/* XXX sleep here? NOWAIT flag? */
2717
2718	/* set static function pointers */
2719	smp_rv_setup_func = setup_func;
2720	smp_rv_action_func = action_func;
2721	smp_rv_teardown_func = teardown_func;
2722	smp_rv_func_arg = arg;
2723	smp_rv_waiters[0] = 0;
2724	smp_rv_waiters[1] = 0;
2725
2726	/* disable interrupts on this CPU, save interrupt status */
2727	efl = read_eflags();
2728	write_eflags(efl & ~PSL_I);
2729
2730	/* signal other processors, which will enter the IPI with interrupts off */
2731	all_but_self_ipi(XRENDEZVOUS_OFFSET);
2732
2733	/* call executor function */
2734	smp_rendezvous_action();
2735
2736	/* restore interrupt flag */
2737	write_eflags(efl);
2738
2739	/* release lock */
2740	s_unlock(&smp_rv_lock);
2741}
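
/*
 * Illustrative only (not part of the original source): a minimal sketch of
 * an smp_rendezvous() caller.  The action runs once on every CPU with
 * interrupts disabled; setup and teardown are optional and may be NULL.
 */
#if 0
static void
example_flush_action(void *dummy)
{
	cpu_invltlb();		/* runs on every CPU, in parallel */
}

static void
example_flush_all_tlbs(void)
{
	smp_rendezvous(NULL, example_flush_action, NULL, NULL);
}
#endif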
2742