mp_x86.c revision 71243
/*
 * Copyright (c) 1996, by Steve Passe
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. The name of the developer may NOT be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: head/sys/i386/i386/mp_machdep.c 71243 2001-01-19 10:48:35Z peter $
 */
27
28#include "opt_cpu.h"
29#include "opt_user_ldt.h"
30
31#ifdef SMP
32#include <machine/smptests.h>
33#else
34#error
35#endif
36
37#include <sys/param.h>
38#include <sys/bus.h>
39#include <sys/systm.h>
40#include <sys/kernel.h>
41#include <sys/proc.h>
42#include <sys/sysctl.h>
43#include <sys/malloc.h>
44#include <sys/memrange.h>
45#include <sys/mutex.h>
46#ifdef BETTER_CLOCK
47#include <sys/dkstat.h>
48#endif
49#include <sys/cons.h>	/* cngetc() */
50
51#include <vm/vm.h>
52#include <vm/vm_param.h>
53#include <vm/pmap.h>
54#include <vm/vm_kern.h>
55#include <vm/vm_extern.h>
56#ifdef BETTER_CLOCK
57#include <sys/lock.h>
58#include <vm/vm_map.h>
59#include <sys/user.h>
60#ifdef GPROF
61#include <sys/gmon.h>
62#endif
63#endif
64
65#include <machine/smp.h>
66#include <machine/apic.h>
67#include <machine/atomic.h>
68#include <machine/cpufunc.h>
69#include <machine/mpapic.h>
70#include <machine/psl.h>
71#include <machine/segments.h>
72#include <machine/smptests.h>	/** TEST_DEFAULT_CONFIG, TEST_TEST1 */
73#include <machine/tss.h>
74#include <machine/specialreg.h>
75#include <machine/globaldata.h>
76
77#if defined(APIC_IO)
78#include <machine/md_var.h>		/* setidt() */
79#include <i386/isa/icu.h>		/* IPIs */
80#include <i386/isa/intr_machdep.h>	/* IPIs */
81#endif	/* APIC_IO */
82
83#if defined(TEST_DEFAULT_CONFIG)
84#define MPFPS_MPFB1	TEST_DEFAULT_CONFIG
85#else
86#define MPFPS_MPFB1	mpfps->mpfb1
87#endif  /* TEST_DEFAULT_CONFIG */
88
89#define WARMBOOT_TARGET		0
90#define WARMBOOT_OFF		(KERNBASE + 0x0467)
91#define WARMBOOT_SEG		(KERNBASE + 0x0469)
92
93#ifdef PC98
94#define BIOS_BASE		(0xe8000)
95#define BIOS_SIZE		(0x18000)
96#else
97#define BIOS_BASE		(0xf0000)
98#define BIOS_SIZE		(0x10000)
99#endif
100#define BIOS_COUNT		(BIOS_SIZE/4)
101
102#define CMOS_REG		(0x70)
103#define CMOS_DATA		(0x71)
104#define BIOS_RESET		(0x0f)
105#define BIOS_WARM		(0x0a)
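
/*
 * WARMBOOT_OFF/WARMBOOT_SEG map the BIOS data area warm-boot vector
 * (real-mode entry offset at physical 0x467, segment at 0x469).
 * Writing BIOS_WARM (0x0a) into CMOS shutdown status register
 * BIOS_RESET (0x0f) makes the BIOS jump through that vector after a
 * CPU reset, which is how a freshly INIT-ed AP reaches our trampoline.
 */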
106
107#define PROCENTRY_FLAG_EN	0x01
108#define PROCENTRY_FLAG_BP	0x02
109#define IOAPICENTRY_FLAG_EN	0x01
110
111
112/* MP Floating Pointer Structure */
113typedef struct MPFPS {
114	char    signature[4];
115	void   *pap;
116	u_char  length;
117	u_char  spec_rev;
118	u_char  checksum;
119	u_char  mpfb1;
120	u_char  mpfb2;
121	u_char  mpfb3;
122	u_char  mpfb4;
123	u_char  mpfb5;
124}      *mpfps_t;
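
/*
 * mpfb1 is MP feature byte 1: a non-zero value selects one of the MP
 * spec "default" configurations and means no configuration table is
 * present.  Bit 7 of mpfb2 (IMCRP) indicates PIC mode rather than
 * virtual wire mode; see the picmode handling below.
 */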
125
126/* MP Configuration Table Header */
127typedef struct MPCTH {
128	char    signature[4];
129	u_short base_table_length;
130	u_char  spec_rev;
131	u_char  checksum;
132	u_char  oem_id[8];
133	u_char  product_id[12];
134	void   *oem_table_pointer;
135	u_short oem_table_size;
136	u_short entry_count;
137	void   *apic_address;
138	u_short extended_table_length;
139	u_char  extended_table_checksum;
140	u_char  reserved;
141}      *mpcth_t;
142
143
144typedef struct PROCENTRY {
145	u_char  type;
146	u_char  apic_id;
147	u_char  apic_version;
148	u_char  cpu_flags;
149	u_long  cpu_signature;
150	u_long  feature_flags;
151	u_long  reserved1;
152	u_long  reserved2;
153}      *proc_entry_ptr;
154
155typedef struct BUSENTRY {
156	u_char  type;
157	u_char  bus_id;
158	char    bus_type[6];
159}      *bus_entry_ptr;
160
161typedef struct IOAPICENTRY {
162	u_char  type;
163	u_char  apic_id;
164	u_char  apic_version;
165	u_char  apic_flags;
166	void   *apic_address;
167}      *io_apic_entry_ptr;
168
169typedef struct INTENTRY {
170	u_char  type;
171	u_char  int_type;
172	u_short int_flags;
173	u_char  src_bus_id;
174	u_char  src_bus_irq;
175	u_char  dst_apic_id;
176	u_char  dst_apic_int;
177}      *int_entry_ptr;
178
179/* descriptions of MP basetable entries */
180typedef struct BASETABLE_ENTRY {
181	u_char  type;
182	u_char  length;
183	char    name[16];
184}       basetable_entry;
185
186/*
187 * this code MUST be enabled here and in mpboot.s.
188 * it follows the very early stages of AP boot by placing values in CMOS ram.
189 * it NORMALLY will never be needed and thus the primitive method for enabling.
190 *
191#define CHECK_POINTS
192 */
193
194#if defined(CHECK_POINTS) && !defined(PC98)
195#define CHECK_READ(A)	 (outb(CMOS_REG, (A)), inb(CMOS_DATA))
196#define CHECK_WRITE(A,D) (outb(CMOS_REG, (A)), outb(CMOS_DATA, (D)))
197
198#define CHECK_INIT(D);				\
199	CHECK_WRITE(0x34, (D));			\
200	CHECK_WRITE(0x35, (D));			\
201	CHECK_WRITE(0x36, (D));			\
202	CHECK_WRITE(0x37, (D));			\
203	CHECK_WRITE(0x38, (D));			\
204	CHECK_WRITE(0x39, (D));
205
206#define CHECK_PRINT(S);				\
207	printf("%s: %d, %d, %d, %d, %d, %d\n",	\
208	   (S),					\
209	   CHECK_READ(0x34),			\
210	   CHECK_READ(0x35),			\
211	   CHECK_READ(0x36),			\
212	   CHECK_READ(0x37),			\
213	   CHECK_READ(0x38),			\
214	   CHECK_READ(0x39));
215
216#else				/* CHECK_POINTS */
217
218#define CHECK_INIT(D)
219#define CHECK_PRINT(S)
220
221#endif				/* CHECK_POINTS */
222
223/*
224 * Values to send to the POST hardware.
225 */
226#define MP_BOOTADDRESS_POST	0x10
227#define MP_PROBE_POST		0x11
228#define MPTABLE_PASS1_POST	0x12
229
230#define MP_START_POST		0x13
231#define MP_ENABLE_POST		0x14
232#define MPTABLE_PASS2_POST	0x15
233
234#define START_ALL_APS_POST	0x16
235#define INSTALL_AP_TRAMP_POST	0x17
236#define START_AP_POST		0x18
237
238#define MP_ANNOUNCE_POST	0x19
239
240/* used to hold the AP's until we are ready to release them */
241struct simplelock	ap_boot_lock;
242
243/** XXX FIXME: where does this really belong, isa.h/isa.c perhaps? */
244int	current_postcode;
245
246/** XXX FIXME: what system files declare these??? */
247extern struct region_descriptor r_gdt, r_idt;
248
int	bsp_apic_ready = 0;	/* flags usability of BSP APIC */
int	mp_ncpus;		/* # of CPUs, including BSP */
int	mp_naps;		/* # of Application Processors */
252int	mp_nbusses;		/* # of busses */
253int	mp_napics;		/* # of IO APICs */
254int	boot_cpu_id;		/* designated BSP */
255vm_offset_t cpu_apic_address;
256vm_offset_t io_apic_address[NAPICID];	/* NAPICID is more than enough */
257extern	int nkpt;
258
259u_int32_t cpu_apic_versions[MAXCPU];
260u_int32_t *io_apic_versions;
261
262#ifdef APIC_INTR_DIAGNOSTIC
263int apic_itrace_enter[32];
264int apic_itrace_tryisrlock[32];
265int apic_itrace_gotisrlock[32];
266int apic_itrace_active[32];
267int apic_itrace_masked[32];
268int apic_itrace_noisrlock[32];
269int apic_itrace_masked2[32];
270int apic_itrace_unmask[32];
271int apic_itrace_noforward[32];
272int apic_itrace_leave[32];
273int apic_itrace_enter2[32];
274int apic_itrace_doreti[32];
275int apic_itrace_eoi[32];
276#ifdef APIC_INTR_DIAGNOSTIC_IRQ
277unsigned short apic_itrace_debugbuffer[32768];
278int apic_itrace_debugbuffer_idx;
279struct simplelock apic_itrace_debuglock;
280#endif
281#endif
282
283#ifdef APIC_INTR_REORDER
284struct {
285	volatile int *location;
286	int bit;
287} apic_isrbit_location[32];
288#endif
289
290struct apic_intmapinfo	int_to_apicintpin[APIC_INTMAPSIZE];
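/*
 * Map an IRQ number to the IO APIC, pin and redirection table index
 * that deliver it; filled in by assign_apic_irq() below.
 */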
291
292/*
293 * APIC ID logical/physical mapping structures.
294 * We oversize these to simplify boot-time config.
295 */
296int     cpu_num_to_apic_id[NAPICID];
297int     io_num_to_apic_id[NAPICID];
298int     apic_id_to_logical[NAPICID];
299
300
301/* Bitmap of all available CPUs */
302u_int	all_cpus;
303
304/* AP uses this during bootstrap.  Do not staticize.  */
305char *bootSTK;
306static int bootAP;
307
308/* Hotwire a 0->4MB V==P mapping */
309extern pt_entry_t *KPTphys;
310
311/* SMP page table page */
312extern pt_entry_t *SMPpt;
313
314struct pcb stoppcbs[MAXCPU];
315
316int smp_started;		/* has the system started? */
317
318/*
319 * Local data and functions.
320 */
321
322static int	mp_capable;
323static u_int	boot_address;
324static u_int	base_memory;
325
326static int	picmode;		/* 0: virtual wire mode, 1: PIC mode */
327static mpfps_t	mpfps;
328static int	search_for_sig(u_int32_t target, int count);
329static void	mp_enable(u_int boot_addr);
330
331static void	mptable_pass1(void);
332static int	mptable_pass2(void);
333static void	default_mp_table(int type);
334static void	fix_mp_table(void);
335static void	setup_apic_irq_mapping(void);
336static void	init_locks(void);
337static int	start_all_aps(u_int boot_addr);
338static void	install_ap_tramp(u_int boot_addr);
339static int	start_ap(int logicalCpu, u_int boot_addr);
340static int	apic_int_is_bus_type(int intr, int bus_type);
341static void	release_aps(void *dummy);
342
343/*
344 * Calculate usable address in base memory for AP trampoline code.
345 */
346u_int
347mp_bootaddress(u_int basemem)
348{
349	POSTCODE(MP_BOOTADDRESS_POST);
350
351	base_memory = basemem * 1024;	/* convert to bytes */
352
353	boot_address = base_memory & ~0xfff;	/* round down to 4k boundary */
354	if ((base_memory - boot_address) < bootMP_size)
355		boot_address -= 4096;	/* not enough, lower by 4k */
356
357	return boot_address;
358}
359
360
361/*
362 * Look for an Intel MP spec table (ie, SMP capable hardware).
363 */
364int
365mp_probe(void)
366{
367	int     x;
368	u_long  segment;
369	u_int32_t target;
370
371	POSTCODE(MP_PROBE_POST);
372
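	/*
	 * Per the MP spec the floating pointer structure lives in one of
	 * three places: the first 1K of the EBDA, the last 1K of base
	 * memory, or the BIOS ROM address space; check each in turn.
	 */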
373	/* see if EBDA exists */
374	if ((segment = (u_long) * (u_short *) (KERNBASE + 0x40e)) != 0) {
375		/* search first 1K of EBDA */
376		target = (u_int32_t) (segment << 4);
377		if ((x = search_for_sig(target, 1024 / 4)) >= 0)
378			goto found;
379	} else {
380		/* last 1K of base memory, effective 'top of base' passed in */
381		target = (u_int32_t) (base_memory - 0x400);
382		if ((x = search_for_sig(target, 1024 / 4)) >= 0)
383			goto found;
384	}
385
386	/* search the BIOS */
387	target = (u_int32_t) BIOS_BASE;
388	if ((x = search_for_sig(target, BIOS_COUNT)) >= 0)
389		goto found;
390
391	/* nothing found */
392	mpfps = (mpfps_t)0;
393	mp_capable = 0;
394	return 0;
395
396found:
397	/* calculate needed resources */
398	mpfps = (mpfps_t)x;
399	mptable_pass1();
400
401	/* flag fact that we are running multiple processors */
402	mp_capable = 1;
403	return 1;
404}
405
406
407/*
408 * Initialize the SMP hardware and the APIC and start up the AP's.
409 */
410void
411mp_start(void)
412{
413	POSTCODE(MP_START_POST);
414
415	/* look for MP capable motherboard */
416	if (mp_capable)
417		mp_enable(boot_address);
418	else
419		panic("MP hardware not found!");
420}
421
422
423/*
424 * Print various information about the SMP system hardware and setup.
425 */
426void
427mp_announce(void)
428{
429	int     x;
430
431	POSTCODE(MP_ANNOUNCE_POST);
432
433	printf("FreeBSD/SMP: Multiprocessor motherboard\n");
434	printf(" cpu0 (BSP): apic id: %2d", CPU_TO_ID(0));
435	printf(", version: 0x%08x", cpu_apic_versions[0]);
436	printf(", at 0x%08x\n", cpu_apic_address);
437	for (x = 1; x <= mp_naps; ++x) {
438		printf(" cpu%d (AP):  apic id: %2d", x, CPU_TO_ID(x));
439		printf(", version: 0x%08x", cpu_apic_versions[x]);
440		printf(", at 0x%08x\n", cpu_apic_address);
441	}
442
443#if defined(APIC_IO)
444	for (x = 0; x < mp_napics; ++x) {
445		printf(" io%d (APIC): apic id: %2d", x, IO_TO_ID(x));
446		printf(", version: 0x%08x", io_apic_versions[x]);
447		printf(", at 0x%08x\n", io_apic_address[x]);
448	}
449#else
450	printf(" Warning: APIC I/O disabled\n");
451#endif	/* APIC_IO */
452}
453
454/*
 * AP CPUs call this to sync up protected mode.
456 */
457void
458init_secondary(void)
459{
460	int	gsel_tss;
461	int	x, myid = bootAP;
462
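	/*
	 * Each CPU owns an NGDT-entry slice of gdt[]; point the private
	 * data and TSS descriptors at this AP's private space before the
	 * new GDT is loaded below.
	 */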
463	gdt_segs[GPRIV_SEL].ssd_base = (int) &SMP_prvspace[myid];
464	gdt_segs[GPROC0_SEL].ssd_base =
465		(int) &SMP_prvspace[myid].globaldata.gd_common_tss;
466	SMP_prvspace[myid].globaldata.gd_prvspace =
467		&SMP_prvspace[myid].globaldata;
468
469	for (x = 0; x < NGDT; x++) {
470		ssdtosd(&gdt_segs[x], &gdt[myid * NGDT + x].sd);
471	}
472
473	r_gdt.rd_limit = NGDT * sizeof(gdt[0]) - 1;
474	r_gdt.rd_base = (int) &gdt[myid * NGDT];
475	lgdt(&r_gdt);			/* does magic intra-segment return */
476
477	lidt(&r_idt);
478
479	lldt(_default_ldt);
480#ifdef USER_LDT
481	PCPU_SET(currentldt, _default_ldt);
482#endif
483
484	gsel_tss = GSEL(GPROC0_SEL, SEL_KPL);
485	gdt[myid * NGDT + GPROC0_SEL].sd.sd_type = SDT_SYS386TSS;
486	PCPU_SET(common_tss.tss_esp0, 0); /* not used until after switch */
487	PCPU_SET(common_tss.tss_ss0, GSEL(GDATA_SEL, SEL_KPL));
488	PCPU_SET(common_tss.tss_ioopt, (sizeof (struct i386tss)) << 16);
489	PCPU_SET(tss_gdt, &gdt[myid * NGDT + GPROC0_SEL].sd);
490	PCPU_SET(common_tssd, *PCPU_GET(tss_gdt));
491	ltr(gsel_tss);
492
493	pmap_set_opt();
494}
495
496
497#if defined(APIC_IO)
498/*
499 * Final configuration of the BSP's local APIC:
500 *  - disable 'pic mode'.
501 *  - disable 'virtual wire mode'.
502 *  - enable NMI.
503 */
504void
505bsp_apic_configure(void)
506{
507	u_char		byte;
508	u_int32_t	temp;
509
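	/*
	 * In PIC mode the IMCR (index 0x70 written to port 0x22, data at
	 * port 0x23) wires the 8259/NMI outputs straight to the BSP;
	 * setting bit 0 disconnects them so interrupts are delivered
	 * through the APICs instead.
	 */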
510	/* leave 'pic mode' if necessary */
511	if (picmode) {
512		outb(0x22, 0x70);	/* select IMCR */
513		byte = inb(0x23);	/* current contents */
514		byte |= 0x01;		/* mask external INTR */
515		outb(0x23, byte);	/* disconnect 8259s/NMI */
516	}
517
518	/* mask lint0 (the 8259 'virtual wire' connection) */
519	temp = lapic.lvt_lint0;
520	temp |= APIC_LVT_M;		/* set the mask */
521	lapic.lvt_lint0 = temp;
522
523        /* setup lint1 to handle NMI */
524        temp = lapic.lvt_lint1;
525        temp &= ~APIC_LVT_M;		/* clear the mask */
526        lapic.lvt_lint1 = temp;
527
528	if (bootverbose)
529		apic_dump("bsp_apic_configure()");
530}
531#endif  /* APIC_IO */
532
533
534/*******************************************************************
535 * local functions and data
536 */
537
538/*
539 * start the SMP system
540 */
541static void
542mp_enable(u_int boot_addr)
543{
544	int     x;
545#if defined(APIC_IO)
546	int     apic;
547	u_int   ux;
548#endif	/* APIC_IO */
549
550	POSTCODE(MP_ENABLE_POST);
551
552	/* turn on 4MB of V == P addressing so we can get to MP table */
553	*(int *)PTD = PG_V | PG_RW | ((uintptr_t)(void *)KPTphys & PG_FRAME);
554	invltlb();
555
556	/* examine the MP table for needed info, uses physical addresses */
557	x = mptable_pass2();
558
559	*(int *)PTD = 0;
560	invltlb();
561
562	/* can't process default configs till the CPU APIC is pmapped */
563	if (x)
564		default_mp_table(x);
565
566	/* post scan cleanup */
567	fix_mp_table();
568	setup_apic_irq_mapping();
569
570#if defined(APIC_IO)
571
572	/* fill the LOGICAL io_apic_versions table */
573	for (apic = 0; apic < mp_napics; ++apic) {
574		ux = io_apic_read(apic, IOAPIC_VER);
575		io_apic_versions[apic] = ux;
576		io_apic_set_id(apic, IO_TO_ID(apic));
577	}
578
579	/* program each IO APIC in the system */
580	for (apic = 0; apic < mp_napics; ++apic)
581		if (io_apic_setup(apic) < 0)
582			panic("IO APIC setup failure");
583
584	/* install a 'Spurious INTerrupt' vector */
585	setidt(XSPURIOUSINT_OFFSET, Xspuriousint,
586	       SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
587
588	/* install an inter-CPU IPI for TLB invalidation */
589	setidt(XINVLTLB_OFFSET, Xinvltlb,
590	       SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
591
592#ifdef BETTER_CLOCK
593	/* install an inter-CPU IPI for reading processor state */
594	setidt(XCPUCHECKSTATE_OFFSET, Xcpucheckstate,
595	       SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
596#endif
597
598	/* install an inter-CPU IPI for all-CPU rendezvous */
599	setidt(XRENDEZVOUS_OFFSET, Xrendezvous,
600	       SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
601
602	/* install an inter-CPU IPI for forcing an additional software trap */
603	setidt(XCPUAST_OFFSET, Xcpuast,
604	       SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
605
606	/* install an inter-CPU IPI for interrupt forwarding */
607	setidt(XFORWARD_IRQ_OFFSET, Xforward_irq,
608	       SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
609
610	/* install an inter-CPU IPI for CPU stop/restart */
611	setidt(XCPUSTOP_OFFSET, Xcpustop,
612	       SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
613
614#if defined(TEST_TEST1)
615	/* install a "fake hardware INTerrupt" vector */
616	setidt(XTEST1_OFFSET, Xtest1,
617	       SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
618#endif  /** TEST_TEST1 */
619
620#endif	/* APIC_IO */
621
622	/* initialize all SMP locks */
623	init_locks();
624
625	/* obtain the ap_boot_lock */
626	s_lock(&ap_boot_lock);
627
628	/* start each Application Processor */
629	start_all_aps(boot_addr);
630}
631
632
633/*
634 * look for the MP spec signature
635 */
636
637/* string defined by the Intel MP Spec as identifying the MP table */
638#define MP_SIG		0x5f504d5f	/* _MP_ */
639#define NEXT(X)		((X) += 4)
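
/*
 * NEXT() advances the u_int32_t index by 4, i.e. 16 bytes per step,
 * since the MP floating pointer structure must be paragraph (16-byte)
 * aligned; 'count' is the size of the region in 32-bit words.
 */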
640static int
641search_for_sig(u_int32_t target, int count)
642{
643	int     x;
644	u_int32_t *addr = (u_int32_t *) (KERNBASE + target);
645
646	for (x = 0; x < count; NEXT(x))
647		if (addr[x] == MP_SIG)
648			/* make array index a byte index */
649			return (target + (x * sizeof(u_int32_t)));
650
651	return -1;
652}
653
654
655static basetable_entry basetable_entry_types[] =
656{
657	{0, 20, "Processor"},
658	{1, 8, "Bus"},
659	{2, 8, "I/O APIC"},
660	{3, 8, "I/O INT"},
661	{4, 8, "Local INT"}
662};
663
664typedef struct BUSDATA {
665	u_char  bus_id;
666	enum busTypes bus_type;
667}       bus_datum;
668
669typedef struct INTDATA {
670	u_char  int_type;
671	u_short int_flags;
672	u_char  src_bus_id;
673	u_char  src_bus_irq;
674	u_char  dst_apic_id;
675	u_char  dst_apic_int;
676	u_char	int_vector;
677}       io_int, local_int;
678
679typedef struct BUSTYPENAME {
680	u_char  type;
681	char    name[7];
682}       bus_type_name;
683
684static bus_type_name bus_type_table[] =
685{
686	{CBUS, "CBUS"},
687	{CBUSII, "CBUSII"},
688	{EISA, "EISA"},
689	{MCA, "MCA"},
690	{UNKNOWN_BUSTYPE, "---"},
691	{ISA, "ISA"},
692	{MCA, "MCA"},
693	{UNKNOWN_BUSTYPE, "---"},
694	{UNKNOWN_BUSTYPE, "---"},
695	{UNKNOWN_BUSTYPE, "---"},
696	{UNKNOWN_BUSTYPE, "---"},
697	{UNKNOWN_BUSTYPE, "---"},
698	{PCI, "PCI"},
699	{UNKNOWN_BUSTYPE, "---"},
700	{UNKNOWN_BUSTYPE, "---"},
701	{UNKNOWN_BUSTYPE, "---"},
702	{UNKNOWN_BUSTYPE, "---"},
703	{XPRESS, "XPRESS"},
704	{UNKNOWN_BUSTYPE, "---"}
705};
706/* from MP spec v1.4, table 5-1 */
707static int default_data[7][5] =
708{
709/*   nbus, id0, type0, id1, type1 */
710	{1, 0, ISA, 255, 255},
711	{1, 0, EISA, 255, 255},
712	{1, 0, EISA, 255, 255},
713	{1, 0, MCA, 255, 255},
714	{2, 0, ISA, 1, PCI},
715	{2, 0, EISA, 1, PCI},
716	{2, 0, MCA, 1, PCI}
717};
718
719
720/* the bus data */
721static bus_datum *bus_data;
722
723/* the IO INT data, one entry per possible APIC INTerrupt */
724static io_int  *io_apic_ints;
725
726static int nintrs;
727
728static int processor_entry	__P((proc_entry_ptr entry, int cpu));
729static int bus_entry		__P((bus_entry_ptr entry, int bus));
730static int io_apic_entry	__P((io_apic_entry_ptr entry, int apic));
731static int int_entry		__P((int_entry_ptr entry, int intr));
732static int lookup_bus_type	__P((char *name));
733
734
735/*
736 * 1st pass on motherboard's Intel MP specification table.
737 *
738 * initializes:
739 *	mp_ncpus = 1
740 *
741 * determines:
742 *	cpu_apic_address (common to all CPUs)
743 *	io_apic_address[N]
744 *	mp_naps
745 *	mp_nbusses
746 *	mp_napics
747 *	nintrs
748 */
749static void
750mptable_pass1(void)
751{
752	int	x;
753	mpcth_t	cth;
754	int	totalSize;
755	void*	position;
756	int	count;
757	int	type;
758
759	POSTCODE(MPTABLE_PASS1_POST);
760
761	/* clear various tables */
762	for (x = 0; x < NAPICID; ++x) {
763		io_apic_address[x] = ~0;	/* IO APIC address table */
764	}
765
766	/* init everything to empty */
767	mp_naps = 0;
768	mp_nbusses = 0;
769	mp_napics = 0;
770	nintrs = 0;
771
772	/* check for use of 'default' configuration */
773	if (MPFPS_MPFB1 != 0) {
774		/* use default addresses */
775		cpu_apic_address = DEFAULT_APIC_BASE;
776		io_apic_address[0] = DEFAULT_IO_APIC_BASE;
777
778		/* fill in with defaults */
779		mp_naps = 2;		/* includes BSP */
780		mp_nbusses = default_data[MPFPS_MPFB1 - 1][0];
781#if defined(APIC_IO)
782		mp_napics = 1;
783		nintrs = 16;
784#endif	/* APIC_IO */
785	}
786	else {
787		if ((cth = mpfps->pap) == 0)
788			panic("MP Configuration Table Header MISSING!");
789
790		cpu_apic_address = (vm_offset_t) cth->apic_address;
791
792		/* walk the table, recording info of interest */
793		totalSize = cth->base_table_length - sizeof(struct MPCTH);
794		position = (u_char *) cth + sizeof(struct MPCTH);
795		count = cth->entry_count;
796
797		while (count--) {
798			switch (type = *(u_char *) position) {
799			case 0: /* processor_entry */
800				if (((proc_entry_ptr)position)->cpu_flags
801					& PROCENTRY_FLAG_EN)
802					++mp_naps;
803				break;
804			case 1: /* bus_entry */
805				++mp_nbusses;
806				break;
807			case 2: /* io_apic_entry */
808				if (((io_apic_entry_ptr)position)->apic_flags
809					& IOAPICENTRY_FLAG_EN)
810					io_apic_address[mp_napics++] =
811					    (vm_offset_t)((io_apic_entry_ptr)
812						position)->apic_address;
813				break;
814			case 3: /* int_entry */
815				++nintrs;
816				break;
			case 4:	/* local int entry */
818				break;
819			default:
820				panic("mpfps Base Table HOSED!");
821				/* NOTREACHED */
822			}
823
824			totalSize -= basetable_entry_types[type].length;
			position = (u_char *)position + basetable_entry_types[type].length;
826		}
827	}
828
829	/* qualify the numbers */
830	if (mp_naps > MAXCPU) {
831		printf("Warning: only using %d of %d available CPUs!\n",
832			MAXCPU, mp_naps);
833		mp_naps = MAXCPU;
834	}
835
836	/*
837	 * Count the BSP.
838	 * This is also used as a counter while starting the APs.
839	 */
840	mp_ncpus = 1;
841
842	--mp_naps;	/* subtract the BSP */
843}
844
845
846/*
847 * 2nd pass on motherboard's Intel MP specification table.
848 *
849 * sets:
850 *	boot_cpu_id
851 *	ID_TO_IO(N), phy APIC ID to log CPU/IO table
852 *	CPU_TO_ID(N), logical CPU to APIC ID table
853 *	IO_TO_ID(N), logical IO to APIC ID table
854 *	bus_data[N]
855 *	io_apic_ints[N]
856 */
857static int
858mptable_pass2(void)
859{
860	int     x;
861	mpcth_t cth;
862	int     totalSize;
863	void*   position;
864	int     count;
865	int     type;
866	int     apic, bus, cpu, intr;
867	int	i, j;
868	int	pgeflag;
869
870	POSTCODE(MPTABLE_PASS2_POST);
871
872	pgeflag = 0;		/* XXX - Not used under SMP yet.  */
873
874	MALLOC(io_apic_versions, u_int32_t *, sizeof(u_int32_t) * mp_napics,
875	    M_DEVBUF, M_WAITOK);
876	MALLOC(ioapic, volatile ioapic_t **, sizeof(ioapic_t *) * mp_napics,
877	    M_DEVBUF, M_WAITOK);
878	MALLOC(io_apic_ints, io_int *, sizeof(io_int) * (nintrs + 1),
879	    M_DEVBUF, M_WAITOK);
880	MALLOC(bus_data, bus_datum *, sizeof(bus_datum) * mp_nbusses,
881	    M_DEVBUF, M_WAITOK);
882
883	bzero(ioapic, sizeof(ioapic_t *) * mp_napics);
884
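	/*
	 * Map each IO APIC's register page into the SMP private page
	 * table, filling slots from NPTEPG-2 downward and sharing a slot
	 * when two IO APICs fall into the same page frame.
	 */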
885	for (i = 0; i < mp_napics; i++) {
886		for (j = 0; j < mp_napics; j++) {
887			/* same page frame as a previous IO apic? */
888			if (((vm_offset_t)SMPpt[NPTEPG-2-j] & PG_FRAME) ==
889			    (io_apic_address[i] & PG_FRAME)) {
890				ioapic[i] = (ioapic_t *)((u_int)SMP_prvspace
891					+ (NPTEPG-2-j) * PAGE_SIZE
892					+ (io_apic_address[i] & PAGE_MASK));
893				break;
894			}
895			/* use this slot if available */
896			if (((vm_offset_t)SMPpt[NPTEPG-2-j] & PG_FRAME) == 0) {
897				SMPpt[NPTEPG-2-j] = (pt_entry_t)(PG_V | PG_RW |
898				    pgeflag | (io_apic_address[i] & PG_FRAME));
899				ioapic[i] = (ioapic_t *)((u_int)SMP_prvspace
900					+ (NPTEPG-2-j) * PAGE_SIZE
901					+ (io_apic_address[i] & PAGE_MASK));
902				break;
903			}
904		}
905	}
906
907	/* clear various tables */
908	for (x = 0; x < NAPICID; ++x) {
909		ID_TO_IO(x) = -1;	/* phy APIC ID to log CPU/IO table */
910		CPU_TO_ID(x) = -1;	/* logical CPU to APIC ID table */
911		IO_TO_ID(x) = -1;	/* logical IO to APIC ID table */
912	}
913
914	/* clear bus data table */
915	for (x = 0; x < mp_nbusses; ++x)
916		bus_data[x].bus_id = 0xff;
917
918	/* clear IO APIC INT table */
919	for (x = 0; x < (nintrs + 1); ++x) {
920		io_apic_ints[x].int_type = 0xff;
921		io_apic_ints[x].int_vector = 0xff;
922	}
923
924	/* setup the cpu/apic mapping arrays */
925	boot_cpu_id = -1;
926
927	/* record whether PIC or virtual-wire mode */
928	picmode = (mpfps->mpfb2 & 0x80) ? 1 : 0;
929
930	/* check for use of 'default' configuration */
931	if (MPFPS_MPFB1 != 0)
932		return MPFPS_MPFB1;	/* return default configuration type */
933
934	if ((cth = mpfps->pap) == 0)
935		panic("MP Configuration Table Header MISSING!");
936
937	/* walk the table, recording info of interest */
938	totalSize = cth->base_table_length - sizeof(struct MPCTH);
939	position = (u_char *) cth + sizeof(struct MPCTH);
940	count = cth->entry_count;
941	apic = bus = intr = 0;
942	cpu = 1;				/* pre-count the BSP */
943
944	while (count--) {
945		switch (type = *(u_char *) position) {
946		case 0:
947			if (processor_entry(position, cpu))
948				++cpu;
949			break;
950		case 1:
951			if (bus_entry(position, bus))
952				++bus;
953			break;
954		case 2:
955			if (io_apic_entry(position, apic))
956				++apic;
957			break;
958		case 3:
959			if (int_entry(position, intr))
960				++intr;
961			break;
962		case 4:
963			/* int_entry(position); */
964			break;
965		default:
966			panic("mpfps Base Table HOSED!");
967			/* NOTREACHED */
968		}
969
970		totalSize -= basetable_entry_types[type].length;
		position = (u_char *)position + basetable_entry_types[type].length;
972	}
973
974	if (boot_cpu_id == -1)
975		panic("NO BSP found!");
976
	/* report the fact that it's NOT a default configuration */
978	return 0;
979}
980
981
982void
983assign_apic_irq(int apic, int intpin, int irq)
984{
985	int x;
986
987	if (int_to_apicintpin[irq].ioapic != -1)
988		panic("assign_apic_irq: inconsistent table");
989
990	int_to_apicintpin[irq].ioapic = apic;
991	int_to_apicintpin[irq].int_pin = intpin;
992	int_to_apicintpin[irq].apic_address = ioapic[apic];
993	int_to_apicintpin[irq].redirindex = IOAPIC_REDTBL + 2 * intpin;
994
995	for (x = 0; x < nintrs; x++) {
996		if ((io_apic_ints[x].int_type == 0 ||
997		     io_apic_ints[x].int_type == 3) &&
998		    io_apic_ints[x].int_vector == 0xff &&
999		    io_apic_ints[x].dst_apic_id == IO_TO_ID(apic) &&
1000		    io_apic_ints[x].dst_apic_int == intpin)
1001			io_apic_ints[x].int_vector = irq;
1002	}
1003}
1004
1005void
1006revoke_apic_irq(int irq)
1007{
1008	int x;
1009	int oldapic;
1010	int oldintpin;
1011
1012	if (int_to_apicintpin[irq].ioapic == -1)
		panic("revoke_apic_irq: inconsistent table");
1014
1015	oldapic = int_to_apicintpin[irq].ioapic;
1016	oldintpin = int_to_apicintpin[irq].int_pin;
1017
1018	int_to_apicintpin[irq].ioapic = -1;
1019	int_to_apicintpin[irq].int_pin = 0;
1020	int_to_apicintpin[irq].apic_address = NULL;
1021	int_to_apicintpin[irq].redirindex = 0;
1022
1023	for (x = 0; x < nintrs; x++) {
1024		if ((io_apic_ints[x].int_type == 0 ||
1025		     io_apic_ints[x].int_type == 3) &&
1026		    io_apic_ints[x].int_vector == 0xff &&
1027		    io_apic_ints[x].dst_apic_id == IO_TO_ID(oldapic) &&
1028		    io_apic_ints[x].dst_apic_int == oldintpin)
1029			io_apic_ints[x].int_vector = 0xff;
1030	}
1031}
1032
1033
1034
1035static void
1036swap_apic_id(int apic, int oldid, int newid)
1037{
1038	int x;
1039	int oapic;
1040
1041
1042	if (oldid == newid)
1043		return;			/* Nothing to do */
1044
1045	printf("Changing APIC ID for IO APIC #%d from %d to %d in MP table\n",
1046	       apic, oldid, newid);
1047
1048	/* Swap physical APIC IDs in interrupt entries */
1049	for (x = 0; x < nintrs; x++) {
1050		if (io_apic_ints[x].dst_apic_id == oldid)
1051			io_apic_ints[x].dst_apic_id = newid;
1052		else if (io_apic_ints[x].dst_apic_id == newid)
1053			io_apic_ints[x].dst_apic_id = oldid;
1054	}
1055
1056	/* Swap physical APIC IDs in IO_TO_ID mappings */
1057	for (oapic = 0; oapic < mp_napics; oapic++)
1058		if (IO_TO_ID(oapic) == newid)
1059			break;
1060
1061	if (oapic < mp_napics) {
1062		printf("Changing APIC ID for IO APIC #%d from "
1063		       "%d to %d in MP table\n",
1064		       oapic, newid, oldid);
1065		IO_TO_ID(oapic) = oldid;
1066	}
1067	IO_TO_ID(apic) = newid;
1068}
1069
1070
1071static void
1072fix_id_to_io_mapping(void)
1073{
1074	int x;
1075
1076	for (x = 0; x < NAPICID; x++)
1077		ID_TO_IO(x) = -1;
1078
1079	for (x = 0; x <= mp_naps; x++)
1080		if (CPU_TO_ID(x) < NAPICID)
1081			ID_TO_IO(CPU_TO_ID(x)) = x;
1082
1083	for (x = 0; x < mp_napics; x++)
1084		if (IO_TO_ID(x) < NAPICID)
1085			ID_TO_IO(IO_TO_ID(x)) = x;
1086}
1087
1088
1089static int
1090first_free_apic_id(void)
1091{
1092	int freeid, x;
1093
1094	for (freeid = 0; freeid < NAPICID; freeid++) {
1095		for (x = 0; x <= mp_naps; x++)
1096			if (CPU_TO_ID(x) == freeid)
1097				break;
1098		if (x <= mp_naps)
1099			continue;
1100		for (x = 0; x < mp_napics; x++)
1101			if (IO_TO_ID(x) == freeid)
1102				break;
1103		if (x < mp_napics)
1104			continue;
1105		return freeid;
1106	}
1107	return freeid;
1108}
1109
1110
1111static int
1112io_apic_id_acceptable(int apic, int id)
1113{
1114	int cpu;		/* Logical CPU number */
1115	int oapic;		/* Logical IO APIC number for other IO APIC */
1116
1117	if (id >= NAPICID)
1118		return 0;	/* Out of range */
1119
1120	for (cpu = 0; cpu <= mp_naps; cpu++)
1121		if (CPU_TO_ID(cpu) == id)
1122			return 0;	/* Conflict with CPU */
1123
1124	for (oapic = 0; oapic < mp_napics && oapic < apic; oapic++)
1125		if (IO_TO_ID(oapic) == id)
1126			return 0;	/* Conflict with other APIC */
1127
1128	return 1;		/* ID is acceptable for IO APIC */
1129}
1130
1131
1132/*
1133 * parse an Intel MP specification table
1134 */
1135static void
1136fix_mp_table(void)
1137{
1138	int	x;
1139	int	id;
1140	int	bus_0 = 0;	/* Stop GCC warning */
1141	int	bus_pci = 0;	/* Stop GCC warning */
1142	int	num_pci_bus;
1143	int	apic;		/* IO APIC unit number */
1144	int     freeid;		/* Free physical APIC ID */
1145	int	physid;		/* Current physical IO APIC ID */
1146
1147	/*
1148	 * Fix mis-numbering of the PCI bus and its INT entries if the BIOS
1149	 * did it wrong.  The MP spec says that when more than 1 PCI bus
1150	 * exists the BIOS must begin with bus entries for the PCI bus and use
1151	 * actual PCI bus numbering.  This implies that when only 1 PCI bus
1152	 * exists the BIOS can choose to ignore this ordering, and indeed many
1153	 * MP motherboards do ignore it.  This causes a problem when the PCI
1154	 * sub-system makes requests of the MP sub-system based on PCI bus
1155	 * numbers.	So here we look for the situation and renumber the
1156	 * busses and associated INTs in an effort to "make it right".
1157	 */
1158
1159	/* find bus 0, PCI bus, count the number of PCI busses */
1160	for (num_pci_bus = 0, x = 0; x < mp_nbusses; ++x) {
1161		if (bus_data[x].bus_id == 0) {
1162			bus_0 = x;
1163		}
1164		if (bus_data[x].bus_type == PCI) {
1165			++num_pci_bus;
1166			bus_pci = x;
1167		}
1168	}
1169	/*
1170	 * bus_0 == slot of bus with ID of 0
1171	 * bus_pci == slot of last PCI bus encountered
1172	 */
1173
1174	/* check the 1 PCI bus case for sanity */
1175	/* if it is number 0 all is well */
1176	if (num_pci_bus == 1 &&
1177	    bus_data[bus_pci].bus_id != 0) {
1178
1179		/* mis-numbered, swap with whichever bus uses slot 0 */
1180
1181		/* swap the bus entry types */
1182		bus_data[bus_pci].bus_type = bus_data[bus_0].bus_type;
1183		bus_data[bus_0].bus_type = PCI;
1184
		/* swap each relevant INTerrupt entry */
1186		id = bus_data[bus_pci].bus_id;
1187		for (x = 0; x < nintrs; ++x) {
1188			if (io_apic_ints[x].src_bus_id == id) {
1189				io_apic_ints[x].src_bus_id = 0;
1190			}
1191			else if (io_apic_ints[x].src_bus_id == 0) {
1192				io_apic_ints[x].src_bus_id = id;
1193			}
1194		}
1195	}
1196
1197	/* Assign IO APIC IDs.
1198	 *
1199	 * First try the existing ID. If a conflict is detected, try
1200	 * the ID in the MP table.  If a conflict is still detected, find
1201	 * a free id.
1202	 *
	 * We cannot use the ID_TO_IO table before all conflicts have been
1204	 * resolved and the table has been corrected.
1205	 */
1206	for (apic = 0; apic < mp_napics; ++apic) { /* For all IO APICs */
1207
1208		/* First try to use the value set by the BIOS */
1209		physid = io_apic_get_id(apic);
1210		if (io_apic_id_acceptable(apic, physid)) {
1211			if (IO_TO_ID(apic) != physid)
1212				swap_apic_id(apic, IO_TO_ID(apic), physid);
1213			continue;
1214		}
1215
1216		/* Then check if the value in the MP table is acceptable */
1217		if (io_apic_id_acceptable(apic, IO_TO_ID(apic)))
1218			continue;
1219
1220		/* Last resort, find a free APIC ID and use it */
1221		freeid = first_free_apic_id();
1222		if (freeid >= NAPICID)
1223			panic("No free physical APIC IDs found");
1224
1225		if (io_apic_id_acceptable(apic, freeid)) {
1226			swap_apic_id(apic, IO_TO_ID(apic), freeid);
1227			continue;
1228		}
1229		panic("Free physical APIC ID not usable");
1230	}
1231	fix_id_to_io_mapping();
1232
1233	/* detect and fix broken Compaq MP table */
1234	if (apic_int_type(0, 0) == -1) {
1235		printf("APIC_IO: MP table broken: 8259->APIC entry missing!\n");
1236		io_apic_ints[nintrs].int_type = 3;	/* ExtInt */
1237		io_apic_ints[nintrs].int_vector = 0xff;	/* Unassigned */
1238		/* XXX fixme, set src bus id etc, but it doesn't seem to hurt */
1239		io_apic_ints[nintrs].dst_apic_id = IO_TO_ID(0);
1240		io_apic_ints[nintrs].dst_apic_int = 0;	/* Pin 0 */
1241		nintrs++;
1242	}
1243}
1244
1245
1246/* Assign low level interrupt handlers */
1247static void
1248setup_apic_irq_mapping(void)
1249{
1250	int	x;
1251	int	int_vector;
1252
1253	/* Clear array */
1254	for (x = 0; x < APIC_INTMAPSIZE; x++) {
1255		int_to_apicintpin[x].ioapic = -1;
1256		int_to_apicintpin[x].int_pin = 0;
1257		int_to_apicintpin[x].apic_address = NULL;
1258		int_to_apicintpin[x].redirindex = 0;
1259	}
1260
1261	/* First assign ISA/EISA interrupts */
1262	for (x = 0; x < nintrs; x++) {
1263		int_vector = io_apic_ints[x].src_bus_irq;
1264		if (int_vector < APIC_INTMAPSIZE &&
1265		    io_apic_ints[x].int_vector == 0xff &&
1266		    int_to_apicintpin[int_vector].ioapic == -1 &&
1267		    (apic_int_is_bus_type(x, ISA) ||
1268		     apic_int_is_bus_type(x, EISA)) &&
1269		    io_apic_ints[x].int_type == 0) {
1270			assign_apic_irq(ID_TO_IO(io_apic_ints[x].dst_apic_id),
1271					io_apic_ints[x].dst_apic_int,
1272					int_vector);
1273		}
1274	}
1275
1276	/* Assign first set of interrupts to intpins on IOAPIC #0 */
1277	for (x = 0; x < nintrs; x++) {
1278		int_vector = io_apic_ints[x].dst_apic_int;
1279		if (int_vector < APIC_INTMAPSIZE &&
1280		    io_apic_ints[x].dst_apic_id == IO_TO_ID(0) &&
1281		    io_apic_ints[x].int_vector == 0xff &&
1282		    int_to_apicintpin[int_vector].ioapic == -1 &&
1283		    (io_apic_ints[x].int_type == 0 ||
1284		     io_apic_ints[x].int_type == 3)) {
1285			assign_apic_irq(0,
1286					io_apic_ints[x].dst_apic_int,
1287					int_vector);
1288		}
1289	}
1290	/*
1291	 * Assign interrupts for remaining intpins.
1292	 * Skip IOAPIC #0 intpin 0 if the type is ExtInt, since this indicates
	 * that an entry for ISA/EISA irq 0 exists, and a fallback to mixed mode
1294	 * due to 8254 interrupts not being delivered can reuse that low level
1295	 * interrupt handler.
1296	 */
1297	int_vector = 0;
1298	while (int_vector < APIC_INTMAPSIZE &&
1299	       int_to_apicintpin[int_vector].ioapic != -1)
1300		int_vector++;
1301	for (x = 0; x < nintrs && int_vector < APIC_INTMAPSIZE; x++) {
1302		if ((io_apic_ints[x].int_type == 0 ||
1303		     (io_apic_ints[x].int_type == 3 &&
1304		      (io_apic_ints[x].dst_apic_id != IO_TO_ID(0) ||
1305		       io_apic_ints[x].dst_apic_int != 0))) &&
1306		    io_apic_ints[x].int_vector == 0xff) {
1307			assign_apic_irq(ID_TO_IO(io_apic_ints[x].dst_apic_id),
1308					io_apic_ints[x].dst_apic_int,
1309					int_vector);
1310			int_vector++;
1311			while (int_vector < APIC_INTMAPSIZE &&
1312			       int_to_apicintpin[int_vector].ioapic != -1)
1313				int_vector++;
1314		}
1315	}
1316}
1317
1318
1319static int
1320processor_entry(proc_entry_ptr entry, int cpu)
1321{
1322	/* check for usability */
1323	if (!(entry->cpu_flags & PROCENTRY_FLAG_EN))
1324		return 0;
1325
	if (entry->apic_id >= NAPICID)
1327		panic("CPU APIC ID out of range (0..%d)", NAPICID - 1);
1328	/* check for BSP flag */
1329	if (entry->cpu_flags & PROCENTRY_FLAG_BP) {
1330		boot_cpu_id = entry->apic_id;
1331		CPU_TO_ID(0) = entry->apic_id;
1332		ID_TO_CPU(entry->apic_id) = 0;
		return 0;	/* it's already been counted */
1334	}
1335
1336	/* add another AP to list, if less than max number of CPUs */
1337	else if (cpu < MAXCPU) {
1338		CPU_TO_ID(cpu) = entry->apic_id;
1339		ID_TO_CPU(entry->apic_id) = cpu;
1340		return 1;
1341	}
1342
1343	return 0;
1344}
1345
1346
1347static int
1348bus_entry(bus_entry_ptr entry, int bus)
1349{
1350	int     x;
1351	char    c, name[8];
1352
1353	/* encode the name into an index */
1354	for (x = 0; x < 6; ++x) {
1355		if ((c = entry->bus_type[x]) == ' ')
1356			break;
1357		name[x] = c;
1358	}
1359	name[x] = '\0';
1360
1361	if ((x = lookup_bus_type(name)) == UNKNOWN_BUSTYPE)
1362		panic("unknown bus type: '%s'", name);
1363
1364	bus_data[bus].bus_id = entry->bus_id;
1365	bus_data[bus].bus_type = x;
1366
1367	return 1;
1368}
1369
1370
1371static int
1372io_apic_entry(io_apic_entry_ptr entry, int apic)
1373{
1374	if (!(entry->apic_flags & IOAPICENTRY_FLAG_EN))
1375		return 0;
1376
1377	IO_TO_ID(apic) = entry->apic_id;
1378	if (entry->apic_id < NAPICID)
1379		ID_TO_IO(entry->apic_id) = apic;
1380
1381	return 1;
1382}
1383
1384
1385static int
1386lookup_bus_type(char *name)
1387{
1388	int     x;
1389
1390	for (x = 0; x < MAX_BUSTYPE; ++x)
1391		if (strcmp(bus_type_table[x].name, name) == 0)
1392			return bus_type_table[x].type;
1393
1394	return UNKNOWN_BUSTYPE;
1395}
1396
1397
1398static int
1399int_entry(int_entry_ptr entry, int intr)
1400{
1401	int apic;
1402
1403	io_apic_ints[intr].int_type = entry->int_type;
1404	io_apic_ints[intr].int_flags = entry->int_flags;
1405	io_apic_ints[intr].src_bus_id = entry->src_bus_id;
1406	io_apic_ints[intr].src_bus_irq = entry->src_bus_irq;
1407	if (entry->dst_apic_id == 255) {
		/* This signal goes to all IO APICs.  Select an IO APIC
		   with a sufficient number of interrupt pins. */
1410		for (apic = 0; apic < mp_napics; apic++)
1411			if (((io_apic_read(apic, IOAPIC_VER) &
1412			      IOART_VER_MAXREDIR) >> MAXREDIRSHIFT) >=
1413			    entry->dst_apic_int)
1414				break;
1415		if (apic < mp_napics)
1416			io_apic_ints[intr].dst_apic_id = IO_TO_ID(apic);
1417		else
1418			io_apic_ints[intr].dst_apic_id = entry->dst_apic_id;
1419	} else
1420		io_apic_ints[intr].dst_apic_id = entry->dst_apic_id;
1421	io_apic_ints[intr].dst_apic_int = entry->dst_apic_int;
1422
1423	return 1;
1424}
1425
1426
1427static int
1428apic_int_is_bus_type(int intr, int bus_type)
1429{
1430	int     bus;
1431
1432	for (bus = 0; bus < mp_nbusses; ++bus)
1433		if ((bus_data[bus].bus_id == io_apic_ints[intr].src_bus_id)
1434		    && ((int) bus_data[bus].bus_type == bus_type))
1435			return 1;
1436
1437	return 0;
1438}
1439
1440
1441/*
1442 * Given a traditional ISA INT mask, return an APIC mask.
1443 */
1444u_int
1445isa_apic_mask(u_int isa_mask)
1446{
1447	int isa_irq;
1448	int apic_pin;
1449
1450#if defined(SKIP_IRQ15_REDIRECT)
1451	if (isa_mask == (1 << 15)) {
1452		printf("skipping ISA IRQ15 redirect\n");
1453		return isa_mask;
1454	}
1455#endif  /* SKIP_IRQ15_REDIRECT */
1456
1457	isa_irq = ffs(isa_mask);		/* find its bit position */
1458	if (isa_irq == 0)			/* doesn't exist */
1459		return 0;
1460	--isa_irq;				/* make it zero based */
1461
1462	apic_pin = isa_apic_irq(isa_irq);	/* look for APIC connection */
1463	if (apic_pin == -1)
1464		return 0;
1465
1466	return (1 << apic_pin);			/* convert pin# to a mask */
1467}
1468
1469
1470/*
1471 * Determine which APIC pin an ISA/EISA INT is attached to.
1472 */
1473#define INTTYPE(I)	(io_apic_ints[(I)].int_type)
1474#define INTPIN(I)	(io_apic_ints[(I)].dst_apic_int)
1475#define INTIRQ(I)	(io_apic_ints[(I)].int_vector)
1476#define INTAPIC(I)	(ID_TO_IO(io_apic_ints[(I)].dst_apic_id))
1477
1478#define SRCBUSIRQ(I)	(io_apic_ints[(I)].src_bus_irq)
1479int
1480isa_apic_irq(int isa_irq)
1481{
1482	int     intr;
1483
1484	for (intr = 0; intr < nintrs; ++intr) {		/* check each record */
1485		if (INTTYPE(intr) == 0) {		/* standard INT */
1486			if (SRCBUSIRQ(intr) == isa_irq) {
1487				if (apic_int_is_bus_type(intr, ISA) ||
1488			            apic_int_is_bus_type(intr, EISA))
1489					return INTIRQ(intr);	/* found */
1490			}
1491		}
1492	}
1493	return -1;					/* NOT found */
1494}
1495
1496
1497/*
1498 * Determine which APIC pin a PCI INT is attached to.
1499 */
1500#define SRCBUSID(I)	(io_apic_ints[(I)].src_bus_id)
1501#define SRCBUSDEVICE(I)	((io_apic_ints[(I)].src_bus_irq >> 2) & 0x1f)
1502#define SRCBUSLINE(I)	(io_apic_ints[(I)].src_bus_irq & 0x03)
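
/*
 * For PCI source buses the MP spec packs the interrupt source into
 * src_bus_irq: bits 0-1 select the interrupt line (INTA#..INTD#) and
 * bits 2-6 hold the device number, which the macros above pull apart.
 */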
1503int
1504pci_apic_irq(int pciBus, int pciDevice, int pciInt)
1505{
1506	int     intr;
1507
1508	--pciInt;					/* zero based */
1509
1510	for (intr = 0; intr < nintrs; ++intr)		/* check each record */
1511		if ((INTTYPE(intr) == 0)		/* standard INT */
1512		    && (SRCBUSID(intr) == pciBus)
1513		    && (SRCBUSDEVICE(intr) == pciDevice)
1514		    && (SRCBUSLINE(intr) == pciInt))	/* a candidate IRQ */
1515			if (apic_int_is_bus_type(intr, PCI))
1516				return INTIRQ(intr);	/* exact match */
1517
1518	return -1;					/* NOT found */
1519}
1520
1521int
1522next_apic_irq(int irq)
1523{
1524	int intr, ointr;
1525	int bus, bustype;
1526
1527	bus = 0;
1528	bustype = 0;
1529	for (intr = 0; intr < nintrs; intr++) {
1530		if (INTIRQ(intr) != irq || INTTYPE(intr) != 0)
1531			continue;
1532		bus = SRCBUSID(intr);
1533		bustype = apic_bus_type(bus);
1534		if (bustype != ISA &&
1535		    bustype != EISA &&
1536		    bustype != PCI)
1537			continue;
1538		break;
1539	}
1540	if (intr >= nintrs) {
1541		return -1;
1542	}
1543	for (ointr = intr + 1; ointr < nintrs; ointr++) {
1544		if (INTTYPE(ointr) != 0)
1545			continue;
1546		if (bus != SRCBUSID(ointr))
1547			continue;
1548		if (bustype == PCI) {
1549			if (SRCBUSDEVICE(intr) != SRCBUSDEVICE(ointr))
1550				continue;
1551			if (SRCBUSLINE(intr) != SRCBUSLINE(ointr))
1552				continue;
1553		}
1554		if (bustype == ISA || bustype == EISA) {
1555			if (SRCBUSIRQ(intr) != SRCBUSIRQ(ointr))
1556				continue;
1557		}
1558		if (INTPIN(intr) == INTPIN(ointr))
1559			continue;
1560		break;
1561	}
1562	if (ointr >= nintrs) {
1563		return -1;
1564	}
1565	return INTIRQ(ointr);
1566}
1567#undef SRCBUSLINE
1568#undef SRCBUSDEVICE
1569#undef SRCBUSID
1570#undef SRCBUSIRQ
1571
1572#undef INTPIN
1573#undef INTIRQ
1574#undef INTAPIC
1575#undef INTTYPE
1576
1577
1578/*
1579 * Reprogram the MB chipset to NOT redirect an ISA INTerrupt.
1580 *
1581 * XXX FIXME:
1582 *  Exactly what this means is unclear at this point.  It is a solution
1583 *  for motherboards that redirect the MBIRQ0 pin.  Generically a motherboard
1584 *  could route any of the ISA INTs to upper (>15) IRQ values.  But most would
1585 *  NOT be redirected via MBIRQ0, thus "undirect()ing" them would NOT be an
1586 *  option.
1587 */
1588int
1589undirect_isa_irq(int rirq)
1590{
1591#if defined(READY)
1592	if (bootverbose)
1593	    printf("Freeing redirected ISA irq %d.\n", rirq);
1594	/** FIXME: tickle the MB redirector chip */
1595	return -1;
1596#else
1597	if (bootverbose)
1598	    printf("Freeing (NOT implemented) redirected ISA irq %d.\n", rirq);
1599	return 0;
1600#endif  /* READY */
1601}
1602
1603
1604/*
1605 * Reprogram the MB chipset to NOT redirect a PCI INTerrupt
1606 */
1607int
1608undirect_pci_irq(int rirq)
1609{
1610#if defined(READY)
1611	if (bootverbose)
1612		printf("Freeing redirected PCI irq %d.\n", rirq);
1613
1614	/** FIXME: tickle the MB redirector chip */
1615	return -1;
1616#else
1617	if (bootverbose)
1618		printf("Freeing (NOT implemented) redirected PCI irq %d.\n",
1619		       rirq);
1620	return 0;
1621#endif  /* READY */
1622}
1623
1624
1625/*
1626 * given a bus ID, return:
1627 *  the bus type if found
1628 *  -1 if NOT found
1629 */
1630int
1631apic_bus_type(int id)
1632{
1633	int     x;
1634
1635	for (x = 0; x < mp_nbusses; ++x)
1636		if (bus_data[x].bus_id == id)
1637			return bus_data[x].bus_type;
1638
1639	return -1;
1640}
1641
1642
1643/*
1644 * given a LOGICAL APIC# and pin#, return:
1645 *  the associated src bus ID if found
1646 *  -1 if NOT found
1647 */
1648int
1649apic_src_bus_id(int apic, int pin)
1650{
1651	int     x;
1652
1653	/* search each of the possible INTerrupt sources */
1654	for (x = 0; x < nintrs; ++x)
1655		if ((apic == ID_TO_IO(io_apic_ints[x].dst_apic_id)) &&
1656		    (pin == io_apic_ints[x].dst_apic_int))
1657			return (io_apic_ints[x].src_bus_id);
1658
1659	return -1;		/* NOT found */
1660}
1661
1662
1663/*
1664 * given a LOGICAL APIC# and pin#, return:
1665 *  the associated src bus IRQ if found
1666 *  -1 if NOT found
1667 */
1668int
1669apic_src_bus_irq(int apic, int pin)
1670{
1671	int     x;
1672
1673	for (x = 0; x < nintrs; x++)
1674		if ((apic == ID_TO_IO(io_apic_ints[x].dst_apic_id)) &&
1675		    (pin == io_apic_ints[x].dst_apic_int))
1676			return (io_apic_ints[x].src_bus_irq);
1677
1678	return -1;		/* NOT found */
1679}
1680
1681
1682/*
1683 * given a LOGICAL APIC# and pin#, return:
1684 *  the associated INTerrupt type if found
1685 *  -1 if NOT found
1686 */
1687int
1688apic_int_type(int apic, int pin)
1689{
1690	int     x;
1691
1692	/* search each of the possible INTerrupt sources */
1693	for (x = 0; x < nintrs; ++x)
1694		if ((apic == ID_TO_IO(io_apic_ints[x].dst_apic_id)) &&
1695		    (pin == io_apic_ints[x].dst_apic_int))
1696			return (io_apic_ints[x].int_type);
1697
1698	return -1;		/* NOT found */
1699}
1700
1701int
1702apic_irq(int apic, int pin)
1703{
1704	int x;
1705	int res;
1706
1707	for (x = 0; x < nintrs; ++x)
1708		if ((apic == ID_TO_IO(io_apic_ints[x].dst_apic_id)) &&
1709		    (pin == io_apic_ints[x].dst_apic_int)) {
1710			res = io_apic_ints[x].int_vector;
1711			if (res == 0xff)
1712				return -1;
1713			if (apic != int_to_apicintpin[res].ioapic)
1714				panic("apic_irq: inconsistent table");
1715			if (pin != int_to_apicintpin[res].int_pin)
1716				panic("apic_irq inconsistent table (2)");
1717			return res;
1718		}
1719	return -1;
1720}
1721
1722
1723/*
1724 * given a LOGICAL APIC# and pin#, return:
1725 *  the associated trigger mode if found
1726 *  -1 if NOT found
1727 */
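/*
 * Bits 3:2 of int_flags hold the MP spec trigger mode: 00 conforms to
 * the bus default, 01 is edge triggered, 11 is level triggered.
 */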
1728int
1729apic_trigger(int apic, int pin)
1730{
1731	int     x;
1732
1733	/* search each of the possible INTerrupt sources */
1734	for (x = 0; x < nintrs; ++x)
1735		if ((apic == ID_TO_IO(io_apic_ints[x].dst_apic_id)) &&
1736		    (pin == io_apic_ints[x].dst_apic_int))
1737			return ((io_apic_ints[x].int_flags >> 2) & 0x03);
1738
1739	return -1;		/* NOT found */
1740}
1741
1742
1743/*
1744 * given a LOGICAL APIC# and pin#, return:
1745 *  the associated 'active' level if found
1746 *  -1 if NOT found
1747 */
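/*
 * Bits 1:0 of int_flags hold the MP spec polarity: 00 conforms to the
 * bus default, 01 is active high, 11 is active low.
 */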
1748int
1749apic_polarity(int apic, int pin)
1750{
1751	int     x;
1752
1753	/* search each of the possible INTerrupt sources */
1754	for (x = 0; x < nintrs; ++x)
1755		if ((apic == ID_TO_IO(io_apic_ints[x].dst_apic_id)) &&
1756		    (pin == io_apic_ints[x].dst_apic_int))
1757			return (io_apic_ints[x].int_flags & 0x03);
1758
1759	return -1;		/* NOT found */
1760}
1761
1762
1763/*
1764 * set data according to MP defaults
1765 * FIXME: probably not complete yet...
1766 */
1767static void
1768default_mp_table(int type)
1769{
1770	int     ap_cpu_id;
1771#if defined(APIC_IO)
1772	int     io_apic_id;
1773	int     pin;
1774#endif	/* APIC_IO */
1775
1776#if 0
1777	printf("  MP default config type: %d\n", type);
1778	switch (type) {
1779	case 1:
1780		printf("   bus: ISA, APIC: 82489DX\n");
1781		break;
1782	case 2:
1783		printf("   bus: EISA, APIC: 82489DX\n");
1784		break;
1785	case 3:
1786		printf("   bus: EISA, APIC: 82489DX\n");
1787		break;
1788	case 4:
1789		printf("   bus: MCA, APIC: 82489DX\n");
1790		break;
1791	case 5:
1792		printf("   bus: ISA+PCI, APIC: Integrated\n");
1793		break;
1794	case 6:
1795		printf("   bus: EISA+PCI, APIC: Integrated\n");
1796		break;
1797	case 7:
1798		printf("   bus: MCA+PCI, APIC: Integrated\n");
1799		break;
1800	default:
1801		printf("   future type\n");
1802		break;
1803		/* NOTREACHED */
1804	}
1805#endif	/* 0 */
1806
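	/*
	 * The MP default configurations describe exactly two processors;
	 * we assume their local APIC IDs are 0 and 1, so the lone AP gets
	 * whichever ID the BSP (read from lapic.id below) is not using.
	 */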
1807	boot_cpu_id = (lapic.id & APIC_ID_MASK) >> 24;
1808	ap_cpu_id = (boot_cpu_id == 0) ? 1 : 0;
1809
1810	/* BSP */
1811	CPU_TO_ID(0) = boot_cpu_id;
1812	ID_TO_CPU(boot_cpu_id) = 0;
1813
1814	/* one and only AP */
1815	CPU_TO_ID(1) = ap_cpu_id;
1816	ID_TO_CPU(ap_cpu_id) = 1;
1817
1818#if defined(APIC_IO)
1819	/* one and only IO APIC */
1820	io_apic_id = (io_apic_read(0, IOAPIC_ID) & APIC_ID_MASK) >> 24;
1821
1822	/*
1823	 * sanity check, refer to MP spec section 3.6.6, last paragraph
1824	 * necessary as some hardware isn't properly setting up the IO APIC
1825	 */
1826#if defined(REALLY_ANAL_IOAPICID_VALUE)
1827	if (io_apic_id != 2) {
1828#else
1829	if ((io_apic_id == 0) || (io_apic_id == 1) || (io_apic_id == 15)) {
1830#endif	/* REALLY_ANAL_IOAPICID_VALUE */
1831		io_apic_set_id(0, 2);
1832		io_apic_id = 2;
1833	}
1834	IO_TO_ID(0) = io_apic_id;
1835	ID_TO_IO(io_apic_id) = 0;
1836#endif	/* APIC_IO */
1837
1838	/* fill out bus entries */
1839	switch (type) {
1840	case 1:
1841	case 2:
1842	case 3:
1843	case 4:
1844	case 5:
1845	case 6:
1846	case 7:
1847		bus_data[0].bus_id = default_data[type - 1][1];
1848		bus_data[0].bus_type = default_data[type - 1][2];
1849		bus_data[1].bus_id = default_data[type - 1][3];
1850		bus_data[1].bus_type = default_data[type - 1][4];
1851		break;
1852
1853	/* case 4: case 7:		   MCA NOT supported */
1854	default:		/* illegal/reserved */
1855		panic("BAD default MP config: %d", type);
1856		/* NOTREACHED */
1857	}
1858
1859#if defined(APIC_IO)
1860	/* general cases from MP v1.4, table 5-2 */
1861	for (pin = 0; pin < 16; ++pin) {
1862		io_apic_ints[pin].int_type = 0;
1863		io_apic_ints[pin].int_flags = 0x05;	/* edge/active-hi */
1864		io_apic_ints[pin].src_bus_id = 0;
1865		io_apic_ints[pin].src_bus_irq = pin;	/* IRQ2 caught below */
1866		io_apic_ints[pin].dst_apic_id = io_apic_id;
1867		io_apic_ints[pin].dst_apic_int = pin;	/* 1-to-1 */
1868	}
1869
1870	/* special cases from MP v1.4, table 5-2 */
1871	if (type == 2) {
1872		io_apic_ints[2].int_type = 0xff;	/* N/C */
1873		io_apic_ints[13].int_type = 0xff;	/* N/C */
1874#if !defined(APIC_MIXED_MODE)
1875		/** FIXME: ??? */
1876		panic("sorry, can't support type 2 default yet");
1877#endif	/* APIC_MIXED_MODE */
1878	}
1879	else
1880		io_apic_ints[2].src_bus_irq = 0;	/* ISA IRQ0 is on APIC INT 2 */
1881
1882	if (type == 7)
1883		io_apic_ints[0].int_type = 0xff;	/* N/C */
1884	else
1885		io_apic_ints[0].int_type = 3;	/* vectored 8259 */
1886#endif	/* APIC_IO */
1887}
1888
1889
1890/*
1891 * initialize all the SMP locks
1892 */
1893
1894/* critical region around IO APIC, apic_imen */
1895struct simplelock	imen_lock;
1896
1897/* critical region around splxx(), cpl, cml, cil, ipending */
1898struct simplelock	cpl_lock;
1899
1900/* Make FAST_INTR() routines sequential */
1901struct simplelock	fast_intr_lock;
1902
1903/* critical region around INTR() routines */
1904struct simplelock	intr_lock;
1905
1906/* lock region used by kernel profiling */
1907struct simplelock	mcount_lock;
1908
1909#ifdef USE_COMLOCK
1910/* locks com (tty) data/hardware accesses: a FASTINTR() */
1911struct simplelock	com_lock;
1912#endif /* USE_COMLOCK */
1913
1914/* lock around the MP rendezvous */
1915static struct simplelock smp_rv_lock;
1916
1917/* only 1 CPU can panic at a time :) */
1918struct simplelock	panic_lock;
1919
1920static void
1921init_locks(void)
1922{
1923#if defined(APIC_INTR_DIAGNOSTIC) && defined(APIC_INTR_DIAGNOSTIC_IRQ)
1924	s_lock_init((struct simplelock*)&apic_itrace_debuglock);
1925#endif
1926
1927	s_lock_init((struct simplelock*)&mcount_lock);
1928
1929	s_lock_init((struct simplelock*)&fast_intr_lock);
1930	s_lock_init((struct simplelock*)&intr_lock);
1931	s_lock_init((struct simplelock*)&imen_lock);
1932	s_lock_init((struct simplelock*)&cpl_lock);
1933	s_lock_init(&smp_rv_lock);
1934	s_lock_init(&panic_lock);
1935
1936#ifdef USE_COMLOCK
1937	s_lock_init((struct simplelock*)&com_lock);
1938#endif /* USE_COMLOCK */
1939
1940	s_lock_init(&ap_boot_lock);
1941}
1942
1943/*
1944 * start each AP in our list
1945 */
1946static int
1947start_all_aps(u_int boot_addr)
1948{
1949	int     x, i, pg;
1950	u_char  mpbiosreason;
1951	u_long  mpbioswarmvec;
1952	struct globaldata *gd;
1953	char *stack;
1954
1955	POSTCODE(START_ALL_APS_POST);
1956
1957	/* initialize BSP's local APIC */
1958	apic_initialize();
1959	bsp_apic_ready = 1;
1960
1961	/* install the AP 1st level boot code */
1962	install_ap_tramp(boot_addr);
1963
1964
1965	/* save the current value of the warm-start vector */
1966	mpbioswarmvec = *((u_long *) WARMBOOT_OFF);
1967#ifndef PC98
1968	outb(CMOS_REG, BIOS_RESET);
1969	mpbiosreason = inb(CMOS_DATA);
1970#endif
1971
1972	/* record BSP in CPU map */
1973	all_cpus = 1;
1974
1975	/* set up 0 -> 4MB P==V mapping for AP boot */
1976	*(int *)PTD = PG_V | PG_RW | ((uintptr_t)(void *)KPTphys & PG_FRAME);
1977	invltlb();
1978
1979	/* start each AP */
1980	for (x = 1; x <= mp_naps; ++x) {
1981
		/* This is a bit verbose; it will go away soon.  */
1983
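		/*
		 * Layout of this AP's slice of the SMP private page table:
		 * pg+0 maps the globaldata page, pg+1..pg+4 are the per-CPU
		 * prv_CMAP1-3/prv_PMAP1 PTE slots, and pg+5 onward maps the
		 * UPAGES idle stack pages set up just below.
		 */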
1984		/* first page of AP's private space */
1985		pg = x * i386_btop(sizeof(struct privatespace));
1986
1987		/* allocate a new private data page */
1988		gd = (struct globaldata *)kmem_alloc(kernel_map, PAGE_SIZE);
1989
1990		/* wire it into the private page table page */
1991		SMPpt[pg] = (pt_entry_t)(PG_V | PG_RW | vtophys(gd));
1992
1993		/* allocate and set up an idle stack data page */
1994		stack = (char *)kmem_alloc(kernel_map, UPAGES*PAGE_SIZE);
1995		for (i = 0; i < UPAGES; i++)
1996			SMPpt[pg + 5 + i] = (pt_entry_t)
1997			    (PG_V | PG_RW | vtophys(PAGE_SIZE * i + stack));
1998
1999		SMPpt[pg + 1] = 0;		/* *prv_CMAP1 */
2000		SMPpt[pg + 2] = 0;		/* *prv_CMAP2 */
2001		SMPpt[pg + 3] = 0;		/* *prv_CMAP3 */
2002		SMPpt[pg + 4] = 0;		/* *prv_PMAP1 */
2003
2004		/* prime data page for it to use */
2005		SLIST_INSERT_HEAD(&cpuhead, gd, gd_allcpu);
2006		gd->gd_cpuid = x;
2007		gd->gd_cpu_lockid = x << 24;
2008		gd->gd_prv_CMAP1 = &SMPpt[pg + 1];
2009		gd->gd_prv_CMAP2 = &SMPpt[pg + 2];
2010		gd->gd_prv_CMAP3 = &SMPpt[pg + 3];
2011		gd->gd_prv_PMAP1 = &SMPpt[pg + 4];
2012		gd->gd_prv_CADDR1 = SMP_prvspace[x].CPAGE1;
2013		gd->gd_prv_CADDR2 = SMP_prvspace[x].CPAGE2;
2014		gd->gd_prv_CADDR3 = SMP_prvspace[x].CPAGE3;
2015		gd->gd_prv_PADDR1 = (unsigned *)SMP_prvspace[x].PPAGE1;
2016
2017		/* setup a vector to our boot code */
2018		*((volatile u_short *) WARMBOOT_OFF) = WARMBOOT_TARGET;
2019		*((volatile u_short *) WARMBOOT_SEG) = (boot_addr >> 4);
2020#ifndef PC98
2021		outb(CMOS_REG, BIOS_RESET);
2022		outb(CMOS_DATA, BIOS_WARM);	/* 'warm-start' */
2023#endif
2024
2025		bootSTK = &SMP_prvspace[x].idlestack[UPAGES*PAGE_SIZE];
2026		bootAP = x;
2027
2028		/* attempt to start the Application Processor */
2029		CHECK_INIT(99);	/* setup checkpoints */
2030		if (!start_ap(x, boot_addr)) {
2031			printf("AP #%d (PHY# %d) failed!\n", x, CPU_TO_ID(x));
2032			CHECK_PRINT("trace");	/* show checkpoints */
2033			/* better panic as the AP may be running loose */
2034			printf("panic y/n? [y] ");
2035			if (cngetc() != 'n')
2036				panic("bye-bye");
2037		}
2038		CHECK_PRINT("trace");		/* show checkpoints */
2039
2040		/* record its version info */
2041		cpu_apic_versions[x] = cpu_apic_versions[0];
2042
2043		all_cpus |= (1 << x);		/* record AP in CPU map */
2044	}
2045
2046	/* build our map of 'other' CPUs */
2047	PCPU_SET(other_cpus, all_cpus & ~(1 << PCPU_GET(cpuid)));
2048
2049	/* fill in our (BSP) APIC version */
2050	cpu_apic_versions[0] = lapic.version;
2051
2052	/* restore the warmstart vector */
2053	*(u_long *) WARMBOOT_OFF = mpbioswarmvec;
2054#ifndef PC98
2055	outb(CMOS_REG, BIOS_RESET);
2056	outb(CMOS_DATA, mpbiosreason);
2057#endif
2058
2059	/*
2060	 * Set up the idle context for the BSP.  Similar to above, except
2061	 * that some of it was done by locore, some by pmap.c, and some is
2062	 * implicit because the BSP is cpu#0 and the page is initially zero,
2063	 * and also because we can refer to variables by name on the BSP.
2064	 */
2065
2066	/* Allocate and setup BSP idle stack */
2067	stack = (char *)kmem_alloc(kernel_map, UPAGES * PAGE_SIZE);
2068	for (i = 0; i < UPAGES; i++)
2069		SMPpt[5 + i] = (pt_entry_t)
2070		    (PG_V | PG_RW | vtophys(PAGE_SIZE * i + stack));
2071
2072	*(int *)PTD = 0;
2073	pmap_set_opt();
2074
2075	/* number of APs actually started */
2076	return mp_ncpus - 1;
2077}
2078
2079
2080/*
2081 * load the 1st level AP boot code into base memory.
2082 */
2083
2084/* targets for relocation */
2085extern void bigJump(void);
2086extern void bootCodeSeg(void);
2087extern void bootDataSeg(void);
2088extern void MPentry(void);
2089extern u_int MP_GDT;
2090extern u_int mp_gdtbase;
2091
2092static void
2093install_ap_tramp(u_int boot_addr)
2094{
2095	int     x;
2096	int     size = *(int *) ((u_long) & bootMP_size);
2097	u_char *src = (u_char *) ((u_long) bootMP);
2098	u_char *dst = (u_char *) boot_addr + KERNBASE;
2099	u_int   boot_base = (u_int) bootMP;
2100	u_int8_t *dst8;
2101	u_int16_t *dst16;
2102	u_int32_t *dst32;
2103
2104	POSTCODE(INSTALL_AP_TRAMP_POST);
2105
2106	for (x = 0; x < size; ++x)
2107		*dst++ = *src++;
2108
2109	/*
2110	 * Modify addresses in the code we just moved to base memory.
2111	 * Unfortunately we need fairly detailed info about mpboot.s for this
2112	 * to work; changes to mpboot.s might require changes here.
2113	 */
2114
2115	/* boot code is located in KERNEL space */
2116	dst = (u_char *) boot_addr + KERNBASE;
2117
2118	/* modify the lgdt arg */
2119	dst32 = (u_int32_t *) (dst + ((u_int) & mp_gdtbase - boot_base));
2120	*dst32 = boot_addr + ((u_int) & MP_GDT - boot_base);
2121
2122	/* modify the ljmp target for MPentry() */
2123	dst32 = (u_int32_t *) (dst + ((u_int) bigJump - boot_base) + 1);
2124	*dst32 = ((u_int) MPentry - KERNBASE);
2125
2126	/* modify the target for boot code segment */
2127	dst16 = (u_int16_t *) (dst + ((u_int) bootCodeSeg - boot_base));
2128	dst8 = (u_int8_t *) (dst16 + 1);
2129	*dst16 = (u_int) boot_addr & 0xffff;
2130	*dst8 = ((u_int) boot_addr >> 16) & 0xff;
2131
2132	/* modify the target for boot data segment */
2133	dst16 = (u_int16_t *) (dst + ((u_int) bootDataSeg - boot_base));
2134	dst8 = (u_int8_t *) (dst16 + 1);
2135	*dst16 = (u_int) boot_addr & 0xffff;
2136	*dst8 = ((u_int) boot_addr >> 16) & 0xff;
2137}
2138
2139
2140/*
2141 * This function starts the AP (application processor) identified by
2142 * the logical CPU number 'logical_cpu', whose physical APIC ID is
2143 * looked up via CPU_TO_ID().  It does quite a "song and dance" to
2144 * accomplish this, made necessary by the nuances of the different
2145 * hardware we might encounter.  It ain't pretty, but it seems to work.
2146 */
2147static int
2148start_ap(int logical_cpu, u_int boot_addr)
2149{
2150	int     physical_cpu;
2151	int     vector;
2152	int     cpus;
2153	u_long  icr_lo, icr_hi;
2154
2155	POSTCODE(START_AP_POST);
2156
2157	/* get the PHYSICAL APIC ID# */
2158	physical_cpu = CPU_TO_ID(logical_cpu);
2159
2160	/* calculate the vector */
2161	vector = (boot_addr >> 12) & 0xff;
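	/*
	 * The STARTUP IPI vector field is the page number of the trampoline:
	 * the AP begins executing in real mode at physical address
	 * vector << 12, so boot_addr must be page aligned and below 1MB.
	 */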
2162
2163	/* used as a watchpoint to signal AP startup */
2164	cpus = mp_ncpus;
2165
2166	/*
2167	 * First we do an INIT/RESET IPI.  This INIT IPI might be run,
2168	 * resetting and running the target CPU; OR it might be latched (P5
2169	 * bug), leaving the CPU waiting for a STARTUP IPI; OR it might be
2170	 * ignored entirely.
2171	 */
2172
2173	/* setup the address for the target AP */
2174	icr_hi = lapic.icr_hi & ~APIC_ID_MASK;
2175	icr_hi |= (physical_cpu << 24);
2176	lapic.icr_hi = icr_hi;
2177
2178	/* do an INIT IPI: assert RESET */
2179	icr_lo = lapic.icr_lo & 0xfff00000;
2180	lapic.icr_lo = icr_lo | 0x0000c500;
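	/*
	 * 0x0000c500 = trigger mode LEVEL (bit 15) | level ASSERT (bit 14) |
	 * delivery mode INIT (101b in bits 8-10); the 0x00008500 written
	 * below is the matching level-deassert form.
	 */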
2181
2182	/* wait for pending status end */
2183	while (lapic.icr_lo & APIC_DELSTAT_MASK)
2184		 /* spin */ ;
2185
2186	/* do an INIT IPI: deassert RESET */
2187	lapic.icr_lo = icr_lo | 0x00008500;
2188
2189	/* wait for pending status end */
2190	u_sleep(10000);		/* wait ~10 ms */
2191	while (lapic.icr_lo & APIC_DELSTAT_MASK)
2192		 /* spin */ ;
2193
2194	/*
2195	 * Next we do a STARTUP IPI.  The previous INIT IPI might still be
2196	 * latched (P5 bug); this 1st STARTUP would then terminate
2197	 * immediately, and the previously started INIT IPI would continue.
2198	 * OR the previous INIT IPI has already run, and this STARTUP IPI
2199	 * will run.  OR the previous INIT IPI was ignored, and this STARTUP
2200	 * IPI will run.
2201	 */
2202
2203	/* do a STARTUP IPI */
2204	lapic.icr_lo = icr_lo | 0x00000600 | vector;
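	/* 0x00000600 selects STARTUP delivery mode; bits 0-7 carry the vector. */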
2205	while (lapic.icr_lo & APIC_DELSTAT_MASK)
2206		 /* spin */ ;
2207	u_sleep(200);		/* wait ~200 us */
2208
2209	/*
2210	 * Finally we do a 2nd STARTUP IPI.  This 2nd STARTUP IPI should run
2211	 * IF the previous STARTUP IPI was cancelled by a latched INIT IPI;
2212	 * otherwise it will be ignored, as only ONE STARTUP IPI is
2213	 * recognized after a hardware RESET or INIT IPI.
2214	 */
2215
2216	lapic.icr_lo = icr_lo | 0x00000600 | vector;
2217	while (lapic.icr_lo & APIC_DELSTAT_MASK)
2218		 /* spin */ ;
2219	u_sleep(200);		/* wait ~200 us */
2220
2221	/* wait for it to start */
2222	set_apic_timer(5000000);/* == 5 seconds */
2223	while (read_apic_timer())
2224		if (mp_ncpus > cpus)
2225			return 1;	/* return SUCCESS */
2226
2227	return 0;		/* return FAILURE */
2228}
2229
2230/*
2231 * Flush the TLB on all other CPUs.
2232 *
2233 * XXX: Needs to handshake and wait for completion before proceeding.
2234 */
2235void
2236smp_invltlb(void)
2237{
2238#if defined(APIC_IO)
2239	if (smp_started && invltlb_ok)
2240		all_but_self_ipi(XINVLTLB_OFFSET);
2241#endif  /* APIC_IO */
2242}
2243
2244void
2245invlpg(u_int addr)
2246{
2247	__asm   __volatile("invlpg (%0)"::"r"(addr):"memory");
2248
2249	/* send a message to the other CPUs */
2250	smp_invltlb();
2251}
2252
2253void
2254invltlb(void)
2255{
2256	u_long  temp;
2257
2258	/*
2259	 * This should be implemented as load_cr3(rcr3()) when load_cr3() is
2260	 * inlined.
2261	 */
2262	__asm __volatile("movl %%cr3, %0; movl %0, %%cr3":"=r"(temp) :: "memory");
2263
2264	/* send a message to the other CPUs */
2265	smp_invltlb();
2266}
2267
2268
2269/*
2270 * When called, the executing CPU will send an IPI to all other CPUs
2271 * requesting that they halt execution.
2272 *
2273 * Usually (but not necessarily) called with 'other_cpus' as its arg.
2274 *
2275 *  - Signals all CPUs in map to stop.
2276 *  - Waits for each to stop.
2277 *
2278 * Returns:
2279 *  -1: error
2280 *   0: NA
2281 *   1: ok
2282 *
2283 * XXX FIXME: this is not MP-safe; it needs a lock to prevent multiple
2284 *            CPUs from executing it at the same time.
2285 */
2286int
2287stop_cpus(u_int map)
2288{
2289	int count = 0;
2290
2291	if (!smp_started)
2292		return 0;
2293
2294	/* send the Xcpustop IPI to all CPUs in map */
2295	selected_apic_ipi(map, XCPUSTOP_OFFSET, APIC_DELMODE_FIXED);
2296
2297	while (count++ < 100000 && (stopped_cpus & map) != map)
2298		/* spin */ ;
2299
2300#ifdef DIAGNOSTIC
2301	if ((stopped_cpus & map) != map)
2302		printf("Warning: CPUs 0x%x did not stop!\n",
2303		    (~(stopped_cpus & map)) & map);
2304#endif
2305
2306	return 1;
2307}
2308
2309
2310/*
2311 * Called by a CPU to restart stopped CPUs.
2312 *
2313 * Usually (but not necessarily) called with 'stopped_cpus' as its arg.
2314 *
2315 *  - Signals all CPUs in map to restart.
2316 *  - Waits for each to restart.
2317 *
2318 * Returns:
2319 *  -1: error
2320 *   0: NA
2321 *   1: ok
2322 */
2323int
2324restart_cpus(u_int map)
2325{
2326	int count = 0;
2327
2328	if (!smp_started)
2329		return 0;
2330
2331	started_cpus = map;		/* signal other cpus to restart */
2332
2333	/* wait for each to clear its bit */
2334	while (count++ < 100000 && (stopped_cpus & map) != 0)
2335		/* spin */ ;
2336
2337#ifdef DIAGNOSTIC
2338	if ((stopped_cpus & map) != 0)
2339		printf("Warning: CPUs 0x%x did not restart!\n",
2340		    (~(stopped_cpus & map)) & map);
2341#endif
2342
2343	return 1;
2344}
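/*
 * Typical usage (a sketch only; the surrounding caller is hypothetical):
 *
 *	stop_cpus(PCPU_GET(other_cpus));
 *	... touch global state while the other CPUs spin in Xcpustop ...
 *	restart_cpus(stopped_cpus);
 */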
2345
2346int smp_active = 0;	/* are the APs allowed to run? */
2347SYSCTL_INT(_machdep, OID_AUTO, smp_active, CTLFLAG_RW, &smp_active, 0, "");
2348
2349/* XXX maybe should be hw.ncpu */
2350static int smp_cpus = 1;	/* how many CPUs are running */
2351SYSCTL_INT(_machdep, OID_AUTO, smp_cpus, CTLFLAG_RD, &smp_cpus, 0, "");
2352
2353int invltlb_ok = 0;	/* throttle smp_invltlb() till safe */
2354SYSCTL_INT(_machdep, OID_AUTO, invltlb_ok, CTLFLAG_RW, &invltlb_ok, 0, "");
2355
2356/* Warning: Do not staticize.  Used from swtch.s */
2357int do_page_zero_idle = 1; /* bzero pages for fun and profit in idleloop */
2358SYSCTL_INT(_machdep, OID_AUTO, do_page_zero_idle, CTLFLAG_RW,
2359	   &do_page_zero_idle, 0, "");
2360
2361/* Is forwarding of an interrupt to the CPU holding the ISR lock enabled? */
2362int forward_irq_enabled = 1;
2363SYSCTL_INT(_machdep, OID_AUTO, forward_irq_enabled, CTLFLAG_RW,
2364	   &forward_irq_enabled, 0, "");
2365
2366/* Enable forwarding of a signal to a process running on a different CPU */
2367static int forward_signal_enabled = 1;
2368SYSCTL_INT(_machdep, OID_AUTO, forward_signal_enabled, CTLFLAG_RW,
2369	   &forward_signal_enabled, 0, "");
2370
2371/* Enable forwarding of roundrobin to all other cpus */
2372static int forward_roundrobin_enabled = 1;
2373SYSCTL_INT(_machdep, OID_AUTO, forward_roundrobin_enabled, CTLFLAG_RW,
2374	   &forward_roundrobin_enabled, 0, "");
2375
2376/*
2377 * This is called once the rest of the system is up and running and we're
2378 * ready to let the APs out of the pen.
2379 */
2380void ap_init(void);
2381
2382void
2383ap_init(void)
2384{
2385	u_int	apic_id;
2386
2387	/* lock against other APs that are waking up */
2388	s_lock(&ap_boot_lock);
2389
2390	/* BSP may have changed PTD while we're waiting for the lock */
2391	cpu_invltlb();
2392
2393	smp_cpus++;
2394
2395#if defined(I586_CPU) && !defined(NO_F00F_HACK)
2396	lidt(&r_idt);
2397#endif
2398
2399	/* Build our map of 'other' CPUs. */
2400	PCPU_SET(other_cpus, all_cpus & ~(1 << PCPU_GET(cpuid)));
2401
2402	printf("SMP: AP CPU #%d Launched!\n", PCPU_GET(cpuid));
2403
2404	/* set up CPU registers and state */
2405	cpu_setregs();
2406
2407	/* set up FPU state on the AP */
2408	npxinit(__INITIAL_NPXCW__);
2409
2410	/* A quick check from sanity claus */
2411	apic_id = (apic_id_to_logical[(lapic.id & 0x0f000000) >> 24]);
2412	if (PCPU_GET(cpuid) != apic_id) {
2413		printf("SMP: cpuid = %d\n", PCPU_GET(cpuid));
2414		printf("SMP: apic_id = %d\n", apic_id);
2415		printf("PTD[MPPTDI] = %p\n", (void *)PTD[MPPTDI]);
2416		panic("cpuid mismatch! boom!!");
2417	}
2418
2419	/* Init local apic for irq's */
2420	apic_initialize();
2421
2422	/* Set memory range attributes for this CPU to match the BSP */
2423	mem_range_AP_init();
2424
2425	/*
2426	 * Activate smp_invltlb, although strictly speaking, this isn't
2427	 * quite correct yet.  We should have a bitfield for CPUs willing
2428	 * to accept TLB flush IPIs or something and sync them.
2429	 */
2430	if (smp_cpus == mp_ncpus) {
2431		invltlb_ok = 1;
2432		smp_started = 1; /* enable IPI's, tlb shootdown, freezes etc */
2433		smp_active = 1;	 /* historic */
2434	}
2435
2436	/* let other APs wake up now */
2437	s_unlock(&ap_boot_lock);
2438
2439	/* wait until all the APs are up */
2440	while (smp_started == 0)
2441		; /* nothing */
2442
2443	/*
2444	 * Set curproc to our per-cpu idleproc so that mutexes have
2445	 * something unique to lock with.
2446	 */
2447	PCPU_SET(curproc, PCPU_GET(idleproc));
2448
2449	microuptime(PCPU_PTR(switchtime));
2450	PCPU_SET(switchticks, ticks);
2451
2452	/* ok, now grab sched_lock and enter the scheduler */
2453	enable_intr();
2454	mtx_enter(&sched_lock, MTX_SPIN);
2455	cpu_throw();	/* doesn't return */
2456
2457	panic("scheduler returned us to ap_init");
2458}
2459
2460#ifdef BETTER_CLOCK
2461
2462#define CHECKSTATE_USER	0
2463#define CHECKSTATE_SYS	1
2464#define CHECKSTATE_INTR	2
2465
2466/* Do not staticize.  Used from apic_vector.s */
2467struct proc*	checkstate_curproc[MAXCPU];
2468int		checkstate_cpustate[MAXCPU];
2469u_long		checkstate_pc[MAXCPU];
2470
2471#define PC_TO_INDEX(pc, prof)				\
2472        ((int)(((u_quad_t)((pc) - (prof)->pr_off) *	\
2473            (u_quad_t)((prof)->pr_scale)) >> 16) & ~1)
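/*
 * PC_TO_INDEX() converts a profiled PC into an even byte offset into the
 * profil(2) buffer: (pc - pr_off) is scaled by pr_scale (a fixed-point
 * fraction, 0x10000 == 1:1) and forced even because the buffer holds
 * 16-bit counters.
 */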
2474
2475static void
2476addupc_intr_forwarded(struct proc *p, int id, int *astmap)
2477{
2478	int i;
2479	struct uprof *prof;
2480	u_long pc;
2481
2482	pc = checkstate_pc[id];
2483	prof = &p->p_stats->p_prof;
2484	if (pc >= prof->pr_off &&
2485	    (i = PC_TO_INDEX(pc, prof)) < prof->pr_size) {
2486		if ((p->p_flag & P_OWEUPC) == 0) {
2487			prof->pr_addr = pc;
2488			prof->pr_ticks = 1;
2489			p->p_flag |= P_OWEUPC;
2490		}
2491		*astmap |= (1 << id);
2492	}
2493}
2494
2495static void
2496forwarded_statclock(int id, int pscnt, int *astmap)
2497{
2498	struct pstats *pstats;
2499	long rss;
2500	struct rusage *ru;
2501	struct vmspace *vm;
2502	int cpustate;
2503	struct proc *p;
2504#ifdef GPROF
2505	register struct gmonparam *g;
2506	int i;
2507#endif
2508
2509	p = checkstate_curproc[id];
2510	cpustate = checkstate_cpustate[id];
2511
2512	/* XXX */
2513	if (p->p_ithd)
2514		cpustate = CHECKSTATE_INTR;
2515	else if (p == SMP_prvspace[id].globaldata.gd_idleproc)
2516		cpustate = CHECKSTATE_SYS;
2517
2518	switch (cpustate) {
2519	case CHECKSTATE_USER:
2520		if (p->p_flag & P_PROFIL)
2521			addupc_intr_forwarded(p, id, astmap);
2522		if (pscnt > 1)
2523			return;
2524		p->p_uticks++;
2525		if (p->p_nice > NZERO)
2526			cp_time[CP_NICE]++;
2527		else
2528			cp_time[CP_USER]++;
2529		break;
2530	case CHECKSTATE_SYS:
2531#ifdef GPROF
2532		/*
2533		 * Kernel statistics are just like addupc_intr, only easier.
2534		 */
2535		g = &_gmonparam;
2536		if (g->state == GMON_PROF_ON) {
2537			i = checkstate_pc[id] - g->lowpc;
2538			if (i < g->textsize) {
2539				i /= HISTFRACTION * sizeof(*g->kcount);
2540				g->kcount[i]++;
2541			}
2542		}
2543#endif
2544		if (pscnt > 1)
2545			return;
2546
2547		p->p_sticks++;
2548		if (p == SMP_prvspace[id].globaldata.gd_idleproc)
2549			cp_time[CP_IDLE]++;
2550		else
2551			cp_time[CP_SYS]++;
2552		break;
2553	case CHECKSTATE_INTR:
2554	default:
2555#ifdef GPROF
2556		/*
2557		 * Kernel statistics are just like addupc_intr, only easier.
2558		 */
2559		g = &_gmonparam;
2560		if (g->state == GMON_PROF_ON) {
2561			i = checkstate_pc[id] - g->lowpc;
2562			if (i < g->textsize) {
2563				i /= HISTFRACTION * sizeof(*g->kcount);
2564				g->kcount[i]++;
2565			}
2566		}
2567#endif
2568		if (pscnt > 1)
2569			return;
2570		if (p)
2571			p->p_iticks++;
2572		cp_time[CP_INTR]++;
2573	}
2574	schedclock(p);
2575
2576	/* Update resource usage integrals and maximums. */
2577	if ((pstats = p->p_stats) != NULL &&
2578	    (ru = &pstats->p_ru) != NULL &&
2579	    (vm = p->p_vmspace) != NULL) {
2580		ru->ru_ixrss += pgtok(vm->vm_tsize);
2581		ru->ru_idrss += pgtok(vm->vm_dsize);
2582		ru->ru_isrss += pgtok(vm->vm_ssize);
2583		rss = pgtok(vmspace_resident_count(vm));
2584		if (ru->ru_maxrss < rss)
2585			ru->ru_maxrss = rss;
2586	}
2587}
2588
2589void
2590forward_statclock(int pscnt)
2591{
2592	int map;
2593	int id;
2594	int i;
2595
2596	/* Kludge. We don't yet have separate locks for the interrupts
2597	 * and the kernel. This means that we cannot let the other processors
2598	 * handle complex interrupts while inhibiting them from entering
2599	 * the kernel in a non-interrupt context.
2600	 *
2601	 * What we can do, without changing the locking mechanisms yet,
2602	 * is to let the other processors handle a very simple interrupt
2603	 * (which determines the processor states) and do the main
2604	 * work ourselves.
2605	 */
2606
2607	if (!smp_started || !invltlb_ok || cold || panicstr)
2608		return;
2609
2610	/* Step 1: Probe state   (user, cpu, interrupt, spinlock, idle ) */
2611
2612	map = PCPU_GET(other_cpus) & ~stopped_cpus ;
2613	checkstate_probed_cpus = 0;
2614	if (map != 0)
2615		selected_apic_ipi(map,
2616				  XCPUCHECKSTATE_OFFSET, APIC_DELMODE_FIXED);
2617
2618	i = 0;
2619	while (checkstate_probed_cpus != map) {
2620		/* spin */
2621		i++;
2622		if (i == 100000) {
2623#ifdef BETTER_CLOCK_DIAGNOSTIC
2624			printf("forward_statclock: checkstate %x\n",
2625			       checkstate_probed_cpus);
2626#endif
2627			break;
2628		}
2629	}
2630
2631	/*
2632	 * Step 2: walk through other processors' processes, update ticks and
2633	 * profiling info.
2634	 */
2635
2636	map = 0;
2637	for (id = 0; id < mp_ncpus; id++) {
2638		if (id == PCPU_GET(cpuid))
2639			continue;
2640		if (((1 << id) & checkstate_probed_cpus) == 0)
2641			continue;
2642		forwarded_statclock(id, pscnt, &map);
2643	}
2644	if (map != 0) {
2645		checkstate_need_ast |= map;
2646		selected_apic_ipi(map, XCPUAST_OFFSET, APIC_DELMODE_FIXED);
2647		i = 0;
2648		while ((checkstate_need_ast & map) != 0) {
2649			/* spin */
2650			i++;
2651			if (i > 100000) {
2652#ifdef BETTER_CLOCK_DIAGNOSTIC
2653				printf("forward_statclock: dropped ast 0x%x\n",
2654				       checkstate_need_ast & map);
2655#endif
2656				break;
2657			}
2658		}
2659	}
2660}
2661
2662void
2663forward_hardclock(int pscnt)
2664{
2665	int map;
2666	int id;
2667	struct proc *p;
2668	struct pstats *pstats;
2669	int i;
2670
2671	/* Kludge. We don't yet have separate locks for the interrupts
2672	 * and the kernel. This means that we cannot let the other processors
2673	 * handle complex interrupts while inhibiting them from entering
2674	 * the kernel in a non-interrupt context.
2675	 *
2676	 * What we can do, without changing the locking mechanisms yet,
2677	 * is to let the other processors handle a very simple interrupt
2678	 * (which determines the processor states) and do the main
2679	 * work ourselves.
2680	 */
2681
2682	if (!smp_started || !invltlb_ok || cold || panicstr)
2683		return;
2684
2685	/* Step 1: Probe state   (user, cpu, interrupt, spinlock, idle) */
2686
2687	map = PCPU_GET(other_cpus) & ~stopped_cpus ;
2688	checkstate_probed_cpus = 0;
2689	if (map != 0)
2690		selected_apic_ipi(map,
2691				  XCPUCHECKSTATE_OFFSET, APIC_DELMODE_FIXED);
2692
2693	i = 0;
2694	while (checkstate_probed_cpus != map) {
2695		/* spin */
2696		i++;
2697		if (i == 100000) {
2698#ifdef BETTER_CLOCK_DIAGNOSTIC
2699			printf("forward_hardclock: checkstate %x\n",
2700			       checkstate_probed_cpus);
2701#endif
2702			break;
2703		}
2704	}
2705
2706	/*
2707	 * Step 2: walk through other processors' processes, update the virtual
2708	 * timer and profiling timer. If stathz == 0, also update ticks and
2709	 * profiling info.
2710	 */
2711
2712	map = 0;
2713	for (id = 0; id < mp_ncpus; id++) {
2714		if (id == PCPU_GET(cpuid))
2715			continue;
2716		if (((1 << id) & checkstate_probed_cpus) == 0)
2717			continue;
2718		p = checkstate_curproc[id];
2719		if (p) {
2720			pstats = p->p_stats;
2721			if (checkstate_cpustate[id] == CHECKSTATE_USER &&
2722			    timevalisset(&pstats->p_timer[ITIMER_VIRTUAL].it_value) &&
2723			    itimerdecr(&pstats->p_timer[ITIMER_VIRTUAL], tick) == 0) {
2724				psignal(p, SIGVTALRM);
2725				map |= (1 << id);
2726			}
2727			if (timevalisset(&pstats->p_timer[ITIMER_PROF].it_value) &&
2728			    itimerdecr(&pstats->p_timer[ITIMER_PROF], tick) == 0) {
2729				psignal(p, SIGPROF);
2730				map |= (1 << id);
2731			}
2732		}
2733		if (stathz == 0) {
2734			forwarded_statclock( id, pscnt, &map);
2735		}
2736	}
2737	if (map != 0) {
2738		checkstate_need_ast |= map;
2739		selected_apic_ipi(map, XCPUAST_OFFSET, APIC_DELMODE_FIXED);
2740		i = 0;
2741		while ((checkstate_need_ast & map) != 0) {
2742			/* spin */
2743			i++;
2744			if (i > 100000) {
2745#ifdef BETTER_CLOCK_DIAGNOSTIC
2746				printf("forward_hardclock: dropped ast 0x%x\n",
2747				       checkstate_need_ast & map);
2748#endif
2749				break;
2750			}
2751		}
2752	}
2753}
2754
2755#endif /* BETTER_CLOCK */
2756
2757void
2758forward_signal(struct proc *p)
2759{
2760	int map;
2761	int id;
2762	int i;
2763
2764	/* Kludge. We don't yet have separate locks for the interrupts
2765	 * and the kernel. This means that we cannot let the other processors
2766	 * handle complex interrupts while inhibiting them from entering
2767	 * the kernel in a non-interrupt context.
2768	 *
2769	 * What we can do, without changing the locking mechanisms yet,
2770	 * is to let the other processors handle a very simple interrupt
2771	 * (which determines the processor states) and do the main
2772	 * work ourselves.
2773	 */
2774
2775	if (!smp_started || !invltlb_ok || cold || panicstr)
2776		return;
2777	if (!forward_signal_enabled)
2778		return;
2779	mtx_enter(&sched_lock, MTX_SPIN);
2780	while (1) {
2781		if (p->p_stat != SRUN) {
2782			mtx_exit(&sched_lock, MTX_SPIN);
2783			return;
2784		}
2785		id = p->p_oncpu;
2786		mtx_exit(&sched_lock, MTX_SPIN);
2787		if (id == 0xff)
2788			return;
2789		map = (1<<id);
2790		checkstate_need_ast |= map;
2791		selected_apic_ipi(map, XCPUAST_OFFSET, APIC_DELMODE_FIXED);
2792		i = 0;
2793		while ((checkstate_need_ast & map) != 0) {
2794			/* spin */
2795			i++;
2796			if (i > 100000) {
2797#if 0
2798				printf("forward_signal: dropped ast 0x%x\n",
2799				       checkstate_need_ast & map);
2800#endif
2801				break;
2802			}
2803		}
2804		mtx_enter(&sched_lock, MTX_SPIN);
2805		if (id == p->p_oncpu) {
2806			mtx_exit(&sched_lock, MTX_SPIN);
2807			return;
2808		}
2809	}
2810}
2811
2812void
2813forward_roundrobin(void)
2814{
2815	u_int map;
2816	int i;
2817
2818	if (!smp_started || !invltlb_ok || cold || panicstr)
2819		return;
2820	if (!forward_roundrobin_enabled)
2821		return;
2822	resched_cpus |= PCPU_GET(other_cpus);
2823	map = PCPU_GET(other_cpus) & ~stopped_cpus ;
2824#if 1
2825	selected_apic_ipi(map, XCPUAST_OFFSET, APIC_DELMODE_FIXED);
2826#else
2827	(void) all_but_self_ipi(XCPUAST_OFFSET);
2828#endif
2829	i = 0;
2830	while ((checkstate_need_ast & map) != 0) {
2831		/* spin */
2832		i++;
2833		if (i > 100000) {
2834#if 0
2835			printf("forward_roundrobin: dropped ast 0x%x\n",
2836			       checkstate_need_ast & map);
2837#endif
2838			break;
2839		}
2840	}
2841}
2842
2843
2844#ifdef APIC_INTR_REORDER
2845/*
2846 *	Maintain mapping from softintr vector to isr bit in local apic.
2847 */
2848void
2849set_lapic_isrloc(int intr, int vector)
2850{
2851	if (intr < 0 || intr > 32)
2852		panic("set_apic_isrloc: bad intr argument: %d",intr);
2853	if (vector < ICU_OFFSET || vector > 255)
2854		panic("set_apic_isrloc: bad vector argument: %d",vector);
2855	apic_isrbit_location[intr].location = &lapic.isr0 + ((vector>>5)<<2);
2856	apic_isrbit_location[intr].bit = (1<<(vector & 31));
2857}
2858#endif
2859
2860/*
2861 * All-CPU rendezvous.  CPUs are signalled, all execute the setup function
2862 * (if specified), rendezvous, execute the action function (if specified),
2863 * rendezvous again, execute the teardown function (if specified), and then
2864 * resume.
2865 *
2866 * Note that the supplied external functions _must_ be reentrant and aware
2867 * that they are running in parallel and in an unknown lock context.
2868 */
2869static void (*smp_rv_setup_func)(void *arg);
2870static void (*smp_rv_action_func)(void *arg);
2871static void (*smp_rv_teardown_func)(void *arg);
2872static void *smp_rv_func_arg;
2873static volatile int smp_rv_waiters[2];
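/*
 * smp_rv_waiters[0] is the entry barrier and smp_rv_waiters[1] the exit
 * barrier: no CPU runs the action function until every CPU has finished
 * setup, and no CPU runs teardown until every CPU has finished the action.
 */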
2874
2875void
2876smp_rendezvous_action(void)
2877{
2878	/* setup function */
2879	if (smp_rv_setup_func != NULL)
2880		smp_rv_setup_func(smp_rv_func_arg);
2881	/* spin on entry rendezvous */
2882	atomic_add_int(&smp_rv_waiters[0], 1);
2883	while (smp_rv_waiters[0] < mp_ncpus)
2884		;
2885	/* action function */
2886	if (smp_rv_action_func != NULL)
2887		smp_rv_action_func(smp_rv_func_arg);
2888	/* spin on exit rendezvous */
2889	atomic_add_int(&smp_rv_waiters[1], 1);
2890	while (smp_rv_waiters[1] < mp_ncpus)
2891		;
2892	/* teardown function */
2893	if (smp_rv_teardown_func != NULL)
2894		smp_rv_teardown_func(smp_rv_func_arg);
2895}
2896
2897void
2898smp_rendezvous(void (* setup_func)(void *),
2899	       void (* action_func)(void *),
2900	       void (* teardown_func)(void *),
2901	       void *arg)
2902{
2903	u_int	efl;
2904
2905	/* obtain rendezvous lock */
2906	s_lock(&smp_rv_lock);		/* XXX sleep here? NOWAIT flag? */
2907
2908	/* set static function pointers */
2909	smp_rv_setup_func = setup_func;
2910	smp_rv_action_func = action_func;
2911	smp_rv_teardown_func = teardown_func;
2912	smp_rv_func_arg = arg;
2913	smp_rv_waiters[0] = 0;
2914	smp_rv_waiters[1] = 0;
2915
2916	/* disable interrupts on this CPU, save interrupt status */
2917	efl = read_eflags();
2918	write_eflags(efl & ~PSL_I);
2919
2920	/* signal other processors, which will enter the IPI with interrupts off */
2921	all_but_self_ipi(XRENDEZVOUS_OFFSET);
2922
2923	/* call executor function */
2924	smp_rendezvous_action();
2925
2926	/* restore interrupt flag */
2927	write_eflags(efl);
2928
2929	/* release lock */
2930	s_unlock(&smp_rv_lock);
2931}
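/*
 * Example usage (a sketch; the callback name is made up for illustration):
 *
 *	static void
 *	flush_caches(void *arg __unused)
 *	{
 *		wbinvd();
 *	}
 *
 *	smp_rendezvous(NULL, flush_caches, NULL, NULL);
 *
 * runs flush_caches() on every CPU, with all CPUs synchronized before and
 * after the call.
 */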
2932
2933void
2934release_aps(void *dummy __unused)
2935{
2936	s_unlock(&ap_boot_lock);
2937}
2938
2939SYSINIT(start_aps, SI_SUB_SMP, SI_ORDER_FIRST, release_aps, NULL);
2940