mptable.c revision 75392
1/*
2 * Copyright (c) 1996, by Steve Passe
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 *    notice, this list of conditions and the following disclaimer.
10 * 2. The name of the developer may NOT be used to endorse or promote products
11 *    derived from this software without specific prior written permission.
12 *
13 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
14 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
16 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
17 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
18 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
19 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
20 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
21 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
22 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
23 * SUCH DAMAGE.
24 *
25 * $FreeBSD: head/sys/i386/i386/mptable.c 75392 2001-04-10 21:04:32Z jhb $
26 */
27
28#include "opt_cpu.h"
29
30#ifdef SMP
31#include <machine/smptests.h>
32#else
33#error
34#endif
35
36#include <sys/param.h>
37#include <sys/bus.h>
38#include <sys/systm.h>
39#include <sys/kernel.h>
40#include <sys/proc.h>
41#include <sys/sysctl.h>
42#include <sys/malloc.h>
43#include <sys/memrange.h>
44#include <sys/mutex.h>
45#ifdef BETTER_CLOCK
46#include <sys/dkstat.h>
47#endif
48#include <sys/cons.h>	/* cngetc() */
49
50#include <vm/vm.h>
51#include <vm/vm_param.h>
52#include <vm/pmap.h>
53#include <vm/vm_kern.h>
54#include <vm/vm_extern.h>
55#ifdef BETTER_CLOCK
56#include <sys/lock.h>
57#include <vm/vm_map.h>
58#include <sys/user.h>
59#ifdef GPROF
60#include <sys/gmon.h>
61#endif
62#endif
63
64#include <machine/smp.h>
65#include <machine/apic.h>
66#include <machine/atomic.h>
67#include <machine/cpufunc.h>
68#include <machine/ipl.h>
69#include <machine/mpapic.h>
70#include <machine/psl.h>
71#include <machine/segments.h>
72#include <machine/smptests.h>	/** TEST_DEFAULT_CONFIG, TEST_TEST1 */
73#include <machine/tss.h>
74#include <machine/specialreg.h>
75#include <machine/globaldata.h>
76
77#if defined(APIC_IO)
78#include <machine/md_var.h>		/* setidt() */
79#include <i386/isa/icu.h>		/* IPIs */
80#include <i386/isa/intr_machdep.h>	/* IPIs */
81#endif	/* APIC_IO */
82
83#if defined(TEST_DEFAULT_CONFIG)
84#define MPFPS_MPFB1	TEST_DEFAULT_CONFIG
85#else
86#define MPFPS_MPFB1	mpfps->mpfb1
87#endif  /* TEST_DEFAULT_CONFIG */
88
89#define WARMBOOT_TARGET		0
90#define WARMBOOT_OFF		(KERNBASE + 0x0467)
91#define WARMBOOT_SEG		(KERNBASE + 0x0469)
92
93#ifdef PC98
94#define BIOS_BASE		(0xe8000)
95#define BIOS_SIZE		(0x18000)
96#else
97#define BIOS_BASE		(0xf0000)
98#define BIOS_SIZE		(0x10000)
99#endif
100#define BIOS_COUNT		(BIOS_SIZE/4)
101
102#define CMOS_REG		(0x70)
103#define CMOS_DATA		(0x71)
104#define BIOS_RESET		(0x0f)
105#define BIOS_WARM		(0x0a)
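/*
 * Writing BIOS_WARM to the CMOS shutdown status byte (index BIOS_RESET)
 * makes the BIOS skip POST after an INIT and jump through the warm-boot
 * vector at 0040:0067 (WARMBOOT_OFF/WARMBOOT_SEG above), which
 * start_all_aps() points at the AP trampoline.
 */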
106
107#define PROCENTRY_FLAG_EN	0x01
108#define PROCENTRY_FLAG_BP	0x02
109#define IOAPICENTRY_FLAG_EN	0x01
110
111
112/* MP Floating Pointer Structure */
113typedef struct MPFPS {
114	char    signature[4];
115	void   *pap;
116	u_char  length;
117	u_char  spec_rev;
118	u_char  checksum;
119	u_char  mpfb1;
120	u_char  mpfb2;
121	u_char  mpfb3;
122	u_char  mpfb4;
123	u_char  mpfb5;
124}      *mpfps_t;
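/*
 * Per the Intel MP spec v1.4: 'signature' is "_MP_", 'pap' is the physical
 * address of the MP configuration table (0 if a default configuration is
 * used), 'mpfb1' selects one of the default configurations (0 means a
 * configuration table is present), and bit 7 of 'mpfb2' means an IMCR is
 * present and the system starts in PIC mode.
 */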
125
126/* MP Configuration Table Header */
127typedef struct MPCTH {
128	char    signature[4];
129	u_short base_table_length;
130	u_char  spec_rev;
131	u_char  checksum;
132	u_char  oem_id[8];
133	u_char  product_id[12];
134	void   *oem_table_pointer;
135	u_short oem_table_size;
136	u_short entry_count;
137	void   *apic_address;
138	u_short extended_table_length;
139	u_char  extended_table_checksum;
140	u_char  reserved;
141}      *mpcth_t;
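/*
 * Per the MP spec the header signature is "PCMP"; 'entry_count'
 * variable-length entries follow the header, and 'apic_address' is the
 * physical address of the local APIC shared by all CPUs.
 */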
142
143
144typedef struct PROCENTRY {
145	u_char  type;
146	u_char  apic_id;
147	u_char  apic_version;
148	u_char  cpu_flags;
149	u_long  cpu_signature;
150	u_long  feature_flags;
151	u_long  reserved1;
152	u_long  reserved2;
153}      *proc_entry_ptr;
154
155typedef struct BUSENTRY {
156	u_char  type;
157	u_char  bus_id;
158	char    bus_type[6];
159}      *bus_entry_ptr;
160
161typedef struct IOAPICENTRY {
162	u_char  type;
163	u_char  apic_id;
164	u_char  apic_version;
165	u_char  apic_flags;
166	void   *apic_address;
167}      *io_apic_entry_ptr;
168
169typedef struct INTENTRY {
170	u_char  type;
171	u_char  int_type;
172	u_short int_flags;
173	u_char  src_bus_id;
174	u_char  src_bus_irq;
175	u_char  dst_apic_id;
176	u_char  dst_apic_int;
177}      *int_entry_ptr;
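/*
 * For interrupt entries (MP spec): 'int_type' is 0 = vectored INT, 1 = NMI,
 * 2 = SMI, 3 = ExtINT; bits 0-1 of 'int_flags' encode the polarity and bits
 * 2-3 the trigger mode (see apic_polarity()/apic_trigger() below); a
 * 'dst_apic_id' of 255 means the signal is wired to all IO APICs.
 */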
178
179/* descriptions of MP basetable entries */
180typedef struct BASETABLE_ENTRY {
181	u_char  type;
182	u_char  length;
183	char    name[16];
184}       basetable_entry;
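/*
 * Per the MP spec, processor entries are 20 bytes long and all other base
 * table entries are 8 bytes; see basetable_entry_types[] below.
 */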
185
186/*
187 * this code MUST be enabled here and in mpboot.s.
188 * it follows the very early stages of AP boot by placing values in CMOS ram.
189 * it is NORMALLY never needed, hence the primitive method of enabling it.
190 *
191#define CHECK_POINTS
192 */
193
194#if defined(CHECK_POINTS) && !defined(PC98)
195#define CHECK_READ(A)	 (outb(CMOS_REG, (A)), inb(CMOS_DATA))
196#define CHECK_WRITE(A,D) (outb(CMOS_REG, (A)), outb(CMOS_DATA, (D)))
197
198#define CHECK_INIT(D);				\
199	CHECK_WRITE(0x34, (D));			\
200	CHECK_WRITE(0x35, (D));			\
201	CHECK_WRITE(0x36, (D));			\
202	CHECK_WRITE(0x37, (D));			\
203	CHECK_WRITE(0x38, (D));			\
204	CHECK_WRITE(0x39, (D));
205
206#define CHECK_PRINT(S);				\
207	printf("%s: %d, %d, %d, %d, %d, %d\n",	\
208	   (S),					\
209	   CHECK_READ(0x34),			\
210	   CHECK_READ(0x35),			\
211	   CHECK_READ(0x36),			\
212	   CHECK_READ(0x37),			\
213	   CHECK_READ(0x38),			\
214	   CHECK_READ(0x39));
215
216#else				/* CHECK_POINTS */
217
218#define CHECK_INIT(D)
219#define CHECK_PRINT(S)
220
221#endif				/* CHECK_POINTS */
222
223/*
224 * Values to send to the POST hardware.
225 */
226#define MP_BOOTADDRESS_POST	0x10
227#define MP_PROBE_POST		0x11
228#define MPTABLE_PASS1_POST	0x12
229
230#define MP_START_POST		0x13
231#define MP_ENABLE_POST		0x14
232#define MPTABLE_PASS2_POST	0x15
233
234#define START_ALL_APS_POST	0x16
235#define INSTALL_AP_TRAMP_POST	0x17
236#define START_AP_POST		0x18
237
238#define MP_ANNOUNCE_POST	0x19
239
240/* used to hold the APs until we are ready to release them */
241struct mtx			ap_boot_mtx;
242
243/** XXX FIXME: where does this really belong, isa.h/isa.c perhaps? */
244int	current_postcode;
245
246/** XXX FIXME: what system files declare these??? */
247extern struct region_descriptor r_gdt, r_idt;
248
249int	bsp_apic_ready = 0;	/* flags usability of BSP APIC */
250int	mp_ncpus;		/* # of CPUs, including BSP */
251int	mp_naps;		/* # of Application Processors */
252int	mp_nbusses;		/* # of busses */
253int	mp_napics;		/* # of IO APICs */
254int	boot_cpu_id;		/* designated BSP */
255vm_offset_t cpu_apic_address;
256vm_offset_t io_apic_address[NAPICID];	/* NAPICID is more than enough */
257extern	int nkpt;
258
259u_int32_t cpu_apic_versions[MAXCPU];
260u_int32_t *io_apic_versions;
261
262#ifdef APIC_INTR_REORDER
263struct {
264	volatile int *location;
265	int bit;
266} apic_isrbit_location[32];
267#endif
268
269struct apic_intmapinfo	int_to_apicintpin[APIC_INTMAPSIZE];
270
271/*
272 * APIC ID logical/physical mapping structures.
273 * We oversize these to simplify boot-time config.
274 */
275int     cpu_num_to_apic_id[NAPICID];
276int     io_num_to_apic_id[NAPICID];
277int     apic_id_to_logical[NAPICID];
278
279
280/* Bitmap of all available CPUs */
281u_int	all_cpus;
282
283/* AP uses this during bootstrap.  Do not staticize.  */
284char *bootSTK;
285static int bootAP;
286
287/* Hotwire a 0->4MB V==P mapping */
288extern pt_entry_t *KPTphys;
289
290/* SMP page table page */
291extern pt_entry_t *SMPpt;
292
293struct pcb stoppcbs[MAXCPU];
294
295int smp_started;		/* has the system started? */
296int smp_active = 0;		/* are the APs allowed to run? */
297SYSCTL_INT(_machdep, OID_AUTO, smp_active, CTLFLAG_RW, &smp_active, 0, "");
298
299/* XXX maybe should be hw.ncpu */
300static int smp_cpus = 1;	/* how many CPUs are running */
301SYSCTL_INT(_machdep, OID_AUTO, smp_cpus, CTLFLAG_RD, &smp_cpus, 0, "");
302
303int invltlb_ok = 0;	/* throttle smp_invltlb() till safe */
304SYSCTL_INT(_machdep, OID_AUTO, invltlb_ok, CTLFLAG_RW, &invltlb_ok, 0, "");
305
306/* Enable forwarding of a signal to a process running on a different CPU */
307static int forward_signal_enabled = 1;
308SYSCTL_INT(_machdep, OID_AUTO, forward_signal_enabled, CTLFLAG_RW,
309	   &forward_signal_enabled, 0, "");
310
311/* Enable forwarding of roundrobin to all other cpus */
312static int forward_roundrobin_enabled = 1;
313SYSCTL_INT(_machdep, OID_AUTO, forward_roundrobin_enabled, CTLFLAG_RW,
314	   &forward_roundrobin_enabled, 0, "");
315
316
317/*
318 * Local data and functions.
319 */
320
321/* Set to 1 once we're ready to let the APs out of the pen. */
322static volatile int aps_ready = 0;
323
324static int	mp_capable;
325static u_int	boot_address;
326static u_int	base_memory;
327
328static int	picmode;		/* 0: virtual wire mode, 1: PIC mode */
329static mpfps_t	mpfps;
330static int	search_for_sig(u_int32_t target, int count);
331static void	mp_enable(u_int boot_addr);
332
333static void	mptable_pass1(void);
334static int	mptable_pass2(void);
335static void	default_mp_table(int type);
336static void	fix_mp_table(void);
337static void	setup_apic_irq_mapping(void);
338static void	init_locks(void);
339static int	start_all_aps(u_int boot_addr);
340static void	install_ap_tramp(u_int boot_addr);
341static int	start_ap(int logicalCpu, u_int boot_addr);
342void		ap_init(void);
343static int	apic_int_is_bus_type(int intr, int bus_type);
344static void	release_aps(void *dummy);
345
346/*
347 * initialize all the SMP locks
348 */
349
350/* critical region around IO APIC, apic_imen */
351struct mtx		imen_mtx;
352
353/* lock region used by kernel profiling */
354struct mtx		mcount_mtx;
355
356#ifdef USE_COMLOCK
357/* locks com (tty) data/hardware accesses: a FASTINTR() */
358struct mtx		com_mtx;
359#endif /* USE_COMLOCK */
360
361/* lock around the MP rendezvous */
362static struct mtx	smp_rv_mtx;
363
364/* only 1 CPU can panic at a time :) */
365struct mtx		panic_mtx;
366
367static void
368init_locks(void)
369{
370	/*
371	 * XXX The mcount mutex probably needs to be statically initialized,
372	 * since it will be used even in the function calls that get us to this
373	 * point.
374	 */
375	mtx_init(&mcount_mtx, "mcount", MTX_DEF);
376
377	mtx_init(&smp_rv_mtx, "smp rendezvous", MTX_SPIN);
378	mtx_init(&panic_mtx, "panic", MTX_DEF);
379
380#ifdef USE_COMLOCK
381	mtx_init(&com_mtx, "com", MTX_SPIN);
382#endif /* USE_COMLOCK */
383
384	mtx_init(&ap_boot_mtx, "ap boot", MTX_SPIN);
385}
386
387/*
388 * Calculate usable address in base memory for AP trampoline code.
389 */
390u_int
391mp_bootaddress(u_int basemem)
392{
393	POSTCODE(MP_BOOTADDRESS_POST);
394
395	base_memory = basemem * 1024;	/* convert to bytes */
396
397	boot_address = base_memory & ~0xfff;	/* round down to 4k boundary */
398	if ((base_memory - boot_address) < bootMP_size)
399		boot_address -= 4096;	/* not enough, lower by 4k */
400
401	return boot_address;
402}
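/*
 * For example, with basemem = 639 (KB) the trampoline would normally land
 * at 0x9f000; if bootMP_size does not fit in the remaining 0xc00 bytes the
 * address drops one more page, to 0x9e000.
 */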
403
404
405/*
406 * Look for an Intel MP spec table (ie, SMP capable hardware).
407 */
408int
409mp_probe(void)
410{
411	int     x;
412	u_long  segment;
413	u_int32_t target;
414
415	POSTCODE(MP_PROBE_POST);
416
417	/* see if EBDA exists */
418	if ((segment = (u_long) * (u_short *) (KERNBASE + 0x40e)) != 0) {
419		/* search first 1K of EBDA */
420		target = (u_int32_t) (segment << 4);
421		if ((x = search_for_sig(target, 1024 / 4)) >= 0)
422			goto found;
423	} else {
424		/* last 1K of base memory, effective 'top of base' passed in */
425		target = (u_int32_t) (base_memory - 0x400);
426		if ((x = search_for_sig(target, 1024 / 4)) >= 0)
427			goto found;
428	}
429
430	/* search the BIOS */
431	target = (u_int32_t) BIOS_BASE;
432	if ((x = search_for_sig(target, BIOS_COUNT)) >= 0)
433		goto found;
434
435	/* nothing found */
436	mpfps = (mpfps_t)0;
437	mp_capable = 0;
438	return 0;
439
440found:
441	/* calculate needed resources */
442	mpfps = (mpfps_t)x;
443	mptable_pass1();
444
445	/* flag fact that we are running multiple processors */
446	mp_capable = 1;
447	return 1;
448}
449
450
451/*
452 * Initialize the SMP hardware and the APIC and start up the APs.
453 */
454void
455mp_start(void)
456{
457	POSTCODE(MP_START_POST);
458
459	/* look for MP capable motherboard */
460	if (mp_capable)
461		mp_enable(boot_address);
462	else
463		panic("MP hardware not found!");
464}
465
466
467/*
468 * Print various information about the SMP system hardware and setup.
469 */
470void
471mp_announce(void)
472{
473	int     x;
474
475	POSTCODE(MP_ANNOUNCE_POST);
476
477	printf("FreeBSD/SMP: Multiprocessor motherboard\n");
478	printf(" cpu0 (BSP): apic id: %2d", CPU_TO_ID(0));
479	printf(", version: 0x%08x", cpu_apic_versions[0]);
480	printf(", at 0x%08x\n", cpu_apic_address);
481	for (x = 1; x <= mp_naps; ++x) {
482		printf(" cpu%d (AP):  apic id: %2d", x, CPU_TO_ID(x));
483		printf(", version: 0x%08x", cpu_apic_versions[x]);
484		printf(", at 0x%08x\n", cpu_apic_address);
485	}
486
487#if defined(APIC_IO)
488	for (x = 0; x < mp_napics; ++x) {
489		printf(" io%d (APIC): apic id: %2d", x, IO_TO_ID(x));
490		printf(", version: 0x%08x", io_apic_versions[x]);
491		printf(", at 0x%08x\n", io_apic_address[x]);
492	}
493#else
494	printf(" Warning: APIC I/O disabled\n");
495#endif	/* APIC_IO */
496}
497
498/*
499 * AP CPUs call this to sync up protected mode.
500 */
501void
502init_secondary(void)
503{
504	int	gsel_tss;
505	int	x, myid = bootAP;
506
507	gdt_segs[GPRIV_SEL].ssd_base = (int) &SMP_prvspace[myid];
508	gdt_segs[GPROC0_SEL].ssd_base =
509		(int) &SMP_prvspace[myid].globaldata.gd_common_tss;
510	SMP_prvspace[myid].globaldata.gd_prvspace =
511		&SMP_prvspace[myid].globaldata;
512
513	for (x = 0; x < NGDT; x++) {
514		ssdtosd(&gdt_segs[x], &gdt[myid * NGDT + x].sd);
515	}
516
517	r_gdt.rd_limit = NGDT * sizeof(gdt[0]) - 1;
518	r_gdt.rd_base = (int) &gdt[myid * NGDT];
519	lgdt(&r_gdt);			/* does magic intra-segment return */
520
521	lidt(&r_idt);
522
523	lldt(_default_ldt);
524	PCPU_SET(currentldt, _default_ldt);
525
526	gsel_tss = GSEL(GPROC0_SEL, SEL_KPL);
527	gdt[myid * NGDT + GPROC0_SEL].sd.sd_type = SDT_SYS386TSS;
528	PCPU_SET(common_tss.tss_esp0, 0); /* not used until after switch */
529	PCPU_SET(common_tss.tss_ss0, GSEL(GDATA_SEL, SEL_KPL));
530	PCPU_SET(common_tss.tss_ioopt, (sizeof (struct i386tss)) << 16);
531	PCPU_SET(tss_gdt, &gdt[myid * NGDT + GPROC0_SEL].sd);
532	PCPU_SET(common_tssd, *PCPU_GET(tss_gdt));
533	ltr(gsel_tss);
534
535	pmap_set_opt();
536}
537
538
539#if defined(APIC_IO)
540/*
541 * Final configuration of the BSP's local APIC:
542 *  - disable 'pic mode'.
543 *  - disable 'virtual wire mode'.
544 *  - enable NMI.
545 */
546void
547bsp_apic_configure(void)
548{
549	u_char		byte;
550	u_int32_t	temp;
551
552	/* leave 'pic mode' if necessary */
553	if (picmode) {
554		outb(0x22, 0x70);	/* select IMCR */
555		byte = inb(0x23);	/* current contents */
556		byte |= 0x01;		/* mask external INTR */
557		outb(0x23, byte);	/* disconnect 8259s/NMI */
558	}
559
560	/* mask lint0 (the 8259 'virtual wire' connection) */
561	temp = lapic.lvt_lint0;
562	temp |= APIC_LVT_M;		/* set the mask */
563	lapic.lvt_lint0 = temp;
564
565	/* set up lint1 to handle NMI */
566	temp = lapic.lvt_lint1;
567	temp &= ~APIC_LVT_M;		/* clear the mask */
568	lapic.lvt_lint1 = temp;
569
570	if (bootverbose)
571		apic_dump("bsp_apic_configure()");
572}
573#endif  /* APIC_IO */
574
575
576/*******************************************************************
577 * local functions and data
578 */
579
580/*
581 * start the SMP system
582 */
583static void
584mp_enable(u_int boot_addr)
585{
586	int     x;
587#if defined(APIC_IO)
588	int     apic;
589	u_int   ux;
590#endif	/* APIC_IO */
591
592	POSTCODE(MP_ENABLE_POST);
593
594	/* turn on 4MB of V == P addressing so we can get to MP table */
595	*(int *)PTD = PG_V | PG_RW | ((uintptr_t)(void *)KPTphys & PG_FRAME);
596	invltlb();
597
598	/* examine the MP table for needed info, uses physical addresses */
599	x = mptable_pass2();
600
601	*(int *)PTD = 0;
602	invltlb();
603
604	/* can't process default configs till the CPU APIC is pmapped */
605	if (x)
606		default_mp_table(x);
607
608	/* post scan cleanup */
609	fix_mp_table();
610	setup_apic_irq_mapping();
611
612#if defined(APIC_IO)
613
614	/* fill the LOGICAL io_apic_versions table */
615	for (apic = 0; apic < mp_napics; ++apic) {
616		ux = io_apic_read(apic, IOAPIC_VER);
617		io_apic_versions[apic] = ux;
618		io_apic_set_id(apic, IO_TO_ID(apic));
619	}
620
621	/* program each IO APIC in the system */
622	for (apic = 0; apic < mp_napics; ++apic)
623		if (io_apic_setup(apic) < 0)
624			panic("IO APIC setup failure");
625
626	/* install a 'Spurious INTerrupt' vector */
627	setidt(XSPURIOUSINT_OFFSET, Xspuriousint,
628	       SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
629
630	/* install an inter-CPU IPI for TLB invalidation */
631	setidt(XINVLTLB_OFFSET, Xinvltlb,
632	       SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
633
634#ifdef BETTER_CLOCK
635	/* install an inter-CPU IPI for reading processor state */
636	setidt(XCPUCHECKSTATE_OFFSET, Xcpucheckstate,
637	       SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
638#endif
639
640	/* install an inter-CPU IPI for all-CPU rendezvous */
641	setidt(XRENDEZVOUS_OFFSET, Xrendezvous,
642	       SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
643
644	/* install an inter-CPU IPI for forcing an additional software trap */
645	setidt(XCPUAST_OFFSET, Xcpuast,
646	       SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
647
648	/* install an inter-CPU IPI for CPU stop/restart */
649	setidt(XCPUSTOP_OFFSET, Xcpustop,
650	       SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
651
652#if defined(TEST_TEST1)
653	/* install a "fake hardware INTerrupt" vector */
654	setidt(XTEST1_OFFSET, Xtest1,
655	       SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
656#endif  /** TEST_TEST1 */
657
658#endif	/* APIC_IO */
659
660	/* initialize all SMP locks */
661	init_locks();
662
663	/* start each Application Processor */
664	start_all_aps(boot_addr);
665}
666
667
668/*
669 * look for the MP spec signature
670 */
671
672/* string defined by the Intel MP Spec as identifying the MP table */
673#define MP_SIG		0x5f504d5f	/* _MP_ */
674#define NEXT(X)		((X) += 4)
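/*
 * search_for_sig() scans 'count' longwords starting at physical address
 * 'target' (viewed through the KERNBASE window) and returns the physical
 * byte offset of a "_MP_" signature, or -1 if none is found.  E.g. a hit at
 * the start of an EBDA at segment 0x9fc0 returns 0x9fc00.
 */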
675static int
676search_for_sig(u_int32_t target, int count)
677{
678	int     x;
679	u_int32_t *addr = (u_int32_t *) (KERNBASE + target);
680
681	for (x = 0; x < count; NEXT(x))
682		if (addr[x] == MP_SIG)
683			/* make array index a byte index */
684			return (target + (x * sizeof(u_int32_t)));
685
686	return -1;
687}
688
689
690static basetable_entry basetable_entry_types[] =
691{
692	{0, 20, "Processor"},
693	{1, 8, "Bus"},
694	{2, 8, "I/O APIC"},
695	{3, 8, "I/O INT"},
696	{4, 8, "Local INT"}
697};
698
699typedef struct BUSDATA {
700	u_char  bus_id;
701	enum busTypes bus_type;
702}       bus_datum;
703
704typedef struct INTDATA {
705	u_char  int_type;
706	u_short int_flags;
707	u_char  src_bus_id;
708	u_char  src_bus_irq;
709	u_char  dst_apic_id;
710	u_char  dst_apic_int;
711	u_char	int_vector;
712}       io_int, local_int;
713
714typedef struct BUSTYPENAME {
715	u_char  type;
716	char    name[7];
717}       bus_type_name;
718
719static bus_type_name bus_type_table[] =
720{
721	{CBUS, "CBUS"},
722	{CBUSII, "CBUSII"},
723	{EISA, "EISA"},
724	{MCA, "MCA"},
725	{UNKNOWN_BUSTYPE, "---"},
726	{ISA, "ISA"},
727	{MCA, "MCA"},
728	{UNKNOWN_BUSTYPE, "---"},
729	{UNKNOWN_BUSTYPE, "---"},
730	{UNKNOWN_BUSTYPE, "---"},
731	{UNKNOWN_BUSTYPE, "---"},
732	{UNKNOWN_BUSTYPE, "---"},
733	{PCI, "PCI"},
734	{UNKNOWN_BUSTYPE, "---"},
735	{UNKNOWN_BUSTYPE, "---"},
736	{UNKNOWN_BUSTYPE, "---"},
737	{UNKNOWN_BUSTYPE, "---"},
738	{XPRESS, "XPRESS"},
739	{UNKNOWN_BUSTYPE, "---"}
740};
741/* from MP spec v1.4, table 5-1 */
742static int default_data[7][5] =
743{
744/*   nbus, id0, type0, id1, type1 */
745	{1, 0, ISA, 255, 255},
746	{1, 0, EISA, 255, 255},
747	{1, 0, EISA, 255, 255},
748	{1, 0, MCA, 255, 255},
749	{2, 0, ISA, 1, PCI},
750	{2, 0, EISA, 1, PCI},
751	{2, 0, MCA, 1, PCI}
752};
753
754
755/* the bus data */
756static bus_datum *bus_data;
757
758/* the IO INT data, one entry per possible APIC INTerrupt */
759static io_int  *io_apic_ints;
760
761static int nintrs;
762
763static int processor_entry	__P((proc_entry_ptr entry, int cpu));
764static int bus_entry		__P((bus_entry_ptr entry, int bus));
765static int io_apic_entry	__P((io_apic_entry_ptr entry, int apic));
766static int int_entry		__P((int_entry_ptr entry, int intr));
767static int lookup_bus_type	__P((char *name));
768
769
770/*
771 * 1st pass on motherboard's Intel MP specification table.
772 *
773 * initializes:
774 *	mp_ncpus = 1
775 *
776 * determines:
777 *	cpu_apic_address (common to all CPUs)
778 *	io_apic_address[N]
779 *	mp_naps
780 *	mp_nbusses
781 *	mp_napics
782 *	nintrs
783 */
784static void
785mptable_pass1(void)
786{
787	int	x;
788	mpcth_t	cth;
789	int	totalSize;
790	void*	position;
791	int	count;
792	int	type;
793
794	POSTCODE(MPTABLE_PASS1_POST);
795
796	/* clear various tables */
797	for (x = 0; x < NAPICID; ++x) {
798		io_apic_address[x] = ~0;	/* IO APIC address table */
799	}
800
801	/* init everything to empty */
802	mp_naps = 0;
803	mp_nbusses = 0;
804	mp_napics = 0;
805	nintrs = 0;
806
807	/* check for use of 'default' configuration */
808	if (MPFPS_MPFB1 != 0) {
809		/* use default addresses */
810		cpu_apic_address = DEFAULT_APIC_BASE;
811		io_apic_address[0] = DEFAULT_IO_APIC_BASE;
812
813		/* fill in with defaults */
814		mp_naps = 2;		/* includes BSP */
815		mp_nbusses = default_data[MPFPS_MPFB1 - 1][0];
816#if defined(APIC_IO)
817		mp_napics = 1;
818		nintrs = 16;
819#endif	/* APIC_IO */
820	}
821	else {
822		if ((cth = mpfps->pap) == 0)
823			panic("MP Configuration Table Header MISSING!");
824
825		cpu_apic_address = (vm_offset_t) cth->apic_address;
826
827		/* walk the table, recording info of interest */
828		totalSize = cth->base_table_length - sizeof(struct MPCTH);
829		position = (u_char *) cth + sizeof(struct MPCTH);
830		count = cth->entry_count;
831
832		while (count--) {
833			switch (type = *(u_char *) position) {
834			case 0: /* processor_entry */
835				if (((proc_entry_ptr)position)->cpu_flags
836					& PROCENTRY_FLAG_EN)
837					++mp_naps;
838				break;
839			case 1: /* bus_entry */
840				++mp_nbusses;
841				break;
842			case 2: /* io_apic_entry */
843				if (((io_apic_entry_ptr)position)->apic_flags
844					& IOAPICENTRY_FLAG_EN)
845					io_apic_address[mp_napics++] =
846					    (vm_offset_t)((io_apic_entry_ptr)
847						position)->apic_address;
848				break;
849			case 3: /* int_entry */
850				++nintrs;
851				break;
852			case 4:	/* local_int_entry */
853				break;
854			default:
855				panic("mpfps Base Table HOSED!");
856				/* NOTREACHED */
857			}
858
859			totalSize -= basetable_entry_types[type].length;
860			(u_char*)position += basetable_entry_types[type].length;
861		}
862	}
863
864	/* qualify the numbers */
865	if (mp_naps > MAXCPU) {
866		printf("Warning: only using %d of %d available CPUs!\n",
867			MAXCPU, mp_naps);
868		mp_naps = MAXCPU;
869	}
870
871	/*
872	 * Count the BSP.
873	 * This is also used as a counter while starting the APs.
874	 */
875	mp_ncpus = 1;
876
877	--mp_naps;	/* subtract the BSP */
878}
879
880
881/*
882 * 2nd pass on motherboard's Intel MP specification table.
883 *
884 * sets:
885 *	boot_cpu_id
886 *	ID_TO_IO(N), phy APIC ID to log CPU/IO table
887 *	CPU_TO_ID(N), logical CPU to APIC ID table
888 *	IO_TO_ID(N), logical IO to APIC ID table
889 *	bus_data[N]
890 *	io_apic_ints[N]
891 */
892static int
893mptable_pass2(void)
894{
895	int     x;
896	mpcth_t cth;
897	int     totalSize;
898	void*   position;
899	int     count;
900	int     type;
901	int     apic, bus, cpu, intr;
902	int	i, j;
903	int	pgeflag;
904
905	POSTCODE(MPTABLE_PASS2_POST);
906
907	pgeflag = 0;		/* XXX - Not used under SMP yet.  */
908
909	MALLOC(io_apic_versions, u_int32_t *, sizeof(u_int32_t) * mp_napics,
910	    M_DEVBUF, M_WAITOK);
911	MALLOC(ioapic, volatile ioapic_t **, sizeof(ioapic_t *) * mp_napics,
912	    M_DEVBUF, M_WAITOK);
913	MALLOC(io_apic_ints, io_int *, sizeof(io_int) * (nintrs + 1),
914	    M_DEVBUF, M_WAITOK);
915	MALLOC(bus_data, bus_datum *, sizeof(bus_datum) * mp_nbusses,
916	    M_DEVBUF, M_WAITOK);
917
918	bzero(ioapic, sizeof(ioapic_t *) * mp_napics);
919
920	for (i = 0; i < mp_napics; i++) {
921		for (j = 0; j < mp_napics; j++) {
922			/* same page frame as a previous IO apic? */
923			if (((vm_offset_t)SMPpt[NPTEPG-2-j] & PG_FRAME) ==
924			    (io_apic_address[i] & PG_FRAME)) {
925				ioapic[i] = (ioapic_t *)((u_int)SMP_prvspace
926					+ (NPTEPG-2-j) * PAGE_SIZE
927					+ (io_apic_address[i] & PAGE_MASK));
928				break;
929			}
930			/* use this slot if available */
931			if (((vm_offset_t)SMPpt[NPTEPG-2-j] & PG_FRAME) == 0) {
932				SMPpt[NPTEPG-2-j] = (pt_entry_t)(PG_V | PG_RW |
933				    pgeflag | (io_apic_address[i] & PG_FRAME));
934				ioapic[i] = (ioapic_t *)((u_int)SMP_prvspace
935					+ (NPTEPG-2-j) * PAGE_SIZE
936					+ (io_apic_address[i] & PAGE_MASK));
937				break;
938			}
939		}
940	}
941
942	/* clear various tables */
943	for (x = 0; x < NAPICID; ++x) {
944		ID_TO_IO(x) = -1;	/* phy APIC ID to log CPU/IO table */
945		CPU_TO_ID(x) = -1;	/* logical CPU to APIC ID table */
946		IO_TO_ID(x) = -1;	/* logical IO to APIC ID table */
947	}
948
949	/* clear bus data table */
950	for (x = 0; x < mp_nbusses; ++x)
951		bus_data[x].bus_id = 0xff;
952
953	/* clear IO APIC INT table */
954	for (x = 0; x < (nintrs + 1); ++x) {
955		io_apic_ints[x].int_type = 0xff;
956		io_apic_ints[x].int_vector = 0xff;
957	}
958
959	/* setup the cpu/apic mapping arrays */
960	boot_cpu_id = -1;
961
962	/* record whether PIC or virtual-wire mode */
963	picmode = (mpfps->mpfb2 & 0x80) ? 1 : 0;
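	/*
	 * (bit 7 of feature byte 2 set means an IMCR is present and the
	 * system powered up in PIC mode; clear means virtual wire mode)
	 */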
964
965	/* check for use of 'default' configuration */
966	if (MPFPS_MPFB1 != 0)
967		return MPFPS_MPFB1;	/* return default configuration type */
968
969	if ((cth = mpfps->pap) == 0)
970		panic("MP Configuration Table Header MISSING!");
971
972	/* walk the table, recording info of interest */
973	totalSize = cth->base_table_length - sizeof(struct MPCTH);
974	position = (u_char *) cth + sizeof(struct MPCTH);
975	count = cth->entry_count;
976	apic = bus = intr = 0;
977	cpu = 1;				/* pre-count the BSP */
978
979	while (count--) {
980		switch (type = *(u_char *) position) {
981		case 0:
982			if (processor_entry(position, cpu))
983				++cpu;
984			break;
985		case 1:
986			if (bus_entry(position, bus))
987				++bus;
988			break;
989		case 2:
990			if (io_apic_entry(position, apic))
991				++apic;
992			break;
993		case 3:
994			if (int_entry(position, intr))
995				++intr;
996			break;
997		case 4:
998			/* int_entry(position); */
999			break;
1000		default:
1001			panic("mpfps Base Table HOSED!");
1002			/* NOTREACHED */
1003		}
1004
1005		totalSize -= basetable_entry_types[type].length;
1006		(u_char *) position += basetable_entry_types[type].length;
1007	}
1008
1009	if (boot_cpu_id == -1)
1010		panic("NO BSP found!");
1011
1012	/* report fact that it's NOT a default configuration */
1013	return 0;
1014}
1015
1016
1017void
1018assign_apic_irq(int apic, int intpin, int irq)
1019{
1020	int x;
1021
1022	if (int_to_apicintpin[irq].ioapic != -1)
1023		panic("assign_apic_irq: inconsistent table");
1024
1025	int_to_apicintpin[irq].ioapic = apic;
1026	int_to_apicintpin[irq].int_pin = intpin;
1027	int_to_apicintpin[irq].apic_address = ioapic[apic];
1028	int_to_apicintpin[irq].redirindex = IOAPIC_REDTBL + 2 * intpin;
1029
1030	for (x = 0; x < nintrs; x++) {
1031		if ((io_apic_ints[x].int_type == 0 ||
1032		     io_apic_ints[x].int_type == 3) &&
1033		    io_apic_ints[x].int_vector == 0xff &&
1034		    io_apic_ints[x].dst_apic_id == IO_TO_ID(apic) &&
1035		    io_apic_ints[x].dst_apic_int == intpin)
1036			io_apic_ints[x].int_vector = irq;
1037	}
1038}
1039
1040void
1041revoke_apic_irq(int irq)
1042{
1043	int x;
1044	int oldapic;
1045	int oldintpin;
1046
1047	if (int_to_apicintpin[irq].ioapic == -1)
1048		panic("revoke_apic_irq: inconsistent table");
1049
1050	oldapic = int_to_apicintpin[irq].ioapic;
1051	oldintpin = int_to_apicintpin[irq].int_pin;
1052
1053	int_to_apicintpin[irq].ioapic = -1;
1054	int_to_apicintpin[irq].int_pin = 0;
1055	int_to_apicintpin[irq].apic_address = NULL;
1056	int_to_apicintpin[irq].redirindex = 0;
1057
1058	for (x = 0; x < nintrs; x++) {
1059		if ((io_apic_ints[x].int_type == 0 ||
1060		     io_apic_ints[x].int_type == 3) &&
1061		    io_apic_ints[x].int_vector != 0xff &&
1062		    io_apic_ints[x].dst_apic_id == IO_TO_ID(oldapic) &&
1063		    io_apic_ints[x].dst_apic_int == oldintpin)
1064			io_apic_ints[x].int_vector = 0xff;
1065	}
1066}
1067
1068
1069static void
1070allocate_apic_irq(int intr)
1071{
1072	int apic;
1073	int intpin;
1074	int irq;
1075
1076	if (io_apic_ints[intr].int_vector != 0xff)
1077		return;		/* Interrupt handler already assigned */
1078
1079	if (io_apic_ints[intr].int_type != 0 &&
1080	    (io_apic_ints[intr].int_type != 3 ||
1081	     (io_apic_ints[intr].dst_apic_id == IO_TO_ID(0) &&
1082	      io_apic_ints[intr].dst_apic_int == 0)))
1083		return;		/* Not INT or ExtInt on != (0, 0) */
1084
1085	irq = 0;
1086	while (irq < APIC_INTMAPSIZE &&
1087	       int_to_apicintpin[irq].ioapic != -1)
1088		irq++;
1089
1090	if (irq >= APIC_INTMAPSIZE)
1091		return;		/* No free interrupt handlers */
1092
1093	apic = ID_TO_IO(io_apic_ints[intr].dst_apic_id);
1094	intpin = io_apic_ints[intr].dst_apic_int;
1095
1096	assign_apic_irq(apic, intpin, irq);
1097	io_apic_setup_intpin(apic, intpin);
1098}
1099
1100
1101static void
1102swap_apic_id(int apic, int oldid, int newid)
1103{
1104	int x;
1105	int oapic;
1106
1107
1108	if (oldid == newid)
1109		return;			/* Nothing to do */
1110
1111	printf("Changing APIC ID for IO APIC #%d from %d to %d in MP table\n",
1112	       apic, oldid, newid);
1113
1114	/* Swap physical APIC IDs in interrupt entries */
1115	for (x = 0; x < nintrs; x++) {
1116		if (io_apic_ints[x].dst_apic_id == oldid)
1117			io_apic_ints[x].dst_apic_id = newid;
1118		else if (io_apic_ints[x].dst_apic_id == newid)
1119			io_apic_ints[x].dst_apic_id = oldid;
1120	}
1121
1122	/* Swap physical APIC IDs in IO_TO_ID mappings */
1123	for (oapic = 0; oapic < mp_napics; oapic++)
1124		if (IO_TO_ID(oapic) == newid)
1125			break;
1126
1127	if (oapic < mp_napics) {
1128		printf("Changing APIC ID for IO APIC #%d from "
1129		       "%d to %d in MP table\n",
1130		       oapic, newid, oldid);
1131		IO_TO_ID(oapic) = oldid;
1132	}
1133	IO_TO_ID(apic) = newid;
1134}
1135
1136
1137static void
1138fix_id_to_io_mapping(void)
1139{
1140	int x;
1141
1142	for (x = 0; x < NAPICID; x++)
1143		ID_TO_IO(x) = -1;
1144
1145	for (x = 0; x <= mp_naps; x++)
1146		if (CPU_TO_ID(x) < NAPICID)
1147			ID_TO_IO(CPU_TO_ID(x)) = x;
1148
1149	for (x = 0; x < mp_napics; x++)
1150		if (IO_TO_ID(x) < NAPICID)
1151			ID_TO_IO(IO_TO_ID(x)) = x;
1152}
1153
1154
1155static int
1156first_free_apic_id(void)
1157{
1158	int freeid, x;
1159
1160	for (freeid = 0; freeid < NAPICID; freeid++) {
1161		for (x = 0; x <= mp_naps; x++)
1162			if (CPU_TO_ID(x) == freeid)
1163				break;
1164		if (x <= mp_naps)
1165			continue;
1166		for (x = 0; x < mp_napics; x++)
1167			if (IO_TO_ID(x) == freeid)
1168				break;
1169		if (x < mp_napics)
1170			continue;
1171		return freeid;
1172	}
1173	return freeid;
1174}
1175
1176
1177static int
1178io_apic_id_acceptable(int apic, int id)
1179{
1180	int cpu;		/* Logical CPU number */
1181	int oapic;		/* Logical IO APIC number for other IO APIC */
1182
1183	if (id >= NAPICID)
1184		return 0;	/* Out of range */
1185
1186	for (cpu = 0; cpu <= mp_naps; cpu++)
1187		if (CPU_TO_ID(cpu) == id)
1188			return 0;	/* Conflict with CPU */
1189
1190	for (oapic = 0; oapic < mp_napics && oapic < apic; oapic++)
1191		if (IO_TO_ID(oapic) == id)
1192			return 0;	/* Conflict with other APIC */
1193
1194	return 1;		/* ID is acceptable for IO APIC */
1195}
1196
1197
1198/*
1199 * parse an Intel MP specification table
1200 */
1201static void
1202fix_mp_table(void)
1203{
1204	int	x;
1205	int	id;
1206	int	bus_0 = 0;	/* Stop GCC warning */
1207	int	bus_pci = 0;	/* Stop GCC warning */
1208	int	num_pci_bus;
1209	int	apic;		/* IO APIC unit number */
1210	int     freeid;		/* Free physical APIC ID */
1211	int	physid;		/* Current physical IO APIC ID */
1212
1213	/*
1214	 * Fix mis-numbering of the PCI bus and its INT entries if the BIOS
1215	 * did it wrong.  The MP spec says that when more than 1 PCI bus
1216	 * exists the BIOS must begin with bus entries for the PCI bus and use
1217	 * actual PCI bus numbering.  This implies that when only 1 PCI bus
1218	 * exists the BIOS can choose to ignore this ordering, and indeed many
1219	 * MP motherboards do ignore it.  This causes a problem when the PCI
1220	 * sub-system makes requests of the MP sub-system based on PCI bus
1221	 * numbers.	So here we look for the situation and renumber the
1222	 * busses and associated INTs in an effort to "make it right".
1223	 */
1224
1225	/* find bus 0, PCI bus, count the number of PCI busses */
1226	for (num_pci_bus = 0, x = 0; x < mp_nbusses; ++x) {
1227		if (bus_data[x].bus_id == 0) {
1228			bus_0 = x;
1229		}
1230		if (bus_data[x].bus_type == PCI) {
1231			++num_pci_bus;
1232			bus_pci = x;
1233		}
1234	}
1235	/*
1236	 * bus_0 == slot of bus with ID of 0
1237	 * bus_pci == slot of last PCI bus encountered
1238	 */
1239
1240	/* check the 1 PCI bus case for sanity */
1241	/* if it is number 0 all is well */
1242	if (num_pci_bus == 1 &&
1243	    bus_data[bus_pci].bus_id != 0) {
1244
1245		/* mis-numbered, swap with whichever bus uses slot 0 */
1246
1247		/* swap the bus entry types */
1248		bus_data[bus_pci].bus_type = bus_data[bus_0].bus_type;
1249		bus_data[bus_0].bus_type = PCI;
1250
1251		/* swap each relevant INTerrupt entry */
1252		id = bus_data[bus_pci].bus_id;
1253		for (x = 0; x < nintrs; ++x) {
1254			if (io_apic_ints[x].src_bus_id == id) {
1255				io_apic_ints[x].src_bus_id = 0;
1256			}
1257			else if (io_apic_ints[x].src_bus_id == 0) {
1258				io_apic_ints[x].src_bus_id = id;
1259			}
1260		}
1261	}
1262
1263	/* Assign IO APIC IDs.
1264	 *
1265	 * First try the existing ID. If a conflict is detected, try
1266	 * the ID in the MP table.  If a conflict is still detected, find
1267	 * a free id.
1268	 *
1269	 * We cannot use the ID_TO_IO table before all conflicts have been
1270	 * resolved and the table has been corrected.
1271	 */
1272	for (apic = 0; apic < mp_napics; ++apic) { /* For all IO APICs */
1273
1274		/* First try to use the value set by the BIOS */
1275		physid = io_apic_get_id(apic);
1276		if (io_apic_id_acceptable(apic, physid)) {
1277			if (IO_TO_ID(apic) != physid)
1278				swap_apic_id(apic, IO_TO_ID(apic), physid);
1279			continue;
1280		}
1281
1282		/* Then check if the value in the MP table is acceptable */
1283		if (io_apic_id_acceptable(apic, IO_TO_ID(apic)))
1284			continue;
1285
1286		/* Last resort, find a free APIC ID and use it */
1287		freeid = first_free_apic_id();
1288		if (freeid >= NAPICID)
1289			panic("No free physical APIC IDs found");
1290
1291		if (io_apic_id_acceptable(apic, freeid)) {
1292			swap_apic_id(apic, IO_TO_ID(apic), freeid);
1293			continue;
1294		}
1295		panic("Free physical APIC ID not usable");
1296	}
1297	fix_id_to_io_mapping();
1298
1299	/* detect and fix broken Compaq MP table */
1300	if (apic_int_type(0, 0) == -1) {
1301		printf("APIC_IO: MP table broken: 8259->APIC entry missing!\n");
1302		io_apic_ints[nintrs].int_type = 3;	/* ExtInt */
1303		io_apic_ints[nintrs].int_vector = 0xff;	/* Unassigned */
1304		/* XXX fixme, set src bus id etc, but it doesn't seem to hurt */
1305		io_apic_ints[nintrs].dst_apic_id = IO_TO_ID(0);
1306		io_apic_ints[nintrs].dst_apic_int = 0;	/* Pin 0 */
1307		nintrs++;
1308	}
1309}
1310
1311
1312/* Assign low level interrupt handlers */
1313static void
1314setup_apic_irq_mapping(void)
1315{
1316	int	x;
1317	int	int_vector;
1318
1319	/* Clear array */
1320	for (x = 0; x < APIC_INTMAPSIZE; x++) {
1321		int_to_apicintpin[x].ioapic = -1;
1322		int_to_apicintpin[x].int_pin = 0;
1323		int_to_apicintpin[x].apic_address = NULL;
1324		int_to_apicintpin[x].redirindex = 0;
1325	}
1326
1327	/* First assign ISA/EISA interrupts */
1328	for (x = 0; x < nintrs; x++) {
1329		int_vector = io_apic_ints[x].src_bus_irq;
1330		if (int_vector < APIC_INTMAPSIZE &&
1331		    io_apic_ints[x].int_vector == 0xff &&
1332		    int_to_apicintpin[int_vector].ioapic == -1 &&
1333		    (apic_int_is_bus_type(x, ISA) ||
1334		     apic_int_is_bus_type(x, EISA)) &&
1335		    io_apic_ints[x].int_type == 0) {
1336			assign_apic_irq(ID_TO_IO(io_apic_ints[x].dst_apic_id),
1337					io_apic_ints[x].dst_apic_int,
1338					int_vector);
1339		}
1340	}
1341
1342	/* Assign ExtInt entry if no ISA/EISA interrupt 0 entry */
1343	for (x = 0; x < nintrs; x++) {
1344		if (io_apic_ints[x].dst_apic_int == 0 &&
1345		    io_apic_ints[x].dst_apic_id == IO_TO_ID(0) &&
1346		    io_apic_ints[x].int_vector == 0xff &&
1347		    int_to_apicintpin[0].ioapic == -1 &&
1348		    io_apic_ints[x].int_type == 3) {
1349			assign_apic_irq(0, 0, 0);
1350			break;
1351		}
1352	}
1353	/* PCI interrupt assignment is deferred */
1354}
1355
1356
1357static int
1358processor_entry(proc_entry_ptr entry, int cpu)
1359{
1360	/* check for usability */
1361	if (!(entry->cpu_flags & PROCENTRY_FLAG_EN))
1362		return 0;
1363
1364	if (entry->apic_id >= NAPICID)
1365		panic("CPU APIC ID out of range (0..%d)", NAPICID - 1);
1366	/* check for BSP flag */
1367	if (entry->cpu_flags & PROCENTRY_FLAG_BP) {
1368		boot_cpu_id = entry->apic_id;
1369		CPU_TO_ID(0) = entry->apic_id;
1370		ID_TO_CPU(entry->apic_id) = 0;
1371		return 0;	/* it's already been counted */
1372	}
1373
1374	/* add another AP to list, if less than max number of CPUs */
1375	else if (cpu < MAXCPU) {
1376		CPU_TO_ID(cpu) = entry->apic_id;
1377		ID_TO_CPU(entry->apic_id) = cpu;
1378		return 1;
1379	}
1380
1381	return 0;
1382}
1383
1384
1385static int
1386bus_entry(bus_entry_ptr entry, int bus)
1387{
1388	int     x;
1389	char    c, name[8];
1390
1391	/* encode the name into an index */
1392	for (x = 0; x < 6; ++x) {
1393		if ((c = entry->bus_type[x]) == ' ')
1394			break;
1395		name[x] = c;
1396	}
1397	name[x] = '\0';
1398
1399	if ((x = lookup_bus_type(name)) == UNKNOWN_BUSTYPE)
1400		panic("unknown bus type: '%s'", name);
1401
1402	bus_data[bus].bus_id = entry->bus_id;
1403	bus_data[bus].bus_type = x;
1404
1405	return 1;
1406}
1407
1408
1409static int
1410io_apic_entry(io_apic_entry_ptr entry, int apic)
1411{
1412	if (!(entry->apic_flags & IOAPICENTRY_FLAG_EN))
1413		return 0;
1414
1415	IO_TO_ID(apic) = entry->apic_id;
1416	if (entry->apic_id < NAPICID)
1417		ID_TO_IO(entry->apic_id) = apic;
1418
1419	return 1;
1420}
1421
1422
1423static int
1424lookup_bus_type(char *name)
1425{
1426	int     x;
1427
1428	for (x = 0; x < MAX_BUSTYPE; ++x)
1429		if (strcmp(bus_type_table[x].name, name) == 0)
1430			return bus_type_table[x].type;
1431
1432	return UNKNOWN_BUSTYPE;
1433}
1434
1435
1436static int
1437int_entry(int_entry_ptr entry, int intr)
1438{
1439	int apic;
1440
1441	io_apic_ints[intr].int_type = entry->int_type;
1442	io_apic_ints[intr].int_flags = entry->int_flags;
1443	io_apic_ints[intr].src_bus_id = entry->src_bus_id;
1444	io_apic_ints[intr].src_bus_irq = entry->src_bus_irq;
1445	if (entry->dst_apic_id == 255) {
1446		/* This signal goes to all IO APICs.  Select an IO APIC
1447		   with a sufficient number of interrupt pins */
1448		for (apic = 0; apic < mp_napics; apic++)
1449			if (((io_apic_read(apic, IOAPIC_VER) &
1450			      IOART_VER_MAXREDIR) >> MAXREDIRSHIFT) >=
1451			    entry->dst_apic_int)
1452				break;
1453		if (apic < mp_napics)
1454			io_apic_ints[intr].dst_apic_id = IO_TO_ID(apic);
1455		else
1456			io_apic_ints[intr].dst_apic_id = entry->dst_apic_id;
1457	} else
1458		io_apic_ints[intr].dst_apic_id = entry->dst_apic_id;
1459	io_apic_ints[intr].dst_apic_int = entry->dst_apic_int;
1460
1461	return 1;
1462}
1463
1464
1465static int
1466apic_int_is_bus_type(int intr, int bus_type)
1467{
1468	int     bus;
1469
1470	for (bus = 0; bus < mp_nbusses; ++bus)
1471		if ((bus_data[bus].bus_id == io_apic_ints[intr].src_bus_id)
1472		    && ((int) bus_data[bus].bus_type == bus_type))
1473			return 1;
1474
1475	return 0;
1476}
1477
1478
1479/*
1480 * Given a traditional ISA INT mask, return an APIC mask.
1481 */
1482u_int
1483isa_apic_mask(u_int isa_mask)
1484{
1485	int isa_irq;
1486	int apic_pin;
1487
1488#if defined(SKIP_IRQ15_REDIRECT)
1489	if (isa_mask == (1 << 15)) {
1490		printf("skipping ISA IRQ15 redirect\n");
1491		return isa_mask;
1492	}
1493#endif  /* SKIP_IRQ15_REDIRECT */
1494
1495	isa_irq = ffs(isa_mask);		/* find its bit position */
1496	if (isa_irq == 0)			/* doesn't exist */
1497		return 0;
1498	--isa_irq;				/* make it zero based */
1499
1500	apic_pin = isa_apic_irq(isa_irq);	/* look for APIC connection */
1501	if (apic_pin == -1)
1502		return 0;
1503
1504	return (1 << apic_pin);			/* convert pin# to a mask */
1505}
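/*
 * E.g. isa_apic_mask(1 << 3) yields a mask with the bit set for the APIC
 * IRQ that ISA IRQ 3 was assigned (typically 3 again, since ISA/EISA
 * interrupts keep their source IRQ number in setup_apic_irq_mapping()),
 * or 0 if that IRQ has no APIC connection or is still unassigned.
 */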
1506
1507
1508/*
1509 * Determine which APIC pin an ISA/EISA INT is attached to.
1510 */
1511#define INTTYPE(I)	(io_apic_ints[(I)].int_type)
1512#define INTPIN(I)	(io_apic_ints[(I)].dst_apic_int)
1513#define INTIRQ(I)	(io_apic_ints[(I)].int_vector)
1514#define INTAPIC(I)	(ID_TO_IO(io_apic_ints[(I)].dst_apic_id))
1515
1516#define SRCBUSIRQ(I)	(io_apic_ints[(I)].src_bus_irq)
1517int
1518isa_apic_irq(int isa_irq)
1519{
1520	int     intr;
1521
1522	for (intr = 0; intr < nintrs; ++intr) {		/* check each record */
1523		if (INTTYPE(intr) == 0) {		/* standard INT */
1524			if (SRCBUSIRQ(intr) == isa_irq) {
1525				if (apic_int_is_bus_type(intr, ISA) ||
1526			            apic_int_is_bus_type(intr, EISA)) {
1527					if (INTIRQ(intr) == 0xff)
1528						return -1; /* unassigned */
1529					return INTIRQ(intr);	/* found */
1530				}
1531			}
1532		}
1533	}
1534	return -1;					/* NOT found */
1535}
1536
1537
1538/*
1539 * Determine which APIC pin a PCI INT is attached to.
1540 */
1541#define SRCBUSID(I)	(io_apic_ints[(I)].src_bus_id)
1542#define SRCBUSDEVICE(I)	((io_apic_ints[(I)].src_bus_irq >> 2) & 0x1f)
1543#define SRCBUSLINE(I)	(io_apic_ints[(I)].src_bus_irq & 0x03)
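/*
 * (for PCI source buses the MP spec packs the device number into bits 2-6
 * of src_bus_irq and the INT line A-D into bits 0-1, which is what the two
 * macros above pick apart)
 */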
1544int
1545pci_apic_irq(int pciBus, int pciDevice, int pciInt)
1546{
1547	int     intr;
1548
1549	--pciInt;					/* zero based */
1550
1551	for (intr = 0; intr < nintrs; ++intr)		/* check each record */
1552		if ((INTTYPE(intr) == 0)		/* standard INT */
1553		    && (SRCBUSID(intr) == pciBus)
1554		    && (SRCBUSDEVICE(intr) == pciDevice)
1555		    && (SRCBUSLINE(intr) == pciInt))	/* a candidate IRQ */
1556			if (apic_int_is_bus_type(intr, PCI)) {
1557				if (INTIRQ(intr) == 0xff)
1558					allocate_apic_irq(intr);
1559				if (INTIRQ(intr) == 0xff)
1560					return -1;	/* unassigned */
1561				return INTIRQ(intr);	/* exact match */
1562			}
1563
1564	return -1;					/* NOT found */
1565}
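/*
 * E.g. pci_apic_irq(0, 12, 1) asks for the IRQ routed to INTA# of device 12
 * on PCI bus 0, allocating one on first use; -1 means the MP table has no
 * matching entry.
 */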
1566
1567int
1568next_apic_irq(int irq)
1569{
1570	int intr, ointr;
1571	int bus, bustype;
1572
1573	bus = 0;
1574	bustype = 0;
1575	for (intr = 0; intr < nintrs; intr++) {
1576		if (INTIRQ(intr) != irq || INTTYPE(intr) != 0)
1577			continue;
1578		bus = SRCBUSID(intr);
1579		bustype = apic_bus_type(bus);
1580		if (bustype != ISA &&
1581		    bustype != EISA &&
1582		    bustype != PCI)
1583			continue;
1584		break;
1585	}
1586	if (intr >= nintrs) {
1587		return -1;
1588	}
1589	for (ointr = intr + 1; ointr < nintrs; ointr++) {
1590		if (INTTYPE(ointr) != 0)
1591			continue;
1592		if (bus != SRCBUSID(ointr))
1593			continue;
1594		if (bustype == PCI) {
1595			if (SRCBUSDEVICE(intr) != SRCBUSDEVICE(ointr))
1596				continue;
1597			if (SRCBUSLINE(intr) != SRCBUSLINE(ointr))
1598				continue;
1599		}
1600		if (bustype == ISA || bustype == EISA) {
1601			if (SRCBUSIRQ(intr) != SRCBUSIRQ(ointr))
1602				continue;
1603		}
1604		if (INTPIN(intr) == INTPIN(ointr))
1605			continue;
1606		break;
1607	}
1608	if (ointr >= nintrs) {
1609		return -1;
1610	}
1611	return INTIRQ(ointr);
1612}
1613#undef SRCBUSLINE
1614#undef SRCBUSDEVICE
1615#undef SRCBUSID
1616#undef SRCBUSIRQ
1617
1618#undef INTPIN
1619#undef INTIRQ
1620#undef INTAPIC
1621#undef INTTYPE
1622
1623
1624/*
1625 * Reprogram the MB chipset to NOT redirect an ISA INTerrupt.
1626 *
1627 * XXX FIXME:
1628 *  Exactly what this means is unclear at this point.  It is a solution
1629 *  for motherboards that redirect the MBIRQ0 pin.  Generically a motherboard
1630 *  could route any of the ISA INTs to upper (>15) IRQ values.  But most would
1631 *  NOT be redirected via MBIRQ0, thus "undirect()ing" them would NOT be an
1632 *  option.
1633 */
1634int
1635undirect_isa_irq(int rirq)
1636{
1637#if defined(READY)
1638	if (bootverbose)
1639	    printf("Freeing redirected ISA irq %d.\n", rirq);
1640	/** FIXME: tickle the MB redirector chip */
1641	return -1;
1642#else
1643	if (bootverbose)
1644	    printf("Freeing (NOT implemented) redirected ISA irq %d.\n", rirq);
1645	return 0;
1646#endif  /* READY */
1647}
1648
1649
1650/*
1651 * Reprogram the MB chipset to NOT redirect a PCI INTerrupt
1652 */
1653int
1654undirect_pci_irq(int rirq)
1655{
1656#if defined(READY)
1657	if (bootverbose)
1658		printf("Freeing redirected PCI irq %d.\n", rirq);
1659
1660	/** FIXME: tickle the MB redirector chip */
1661	return -1;
1662#else
1663	if (bootverbose)
1664		printf("Freeing (NOT implemented) redirected PCI irq %d.\n",
1665		       rirq);
1666	return 0;
1667#endif  /* READY */
1668}
1669
1670
1671/*
1672 * given a bus ID, return:
1673 *  the bus type if found
1674 *  -1 if NOT found
1675 */
1676int
1677apic_bus_type(int id)
1678{
1679	int     x;
1680
1681	for (x = 0; x < mp_nbusses; ++x)
1682		if (bus_data[x].bus_id == id)
1683			return bus_data[x].bus_type;
1684
1685	return -1;
1686}
1687
1688
1689/*
1690 * given a LOGICAL APIC# and pin#, return:
1691 *  the associated src bus ID if found
1692 *  -1 if NOT found
1693 */
1694int
1695apic_src_bus_id(int apic, int pin)
1696{
1697	int     x;
1698
1699	/* search each of the possible INTerrupt sources */
1700	for (x = 0; x < nintrs; ++x)
1701		if ((apic == ID_TO_IO(io_apic_ints[x].dst_apic_id)) &&
1702		    (pin == io_apic_ints[x].dst_apic_int))
1703			return (io_apic_ints[x].src_bus_id);
1704
1705	return -1;		/* NOT found */
1706}
1707
1708
1709/*
1710 * given a LOGICAL APIC# and pin#, return:
1711 *  the associated src bus IRQ if found
1712 *  -1 if NOT found
1713 */
1714int
1715apic_src_bus_irq(int apic, int pin)
1716{
1717	int     x;
1718
1719	for (x = 0; x < nintrs; x++)
1720		if ((apic == ID_TO_IO(io_apic_ints[x].dst_apic_id)) &&
1721		    (pin == io_apic_ints[x].dst_apic_int))
1722			return (io_apic_ints[x].src_bus_irq);
1723
1724	return -1;		/* NOT found */
1725}
1726
1727
1728/*
1729 * given a LOGICAL APIC# and pin#, return:
1730 *  the associated INTerrupt type if found
1731 *  -1 if NOT found
1732 */
1733int
1734apic_int_type(int apic, int pin)
1735{
1736	int     x;
1737
1738	/* search each of the possible INTerrupt sources */
1739	for (x = 0; x < nintrs; ++x)
1740		if ((apic == ID_TO_IO(io_apic_ints[x].dst_apic_id)) &&
1741		    (pin == io_apic_ints[x].dst_apic_int))
1742			return (io_apic_ints[x].int_type);
1743
1744	return -1;		/* NOT found */
1745}
1746
1747int
1748apic_irq(int apic, int pin)
1749{
1750	int x;
1751	int res;
1752
1753	for (x = 0; x < nintrs; ++x)
1754		if ((apic == ID_TO_IO(io_apic_ints[x].dst_apic_id)) &&
1755		    (pin == io_apic_ints[x].dst_apic_int)) {
1756			res = io_apic_ints[x].int_vector;
1757			if (res == 0xff)
1758				return -1;
1759			if (apic != int_to_apicintpin[res].ioapic)
1760				panic("apic_irq: inconsistent table");
1761			if (pin != int_to_apicintpin[res].int_pin)
1762				panic("apic_irq inconsistent table (2)");
1763			return res;
1764		}
1765	return -1;
1766}
1767
1768
1769/*
1770 * given a LOGICAL APIC# and pin#, return:
1771 *  the associated trigger mode if found
1772 *  -1 if NOT found
1773 */
1774int
1775apic_trigger(int apic, int pin)
1776{
1777	int     x;
1778
1779	/* search each of the possible INTerrupt sources */
1780	for (x = 0; x < nintrs; ++x)
1781		if ((apic == ID_TO_IO(io_apic_ints[x].dst_apic_id)) &&
1782		    (pin == io_apic_ints[x].dst_apic_int))
1783			return ((io_apic_ints[x].int_flags >> 2) & 0x03);
1784
1785	return -1;		/* NOT found */
1786}
1787
1788
1789/*
1790 * given a LOGICAL APIC# and pin#, return:
1791 *  the associated 'active' level if found
1792 *  -1 if NOT found
1793 */
1794int
1795apic_polarity(int apic, int pin)
1796{
1797	int     x;
1798
1799	/* search each of the possible INTerrupt sources */
1800	for (x = 0; x < nintrs; ++x)
1801		if ((apic == ID_TO_IO(io_apic_ints[x].dst_apic_id)) &&
1802		    (pin == io_apic_ints[x].dst_apic_int))
1803			return (io_apic_ints[x].int_flags & 0x03);
1804
1805	return -1;		/* NOT found */
1806}
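/*
 * (encodings from the MP spec: polarity 00 = conforms to bus, 01 = active
 * high, 11 = active low; trigger 00 = conforms to bus, 01 = edge, 11 = level)
 */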
1807
1808
1809/*
1810 * set data according to MP defaults
1811 * FIXME: probably not complete yet...
1812 */
1813static void
1814default_mp_table(int type)
1815{
1816	int     ap_cpu_id;
1817#if defined(APIC_IO)
1818	int     io_apic_id;
1819	int     pin;
1820#endif	/* APIC_IO */
1821
1822#if 0
1823	printf("  MP default config type: %d\n", type);
1824	switch (type) {
1825	case 1:
1826		printf("   bus: ISA, APIC: 82489DX\n");
1827		break;
1828	case 2:
1829		printf("   bus: EISA, APIC: 82489DX\n");
1830		break;
1831	case 3:
1832		printf("   bus: EISA, APIC: 82489DX\n");
1833		break;
1834	case 4:
1835		printf("   bus: MCA, APIC: 82489DX\n");
1836		break;
1837	case 5:
1838		printf("   bus: ISA+PCI, APIC: Integrated\n");
1839		break;
1840	case 6:
1841		printf("   bus: EISA+PCI, APIC: Integrated\n");
1842		break;
1843	case 7:
1844		printf("   bus: MCA+PCI, APIC: Integrated\n");
1845		break;
1846	default:
1847		printf("   future type\n");
1848		break;
1849		/* NOTREACHED */
1850	}
1851#endif	/* 0 */
1852
1853	boot_cpu_id = (lapic.id & APIC_ID_MASK) >> 24;
1854	ap_cpu_id = (boot_cpu_id == 0) ? 1 : 0;
1855
1856	/* BSP */
1857	CPU_TO_ID(0) = boot_cpu_id;
1858	ID_TO_CPU(boot_cpu_id) = 0;
1859
1860	/* one and only AP */
1861	CPU_TO_ID(1) = ap_cpu_id;
1862	ID_TO_CPU(ap_cpu_id) = 1;
1863
1864#if defined(APIC_IO)
1865	/* one and only IO APIC */
1866	io_apic_id = (io_apic_read(0, IOAPIC_ID) & APIC_ID_MASK) >> 24;
1867
1868	/*
1869	 * sanity check, refer to MP spec section 3.6.6, last paragraph
1870	 * necessary as some hardware isn't properly setting up the IO APIC
1871	 */
1872#if defined(REALLY_ANAL_IOAPICID_VALUE)
1873	if (io_apic_id != 2) {
1874#else
1875	if ((io_apic_id == 0) || (io_apic_id == 1) || (io_apic_id == 15)) {
1876#endif	/* REALLY_ANAL_IOAPICID_VALUE */
1877		io_apic_set_id(0, 2);
1878		io_apic_id = 2;
1879	}
1880	IO_TO_ID(0) = io_apic_id;
1881	ID_TO_IO(io_apic_id) = 0;
1882#endif	/* APIC_IO */
1883
1884	/* fill out bus entries */
1885	switch (type) {
1886	case 1:
1887	case 2:
1888	case 3:
1889	case 4:
1890	case 5:
1891	case 6:
1892	case 7:
1893		bus_data[0].bus_id = default_data[type - 1][1];
1894		bus_data[0].bus_type = default_data[type - 1][2];
1895		bus_data[1].bus_id = default_data[type - 1][3];
1896		bus_data[1].bus_type = default_data[type - 1][4];
1897		break;
1898
1899	/* case 4: case 7:		   MCA NOT supported */
1900	default:		/* illegal/reserved */
1901		panic("BAD default MP config: %d", type);
1902		/* NOTREACHED */
1903	}
1904
1905#if defined(APIC_IO)
1906	/* general cases from MP v1.4, table 5-2 */
1907	for (pin = 0; pin < 16; ++pin) {
1908		io_apic_ints[pin].int_type = 0;
1909		io_apic_ints[pin].int_flags = 0x05;	/* edge/active-hi */
1910		io_apic_ints[pin].src_bus_id = 0;
1911		io_apic_ints[pin].src_bus_irq = pin;	/* IRQ2 caught below */
1912		io_apic_ints[pin].dst_apic_id = io_apic_id;
1913		io_apic_ints[pin].dst_apic_int = pin;	/* 1-to-1 */
1914	}
1915
1916	/* special cases from MP v1.4, table 5-2 */
1917	if (type == 2) {
1918		io_apic_ints[2].int_type = 0xff;	/* N/C */
1919		io_apic_ints[13].int_type = 0xff;	/* N/C */
1920#if !defined(APIC_MIXED_MODE)
1921		/** FIXME: ??? */
1922		panic("sorry, can't support type 2 default yet");
1923#endif	/* APIC_MIXED_MODE */
1924	}
1925	else
1926		io_apic_ints[2].src_bus_irq = 0;	/* ISA IRQ0 is on APIC INT 2 */
1927
1928	if (type == 7)
1929		io_apic_ints[0].int_type = 0xff;	/* N/C */
1930	else
1931		io_apic_ints[0].int_type = 3;	/* vectored 8259 */
1932#endif	/* APIC_IO */
1933}
1934
1935
1936/*
1937 * start each AP in our list
1938 */
1939static int
1940start_all_aps(u_int boot_addr)
1941{
1942	int     x, i, pg;
1943	u_char  mpbiosreason;
1944	u_long  mpbioswarmvec;
1945	struct globaldata *gd;
1946	char *stack;
1947	uintptr_t kptbase;
1948
1949	POSTCODE(START_ALL_APS_POST);
1950
1951	/* initialize BSP's local APIC */
1952	apic_initialize();
1953	bsp_apic_ready = 1;
1954
1955	/* install the AP 1st level boot code */
1956	install_ap_tramp(boot_addr);
1957
1958
1959	/* save the current value of the warm-start vector */
1960	mpbioswarmvec = *((u_long *) WARMBOOT_OFF);
1961#ifndef PC98
1962	outb(CMOS_REG, BIOS_RESET);
1963	mpbiosreason = inb(CMOS_DATA);
1964#endif
1965
1966	/* record BSP in CPU map */
1967	all_cpus = 1;
1968
1969	/* set up temporary P==V mapping for AP boot */
1970	/* XXX this is a hack, we should boot the AP on its own stack/PTD */
1971	kptbase = (uintptr_t)(void *)KPTphys;
1972	for (x = 0; x < NKPT; x++)
1973		PTD[x] = (pd_entry_t)(PG_V | PG_RW |
1974		    ((kptbase + x * PAGE_SIZE) & PG_FRAME));
1975	invltlb();
1976
1977	/* start each AP */
1978	for (x = 1; x <= mp_naps; ++x) {
1979
1980		/* This is a bit verbose, it will go away soon.  */
1981
1982		/* first page of AP's private space */
1983		pg = x * i386_btop(sizeof(struct privatespace));
1984
1985		/* allocate a new private data page */
1986		gd = (struct globaldata *)kmem_alloc(kernel_map, PAGE_SIZE);
1987
1988		/* wire it into the private page table page */
1989		SMPpt[pg] = (pt_entry_t)(PG_V | PG_RW | vtophys(gd));
1990
1991		/* allocate and set up an idle stack data page */
1992		stack = (char *)kmem_alloc(kernel_map, UPAGES*PAGE_SIZE);
1993		for (i = 0; i < UPAGES; i++)
1994			SMPpt[pg + 1 + i] = (pt_entry_t)
1995			    (PG_V | PG_RW | vtophys(PAGE_SIZE * i + stack));
1996
1997		/* prime data page for it to use */
1998		SLIST_INSERT_HEAD(&cpuhead, gd, gd_allcpu);
1999		gd->gd_cpuid = x;
2000
2001		/* setup a vector to our boot code */
2002		*((volatile u_short *) WARMBOOT_OFF) = WARMBOOT_TARGET;
2003		*((volatile u_short *) WARMBOOT_SEG) = (boot_addr >> 4);
2004#ifndef PC98
2005		outb(CMOS_REG, BIOS_RESET);
2006		outb(CMOS_DATA, BIOS_WARM);	/* 'warm-start' */
2007#endif
2008
2009		bootSTK = &SMP_prvspace[x].idlestack[UPAGES*PAGE_SIZE];
2010		bootAP = x;
2011
2012		/* attempt to start the Application Processor */
2013		CHECK_INIT(99);	/* setup checkpoints */
2014		if (!start_ap(x, boot_addr)) {
2015			printf("AP #%d (PHY# %d) failed!\n", x, CPU_TO_ID(x));
2016			CHECK_PRINT("trace");	/* show checkpoints */
2017			/* better panic as the AP may be running loose */
2018			printf("panic y/n? [y] ");
2019			if (cngetc() != 'n')
2020				panic("bye-bye");
2021		}
2022		CHECK_PRINT("trace");		/* show checkpoints */
2023
2024		/* record its version info */
2025		cpu_apic_versions[x] = cpu_apic_versions[0];
2026
2027		all_cpus |= (1 << x);		/* record AP in CPU map */
2028	}
2029
2030	/* build our map of 'other' CPUs */
2031	PCPU_SET(other_cpus, all_cpus & ~(1 << PCPU_GET(cpuid)));
2032
2033	/* fill in our (BSP) APIC version */
2034	cpu_apic_versions[0] = lapic.version;
2035
2036	/* restore the warmstart vector */
2037	*(u_long *) WARMBOOT_OFF = mpbioswarmvec;
2038#ifndef PC98
2039	outb(CMOS_REG, BIOS_RESET);
2040	outb(CMOS_DATA, mpbiosreason);
2041#endif
2042
2043	/*
2044	 * Set up the idle context for the BSP.  Similar to above except
2045	 * that some was done by locore, some by pmap.c and some is implicit
2046	 * because the BSP is cpu#0 and the page is initially zero, and also
2047	 * because we can refer to variables by name on the BSP.
2048	 */
2049
2050	/* Allocate and setup BSP idle stack */
2051	stack = (char *)kmem_alloc(kernel_map, UPAGES * PAGE_SIZE);
2052	for (i = 0; i < UPAGES; i++)
2053		SMPpt[1 + i] = (pt_entry_t)
2054		    (PG_V | PG_RW | vtophys(PAGE_SIZE * i + stack));
2055
2056	for (x = 0; x < NKPT; x++)
2057		PTD[x] = 0;
2058	pmap_set_opt();
2059
2060	/* number of APs actually started */
2061	return mp_ncpus - 1;
2062}
2063
2064
2065/*
2066 * load the 1st level AP boot code into base memory.
2067 */
2068
2069/* targets for relocation */
2070extern void bigJump(void);
2071extern void bootCodeSeg(void);
2072extern void bootDataSeg(void);
2073extern void MPentry(void);
2074extern u_int MP_GDT;
2075extern u_int mp_gdtbase;
2076
2077static void
2078install_ap_tramp(u_int boot_addr)
2079{
2080	int     x;
2081	int     size = *(int *) ((u_long) & bootMP_size);
2082	u_char *src = (u_char *) ((u_long) bootMP);
2083	u_char *dst = (u_char *) boot_addr + KERNBASE;
2084	u_int   boot_base = (u_int) bootMP;
2085	u_int8_t *dst8;
2086	u_int16_t *dst16;
2087	u_int32_t *dst32;
2088
2089	POSTCODE(INSTALL_AP_TRAMP_POST);
2090
2091	for (x = 0; x < size; ++x)
2092		*dst++ = *src++;
2093
2094	/*
2095	 * Modify addresses in the code we just moved to base memory.
2096	 * Unfortunately we need fairly detailed knowledge of mpboot.s for
2097	 * this to work; changes to mpboot.s may require changes here.
2098	 */
2099
2100	/* boot code is located in KERNEL space */
2101	dst = (u_char *) boot_addr + KERNBASE;
2102
2103	/* modify the lgdt arg */
2104	dst32 = (u_int32_t *) (dst + ((u_int) & mp_gdtbase - boot_base));
2105	*dst32 = boot_addr + ((u_int) & MP_GDT - boot_base);
2106
2107	/* modify the ljmp target for MPentry() */
2108	dst32 = (u_int32_t *) (dst + ((u_int) bigJump - boot_base) + 1);
2109	*dst32 = ((u_int) MPentry - KERNBASE);
2110
2111	/* modify the target for boot code segment */
2112	dst16 = (u_int16_t *) (dst + ((u_int) bootCodeSeg - boot_base));
2113	dst8 = (u_int8_t *) (dst16 + 1);
2114	*dst16 = (u_int) boot_addr & 0xffff;
2115	*dst8 = ((u_int) boot_addr >> 16) & 0xff;
2116
2117	/* modify the target for boot data segment */
2118	dst16 = (u_int16_t *) (dst + ((u_int) bootDataSeg - boot_base));
2119	dst8 = (u_int8_t *) (dst16 + 1);
2120	*dst16 = (u_int) boot_addr & 0xffff;
2121	*dst8 = ((u_int) boot_addr >> 16) & 0xff;
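	/*
	 * The 16-bit and 8-bit stores above write boot_addr as a 24-bit
	 * quantity (bits 0-15 and 16-23); in mpboot.s these locations appear
	 * to be the base-address fields of the trampoline's temporary GDT
	 * code and data descriptors.
	 */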
2122}
2123
2124
2125/*
2126 * This function starts the AP (application processor) identified by the
2127 * logical CPU number 'logical_cpu'; its physical APIC ID is looked up
2128 * via CPU_TO_ID().  It does quite a "song and dance" to accomplish this,
2129 * which is necessary because of the nuances of the different hardware we
2130 * might encounter.  It ain't pretty, but it seems to work.
2131 */
2132static int
2133start_ap(int logical_cpu, u_int boot_addr)
2134{
2135	int     physical_cpu;
2136	int     vector;
2137	int     cpus;
2138	u_long  icr_lo, icr_hi;
2139
2140	POSTCODE(START_AP_POST);
2141
2142	/* get the PHYSICAL APIC ID# */
2143	physical_cpu = CPU_TO_ID(logical_cpu);
2144
2145	/* calculate the vector */
2146	vector = (boot_addr >> 12) & 0xff;
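	/*
	 * The STARTUP IPI's vector field is a page number: the AP begins
	 * executing in real mode at physical address vector << 12, so the
	 * trampoline must sit on a page boundary below 1MB.
	 */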
2147
2148	/* used as a watchpoint to signal AP startup */
2149	cpus = mp_ncpus;
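	/*
	 * The AP bumps mp_ncpus early in its startup path; seeing mp_ncpus
	 * grow past the value sampled here is the sign that the AP came up.
	 */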
2150
2151	/*
2152	 * First we do an INIT/RESET IPI.  This INIT IPI might be run,
2153	 * resetting and running the target CPU; OR it might be latched (P5
2154	 * bug), leaving the CPU waiting for a STARTUP IPI; OR it might
2155	 * simply be ignored.
2156	 */
2157
2158	/* setup the address for the target AP */
2159	icr_hi = lapic.icr_hi & ~APIC_ID_MASK;
2160	icr_hi |= (physical_cpu << 24);
2161	lapic.icr_hi = icr_hi;
2162
2163	/* do an INIT IPI: assert RESET */
2164	icr_lo = lapic.icr_lo & 0xfff00000;
2165	lapic.icr_lo = icr_lo | 0x0000c500;
2166
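	/* 0x0000c500 selects INIT delivery mode, level-triggered, level asserted */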
2167	/* wait for pending status end */
2168	while (lapic.icr_lo & APIC_DELSTAT_MASK)
2169		 /* spin */ ;
2170
2171	/* do an INIT IPI: deassert RESET */
2172	lapic.icr_lo = icr_lo | 0x00008500;
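	/* 0x00008500 selects INIT delivery mode, level-triggered, level deasserted */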
2173
2174	/* wait for pending status end */
2175	u_sleep(10000);		/* wait ~10 ms */
2176	while (lapic.icr_lo & APIC_DELSTAT_MASK)
2177		 /* spin */ ;
2178
2179	/*
2180	 * Next we do a STARTUP IPI.  The previous INIT IPI might still be
2181	 * latched (P5 bug); this 1st STARTUP would then terminate
2182	 * immediately and the previously started INIT IPI would continue.
2183	 * OR the previous INIT IPI has already run, and this STARTUP IPI
2184	 * will run.  OR the previous INIT IPI was ignored, and this STARTUP
2185	 * IPI will run.
2186	 */
2187
2188	/* do a STARTUP IPI */
2189	lapic.icr_lo = icr_lo | 0x00000600 | vector;
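	/* 0x00000600 selects STARTUP delivery mode; the low byte is the vector */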
2190	while (lapic.icr_lo & APIC_DELSTAT_MASK)
2191		 /* spin */ ;
2192	u_sleep(200);		/* wait ~200 us */
2193
2194	/*
2195	 * Finally we do a 2nd STARTUP IPI.  This 2nd STARTUP IPI should run
2196	 * IF the previous STARTUP IPI was cancelled by a latched INIT IPI;
2197	 * otherwise it will be ignored, as only ONE STARTUP IPI is
2198	 * recognized after a hardware RESET or INIT IPI.
2199	 */
2200
2201	lapic.icr_lo = icr_lo | 0x00000600 | vector;
2202	while (lapic.icr_lo & APIC_DELSTAT_MASK)
2203		 /* spin */ ;
2204	u_sleep(200);		/* wait ~200 us */
2205
2206	/* wait for it to start */
2207	set_apic_timer(5000000);/* == 5 seconds */
2208	while (read_apic_timer())
2209		if (mp_ncpus > cpus)
2210			return 1;	/* return SUCCESS */
2211
2212	return 0;		/* return FAILURE */
2213}
2214
2215/*
2216 * Flush the TLB on all other CPUs.
2217 *
2218 * XXX: Needs to handshake and wait for completion before proceeding.
2219 */
2220void
2221smp_invltlb(void)
2222{
2223#if defined(APIC_IO)
2224	if (smp_started && invltlb_ok)
2225		smp_ipi_all_but_self(IPI_INVLTLB);
2226#endif  /* APIC_IO */
2227}
2228
2229void
2230invlpg(u_int addr)
2231{
2232	__asm   __volatile("invlpg (%0)"::"r"(addr):"memory");
2233
2234	/* send a message to the other CPUs */
2235	smp_invltlb();
2236}
2237
2238void
2239invltlb(void)
2240{
2241	u_long  temp;
2242
2243	/*
2244	 * This should be implemented as load_cr3(rcr3()) when load_cr3() is
2245	 * inlined.
2246	 */
2247	__asm __volatile("movl %%cr3, %0; movl %0, %%cr3":"=r"(temp) :: "memory");
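	/* Reloading %cr3 flushes all non-global TLB entries on this CPU. */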
2248
2249	/* send a message to the other CPUs */
2250	smp_invltlb();
2251}
2252
2253
2254/*
2255 * This is called once the rest of the system is up and running and we're
2256 * ready to let the APs out of the pen.
2257 */
2258void
2259ap_init(void)
2260{
2261	u_int	apic_id;
2262
2263	/* spin until the BSP is ready to let the APs proceed */
2264	while (!aps_ready)
2265		/* spin */ ;
2266
2267	/*
2268	 * Set curproc to our per-cpu idleproc so that mutexes have
2269	 * something unique to lock with.
2270	 */
2271	PCPU_SET(curproc, PCPU_GET(idleproc));
2272	PCPU_SET(spinlocks, NULL);
2273
2274	/* lock against other APs that are waking up */
2275	mtx_lock_spin(&ap_boot_mtx);
2276
2277	/* BSP may have changed PTD while we're waiting for the lock */
2278	cpu_invltlb();
2279
2280	smp_cpus++;
2281
2282#if defined(I586_CPU) && !defined(NO_F00F_HACK)
2283	lidt(&r_idt);
2284#endif
2285
2286	/* Build our map of 'other' CPUs. */
2287	PCPU_SET(other_cpus, all_cpus & ~(1 << PCPU_GET(cpuid)));
2288
2289	printf("SMP: AP CPU #%d Launched!\n", PCPU_GET(cpuid));
2290
2291	/* set up CPU registers and state */
2292	cpu_setregs();
2293
2294	/* set up FPU state on the AP */
2295	npxinit(__INITIAL_NPXCW__);
2296
2297	/* A quick check from sanity claus */
2298	apic_id = (apic_id_to_logical[(lapic.id & 0x0f000000) >> 24]);
2299	if (PCPU_GET(cpuid) != apic_id) {
2300		printf("SMP: cpuid = %d\n", PCPU_GET(cpuid));
2301		printf("SMP: apic_id = %d\n", apic_id);
2302		printf("PTD[MPPTDI] = %p\n", (void *)PTD[MPPTDI]);
2303		panic("cpuid mismatch! boom!!");
2304	}
2305
2306	/* Init the local APIC for IRQs */
2307	apic_initialize();
2308
2309	/* Set memory range attributes for this CPU to match the BSP */
2310	mem_range_AP_init();
2311
2312	/*
2313	 * Activate smp_invltlb, although strictly speaking, this isn't
2314	 * quite correct yet.  We should have a bitfield for cpus willing
2315	 * to accept TLB flush IPIs or something and sync them.
2316	 */
2317	if (smp_cpus == mp_ncpus) {
2318		invltlb_ok = 1;
2319		smp_started = 1; /* enable IPIs, TLB shootdown, freezes, etc. */
2320		smp_active = 1;	 /* historic */
2321	}
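	/*
	 * The last CPU to arrive here bumps smp_cpus up to mp_ncpus and sets
	 * smp_started, which releases the other APs spinning below and
	 * enables TLB shootdown IPIs.
	 */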
2322
2323	/* let the other APs wake up now */
2324	mtx_unlock_spin(&ap_boot_mtx);
2325
2326	/* wait until all the APs are up */
2327	while (smp_started == 0)
2328		; /* nothing */
2329
2330	microuptime(PCPU_PTR(switchtime));
2331	PCPU_SET(switchticks, ticks);
2332
2333	/* ok, now grab sched_lock and enter the scheduler */
2334	enable_intr();
2335	mtx_lock_spin(&sched_lock);
2336	cpu_throw();	/* doesn't return */
2337
2338	panic("scheduler returned us to ap_init");
2339}
2340
2341#ifdef BETTER_CLOCK
2342
2343#define CHECKSTATE_USER	0
2344#define CHECKSTATE_SYS	1
2345#define CHECKSTATE_INTR	2
2346
2347/* Do not staticize.  Used from apic_vector.s */
2348struct proc*	checkstate_curproc[MAXCPU];
2349int		checkstate_cpustate[MAXCPU];
2350u_long		checkstate_pc[MAXCPU];
2351
2352#define PC_TO_INDEX(pc, prof)				\
2353        ((int)(((u_quad_t)((pc) - (prof)->pr_off) *	\
2354            (u_quad_t)((prof)->pr_scale)) >> 16) & ~1)
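/*
 * PC_TO_INDEX() converts a sampled PC into an offset in the process's
 * profiling buffer: the PC's offset from pr_off is scaled by pr_scale (a
 * fixed-point fraction with 16 fraction bits) and the result is forced even
 * so it lands on a 16-bit profiling counter boundary.
 */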
2355
2356static void
2357addupc_intr_forwarded(struct proc *p, int id, int *astmap)
2358{
2359	int i;
2360	struct uprof *prof;
2361	u_long pc;
2362
2363	pc = checkstate_pc[id];
2364	prof = &p->p_stats->p_prof;
2365	if (pc >= prof->pr_off &&
2366	    (i = PC_TO_INDEX(pc, prof)) < prof->pr_size) {
2367		mtx_assert(&sched_lock, MA_OWNED);
2368		if ((p->p_sflag & PS_OWEUPC) == 0) {
2369			prof->pr_addr = pc;
2370			prof->pr_ticks = 1;
2371			p->p_sflag |= PS_OWEUPC;
2372		}
2373		*astmap |= (1 << id);
2374	}
2375}
2376
2377static void
2378forwarded_statclock(int id, int pscnt, int *astmap)
2379{
2380	struct pstats *pstats;
2381	long rss;
2382	struct rusage *ru;
2383	struct vmspace *vm;
2384	int cpustate;
2385	struct proc *p;
2386#ifdef GPROF
2387	register struct gmonparam *g;
2388	int i;
2389#endif
2390
2391	mtx_assert(&sched_lock, MA_OWNED);
2392	p = checkstate_curproc[id];
2393	cpustate = checkstate_cpustate[id];
2394
2395	/* XXX */
2396	if (p->p_ithd)
2397		cpustate = CHECKSTATE_INTR;
2398	else if (p == SMP_prvspace[id].globaldata.gd_idleproc)
2399		cpustate = CHECKSTATE_SYS;
2400
2401	switch (cpustate) {
2402	case CHECKSTATE_USER:
2403		if (p->p_sflag & PS_PROFIL)
2404			addupc_intr_forwarded(p, id, astmap);
2405		if (pscnt > 1)
2406			return;
2407		p->p_uticks++;
2408		if (p->p_nice > NZERO)
2409			cp_time[CP_NICE]++;
2410		else
2411			cp_time[CP_USER]++;
2412		break;
2413	case CHECKSTATE_SYS:
2414#ifdef GPROF
2415		/*
2416		 * Kernel statistics are just like addupc_intr, only easier.
2417		 */
2418		g = &_gmonparam;
2419		if (g->state == GMON_PROF_ON) {
2420			i = checkstate_pc[id] - g->lowpc;
2421			if (i < g->textsize) {
2422				i /= HISTFRACTION * sizeof(*g->kcount);
2423				g->kcount[i]++;
2424			}
2425		}
2426#endif
2427		if (pscnt > 1)
2428			return;
2429
2430		p->p_sticks++;
2431		if (p == SMP_prvspace[id].globaldata.gd_idleproc)
2432			cp_time[CP_IDLE]++;
2433		else
2434			cp_time[CP_SYS]++;
2435		break;
2436	case CHECKSTATE_INTR:
2437	default:
2438#ifdef GPROF
2439		/*
2440		 * Kernel statistics are just like addupc_intr, only easier.
2441		 */
2442		g = &_gmonparam;
2443		if (g->state == GMON_PROF_ON) {
2444			i = checkstate_pc[id] - g->lowpc;
2445			if (i < g->textsize) {
2446				i /= HISTFRACTION * sizeof(*g->kcount);
2447				g->kcount[i]++;
2448			}
2449		}
2450#endif
2451		if (pscnt > 1)
2452			return;
2453		KASSERT(p != NULL, ("NULL process in interrupt state"));
2454		p->p_iticks++;
2455		cp_time[CP_INTR]++;
2456	}
2457
2458	schedclock(p);
2459
2460	/* Update resource usage integrals and maximums. */
2461	if ((pstats = p->p_stats) != NULL &&
2462	    (ru = &pstats->p_ru) != NULL &&
2463	    (vm = p->p_vmspace) != NULL) {
2464		ru->ru_ixrss += pgtok(vm->vm_tsize);
2465		ru->ru_idrss += pgtok(vm->vm_dsize);
2466		ru->ru_isrss += pgtok(vm->vm_ssize);
2467		rss = pgtok(vmspace_resident_count(vm));
2468		if (ru->ru_maxrss < rss)
2469			ru->ru_maxrss = rss;
2470	}
2471}
2472
2473void
2474forward_statclock(int pscnt)
2475{
2476	int map;
2477	int id;
2478	int i;
2479
2480	/* Kludge.  We don't yet have separate locks for the interrupts
2481	 * and the kernel.  This means that we cannot let the other processors
2482	 * handle complex interrupts while inhibiting them from entering
2483	 * the kernel in a non-interrupt context.
2484	 *
2485	 * What we can do, without changing the locking mechanisms yet,
2486	 * is to let the other processors handle a very simple interrupt
2487	 * (which determines the processor states) and do the main
2488	 * work ourselves.
2489	 */
2490
2491	CTR1(KTR_SMP, "forward_statclock(%d)", pscnt);
2492
2493	if (!smp_started || !invltlb_ok || cold || panicstr)
2494		return;
2495
2496	/* Step 1: Probe state   (user, cpu, interrupt, spinlock, idle) */
2497
2498	map = PCPU_GET(other_cpus) & ~stopped_cpus ;
2499	checkstate_probed_cpus = 0;
2500	if (map != 0)
2501		smp_ipi_selected(map, IPI_CHECKSTATE);
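	/*
	 * The checkstate IPI handler in apic_vector.s records the
	 * interrupted CPU's current process, state, and PC in the
	 * checkstate_* arrays above and then sets that CPU's bit in
	 * checkstate_probed_cpus, which is what the loop below waits for.
	 */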
2502
2503	i = 0;
2504	while (checkstate_probed_cpus != map) {
2505		/* spin */
2506		i++;
2507		if (i == 100000) {
2508#ifdef BETTER_CLOCK_DIAGNOSTIC
2509			printf("forward_statclock: checkstate %x\n",
2510			       checkstate_probed_cpus);
2511#endif
2512			break;
2513		}
2514	}
2515
2516	/*
2517	 * Step 2: walk through the other processors' processes, update ticks and
2518	 * profiling info.
2519	 */
2520
2521	map = 0;
2522	for (id = 0; id < mp_ncpus; id++) {
2523		if (id == PCPU_GET(cpuid))
2524			continue;
2525		if (((1 << id) & checkstate_probed_cpus) == 0)
2526			continue;
2527		forwarded_statclock(id, pscnt, &map);
2528	}
2529	if (map != 0) {
2530		checkstate_need_ast |= map;
2531		smp_ipi_selected(map, IPI_AST);
2532		i = 0;
2533		while ((checkstate_need_ast & map) != 0) {
2534			/* spin */
2535			i++;
2536			if (i > 100000) {
2537#ifdef BETTER_CLOCK_DIAGNOSTIC
2538				printf("forward_statclock: dropped ast 0x%x\n",
2539				       checkstate_need_ast & map);
2540#endif
2541				break;
2542			}
2543		}
2544	}
2545}
2546
2547void
2548forward_hardclock(int pscnt)
2549{
2550	int map;
2551	int id;
2552	struct proc *p;
2553	struct pstats *pstats;
2554	int i;
2555
2556	/* Kludge.  We don't yet have separate locks for the interrupts
2557	 * and the kernel.  This means that we cannot let the other processors
2558	 * handle complex interrupts while inhibiting them from entering
2559	 * the kernel in a non-interrupt context.
2560	 *
2561	 * What we can do, without changing the locking mechanisms yet,
2562	 * is to let the other processors handle a very simple interrupt
2563	 * (which determines the processor states) and do the main
2564	 * work ourselves.
2565	 */
2566
2567	CTR1(KTR_SMP, "forward_hardclock(%d)", pscnt);
2568
2569	if (!smp_started || !invltlb_ok || cold || panicstr)
2570		return;
2571
2572	/* Step 1: Probe state   (user, cpu, interrupt, spinlock, idle) */
2573
2574	map = PCPU_GET(other_cpus) & ~stopped_cpus ;
2575	checkstate_probed_cpus = 0;
2576	if (map != 0)
2577		smp_ipi_selected(map, IPI_CHECKSTATE);
2578
2579	i = 0;
2580	while (checkstate_probed_cpus != map) {
2581		/* spin */
2582		i++;
2583		if (i == 100000) {
2584#ifdef BETTER_CLOCK_DIAGNOSTIC
2585			printf("forward_hardclock: checkstate %x\n",
2586			       checkstate_probed_cpus);
2587#endif
2588			break;
2589		}
2590	}
2591
2592	/*
2593	 * Step 2: walk through the other processors' processes, update virtual
2594	 * timer and profiling timer. If stathz == 0, also update ticks and
2595	 * profiling info.
2596	 */
2597
2598	map = 0;
2599	for (id = 0; id < mp_ncpus; id++) {
2600		if (id == PCPU_GET(cpuid))
2601			continue;
2602		if (((1 << id) & checkstate_probed_cpus) == 0)
2603			continue;
2604		p = checkstate_curproc[id];
2605		if (p) {
2606			pstats = p->p_stats;
2607			if (checkstate_cpustate[id] == CHECKSTATE_USER &&
2608			    timevalisset(&pstats->p_timer[ITIMER_VIRTUAL].it_value) &&
2609			    itimerdecr(&pstats->p_timer[ITIMER_VIRTUAL], tick) == 0) {
2610				p->p_sflag |= PS_ALRMPEND;
2611				map |= (1 << id);
2612			}
2613			if (timevalisset(&pstats->p_timer[ITIMER_PROF].it_value) &&
2614			    itimerdecr(&pstats->p_timer[ITIMER_PROF], tick) == 0) {
2615				p->p_sflag |= PS_PROFPEND;
2616				map |= (1 << id);
2617			}
2618		}
2619		if (stathz == 0) {
2620			forwarded_statclock( id, pscnt, &map);
2621		}
2622	}
2623	if (map != 0) {
2624		checkstate_need_ast |= map;
2625		smp_ipi_selected(map, IPI_AST);
2626		i = 0;
2627		while ((checkstate_need_ast & map) != 0) {
2628			/* spin */
2629			i++;
2630			if (i > 100000) {
2631#ifdef BETTER_CLOCK_DIAGNOSTIC
2632				printf("forward_hardclock: dropped ast 0x%x\n",
2633				       checkstate_need_ast & map);
2634#endif
2635				break;
2636			}
2637		}
2638	}
2639}
2640
2641#endif /* BETTER_CLOCK */
2642
2643void
2644forward_signal(struct proc *p)
2645{
2646	int map;
2647	int id;
2648	int i;
2649
2650	/* Kludge.  We don't yet have separate locks for the interrupts
2651	 * and the kernel.  This means that we cannot let the other processors
2652	 * handle complex interrupts while inhibiting them from entering
2653	 * the kernel in a non-interrupt context.
2654	 *
2655	 * What we can do, without changing the locking mechanisms yet,
2656	 * is to let the other processors handle a very simple interrupt
2657	 * (which determines the processor states) and do the main
2658	 * work ourselves.
2659	 */
2660
2661	CTR1(KTR_SMP, "forward_signal(%p)", p);
2662
2663	if (!smp_started || !invltlb_ok || cold || panicstr)
2664		return;
2665	if (!forward_signal_enabled)
2666		return;
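	/*
	 * Find the CPU the process is currently running on and poke it with
	 * an AST IPI.  The process may migrate while the IPI is in flight,
	 * so re-check p_oncpu afterwards and retry if it moved.
	 */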
2667	mtx_lock_spin(&sched_lock);
2668	while (1) {
2669		if (p->p_stat != SRUN) {
2670			mtx_unlock_spin(&sched_lock);
2671			return;
2672		}
2673		id = p->p_oncpu;
2674		mtx_unlock_spin(&sched_lock);
2675		if (id == 0xff)
2676			return;
2677		map = (1<<id);
2678		checkstate_need_ast |= map;
2679		smp_ipi_selected(map, IPI_AST);
2680		i = 0;
2681		while ((checkstate_need_ast & map) != 0) {
2682			/* spin */
2683			i++;
2684			if (i > 100000) {
2685#if 0
2686				printf("forward_signal: dropped ast 0x%x\n",
2687				       checkstate_need_ast & map);
2688#endif
2689				break;
2690			}
2691		}
2692		mtx_lock_spin(&sched_lock);
2693		if (id == p->p_oncpu) {
2694			mtx_unlock_spin(&sched_lock);
2695			return;
2696		}
2697	}
2698}
2699
2700void
2701forward_roundrobin(void)
2702{
2703	u_int map;
2704	int i;
2705
2706	CTR0(KTR_SMP, "forward_roundrobin()");
2707
2708	if (!smp_started || !invltlb_ok || cold || panicstr)
2709		return;
2710	if (!forward_roundrobin_enabled)
2711		return;
2712	resched_cpus |= PCPU_GET(other_cpus);
2713	map = PCPU_GET(other_cpus) & ~stopped_cpus ;
2714#if 1
2715	smp_ipi_selected(map, IPI_AST);
2716#else
2717	smp_ipi_all_but_self(IPI_AST);
2718#endif
2719	i = 0;
2720	while ((checkstate_need_ast & map) != 0) {
2721		/* spin */
2722		i++;
2723		if (i > 100000) {
2724#if 0
2725			printf("forward_roundrobin: dropped ast 0x%x\n",
2726			       checkstate_need_ast & map);
2727#endif
2728			break;
2729		}
2730	}
2731}
2732
2733/*
2734 * When called, the executing CPU sends an IPI to the CPUs in 'map'
2735 *  requesting that they halt execution.
2736 *
2737 * Usually (but not necessarily) called with 'other_cpus' as its arg.
2738 *
2739 *  - Signals all CPUs in map to stop.
2740 *  - Waits for each to stop.
2741 *
2742 * Returns:
2743 *  -1: error
2744 *   0: NA
2745 *   1: ok
2746 *
2747 * XXX FIXME: this is not MP-safe, needs a lock to prevent multiple CPUs
2748 *            from executing at the same time.
2749 */
2750int
2751stop_cpus(u_int map)
2752{
2753	int count = 0;
2754
2755	if (!smp_started)
2756		return 0;
2757
2758	/* send the Xcpustop IPI to all CPUs in map */
2759	smp_ipi_selected(map, IPI_STOP);
2760
2761	while (count++ < 100000 && (stopped_cpus & map) != map)
2762		/* spin */ ;
2763
2764#ifdef DIAGNOSTIC
2765	if ((stopped_cpus & map) != map)
2766		printf("Warning: CPUs 0x%x did not stop!\n",
2767		    (~(stopped_cpus & map)) & map);
2768#endif
2769
2770	return 1;
2771}
2772
2773
2774/*
2775 * Called by a CPU to restart stopped CPUs.
2776 *
2777 * Usually (but not necessarily) called with 'stopped_cpus' as its arg.
2778 *
2779 *  - Signals all CPUs in map to restart.
2780 *  - Waits for each to restart.
2781 *
2782 * Returns:
2783 *  -1: error
2784 *   0: NA
2785 *   1: ok
2786 */
2787int
2788restart_cpus(u_int map)
2789{
2790	int count = 0;
2791
2792	if (!smp_started)
2793		return 0;
2794
2795	started_cpus = map;		/* signal other cpus to restart */
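	/*
	 * Each stopped CPU is spinning in its IPI_STOP handler; once it sees
	 * its bit set in started_cpus it clears its stopped_cpus bit and
	 * resumes, which is what the loop below waits for.
	 */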
2796
2797	/* wait for each to clear its bit */
2798	while (count++ < 100000 && (stopped_cpus & map) != 0)
2799		/* spin */ ;
2800
2801#ifdef DIAGNOSTIC
2802	if ((stopped_cpus & map) != 0)
2803		printf("Warning: CPUs 0x%x did not restart!\n",
2804		    (~(stopped_cpus & map)) & map);
2805#endif
2806
2807	return 1;
2808}
2809
2810
2811#ifdef APIC_INTR_REORDER
2812/*
2813 *	Maintain mapping from softintr vector to isr bit in local apic.
2814 */
2815void
2816set_lapic_isrloc(int intr, int vector)
2817{
2818	if (intr < 0 || intr > 32)
2819		panic("set_lapic_isrloc: bad intr argument: %d", intr);
2820	if (vector < ICU_OFFSET || vector > 255)
2821		panic("set_lapic_isrloc: bad vector argument: %d", vector);
2822	apic_isrbit_location[intr].location = &lapic.isr0 + ((vector>>5)<<2);
2823	apic_isrbit_location[intr].bit = (1<<(vector & 31));
2824}
2825#endif
2826
2827/*
2828 * All-CPU rendezvous.  CPUs are signalled, all execute the setup function
2829 * (if specified), rendezvous, execute the action function (if specified),
2830 * rendezvous again, execute the teardown function (if specified), and then
2831 * resume.
2832 *
2833 * Note that the supplied external functions _must_ be reentrant and aware
2834 * that they are running in parallel and in an unknown lock context.
2835 */
2836static void (*smp_rv_setup_func)(void *arg);
2837static void (*smp_rv_action_func)(void *arg);
2838static void (*smp_rv_teardown_func)(void *arg);
2839static void *smp_rv_func_arg;
2840static volatile int smp_rv_waiters[2];
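/*
 * smp_rv_waiters[0] counts CPUs through the entry rendezvous (everyone has
 * finished the setup function) and smp_rv_waiters[1] counts them through the
 * exit rendezvous (everyone has finished the action function).
 */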
2841
2842void
2843smp_rendezvous_action(void)
2844{
2845	/* setup function */
2846	if (smp_rv_setup_func != NULL)
2847		smp_rv_setup_func(smp_rv_func_arg);
2848	/* spin on entry rendezvous */
2849	atomic_add_int(&smp_rv_waiters[0], 1);
2850	while (smp_rv_waiters[0] < mp_ncpus)
2851		;
2852	/* action function */
2853	if (smp_rv_action_func != NULL)
2854		smp_rv_action_func(smp_rv_func_arg);
2855	/* spin on exit rendezvous */
2856	atomic_add_int(&smp_rv_waiters[1], 1);
2857	while (smp_rv_waiters[1] < mp_ncpus)
2858		;
2859	/* teardown function */
2860	if (smp_rv_teardown_func != NULL)
2861		smp_rv_teardown_func(smp_rv_func_arg);
2862}
2863
2864void
2865smp_rendezvous(void (* setup_func)(void *),
2866	       void (* action_func)(void *),
2867	       void (* teardown_func)(void *),
2868	       void *arg)
2869{
2870
2871	/* obtain rendezvous lock */
2872	mtx_lock_spin(&smp_rv_mtx);
2873
2874	/* set static function pointers */
2875	smp_rv_setup_func = setup_func;
2876	smp_rv_action_func = action_func;
2877	smp_rv_teardown_func = teardown_func;
2878	smp_rv_func_arg = arg;
2879	smp_rv_waiters[0] = 0;
2880	smp_rv_waiters[1] = 0;
2881
2882	/*
2883	 * signal the other processors, which will enter the IPI handler with interrupts off
2884	 */
2885	smp_ipi_all_but_self(IPI_RENDEZVOUS);
2886
2887	/* call executor function */
2888	smp_rendezvous_action();
2889
2890	/* release lock */
2891	mtx_unlock_spin(&smp_rv_mtx);
2892}
2893
2894/*
2895 * send an IPI to a set of CPUs.
2896 */
2897void
2898smp_ipi_selected(u_int32_t cpus, u_int ipi)
2899{
2900
2901	CTR2(KTR_SMP, __func__ ": cpus: %x ipi: %x", cpus, ipi);
2902	selected_apic_ipi(cpus, ipi, APIC_DELMODE_FIXED);
2903}
2904
2905/*
2906 * send an IPI containing 'ipi' to all CPUs, including myself
2907 */
2908void
2909smp_ipi_all(u_int ipi)
2910{
2911
2912	CTR1(KTR_SMP, __func__ ": ipi: %x", ipi);
2913	apic_ipi(APIC_DEST_ALLISELF, ipi, APIC_DELMODE_FIXED);
2914}
2915
2916/*
2917 * send an IPI to all CPUs EXCEPT myself
2918 */
2919void
2920smp_ipi_all_but_self(u_int ipi)
2921{
2922
2923	CTR1(KTR_SMP, __func__ ": ipi: %x", ipi);
2924	apic_ipi(APIC_DEST_ALLESELF, ipi, APIC_DELMODE_FIXED);
2925}
2926
2927/*
2928 * send an IPI to myself
2929 */
2930void
2931smp_ipi_self(u_int ipi)
2932{
2933
2934	CTR1(KTR_SMP, __func__ ": ipi: %x", ipi);
2935	apic_ipi(APIC_DEST_SELF, ipi, APIC_DELMODE_FIXED);
2936}
2937
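/*
 * release_aps() is run via SYSINIT at SI_SUB_SMP once the rest of the system
 * is up; setting aps_ready releases the APs spinning at the top of ap_init().
 */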
2938void
2939release_aps(void *dummy __unused)
2940{
2941	atomic_store_rel_int(&aps_ready, 1);
2942}
2943
2944SYSINIT(start_aps, SI_SUB_SMP, SI_ORDER_FIRST, release_aps, NULL);
2945