mp_machdep.c revision 71576
1/*
2 * Copyright (c) 1996, by Steve Passe
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 *    notice, this list of conditions and the following disclaimer.
10 * 2. The name of the developer may NOT be used to endorse or promote products
11 *    derived from this software without specific prior written permission.
12 *
13 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
14 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
16 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
17 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
18 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
19 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
20 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
21 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
22 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
23 * SUCH DAMAGE.
24 *
25 * $FreeBSD: head/sys/amd64/amd64/mp_machdep.c 71576 2001-01-24 12:35:55Z jasone $
26 */
27
28#include "opt_cpu.h"
29#include "opt_user_ldt.h"
30
31#ifdef SMP
32#include <machine/smptests.h>
33#else
34#error "mp_machdep.c requires the SMP kernel option"
35#endif
36
37#include <sys/param.h>
38#include <sys/bus.h>
39#include <sys/systm.h>
40#include <sys/kernel.h>
41#include <sys/proc.h>
42#include <sys/sysctl.h>
43#include <sys/malloc.h>
44#include <sys/memrange.h>
45#include <sys/mutex.h>
46#ifdef BETTER_CLOCK
47#include <sys/dkstat.h>
48#endif
49#include <sys/cons.h>	/* cngetc() */
50
51#include <vm/vm.h>
52#include <vm/vm_param.h>
53#include <vm/pmap.h>
54#include <vm/vm_kern.h>
55#include <vm/vm_extern.h>
56#ifdef BETTER_CLOCK
57#include <sys/lock.h>
58#include <vm/vm_map.h>
59#include <sys/user.h>
60#ifdef GPROF
61#include <sys/gmon.h>
62#endif
63#endif
64
65#include <machine/smp.h>
66#include <machine/apic.h>
67#include <machine/atomic.h>
68#include <machine/cpufunc.h>
69#include <machine/mpapic.h>
70#include <machine/psl.h>
71#include <machine/segments.h>
72#include <machine/smptests.h>	/** TEST_DEFAULT_CONFIG, TEST_TEST1 */
73#include <machine/tss.h>
74#include <machine/specialreg.h>
75#include <machine/globaldata.h>
76
77#if defined(APIC_IO)
78#include <machine/md_var.h>		/* setidt() */
79#include <i386/isa/icu.h>		/* IPIs */
80#include <i386/isa/intr_machdep.h>	/* IPIs */
81#endif	/* APIC_IO */
82
83#if defined(TEST_DEFAULT_CONFIG)
84#define MPFPS_MPFB1	TEST_DEFAULT_CONFIG
85#else
86#define MPFPS_MPFB1	mpfps->mpfb1
87#endif  /* TEST_DEFAULT_CONFIG */
88
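/*
 * BIOS data area warm-boot vector: the word at physical 0x467 holds the
 * real-mode offset and the word at 0x469 the segment that the BIOS jumps
 * to after a warm reset (CMOS shutdown status 0x0a), used below to point
 * the APs at the trampoline code.
 */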
89#define WARMBOOT_TARGET		0
90#define WARMBOOT_OFF		(KERNBASE + 0x0467)
91#define WARMBOOT_SEG		(KERNBASE + 0x0469)
92
93#ifdef PC98
94#define BIOS_BASE		(0xe8000)
95#define BIOS_SIZE		(0x18000)
96#else
97#define BIOS_BASE		(0xf0000)
98#define BIOS_SIZE		(0x10000)
99#endif
100#define BIOS_COUNT		(BIOS_SIZE/4)
101
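/*
 * CMOS register 0x0f is the shutdown status byte; writing 0x0a there makes
 * the BIOS take the warm-boot vector above instead of doing a full POST.
 */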
102#define CMOS_REG		(0x70)
103#define CMOS_DATA		(0x71)
104#define BIOS_RESET		(0x0f)
105#define BIOS_WARM		(0x0a)
106
107#define PROCENTRY_FLAG_EN	0x01
108#define PROCENTRY_FLAG_BP	0x02
109#define IOAPICENTRY_FLAG_EN	0x01
110
111
112/* MP Floating Pointer Structure */
113typedef struct MPFPS {
114	char    signature[4];
115	void   *pap;
116	u_char  length;
117	u_char  spec_rev;
118	u_char  checksum;
119	u_char  mpfb1;
120	u_char  mpfb2;
121	u_char  mpfb3;
122	u_char  mpfb4;
123	u_char  mpfb5;
124}      *mpfps_t;
125
126/* MP Configuration Table Header */
127typedef struct MPCTH {
128	char    signature[4];
129	u_short base_table_length;
130	u_char  spec_rev;
131	u_char  checksum;
132	u_char  oem_id[8];
133	u_char  product_id[12];
134	void   *oem_table_pointer;
135	u_short oem_table_size;
136	u_short entry_count;
137	void   *apic_address;
138	u_short extended_table_length;
139	u_char  extended_table_checksum;
140	u_char  reserved;
141}      *mpcth_t;
142
143
144typedef struct PROCENTRY {
145	u_char  type;
146	u_char  apic_id;
147	u_char  apic_version;
148	u_char  cpu_flags;
149	u_long  cpu_signature;
150	u_long  feature_flags;
151	u_long  reserved1;
152	u_long  reserved2;
153}      *proc_entry_ptr;
154
155typedef struct BUSENTRY {
156	u_char  type;
157	u_char  bus_id;
158	char    bus_type[6];
159}      *bus_entry_ptr;
160
161typedef struct IOAPICENTRY {
162	u_char  type;
163	u_char  apic_id;
164	u_char  apic_version;
165	u_char  apic_flags;
166	void   *apic_address;
167}      *io_apic_entry_ptr;
168
169typedef struct INTENTRY {
170	u_char  type;
171	u_char  int_type;
172	u_short int_flags;
173	u_char  src_bus_id;
174	u_char  src_bus_irq;
175	u_char  dst_apic_id;
176	u_char  dst_apic_int;
177}      *int_entry_ptr;
178
179/* descriptions of MP basetable entries */
180typedef struct BASETABLE_ENTRY {
181	u_char  type;
182	u_char  length;
183	char    name[16];
184}       basetable_entry;
185
186/*
187 * this code MUST be enabled here and in mpboot.s.
188 * it follows the very early stages of AP boot by placing values in CMOS ram.
189 * it is NORMALLY never needed, hence the primitive method of enabling it.
190 *
191#define CHECK_POINTS
192 */
193
194#if defined(CHECK_POINTS) && !defined(PC98)
195#define CHECK_READ(A)	 (outb(CMOS_REG, (A)), inb(CMOS_DATA))
196#define CHECK_WRITE(A,D) (outb(CMOS_REG, (A)), outb(CMOS_DATA, (D)))
197
198#define CHECK_INIT(D);				\
199	CHECK_WRITE(0x34, (D));			\
200	CHECK_WRITE(0x35, (D));			\
201	CHECK_WRITE(0x36, (D));			\
202	CHECK_WRITE(0x37, (D));			\
203	CHECK_WRITE(0x38, (D));			\
204	CHECK_WRITE(0x39, (D));
205
206#define CHECK_PRINT(S);				\
207	printf("%s: %d, %d, %d, %d, %d, %d\n",	\
208	   (S),					\
209	   CHECK_READ(0x34),			\
210	   CHECK_READ(0x35),			\
211	   CHECK_READ(0x36),			\
212	   CHECK_READ(0x37),			\
213	   CHECK_READ(0x38),			\
214	   CHECK_READ(0x39));
215
216#else				/* CHECK_POINTS */
217
218#define CHECK_INIT(D)
219#define CHECK_PRINT(S)
220
221#endif				/* CHECK_POINTS */
222
223/*
224 * Values to send to the POST hardware.
225 */
226#define MP_BOOTADDRESS_POST	0x10
227#define MP_PROBE_POST		0x11
228#define MPTABLE_PASS1_POST	0x12
229
230#define MP_START_POST		0x13
231#define MP_ENABLE_POST		0x14
232#define MPTABLE_PASS2_POST	0x15
233
234#define START_ALL_APS_POST	0x16
235#define INSTALL_AP_TRAMP_POST	0x17
236#define START_AP_POST		0x18
237
238#define MP_ANNOUNCE_POST	0x19
239
240/* used to hold the APs until we are ready to release them */
241struct mtx			ap_boot_mtx;
242
243/** XXX FIXME: where does this really belong, isa.h/isa.c perhaps? */
244int	current_postcode;
245
246/** XXX FIXME: what system files declare these??? */
247extern struct region_descriptor r_gdt, r_idt;
248
249int	bsp_apic_ready = 0;	/* flags usability of BSP apic */
250int	mp_ncpus;		/* # of CPUs, including BSP */
251int	mp_naps;		/* # of application processors */
252int	mp_nbusses;		/* # of busses */
253int	mp_napics;		/* # of IO APICs */
254int	boot_cpu_id;		/* designated BSP */
255vm_offset_t cpu_apic_address;
256vm_offset_t io_apic_address[NAPICID];	/* NAPICID is more than enough */
257extern	int nkpt;
258
259u_int32_t cpu_apic_versions[MAXCPU];
260u_int32_t *io_apic_versions;
261
262#ifdef APIC_INTR_REORDER
263struct {
264	volatile int *location;
265	int bit;
266} apic_isrbit_location[32];
267#endif
268
269struct apic_intmapinfo	int_to_apicintpin[APIC_INTMAPSIZE];
270
271/*
272 * APIC ID logical/physical mapping structures.
273 * We oversize these to simplify boot-time config.
274 */
275int     cpu_num_to_apic_id[NAPICID];
276int     io_num_to_apic_id[NAPICID];
277int     apic_id_to_logical[NAPICID];
278
279
280/* Bitmap of all available CPUs */
281u_int	all_cpus;
282
283/* AP uses this during bootstrap.  Do not staticize.  */
284char *bootSTK;
285static int bootAP;
286
287/* Hotwire a 0->4MB V==P mapping */
288extern pt_entry_t *KPTphys;
289
290/* SMP page table page */
291extern pt_entry_t *SMPpt;
292
293struct pcb stoppcbs[MAXCPU];
294
295int smp_started;		/* has the system started? */
296int smp_active = 0;		/* are the APs allowed to run? */
297SYSCTL_INT(_machdep, OID_AUTO, smp_active, CTLFLAG_RW, &smp_active, 0, "");
298
299/* XXX maybe should be hw.ncpu */
300static int smp_cpus = 1;	/* how many CPUs are running */
301SYSCTL_INT(_machdep, OID_AUTO, smp_cpus, CTLFLAG_RD, &smp_cpus, 0, "");
302
303int invltlb_ok = 0;	/* throttle smp_invltlb() till safe */
304SYSCTL_INT(_machdep, OID_AUTO, invltlb_ok, CTLFLAG_RW, &invltlb_ok, 0, "");
305
306/* Enable forwarding of a signal to a process running on a different CPU */
307static int forward_signal_enabled = 1;
308SYSCTL_INT(_machdep, OID_AUTO, forward_signal_enabled, CTLFLAG_RW,
309	   &forward_signal_enabled, 0, "");
310
311/* Enable forwarding of roundrobin to all other cpus */
312static int forward_roundrobin_enabled = 1;
313SYSCTL_INT(_machdep, OID_AUTO, forward_roundrobin_enabled, CTLFLAG_RW,
314	   &forward_roundrobin_enabled, 0, "");
315
316
317/*
318 * Local data and functions.
319 */
320
321/* Set to 1 once we're ready to let the APs out of the pen. */
322static volatile int aps_ready = 0;
323
324static int	mp_capable;
325static u_int	boot_address;
326static u_int	base_memory;
327
328static int	picmode;		/* 0: virtual wire mode, 1: PIC mode */
329static mpfps_t	mpfps;
330static int	search_for_sig(u_int32_t target, int count);
331static void	mp_enable(u_int boot_addr);
332
333static void	mptable_pass1(void);
334static int	mptable_pass2(void);
335static void	default_mp_table(int type);
336static void	fix_mp_table(void);
337static void	setup_apic_irq_mapping(void);
338static void	init_locks(void);
339static int	start_all_aps(u_int boot_addr);
340static void	install_ap_tramp(u_int boot_addr);
341static int	start_ap(int logical_cpu, u_int boot_addr);
342void		ap_init(void);
343static int	apic_int_is_bus_type(int intr, int bus_type);
344static void	release_aps(void *dummy);
345
346/*
347 * initialize all the SMP locks
348 */
349
350/* critical region around IO APIC, apic_imen */
351struct mtx		imen_mtx;
352
353/* lock region used by kernel profiling */
354struct mtx		mcount_mtx;
355
356#ifdef USE_COMLOCK
357/* locks com (tty) data/hardware accesses: a FASTINTR() */
358struct mtx		com_mtx;
359#endif /* USE_COMLOCK */
360
361/* lock around the MP rendezvous */
362static struct mtx	smp_rv_mtx;
363
364/* only 1 CPU can panic at a time :) */
365struct mtx		panic_mtx;
366
367static void
368init_locks(void)
369{
370	/*
371	 * XXX The mcount mutex probably needs to be statically initialized,
372	 * since it will be used even in the function calls that get us to this
373	 * point.
374	 */
375	mtx_init(&mcount_mtx, "mcount", MTX_DEF);
376
377	mtx_init(&smp_rv_mtx, "smp rendezvous", MTX_SPIN);
378	mtx_init(&panic_mtx, "panic", MTX_DEF);
379
380#ifdef USE_COMLOCK
381	mtx_init(&com_mtx, "com", MTX_SPIN);
382#endif /* USE_COMLOCK */
383
384	mtx_init(&ap_boot_mtx, "ap boot", MTX_SPIN);
385}
386
387/*
388 * Calculate a usable address in base memory for the AP trampoline code.
389 */
390u_int
391mp_bootaddress(u_int basemem)
392{
393	POSTCODE(MP_BOOTADDRESS_POST);
394
395	base_memory = basemem * 1024;	/* convert to bytes */
396
397	boot_address = base_memory & ~0xfff;	/* round down to 4k boundary */
398	if ((base_memory - boot_address) < bootMP_size)
399		boot_address -= 4096;	/* not enough, lower by 4k */
400
401	return boot_address;
402}
403
404
405/*
406 * Look for an Intel MP spec table (ie, SMP capable hardware).
407 */
408int
409mp_probe(void)
410{
411	int     x;
412	u_long  segment;
413	u_int32_t target;
414
415	POSTCODE(MP_PROBE_POST);
416
417	/* see if an EBDA exists; the BDA word at 0x40:0x0e gives its segment */
418	if ((segment = (u_long) * (u_short *) (KERNBASE + 0x40e)) != 0) {
419		/* search first 1K of EBDA */
420		target = (u_int32_t) (segment << 4);
421		if ((x = search_for_sig(target, 1024 / 4)) >= 0)
422			goto found;
423	} else {
424		/* last 1K of base memory, effective 'top of base' passed in */
425		target = (u_int32_t) (base_memory - 0x400);
426		if ((x = search_for_sig(target, 1024 / 4)) >= 0)
427			goto found;
428	}
429
430	/* search the BIOS */
431	target = (u_int32_t) BIOS_BASE;
432	if ((x = search_for_sig(target, BIOS_COUNT)) >= 0)
433		goto found;
434
435	/* nothing found */
436	mpfps = (mpfps_t)0;
437	mp_capable = 0;
438	return 0;
439
440found:
441	/* calculate needed resources */
442	mpfps = (mpfps_t)x;
443	mptable_pass1();
444
445	/* flag the fact that we are running multiple processors */
446	mp_capable = 1;
447	return 1;
448}
449
450
451/*
452 * Initialize the SMP hardware and the APIC and start up the APs.
453 */
454void
455mp_start(void)
456{
457	POSTCODE(MP_START_POST);
458
459	/* look for MP capable motherboard */
460	if (mp_capable)
461		mp_enable(boot_address);
462	else
463		panic("MP hardware not found!");
464}
465
466
467/*
468 * Print various information about the SMP system hardware and setup.
469 */
470void
471mp_announce(void)
472{
473	int     x;
474
475	POSTCODE(MP_ANNOUNCE_POST);
476
477	printf("FreeBSD/SMP: Multiprocessor motherboard\n");
478	printf(" cpu0 (BSP): apic id: %2d", CPU_TO_ID(0));
479	printf(", version: 0x%08x", cpu_apic_versions[0]);
480	printf(", at 0x%08x\n", cpu_apic_address);
481	for (x = 1; x <= mp_naps; ++x) {
482		printf(" cpu%d (AP):  apic id: %2d", x, CPU_TO_ID(x));
483		printf(", version: 0x%08x", cpu_apic_versions[x]);
484		printf(", at 0x%08x\n", cpu_apic_address);
485	}
486
487#if defined(APIC_IO)
488	for (x = 0; x < mp_napics; ++x) {
489		printf(" io%d (APIC): apic id: %2d", x, IO_TO_ID(x));
490		printf(", version: 0x%08x", io_apic_versions[x]);
491		printf(", at 0x%08x\n", io_apic_address[x]);
492	}
493#else
494	printf(" Warning: APIC I/O disabled\n");
495#endif	/* APIC_IO */
496}
497
498/*
499 * AP CPUs call this to sync up protected mode.
500 */
501void
502init_secondary(void)
503{
504	int	gsel_tss;
505	int	x, myid = bootAP;
506
507	gdt_segs[GPRIV_SEL].ssd_base = (int) &SMP_prvspace[myid];
508	gdt_segs[GPROC0_SEL].ssd_base =
509		(int) &SMP_prvspace[myid].globaldata.gd_common_tss;
510	SMP_prvspace[myid].globaldata.gd_prvspace =
511		&SMP_prvspace[myid].globaldata;
512
513	for (x = 0; x < NGDT; x++) {
514		ssdtosd(&gdt_segs[x], &gdt[myid * NGDT + x].sd);
515	}
516
517	r_gdt.rd_limit = NGDT * sizeof(gdt[0]) - 1;
518	r_gdt.rd_base = (int) &gdt[myid * NGDT];
519	lgdt(&r_gdt);			/* does magic intra-segment return */
520
521	lidt(&r_idt);
522
523	lldt(_default_ldt);
524#ifdef USER_LDT
525	PCPU_SET(currentldt, _default_ldt);
526#endif
527
528	gsel_tss = GSEL(GPROC0_SEL, SEL_KPL);
529	gdt[myid * NGDT + GPROC0_SEL].sd.sd_type = SDT_SYS386TSS;
530	PCPU_SET(common_tss.tss_esp0, 0); /* not used until after switch */
531	PCPU_SET(common_tss.tss_ss0, GSEL(GDATA_SEL, SEL_KPL));
532	PCPU_SET(common_tss.tss_ioopt, (sizeof (struct i386tss)) << 16);
533	PCPU_SET(tss_gdt, &gdt[myid * NGDT + GPROC0_SEL].sd);
534	PCPU_SET(common_tssd, *PCPU_GET(tss_gdt));
535	ltr(gsel_tss);
536
537	pmap_set_opt();
538}
539
540
541#if defined(APIC_IO)
542/*
543 * Final configuration of the BSP's local APIC:
544 *  - disable 'pic mode'.
545 *  - disable 'virtual wire mode'.
546 *  - enable NMI.
547 */
548void
549bsp_apic_configure(void)
550{
551	u_char		byte;
552	u_int32_t	temp;
553
554	/* leave 'pic mode' if necessary */
555	if (picmode) {
556		outb(0x22, 0x70);	/* select IMCR */
557		byte = inb(0x23);	/* current contents */
558		byte |= 0x01;		/* mask external INTR */
559		outb(0x23, byte);	/* disconnect 8259s/NMI */
560	}
561
562	/* mask lint0 (the 8259 'virtual wire' connection) */
563	temp = lapic.lvt_lint0;
564	temp |= APIC_LVT_M;		/* set the mask */
565	lapic.lvt_lint0 = temp;
566
567	/* setup lint1 to handle NMI */
568	temp = lapic.lvt_lint1;
569	temp &= ~APIC_LVT_M;		/* clear the mask */
570	lapic.lvt_lint1 = temp;
571
572	if (bootverbose)
573		apic_dump("bsp_apic_configure()");
574}
575#endif  /* APIC_IO */
576
577
578/*******************************************************************
579 * local functions and data
580 */
581
582/*
583 * start the SMP system
584 */
585static void
586mp_enable(u_int boot_addr)
587{
588	int     x;
589#if defined(APIC_IO)
590	int     apic;
591	u_int   ux;
592#endif	/* APIC_IO */
593
594	POSTCODE(MP_ENABLE_POST);
595
596	/* turn on 4MB of V == P addressing so we can get to MP table */
597	*(int *)PTD = PG_V | PG_RW | ((uintptr_t)(void *)KPTphys & PG_FRAME);
598	invltlb();
599
600	/* examine the MP table for needed info, uses physical addresses */
601	x = mptable_pass2();
602
603	*(int *)PTD = 0;
604	invltlb();
605
606	/* can't process default configs till the CPU APIC is pmapped */
607	if (x)
608		default_mp_table(x);
609
610	/* post scan cleanup */
611	fix_mp_table();
612	setup_apic_irq_mapping();
613
614#if defined(APIC_IO)
615
616	/* fill the LOGICAL io_apic_versions table */
617	for (apic = 0; apic < mp_napics; ++apic) {
618		ux = io_apic_read(apic, IOAPIC_VER);
619		io_apic_versions[apic] = ux;
620		io_apic_set_id(apic, IO_TO_ID(apic));
621	}
622
623	/* program each IO APIC in the system */
624	for (apic = 0; apic < mp_napics; ++apic)
625		if (io_apic_setup(apic) < 0)
626			panic("IO APIC setup failure");
627
628	/* install a 'Spurious INTerrupt' vector */
629	setidt(XSPURIOUSINT_OFFSET, Xspuriousint,
630	       SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
631
632	/* install an inter-CPU IPI for TLB invalidation */
633	setidt(XINVLTLB_OFFSET, Xinvltlb,
634	       SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
635
636#ifdef BETTER_CLOCK
637	/* install an inter-CPU IPI for reading processor state */
638	setidt(XCPUCHECKSTATE_OFFSET, Xcpucheckstate,
639	       SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
640#endif
641
642	/* install an inter-CPU IPI for all-CPU rendezvous */
643	setidt(XRENDEZVOUS_OFFSET, Xrendezvous,
644	       SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
645
646	/* install an inter-CPU IPI for forcing an additional software trap */
647	setidt(XCPUAST_OFFSET, Xcpuast,
648	       SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
649
650	/* install an inter-CPU IPI for CPU stop/restart */
651	setidt(XCPUSTOP_OFFSET, Xcpustop,
652	       SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
653
654#if defined(TEST_TEST1)
655	/* install a "fake hardware INTerrupt" vector */
656	setidt(XTEST1_OFFSET, Xtest1,
657	       SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
658#endif  /** TEST_TEST1 */
659
660#endif	/* APIC_IO */
661
662	/* initialize all SMP locks */
663	init_locks();
664
665	/* start each Application Processor */
666	start_all_aps(boot_addr);
667}
668
669
670/*
671 * look for the MP spec signature
672 */
673
674/* string defined by the Intel MP Spec as identifying the MP table */
675#define MP_SIG		0x5f504d5f	/* _MP_ */
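/*
 * The MP spec places the floating pointer structure on a 16-byte boundary,
 * so the search below advances four 32-bit words at a time.
 */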
676#define NEXT(X)		((X) += 4)
677static int
678search_for_sig(u_int32_t target, int count)
679{
680	int     x;
681	u_int32_t *addr = (u_int32_t *) (KERNBASE + target);
682
683	for (x = 0; x < count; NEXT(x))
684		if (addr[x] == MP_SIG)
685			/* make array index a byte index */
686			return (target + (x * sizeof(u_int32_t)));
687
688	return -1;
689}
690
691
692static basetable_entry basetable_entry_types[] =
693{
694	{0, 20, "Processor"},
695	{1, 8, "Bus"},
696	{2, 8, "I/O APIC"},
697	{3, 8, "I/O INT"},
698	{4, 8, "Local INT"}
699};
700
701typedef struct BUSDATA {
702	u_char  bus_id;
703	enum busTypes bus_type;
704}       bus_datum;
705
706typedef struct INTDATA {
707	u_char  int_type;
708	u_short int_flags;
709	u_char  src_bus_id;
710	u_char  src_bus_irq;
711	u_char  dst_apic_id;
712	u_char  dst_apic_int;
713	u_char	int_vector;
714}       io_int, local_int;
715
716typedef struct BUSTYPENAME {
717	u_char  type;
718	char    name[7];
719}       bus_type_name;
720
721static bus_type_name bus_type_table[] =
722{
723	{CBUS, "CBUS"},
724	{CBUSII, "CBUSII"},
725	{EISA, "EISA"},
726	{MCA, "MCA"},
727	{UNKNOWN_BUSTYPE, "---"},
728	{ISA, "ISA"},
729	{MCA, "MCA"},
730	{UNKNOWN_BUSTYPE, "---"},
731	{UNKNOWN_BUSTYPE, "---"},
732	{UNKNOWN_BUSTYPE, "---"},
733	{UNKNOWN_BUSTYPE, "---"},
734	{UNKNOWN_BUSTYPE, "---"},
735	{PCI, "PCI"},
736	{UNKNOWN_BUSTYPE, "---"},
737	{UNKNOWN_BUSTYPE, "---"},
738	{UNKNOWN_BUSTYPE, "---"},
739	{UNKNOWN_BUSTYPE, "---"},
740	{XPRESS, "XPRESS"},
741	{UNKNOWN_BUSTYPE, "---"}
742};
743/* from MP spec v1.4, table 5-1 */
744static int default_data[7][5] =
745{
746/*   nbus, id0, type0, id1, type1 */
747	{1, 0, ISA, 255, 255},
748	{1, 0, EISA, 255, 255},
749	{1, 0, EISA, 255, 255},
750	{1, 0, MCA, 255, 255},
751	{2, 0, ISA, 1, PCI},
752	{2, 0, EISA, 1, PCI},
753	{2, 0, MCA, 1, PCI}
754};
755
756
757/* the bus data */
758static bus_datum *bus_data;
759
760/* the IO INT data, one entry per possible APIC INTerrupt */
761static io_int  *io_apic_ints;
762
763static int nintrs;
764
765static int processor_entry	__P((proc_entry_ptr entry, int cpu));
766static int bus_entry		__P((bus_entry_ptr entry, int bus));
767static int io_apic_entry	__P((io_apic_entry_ptr entry, int apic));
768static int int_entry		__P((int_entry_ptr entry, int intr));
769static int lookup_bus_type	__P((char *name));
770
771
772/*
773 * 1st pass on motherboard's Intel MP specification table.
774 *
775 * initializes:
776 *	mp_ncpus = 1
777 *
778 * determines:
779 *	cpu_apic_address (common to all CPUs)
780 *	io_apic_address[N]
781 *	mp_naps
782 *	mp_nbusses
783 *	mp_napics
784 *	nintrs
785 */
786static void
787mptable_pass1(void)
788{
789	int	x;
790	mpcth_t	cth;
791	int	totalSize;
792	void*	position;
793	int	count;
794	int	type;
795
796	POSTCODE(MPTABLE_PASS1_POST);
797
798	/* clear various tables */
799	for (x = 0; x < NAPICID; ++x) {
800		io_apic_address[x] = ~0;	/* IO APIC address table */
801	}
802
803	/* init everything to empty */
804	mp_naps = 0;
805	mp_nbusses = 0;
806	mp_napics = 0;
807	nintrs = 0;
808
809	/* check for use of 'default' configuration */
810	if (MPFPS_MPFB1 != 0) {
811		/* use default addresses */
812		cpu_apic_address = DEFAULT_APIC_BASE;
813		io_apic_address[0] = DEFAULT_IO_APIC_BASE;
814
815		/* fill in with defaults */
816		mp_naps = 2;		/* includes BSP */
817		mp_nbusses = default_data[MPFPS_MPFB1 - 1][0];
818#if defined(APIC_IO)
819		mp_napics = 1;
820		nintrs = 16;
821#endif	/* APIC_IO */
822	}
823	else {
824		if ((cth = mpfps->pap) == 0)
825			panic("MP Configuration Table Header MISSING!");
826
827		cpu_apic_address = (vm_offset_t) cth->apic_address;
828
829		/* walk the table, recording info of interest */
830		totalSize = cth->base_table_length - sizeof(struct MPCTH);
831		position = (u_char *) cth + sizeof(struct MPCTH);
832		count = cth->entry_count;
833
834		while (count--) {
835			switch (type = *(u_char *) position) {
836			case 0: /* processor_entry */
837				if (((proc_entry_ptr)position)->cpu_flags
838					& PROCENTRY_FLAG_EN)
839					++mp_naps;
840				break;
841			case 1: /* bus_entry */
842				++mp_nbusses;
843				break;
844			case 2: /* io_apic_entry */
845				if (((io_apic_entry_ptr)position)->apic_flags
846					& IOAPICENTRY_FLAG_EN)
847					io_apic_address[mp_napics++] =
848					    (vm_offset_t)((io_apic_entry_ptr)
849						position)->apic_address;
850				break;
851			case 3: /* int_entry */
852				++nintrs;
853				break;
854			case 4:	/* local_int entry */
855				break;
856			default:
857				panic("mpfps Base Table HOSED!");
858				/* NOTREACHED */
859			}
860
861			totalSize -= basetable_entry_types[type].length;
862			position = (u_char *)position + basetable_entry_types[type].length;
863		}
864	}
865
866	/* qualify the numbers */
867	if (mp_naps > MAXCPU) {
868		printf("Warning: only using %d of %d available CPUs!\n",
869			MAXCPU, mp_naps);
870		mp_naps = MAXCPU;
871	}
872
873	/*
874	 * Count the BSP.
875	 * This is also used as a counter while starting the APs.
876	 */
877	mp_ncpus = 1;
878
879	--mp_naps;	/* subtract the BSP */
880}
881
882
883/*
884 * 2nd pass on motherboard's Intel MP specification table.
885 *
886 * sets:
887 *	boot_cpu_id
888 *	ID_TO_IO(N), phy APIC ID to log CPU/IO table
889 *	CPU_TO_ID(N), logical CPU to APIC ID table
890 *	IO_TO_ID(N), logical IO to APIC ID table
891 *	bus_data[N]
892 *	io_apic_ints[N]
893 */
894static int
895mptable_pass2(void)
896{
897	int     x;
898	mpcth_t cth;
899	int     totalSize;
900	void*   position;
901	int     count;
902	int     type;
903	int     apic, bus, cpu, intr;
904	int	i, j;
905	int	pgeflag;
906
907	POSTCODE(MPTABLE_PASS2_POST);
908
909	pgeflag = 0;		/* XXX - Not used under SMP yet.  */
910
911	MALLOC(io_apic_versions, u_int32_t *, sizeof(u_int32_t) * mp_napics,
912	    M_DEVBUF, M_WAITOK);
913	MALLOC(ioapic, volatile ioapic_t **, sizeof(ioapic_t *) * mp_napics,
914	    M_DEVBUF, M_WAITOK);
915	MALLOC(io_apic_ints, io_int *, sizeof(io_int) * (nintrs + 1),
916	    M_DEVBUF, M_WAITOK);
917	MALLOC(bus_data, bus_datum *, sizeof(bus_datum) * mp_nbusses,
918	    M_DEVBUF, M_WAITOK);
919
920	bzero(ioapic, sizeof(ioapic_t *) * mp_napics);
921
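	/*
	 * Map each IO APIC's register window into one of the page slots near
	 * the top of the SMP page table page, reusing a slot when two IO
	 * APICs share the same 4K page frame.
	 */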
922	for (i = 0; i < mp_napics; i++) {
923		for (j = 0; j < mp_napics; j++) {
924			/* same page frame as a previous IO apic? */
925			if (((vm_offset_t)SMPpt[NPTEPG-2-j] & PG_FRAME) ==
926			    (io_apic_address[i] & PG_FRAME)) {
927				ioapic[i] = (ioapic_t *)((u_int)SMP_prvspace
928					+ (NPTEPG-2-j) * PAGE_SIZE
929					+ (io_apic_address[i] & PAGE_MASK));
930				break;
931			}
932			/* use this slot if available */
933			if (((vm_offset_t)SMPpt[NPTEPG-2-j] & PG_FRAME) == 0) {
934				SMPpt[NPTEPG-2-j] = (pt_entry_t)(PG_V | PG_RW |
935				    pgeflag | (io_apic_address[i] & PG_FRAME));
936				ioapic[i] = (ioapic_t *)((u_int)SMP_prvspace
937					+ (NPTEPG-2-j) * PAGE_SIZE
938					+ (io_apic_address[i] & PAGE_MASK));
939				break;
940			}
941		}
942	}
943
944	/* clear various tables */
945	for (x = 0; x < NAPICID; ++x) {
946		ID_TO_IO(x) = -1;	/* phy APIC ID to log CPU/IO table */
947		CPU_TO_ID(x) = -1;	/* logical CPU to APIC ID table */
948		IO_TO_ID(x) = -1;	/* logical IO to APIC ID table */
949	}
950
951	/* clear bus data table */
952	for (x = 0; x < mp_nbusses; ++x)
953		bus_data[x].bus_id = 0xff;
954
955	/* clear IO APIC INT table */
956	for (x = 0; x < (nintrs + 1); ++x) {
957		io_apic_ints[x].int_type = 0xff;
958		io_apic_ints[x].int_vector = 0xff;
959	}
960
961	/* setup the cpu/apic mapping arrays */
962	boot_cpu_id = -1;
963
964	/* record whether PIC or virtual-wire mode */
965	picmode = (mpfps->mpfb2 & 0x80) ? 1 : 0;
966
967	/* check for use of 'default' configuration */
968	if (MPFPS_MPFB1 != 0)
969		return MPFPS_MPFB1;	/* return default configuration type */
970
971	if ((cth = mpfps->pap) == 0)
972		panic("MP Configuration Table Header MISSING!");
973
974	/* walk the table, recording info of interest */
975	totalSize = cth->base_table_length - sizeof(struct MPCTH);
976	position = (u_char *) cth + sizeof(struct MPCTH);
977	count = cth->entry_count;
978	apic = bus = intr = 0;
979	cpu = 1;				/* pre-count the BSP */
980
981	while (count--) {
982		switch (type = *(u_char *) position) {
983		case 0:
984			if (processor_entry(position, cpu))
985				++cpu;
986			break;
987		case 1:
988			if (bus_entry(position, bus))
989				++bus;
990			break;
991		case 2:
992			if (io_apic_entry(position, apic))
993				++apic;
994			break;
995		case 3:
996			if (int_entry(position, intr))
997				++intr;
998			break;
999		case 4:
1000			/* int_entry(position); */
1001			break;
1002		default:
1003			panic("mpfps Base Table HOSED!");
1004			/* NOTREACHED */
1005		}
1006
1007		totalSize -= basetable_entry_types[type].length;
1008		position = (u_char *)position + basetable_entry_types[type].length;
1009	}
1010
1011	if (boot_cpu_id == -1)
1012		panic("NO BSP found!");
1013
1014	/* report the fact that it's NOT a default configuration */
1015	return 0;
1016}
1017
1018
1019void
1020assign_apic_irq(int apic, int intpin, int irq)
1021{
1022	int x;
1023
1024	if (int_to_apicintpin[irq].ioapic != -1)
1025		panic("assign_apic_irq: inconsistent table");
1026
1027	int_to_apicintpin[irq].ioapic = apic;
1028	int_to_apicintpin[irq].int_pin = intpin;
1029	int_to_apicintpin[irq].apic_address = ioapic[apic];
1030	int_to_apicintpin[irq].redirindex = IOAPIC_REDTBL + 2 * intpin;
1031
1032	for (x = 0; x < nintrs; x++) {
1033		if ((io_apic_ints[x].int_type == 0 ||
1034		     io_apic_ints[x].int_type == 3) &&
1035		    io_apic_ints[x].int_vector == 0xff &&
1036		    io_apic_ints[x].dst_apic_id == IO_TO_ID(apic) &&
1037		    io_apic_ints[x].dst_apic_int == intpin)
1038			io_apic_ints[x].int_vector = irq;
1039	}
1040}
1041
1042void
1043revoke_apic_irq(int irq)
1044{
1045	int x;
1046	int oldapic;
1047	int oldintpin;
1048
1049	if (int_to_apicintpin[irq].ioapic == -1)
1050		panic("revoke_apic_irq: inconsistent table");
1051
1052	oldapic = int_to_apicintpin[irq].ioapic;
1053	oldintpin = int_to_apicintpin[irq].int_pin;
1054
1055	int_to_apicintpin[irq].ioapic = -1;
1056	int_to_apicintpin[irq].int_pin = 0;
1057	int_to_apicintpin[irq].apic_address = NULL;
1058	int_to_apicintpin[irq].redirindex = 0;
1059
1060	for (x = 0; x < nintrs; x++) {
1061		if ((io_apic_ints[x].int_type == 0 ||
1062		     io_apic_ints[x].int_type == 3) &&
1063		    io_apic_ints[x].int_vector == 0xff &&
1064		    io_apic_ints[x].dst_apic_id == IO_TO_ID(oldapic) &&
1065		    io_apic_ints[x].dst_apic_int == oldintpin)
1066			io_apic_ints[x].int_vector = 0xff;
1067	}
1068}
1069
1070
1071
1072static void
1073swap_apic_id(int apic, int oldid, int newid)
1074{
1075	int x;
1076	int oapic;
1077
1078
1079	if (oldid == newid)
1080		return;			/* Nothing to do */
1081
1082	printf("Changing APIC ID for IO APIC #%d from %d to %d in MP table\n",
1083	       apic, oldid, newid);
1084
1085	/* Swap physical APIC IDs in interrupt entries */
1086	for (x = 0; x < nintrs; x++) {
1087		if (io_apic_ints[x].dst_apic_id == oldid)
1088			io_apic_ints[x].dst_apic_id = newid;
1089		else if (io_apic_ints[x].dst_apic_id == newid)
1090			io_apic_ints[x].dst_apic_id = oldid;
1091	}
1092
1093	/* Swap physical APIC IDs in IO_TO_ID mappings */
1094	for (oapic = 0; oapic < mp_napics; oapic++)
1095		if (IO_TO_ID(oapic) == newid)
1096			break;
1097
1098	if (oapic < mp_napics) {
1099		printf("Changing APIC ID for IO APIC #%d from "
1100		       "%d to %d in MP table\n",
1101		       oapic, newid, oldid);
1102		IO_TO_ID(oapic) = oldid;
1103	}
1104	IO_TO_ID(apic) = newid;
1105}
1106
1107
1108static void
1109fix_id_to_io_mapping(void)
1110{
1111	int x;
1112
1113	for (x = 0; x < NAPICID; x++)
1114		ID_TO_IO(x) = -1;
1115
1116	for (x = 0; x <= mp_naps; x++)
1117		if (CPU_TO_ID(x) < NAPICID)
1118			ID_TO_IO(CPU_TO_ID(x)) = x;
1119
1120	for (x = 0; x < mp_napics; x++)
1121		if (IO_TO_ID(x) < NAPICID)
1122			ID_TO_IO(IO_TO_ID(x)) = x;
1123}
1124
1125
1126static int
1127first_free_apic_id(void)
1128{
1129	int freeid, x;
1130
1131	for (freeid = 0; freeid < NAPICID; freeid++) {
1132		for (x = 0; x <= mp_naps; x++)
1133			if (CPU_TO_ID(x) == freeid)
1134				break;
1135		if (x <= mp_naps)
1136			continue;
1137		for (x = 0; x < mp_napics; x++)
1138			if (IO_TO_ID(x) == freeid)
1139				break;
1140		if (x < mp_napics)
1141			continue;
1142		return freeid;
1143	}
1144	return freeid;
1145}
1146
1147
1148static int
1149io_apic_id_acceptable(int apic, int id)
1150{
1151	int cpu;		/* Logical CPU number */
1152	int oapic;		/* Logical IO APIC number for other IO APIC */
1153
1154	if (id >= NAPICID)
1155		return 0;	/* Out of range */
1156
1157	for (cpu = 0; cpu <= mp_naps; cpu++)
1158		if (CPU_TO_ID(cpu) == id)
1159			return 0;	/* Conflict with CPU */
1160
1161	for (oapic = 0; oapic < mp_napics && oapic < apic; oapic++)
1162		if (IO_TO_ID(oapic) == id)
1163			return 0;	/* Conflict with other APIC */
1164
1165	return 1;		/* ID is acceptable for IO APIC */
1166}
1167
1168
1169/*
1170 * parse an Intel MP specification table
1171 */
1172static void
1173fix_mp_table(void)
1174{
1175	int	x;
1176	int	id;
1177	int	bus_0 = 0;	/* Stop GCC warning */
1178	int	bus_pci = 0;	/* Stop GCC warning */
1179	int	num_pci_bus;
1180	int	apic;		/* IO APIC unit number */
1181	int     freeid;		/* Free physical APIC ID */
1182	int	physid;		/* Current physical IO APIC ID */
1183
1184	/*
1185	 * Fix mis-numbering of the PCI bus and its INT entries if the BIOS
1186	 * did it wrong.  The MP spec says that when more than 1 PCI bus
1187	 * exists the BIOS must begin with bus entries for the PCI bus and use
1188	 * actual PCI bus numbering.  This implies that when only 1 PCI bus
1189	 * exists the BIOS can choose to ignore this ordering, and indeed many
1190	 * MP motherboards do ignore it.  This causes a problem when the PCI
1191	 * sub-system makes requests of the MP sub-system based on PCI bus
1192	 * numbers.	So here we look for the situation and renumber the
1193	 * busses and associated INTs in an effort to "make it right".
1194	 */
1195
1196	/* find bus 0, PCI bus, count the number of PCI busses */
1197	for (num_pci_bus = 0, x = 0; x < mp_nbusses; ++x) {
1198		if (bus_data[x].bus_id == 0) {
1199			bus_0 = x;
1200		}
1201		if (bus_data[x].bus_type == PCI) {
1202			++num_pci_bus;
1203			bus_pci = x;
1204		}
1205	}
1206	/*
1207	 * bus_0 == slot of bus with ID of 0
1208	 * bus_pci == slot of last PCI bus encountered
1209	 */
1210
1211	/* check the 1 PCI bus case for sanity */
1212	/* if it is number 0 all is well */
1213	if (num_pci_bus == 1 &&
1214	    bus_data[bus_pci].bus_id != 0) {
1215
1216		/* mis-numbered, swap with whichever bus uses slot 0 */
1217
1218		/* swap the bus entry types */
1219		bus_data[bus_pci].bus_type = bus_data[bus_0].bus_type;
1220		bus_data[bus_0].bus_type = PCI;
1221
1222		/* swap each relevant INTerrupt entry */
1223		id = bus_data[bus_pci].bus_id;
1224		for (x = 0; x < nintrs; ++x) {
1225			if (io_apic_ints[x].src_bus_id == id) {
1226				io_apic_ints[x].src_bus_id = 0;
1227			}
1228			else if (io_apic_ints[x].src_bus_id == 0) {
1229				io_apic_ints[x].src_bus_id = id;
1230			}
1231		}
1232	}
1233
1234	/* Assign IO APIC IDs.
1235	 *
1236	 * First try the existing ID. If a conflict is detected, try
1237	 * the ID in the MP table.  If a conflict is still detected, find
1238	 * a free id.
1239	 * We cannot use the ID_TO_IO table before all conflicts have been
1240	 * We cannot use the ID_TO_IO table before all conflicts has been
1241	 * resolved and the table has been corrected.
1242	 */
1243	for (apic = 0; apic < mp_napics; ++apic) { /* For all IO APICs */
1244
1245		/* First try to use the value set by the BIOS */
1246		physid = io_apic_get_id(apic);
1247		if (io_apic_id_acceptable(apic, physid)) {
1248			if (IO_TO_ID(apic) != physid)
1249				swap_apic_id(apic, IO_TO_ID(apic), physid);
1250			continue;
1251		}
1252
1253		/* Then check if the value in the MP table is acceptable */
1254		if (io_apic_id_acceptable(apic, IO_TO_ID(apic)))
1255			continue;
1256
1257		/* Last resort, find a free APIC ID and use it */
1258		freeid = first_free_apic_id();
1259		if (freeid >= NAPICID)
1260			panic("No free physical APIC IDs found");
1261
1262		if (io_apic_id_acceptable(apic, freeid)) {
1263			swap_apic_id(apic, IO_TO_ID(apic), freeid);
1264			continue;
1265		}
1266		panic("Free physical APIC ID not usable");
1267	}
1268	fix_id_to_io_mapping();
1269
1270	/* detect and fix broken Compaq MP table */
1271	if (apic_int_type(0, 0) == -1) {
1272		printf("APIC_IO: MP table broken: 8259->APIC entry missing!\n");
1273		io_apic_ints[nintrs].int_type = 3;	/* ExtInt */
1274		io_apic_ints[nintrs].int_vector = 0xff;	/* Unassigned */
1275		/* XXX fixme, set src bus id etc, but it doesn't seem to hurt */
1276		io_apic_ints[nintrs].dst_apic_id = IO_TO_ID(0);
1277		io_apic_ints[nintrs].dst_apic_int = 0;	/* Pin 0 */
1278		nintrs++;
1279	}
1280}
1281
1282
1283/* Assign low level interrupt handlers */
1284static void
1285setup_apic_irq_mapping(void)
1286{
1287	int	x;
1288	int	int_vector;
1289
1290	/* Clear array */
1291	for (x = 0; x < APIC_INTMAPSIZE; x++) {
1292		int_to_apicintpin[x].ioapic = -1;
1293		int_to_apicintpin[x].int_pin = 0;
1294		int_to_apicintpin[x].apic_address = NULL;
1295		int_to_apicintpin[x].redirindex = 0;
1296	}
1297
1298	/* First assign ISA/EISA interrupts */
1299	for (x = 0; x < nintrs; x++) {
1300		int_vector = io_apic_ints[x].src_bus_irq;
1301		if (int_vector < APIC_INTMAPSIZE &&
1302		    io_apic_ints[x].int_vector == 0xff &&
1303		    int_to_apicintpin[int_vector].ioapic == -1 &&
1304		    (apic_int_is_bus_type(x, ISA) ||
1305		     apic_int_is_bus_type(x, EISA)) &&
1306		    io_apic_ints[x].int_type == 0) {
1307			assign_apic_irq(ID_TO_IO(io_apic_ints[x].dst_apic_id),
1308					io_apic_ints[x].dst_apic_int,
1309					int_vector);
1310		}
1311	}
1312
1313	/* Assign first set of interrupts to intpins on IOAPIC #0 */
1314	for (x = 0; x < nintrs; x++) {
1315		int_vector = io_apic_ints[x].dst_apic_int;
1316		if (int_vector < APIC_INTMAPSIZE &&
1317		    io_apic_ints[x].dst_apic_id == IO_TO_ID(0) &&
1318		    io_apic_ints[x].int_vector == 0xff &&
1319		    int_to_apicintpin[int_vector].ioapic == -1 &&
1320		    (io_apic_ints[x].int_type == 0 ||
1321		     io_apic_ints[x].int_type == 3)) {
1322			assign_apic_irq(0,
1323					io_apic_ints[x].dst_apic_int,
1324					int_vector);
1325		}
1326	}
1327	/*
1328	 * Assign interrupts for remaining intpins.
1329	 * Skip IOAPIC #0 intpin 0 if the type is ExtInt, since this indicates
1330	 * that an entry for ISA/EISA irq 0 exists, and a fallback to mixed mode
1331	 * due to 8254 interrupts not being delivered can reuse that low level
1332	 * interrupt handler.
1333	 */
1334	int_vector = 0;
1335	while (int_vector < APIC_INTMAPSIZE &&
1336	       int_to_apicintpin[int_vector].ioapic != -1)
1337		int_vector++;
1338	for (x = 0; x < nintrs && int_vector < APIC_INTMAPSIZE; x++) {
1339		if ((io_apic_ints[x].int_type == 0 ||
1340		     (io_apic_ints[x].int_type == 3 &&
1341		      (io_apic_ints[x].dst_apic_id != IO_TO_ID(0) ||
1342		       io_apic_ints[x].dst_apic_int != 0))) &&
1343		    io_apic_ints[x].int_vector == 0xff) {
1344			assign_apic_irq(ID_TO_IO(io_apic_ints[x].dst_apic_id),
1345					io_apic_ints[x].dst_apic_int,
1346					int_vector);
1347			int_vector++;
1348			while (int_vector < APIC_INTMAPSIZE &&
1349			       int_to_apicintpin[int_vector].ioapic != -1)
1350				int_vector++;
1351		}
1352	}
1353}
1354
1355
1356static int
1357processor_entry(proc_entry_ptr entry, int cpu)
1358{
1359	/* check for usability */
1360	if (!(entry->cpu_flags & PROCENTRY_FLAG_EN))
1361		return 0;
1362
1363	if (entry->apic_id >= NAPICID)
1364		panic("CPU APIC ID out of range (0..%d)", NAPICID - 1);
1365	/* check for BSP flag */
1366	if (entry->cpu_flags & PROCENTRY_FLAG_BP) {
1367		boot_cpu_id = entry->apic_id;
1368		CPU_TO_ID(0) = entry->apic_id;
1369		ID_TO_CPU(entry->apic_id) = 0;
1370		return 0;	/* it's already been counted */
1371	}
1372
1373	/* add another AP to list, if less than max number of CPUs */
1374	else if (cpu < MAXCPU) {
1375		CPU_TO_ID(cpu) = entry->apic_id;
1376		ID_TO_CPU(entry->apic_id) = cpu;
1377		return 1;
1378	}
1379
1380	return 0;
1381}
1382
1383
1384static int
1385bus_entry(bus_entry_ptr entry, int bus)
1386{
1387	int     x;
1388	char    c, name[8];
1389
1390	/* encode the name into an index */
1391	for (x = 0; x < 6; ++x) {
1392		if ((c = entry->bus_type[x]) == ' ')
1393			break;
1394		name[x] = c;
1395	}
1396	name[x] = '\0';
1397
1398	if ((x = lookup_bus_type(name)) == UNKNOWN_BUSTYPE)
1399		panic("unknown bus type: '%s'", name);
1400
1401	bus_data[bus].bus_id = entry->bus_id;
1402	bus_data[bus].bus_type = x;
1403
1404	return 1;
1405}
1406
1407
1408static int
1409io_apic_entry(io_apic_entry_ptr entry, int apic)
1410{
1411	if (!(entry->apic_flags & IOAPICENTRY_FLAG_EN))
1412		return 0;
1413
1414	IO_TO_ID(apic) = entry->apic_id;
1415	if (entry->apic_id < NAPICID)
1416		ID_TO_IO(entry->apic_id) = apic;
1417
1418	return 1;
1419}
1420
1421
1422static int
1423lookup_bus_type(char *name)
1424{
1425	int     x;
1426
1427	for (x = 0; x < MAX_BUSTYPE; ++x)
1428		if (strcmp(bus_type_table[x].name, name) == 0)
1429			return bus_type_table[x].type;
1430
1431	return UNKNOWN_BUSTYPE;
1432}
1433
1434
1435static int
1436int_entry(int_entry_ptr entry, int intr)
1437{
1438	int apic;
1439
1440	io_apic_ints[intr].int_type = entry->int_type;
1441	io_apic_ints[intr].int_flags = entry->int_flags;
1442	io_apic_ints[intr].src_bus_id = entry->src_bus_id;
1443	io_apic_ints[intr].src_bus_irq = entry->src_bus_irq;
1444	if (entry->dst_apic_id == 255) {
1445		/* This signal goes to all IO APICs.  Select an IO APIC
1446		   with a sufficient number of interrupt pins */
1447		for (apic = 0; apic < mp_napics; apic++)
1448			if (((io_apic_read(apic, IOAPIC_VER) &
1449			      IOART_VER_MAXREDIR) >> MAXREDIRSHIFT) >=
1450			    entry->dst_apic_int)
1451				break;
1452		if (apic < mp_napics)
1453			io_apic_ints[intr].dst_apic_id = IO_TO_ID(apic);
1454		else
1455			io_apic_ints[intr].dst_apic_id = entry->dst_apic_id;
1456	} else
1457		io_apic_ints[intr].dst_apic_id = entry->dst_apic_id;
1458	io_apic_ints[intr].dst_apic_int = entry->dst_apic_int;
1459
1460	return 1;
1461}
1462
1463
1464static int
1465apic_int_is_bus_type(int intr, int bus_type)
1466{
1467	int     bus;
1468
1469	for (bus = 0; bus < mp_nbusses; ++bus)
1470		if ((bus_data[bus].bus_id == io_apic_ints[intr].src_bus_id)
1471		    && ((int) bus_data[bus].bus_type == bus_type))
1472			return 1;
1473
1474	return 0;
1475}
1476
1477
1478/*
1479 * Given a traditional ISA INT mask, return an APIC mask.
1480 */
1481u_int
1482isa_apic_mask(u_int isa_mask)
1483{
1484	int isa_irq;
1485	int apic_pin;
1486
1487#if defined(SKIP_IRQ15_REDIRECT)
1488	if (isa_mask == (1 << 15)) {
1489		printf("skipping ISA IRQ15 redirect\n");
1490		return isa_mask;
1491	}
1492#endif  /* SKIP_IRQ15_REDIRECT */
1493
1494	isa_irq = ffs(isa_mask);		/* find its bit position */
1495	if (isa_irq == 0)			/* doesn't exist */
1496		return 0;
1497	--isa_irq;				/* make it zero based */
1498
1499	apic_pin = isa_apic_irq(isa_irq);	/* look for APIC connection */
1500	if (apic_pin == -1)
1501		return 0;
1502
1503	return (1 << apic_pin);			/* convert pin# to a mask */
1504}
1505
1506
1507/*
1508 * Determine which APIC pin an ISA/EISA INT is attached to.
1509 */
1510#define INTTYPE(I)	(io_apic_ints[(I)].int_type)
1511#define INTPIN(I)	(io_apic_ints[(I)].dst_apic_int)
1512#define INTIRQ(I)	(io_apic_ints[(I)].int_vector)
1513#define INTAPIC(I)	(ID_TO_IO(io_apic_ints[(I)].dst_apic_id))
1514
1515#define SRCBUSIRQ(I)	(io_apic_ints[(I)].src_bus_irq)
1516int
1517isa_apic_irq(int isa_irq)
1518{
1519	int     intr;
1520
1521	for (intr = 0; intr < nintrs; ++intr) {		/* check each record */
1522		if (INTTYPE(intr) == 0) {		/* standard INT */
1523			if (SRCBUSIRQ(intr) == isa_irq) {
1524				if (apic_int_is_bus_type(intr, ISA) ||
1525			            apic_int_is_bus_type(intr, EISA))
1526					return INTIRQ(intr);	/* found */
1527			}
1528		}
1529	}
1530	return -1;					/* NOT found */
1531}
1532
1533
1534/*
1535 * Determine which APIC pin a PCI INT is attached to.
1536 */
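/*
 * For PCI sources the MP table packs the device number into bits 2..6 and
 * the interrupt line (INTA#..INTD#) into bits 0..1 of the source bus IRQ.
 */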
1537#define SRCBUSID(I)	(io_apic_ints[(I)].src_bus_id)
1538#define SRCBUSDEVICE(I)	((io_apic_ints[(I)].src_bus_irq >> 2) & 0x1f)
1539#define SRCBUSLINE(I)	(io_apic_ints[(I)].src_bus_irq & 0x03)
1540int
1541pci_apic_irq(int pciBus, int pciDevice, int pciInt)
1542{
1543	int     intr;
1544
1545	--pciInt;					/* zero based */
1546
1547	for (intr = 0; intr < nintrs; ++intr)		/* check each record */
1548		if ((INTTYPE(intr) == 0)		/* standard INT */
1549		    && (SRCBUSID(intr) == pciBus)
1550		    && (SRCBUSDEVICE(intr) == pciDevice)
1551		    && (SRCBUSLINE(intr) == pciInt))	/* a candidate IRQ */
1552			if (apic_int_is_bus_type(intr, PCI))
1553				return INTIRQ(intr);	/* exact match */
1554
1555	return -1;					/* NOT found */
1556}
1557
1558int
1559next_apic_irq(int irq)
1560{
1561	int intr, ointr;
1562	int bus, bustype;
1563
1564	bus = 0;
1565	bustype = 0;
1566	for (intr = 0; intr < nintrs; intr++) {
1567		if (INTIRQ(intr) != irq || INTTYPE(intr) != 0)
1568			continue;
1569		bus = SRCBUSID(intr);
1570		bustype = apic_bus_type(bus);
1571		if (bustype != ISA &&
1572		    bustype != EISA &&
1573		    bustype != PCI)
1574			continue;
1575		break;
1576	}
1577	if (intr >= nintrs) {
1578		return -1;
1579	}
1580	for (ointr = intr + 1; ointr < nintrs; ointr++) {
1581		if (INTTYPE(ointr) != 0)
1582			continue;
1583		if (bus != SRCBUSID(ointr))
1584			continue;
1585		if (bustype == PCI) {
1586			if (SRCBUSDEVICE(intr) != SRCBUSDEVICE(ointr))
1587				continue;
1588			if (SRCBUSLINE(intr) != SRCBUSLINE(ointr))
1589				continue;
1590		}
1591		if (bustype == ISA || bustype == EISA) {
1592			if (SRCBUSIRQ(intr) != SRCBUSIRQ(ointr))
1593				continue;
1594		}
1595		if (INTPIN(intr) == INTPIN(ointr))
1596			continue;
1597		break;
1598	}
1599	if (ointr >= nintrs) {
1600		return -1;
1601	}
1602	return INTIRQ(ointr);
1603}
1604#undef SRCBUSLINE
1605#undef SRCBUSDEVICE
1606#undef SRCBUSID
1607#undef SRCBUSIRQ
1608
1609#undef INTPIN
1610#undef INTIRQ
1611#undef INTAPIC
1612#undef INTTYPE
1613
1614
1615/*
1616 * Reprogram the MB chipset to NOT redirect an ISA INTerrupt.
1617 *
1618 * XXX FIXME:
1619 *  Exactly what this means is unclear at this point.  It is a solution
1620 *  for motherboards that redirect the MBIRQ0 pin.  Generically a motherboard
1621 *  could route any of the ISA INTs to upper (>15) IRQ values.  But most would
1622 *  NOT be redirected via MBIRQ0, thus "undirect()ing" them would NOT be an
1623 *  option.
1624 */
1625int
1626undirect_isa_irq(int rirq)
1627{
1628#if defined(READY)
1629	if (bootverbose)
1630	    printf("Freeing redirected ISA irq %d.\n", rirq);
1631	/** FIXME: tickle the MB redirector chip */
1632	return -1;
1633#else
1634	if (bootverbose)
1635	    printf("Freeing (NOT implemented) redirected ISA irq %d.\n", rirq);
1636	return 0;
1637#endif  /* READY */
1638}
1639
1640
1641/*
1642 * Reprogram the MB chipset to NOT redirect a PCI INTerrupt
1643 */
1644int
1645undirect_pci_irq(int rirq)
1646{
1647#if defined(READY)
1648	if (bootverbose)
1649		printf("Freeing redirected PCI irq %d.\n", rirq);
1650
1651	/** FIXME: tickle the MB redirector chip */
1652	return -1;
1653#else
1654	if (bootverbose)
1655		printf("Freeing (NOT implemented) redirected PCI irq %d.\n",
1656		       rirq);
1657	return 0;
1658#endif  /* READY */
1659}
1660
1661
1662/*
1663 * given a bus ID, return:
1664 *  the bus type if found
1665 *  -1 if NOT found
1666 */
1667int
1668apic_bus_type(int id)
1669{
1670	int     x;
1671
1672	for (x = 0; x < mp_nbusses; ++x)
1673		if (bus_data[x].bus_id == id)
1674			return bus_data[x].bus_type;
1675
1676	return -1;
1677}
1678
1679
1680/*
1681 * given a LOGICAL APIC# and pin#, return:
1682 *  the associated src bus ID if found
1683 *  -1 if NOT found
1684 */
1685int
1686apic_src_bus_id(int apic, int pin)
1687{
1688	int     x;
1689
1690	/* search each of the possible INTerrupt sources */
1691	for (x = 0; x < nintrs; ++x)
1692		if ((apic == ID_TO_IO(io_apic_ints[x].dst_apic_id)) &&
1693		    (pin == io_apic_ints[x].dst_apic_int))
1694			return (io_apic_ints[x].src_bus_id);
1695
1696	return -1;		/* NOT found */
1697}
1698
1699
1700/*
1701 * given a LOGICAL APIC# and pin#, return:
1702 *  the associated src bus IRQ if found
1703 *  -1 if NOT found
1704 */
1705int
1706apic_src_bus_irq(int apic, int pin)
1707{
1708	int     x;
1709
1710	for (x = 0; x < nintrs; x++)
1711		if ((apic == ID_TO_IO(io_apic_ints[x].dst_apic_id)) &&
1712		    (pin == io_apic_ints[x].dst_apic_int))
1713			return (io_apic_ints[x].src_bus_irq);
1714
1715	return -1;		/* NOT found */
1716}
1717
1718
1719/*
1720 * given a LOGICAL APIC# and pin#, return:
1721 *  the associated INTerrupt type if found
1722 *  -1 if NOT found
1723 */
1724int
1725apic_int_type(int apic, int pin)
1726{
1727	int     x;
1728
1729	/* search each of the possible INTerrupt sources */
1730	for (x = 0; x < nintrs; ++x)
1731		if ((apic == ID_TO_IO(io_apic_ints[x].dst_apic_id)) &&
1732		    (pin == io_apic_ints[x].dst_apic_int))
1733			return (io_apic_ints[x].int_type);
1734
1735	return -1;		/* NOT found */
1736}
1737
1738int
1739apic_irq(int apic, int pin)
1740{
1741	int x;
1742	int res;
1743
1744	for (x = 0; x < nintrs; ++x)
1745		if ((apic == ID_TO_IO(io_apic_ints[x].dst_apic_id)) &&
1746		    (pin == io_apic_ints[x].dst_apic_int)) {
1747			res = io_apic_ints[x].int_vector;
1748			if (res == 0xff)
1749				return -1;
1750			if (apic != int_to_apicintpin[res].ioapic)
1751				panic("apic_irq: inconsistent table");
1752			if (pin != int_to_apicintpin[res].int_pin)
1753				panic("apic_irq: inconsistent table (2)");
1754			return res;
1755		}
1756	return -1;
1757}
1758
1759
1760/*
1761 * given a LOGICAL APIC# and pin#, return:
1762 *  the associated trigger mode if found
1763 *  -1 if NOT found
1764 */
1765int
1766apic_trigger(int apic, int pin)
1767{
1768	int     x;
1769
1770	/* search each of the possible INTerrupt sources */
1771	for (x = 0; x < nintrs; ++x)
1772		if ((apic == ID_TO_IO(io_apic_ints[x].dst_apic_id)) &&
1773		    (pin == io_apic_ints[x].dst_apic_int))
1774			return ((io_apic_ints[x].int_flags >> 2) & 0x03);
1775
1776	return -1;		/* NOT found */
1777}
1778
1779
1780/*
1781 * given a LOGICAL APIC# and pin#, return:
1782 *  the associated 'active' level if found
1783 *  -1 if NOT found
1784 */
1785int
1786apic_polarity(int apic, int pin)
1787{
1788	int     x;
1789
1790	/* search each of the possible INTerrupt sources */
1791	for (x = 0; x < nintrs; ++x)
1792		if ((apic == ID_TO_IO(io_apic_ints[x].dst_apic_id)) &&
1793		    (pin == io_apic_ints[x].dst_apic_int))
1794			return (io_apic_ints[x].int_flags & 0x03);
1795
1796	return -1;		/* NOT found */
1797}
1798
1799
1800/*
1801 * set data according to MP defaults
1802 * FIXME: probably not complete yet...
1803 */
1804static void
1805default_mp_table(int type)
1806{
1807	int     ap_cpu_id;
1808#if defined(APIC_IO)
1809	int     io_apic_id;
1810	int     pin;
1811#endif	/* APIC_IO */
1812
1813#if 0
1814	printf("  MP default config type: %d\n", type);
1815	switch (type) {
1816	case 1:
1817		printf("   bus: ISA, APIC: 82489DX\n");
1818		break;
1819	case 2:
1820		printf("   bus: EISA, APIC: 82489DX\n");
1821		break;
1822	case 3:
1823		printf("   bus: EISA, APIC: 82489DX\n");
1824		break;
1825	case 4:
1826		printf("   bus: MCA, APIC: 82489DX\n");
1827		break;
1828	case 5:
1829		printf("   bus: ISA+PCI, APIC: Integrated\n");
1830		break;
1831	case 6:
1832		printf("   bus: EISA+PCI, APIC: Integrated\n");
1833		break;
1834	case 7:
1835		printf("   bus: MCA+PCI, APIC: Integrated\n");
1836		break;
1837	default:
1838		printf("   future type\n");
1839		break;
1840		/* NOTREACHED */
1841	}
1842#endif	/* 0 */
1843
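	/* the physical APIC ID lives in the upper bits of the local APIC ID
	   register, hence the shift by 24 */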
1844	boot_cpu_id = (lapic.id & APIC_ID_MASK) >> 24;
1845	ap_cpu_id = (boot_cpu_id == 0) ? 1 : 0;
1846
1847	/* BSP */
1848	CPU_TO_ID(0) = boot_cpu_id;
1849	ID_TO_CPU(boot_cpu_id) = 0;
1850
1851	/* one and only AP */
1852	CPU_TO_ID(1) = ap_cpu_id;
1853	ID_TO_CPU(ap_cpu_id) = 1;
1854
1855#if defined(APIC_IO)
1856	/* one and only IO APIC */
1857	io_apic_id = (io_apic_read(0, IOAPIC_ID) & APIC_ID_MASK) >> 24;
1858
1859	/*
1860	 * sanity check, refer to MP spec section 3.6.6, last paragraph;
1861	 * necessary as some hardware doesn't properly set up the IO APIC
1862	 */
1863#if defined(REALLY_ANAL_IOAPICID_VALUE)
1864	if (io_apic_id != 2) {
1865#else
1866	if ((io_apic_id == 0) || (io_apic_id == 1) || (io_apic_id == 15)) {
1867#endif	/* REALLY_ANAL_IOAPICID_VALUE */
1868		io_apic_set_id(0, 2);
1869		io_apic_id = 2;
1870	}
1871	IO_TO_ID(0) = io_apic_id;
1872	ID_TO_IO(io_apic_id) = 0;
1873#endif	/* APIC_IO */
1874
1875	/* fill out bus entries */
1876	switch (type) {
1877	case 1:
1878	case 2:
1879	case 3:
1880	case 4:
1881	case 5:
1882	case 6:
1883	case 7:
1884		bus_data[0].bus_id = default_data[type - 1][1];
1885		bus_data[0].bus_type = default_data[type - 1][2];
1886		bus_data[1].bus_id = default_data[type - 1][3];
1887		bus_data[1].bus_type = default_data[type - 1][4];
1888		break;
1889
1890	/* case 4: case 7:		   MCA NOT supported */
1891	default:		/* illegal/reserved */
1892		panic("BAD default MP config: %d", type);
1893		/* NOTREACHED */
1894	}
1895
1896#if defined(APIC_IO)
1897	/* general cases from MP v1.4, table 5-2 */
1898	for (pin = 0; pin < 16; ++pin) {
1899		io_apic_ints[pin].int_type = 0;
1900		io_apic_ints[pin].int_flags = 0x05;	/* edge/active-hi */
1901		io_apic_ints[pin].src_bus_id = 0;
1902		io_apic_ints[pin].src_bus_irq = pin;	/* IRQ2 caught below */
1903		io_apic_ints[pin].dst_apic_id = io_apic_id;
1904		io_apic_ints[pin].dst_apic_int = pin;	/* 1-to-1 */
1905	}
1906
1907	/* special cases from MP v1.4, table 5-2 */
1908	if (type == 2) {
1909		io_apic_ints[2].int_type = 0xff;	/* N/C */
1910		io_apic_ints[13].int_type = 0xff;	/* N/C */
1911#if !defined(APIC_MIXED_MODE)
1912		/** FIXME: ??? */
1913		panic("sorry, can't support type 2 default yet");
1914#endif	/* APIC_MIXED_MODE */
1915	}
1916	else
1917		io_apic_ints[2].src_bus_irq = 0;	/* ISA IRQ0 is on APIC INT 2 */
1918
1919	if (type == 7)
1920		io_apic_ints[0].int_type = 0xff;	/* N/C */
1921	else
1922		io_apic_ints[0].int_type = 3;	/* vectored 8259 */
1923#endif	/* APIC_IO */
1924}
1925
1926
1927/*
1928 * start each AP in our list
1929 */
1930static int
1931start_all_aps(u_int boot_addr)
1932{
1933	int     x, i, pg;
1934	u_char  mpbiosreason;
1935	u_long  mpbioswarmvec;
1936	struct globaldata *gd;
1937	char *stack;
1938
1939	POSTCODE(START_ALL_APS_POST);
1940
1941	/* initialize BSP's local APIC */
1942	apic_initialize();
1943	bsp_apic_ready = 1;
1944
1945	/* install the AP 1st level boot code */
1946	install_ap_tramp(boot_addr);
1947
1948
1949	/* save the current value of the warm-start vector */
1950	mpbioswarmvec = *((u_long *) WARMBOOT_OFF);
1951#ifndef PC98
1952	outb(CMOS_REG, BIOS_RESET);
1953	mpbiosreason = inb(CMOS_DATA);
1954#endif
1955
1956	/* record BSP in CPU map */
1957	all_cpus = 1;
1958
1959	/* set up 0 -> 4MB P==V mapping for AP boot */
1960	*(int *)PTD = PG_V | PG_RW | ((uintptr_t)(void *)KPTphys & PG_FRAME);
1961	invltlb();
1962
1963	/* start each AP */
1964	for (x = 1; x <= mp_naps; ++x) {
1965
1966		/* This is a bit verbose; it will go away soon.  */
1967
1968		/* first page of AP's private space */
1969		pg = x * i386_btop(sizeof(struct privatespace));
1970
1971		/* allocate a new private data page */
1972		gd = (struct globaldata *)kmem_alloc(kernel_map, PAGE_SIZE);
1973
1974		/* wire it into the private page table page */
1975		SMPpt[pg] = (pt_entry_t)(PG_V | PG_RW | vtophys(gd));
1976
1977		/* allocate and set up an idle stack data page */
1978		stack = (char *)kmem_alloc(kernel_map, UPAGES*PAGE_SIZE);
1979		for (i = 0; i < UPAGES; i++)
1980			SMPpt[pg + 5 + i] = (pt_entry_t)
1981			    (PG_V | PG_RW | vtophys(PAGE_SIZE * i + stack));
1982
1983		SMPpt[pg + 1] = 0;		/* *prv_CMAP1 */
1984		SMPpt[pg + 2] = 0;		/* *prv_CMAP2 */
1985		SMPpt[pg + 3] = 0;		/* *prv_CMAP3 */
1986		SMPpt[pg + 4] = 0;		/* *prv_PMAP1 */
1987
1988		/* prime data page for it to use */
1989		SLIST_INSERT_HEAD(&cpuhead, gd, gd_allcpu);
1990		gd->gd_cpuid = x;
1991		gd->gd_cpu_lockid = x << 24;
1992
1993		/* setup a vector to our boot code */
1994		*((volatile u_short *) WARMBOOT_OFF) = WARMBOOT_TARGET;
1995		*((volatile u_short *) WARMBOOT_SEG) = (boot_addr >> 4);
1996#ifndef PC98
1997		outb(CMOS_REG, BIOS_RESET);
1998		outb(CMOS_DATA, BIOS_WARM);	/* 'warm-start' */
1999#endif
2000
2001		bootSTK = &SMP_prvspace[x].idlestack[UPAGES*PAGE_SIZE];
2002		bootAP = x;
2003
2004		/* attempt to start the Application Processor */
2005		CHECK_INIT(99);	/* setup checkpoints */
2006		if (!start_ap(x, boot_addr)) {
2007			printf("AP #%d (PHY# %d) failed!\n", x, CPU_TO_ID(x));
2008			CHECK_PRINT("trace");	/* show checkpoints */
2009			/* better panic as the AP may be running loose */
2010			printf("panic y/n? [y] ");
2011			if (cngetc() != 'n')
2012				panic("bye-bye");
2013		}
2014		CHECK_PRINT("trace");		/* show checkpoints */
2015
2016		/* record its version info */
2017		cpu_apic_versions[x] = cpu_apic_versions[0];
2018
2019		all_cpus |= (1 << x);		/* record AP in CPU map */
2020	}
2021
2022	/* build our map of 'other' CPUs */
2023	PCPU_SET(other_cpus, all_cpus & ~(1 << PCPU_GET(cpuid)));
2024
2025	/* fill in our (BSP) APIC version */
2026	cpu_apic_versions[0] = lapic.version;
2027
2028	/* restore the warmstart vector */
2029	*(u_long *) WARMBOOT_OFF = mpbioswarmvec;
2030#ifndef PC98
2031	outb(CMOS_REG, BIOS_RESET);
2032	outb(CMOS_DATA, mpbiosreason);
2033#endif
2034
2035	/*
2036	 * Set up the idle context for the BSP.  Similar to above except
2037	 * that some of it was done by locore, some by pmap.c, and some is implicit
2038	 * because the BSP is cpu#0 and the page is initially zero, and also
2039	 * because we can refer to variables by name on the BSP.
2040	 */
2041
2042	/* Allocate and setup BSP idle stack */
2043	stack = (char *)kmem_alloc(kernel_map, UPAGES * PAGE_SIZE);
2044	for (i = 0; i < UPAGES; i++)
2045		SMPpt[5 + i] = (pt_entry_t)
2046		    (PG_V | PG_RW | vtophys(PAGE_SIZE * i + stack));
2047
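	/* undo the temporary 0 -> 4MB P==V mapping installed for AP boot */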
2048	*(int *)PTD = 0;
2049	pmap_set_opt();
2050
2051	/* number of APs actually started */
2052	return mp_ncpus - 1;
2053}
2054
2055
2056/*
2057 * load the 1st level AP boot code into base memory.
2058 */
2059
2060/* targets for relocation */
2061extern void bigJump(void);
2062extern void bootCodeSeg(void);
2063extern void bootDataSeg(void);
2064extern void MPentry(void);
2065extern u_int MP_GDT;
2066extern u_int mp_gdtbase;
2067
2068static void
2069install_ap_tramp(u_int boot_addr)
2070{
2071	int     x;
2072	int     size = *(int *) ((u_long) & bootMP_size);
2073	u_char *src = (u_char *) ((u_long) bootMP);
2074	u_char *dst = (u_char *) boot_addr + KERNBASE;
2075	u_int   boot_base = (u_int) bootMP;
2076	u_int8_t *dst8;
2077	u_int16_t *dst16;
2078	u_int32_t *dst32;
2079
2080	POSTCODE(INSTALL_AP_TRAMP_POST);
2081
2082	for (x = 0; x < size; ++x)
2083		*dst++ = *src++;
2084
2085	/*
2086	 * modify addresses in code we just moved to basemem. unfortunately we
2087	 * need fairly detailed info about mpboot.s for this to work.  changes
2088	 * to mpboot.s might require changes here.
2089	 */
2090
2091	/* boot code is located in KERNEL space */
2092	dst = (u_char *) boot_addr + KERNBASE;
2093
2094	/* modify the lgdt arg */
2095	dst32 = (u_int32_t *) (dst + ((u_int) & mp_gdtbase - boot_base));
2096	*dst32 = boot_addr + ((u_int) & MP_GDT - boot_base);
2097
2098	/* modify the ljmp target for MPentry() */
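	/*
	 * The '+ 1' presumably skips the far-jump opcode byte emitted at
	 * bigJump in mpboot.s, so that only the 32-bit target operand is
	 * overwritten; the target is MPentry's physical address
	 * (MPentry - KERNBASE), suggesting paging is not yet enabled when
	 * the trampoline takes this jump.
	 */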
2099	dst32 = (u_int32_t *) (dst + ((u_int) bigJump - boot_base) + 1);
2100	*dst32 = ((u_int) MPentry - KERNBASE);
2101
2102	/* modify the target for boot code segment */
2103	dst16 = (u_int16_t *) (dst + ((u_int) bootCodeSeg - boot_base));
2104	dst8 = (u_int8_t *) (dst16 + 1);
2105	*dst16 = (u_int) boot_addr & 0xffff;
2106	*dst8 = ((u_int) boot_addr >> 16) & 0xff;
2107
2108	/* modify the target for boot data segment */
2109	dst16 = (u_int16_t *) (dst + ((u_int) bootDataSeg - boot_base));
2110	dst8 = (u_int8_t *) (dst16 + 1);
2111	*dst16 = (u_int) boot_addr & 0xffff;
2112	*dst8 = ((u_int) boot_addr >> 16) & 0xff;
2113}
2114
2115
2116/*
2117 * This function starts the AP (application processor) identified by the
2118 * logical CPU number 'logical_cpu'; its physical APIC ID is looked up
2119 * with CPU_TO_ID().  It does quite a "song and dance" to accomplish
2120 * this, which is necessary because of the nuances of the different
2121 * hardware we might encounter.  It ain't pretty, but it seems to work.
2122 */
2123static int
2124start_ap(int logical_cpu, u_int boot_addr)
2125{
2126	int     physical_cpu;
2127	int     vector;
2128	int     cpus;
2129	u_long  icr_lo, icr_hi;
2130
2131	POSTCODE(START_AP_POST);
2132
2133	/* get the PHYSICAL APIC ID# */
2134	physical_cpu = CPU_TO_ID(logical_cpu);
2135
2136	/* calculate the vector */
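	/*
	 * The STARTUP IPI's 8-bit vector names a 4 KB page in the first
	 * megabyte: the target AP begins executing in real mode at physical
	 * address (vector << 12), i.e. at boot_addr.
	 */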
2137	vector = (boot_addr >> 12) & 0xff;
2138
2139	/* used as a watchpoint to signal AP startup */
2140	cpus = mp_ncpus;
2141
2142	/*
2143	 * First we do an INIT/RESET IPI.  This INIT IPI might be acted on,
2144	 * resetting and running the target CPU; OR it might be latched (P5
2145	 * bug), leaving the CPU waiting for a STARTUP IPI; OR it might be
2146	 * ignored entirely.
2147	 */
2148
2149	/* setup the address for the target AP */
2150	icr_hi = lapic.icr_hi & ~APIC_ID_MASK;
2151	icr_hi |= (physical_cpu << 24);
2152	lapic.icr_hi = icr_hi;
2153
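	/*
	 * ICR low-word encodings used below (bits 8-10: delivery mode,
	 * bit 14: level assert, bit 15: level trigger, bits 0-7: vector):
	 *   0x0000c500            INIT, level triggered, level asserted
	 *   0x00008500            INIT, level triggered, level deasserted
	 *   0x00000600 | vector   STARTUP IPI, vector = boot page number
	 */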
2154	/* do an INIT IPI: assert RESET */
2155	icr_lo = lapic.icr_lo & 0xfff00000;
2156	lapic.icr_lo = icr_lo | 0x0000c500;
2157
2158	/* wait for pending status end */
2159	while (lapic.icr_lo & APIC_DELSTAT_MASK)
2160		 /* spin */ ;
2161
2162	/* do an INIT IPI: deassert RESET */
2163	lapic.icr_lo = icr_lo | 0x00008500;
2164
2165	/* wait for pending status end */
2166	u_sleep(10000);		/* wait ~10mS */
2167	while (lapic.icr_lo & APIC_DELSTAT_MASK)
2168		 /* spin */ ;
2169
2170	/*
2171	 * Next we do a STARTUP IPI.  The previous INIT IPI might still be
2172	 * latched (P5 bug); in that case this first STARTUP terminates
2173	 * immediately and the previously started INIT IPI continues.
2174	 * Otherwise the previous INIT IPI has either already run or been
2175	 * ignored, and in either case this STARTUP IPI will run on the
2176	 * target CPU.
2177	 */
2178
2179	/* do a STARTUP IPI */
2180	lapic.icr_lo = icr_lo | 0x00000600 | vector;
2181	while (lapic.icr_lo & APIC_DELSTAT_MASK)
2182		 /* spin */ ;
2183	u_sleep(200);		/* wait ~200uS */
2184
2185	/*
2186	 * Finally we do a second STARTUP IPI.  It should run IF the previous
2187	 * STARTUP IPI was cancelled by a latched INIT IPI; otherwise it will
2188	 * be ignored, since only ONE STARTUP IPI is recognized after a
2189	 * hardware RESET or INIT IPI.
2190	 */
2191
2192	lapic.icr_lo = icr_lo | 0x00000600 | vector;
2193	while (lapic.icr_lo & APIC_DELSTAT_MASK)
2194		 /* spin */ ;
2195	u_sleep(200);		/* wait ~200uS */
2196
2197	/* wait for it to start */
2198	set_apic_timer(5000000);/* == 5 seconds */
2199	while (read_apic_timer())
2200		if (mp_ncpus > cpus)
2201			return 1;	/* return SUCCESS */
2202
2203	return 0;		/* return FAILURE */
2204}
2205
2206/*
2207 * Flush the TLB on all other CPUs.
2208 *
2209 * XXX: Needs to handshake and wait for completion before proceeding.
2210 */
2211void
2212smp_invltlb(void)
2213{
2214#if defined(APIC_IO)
2215	if (smp_started && invltlb_ok)
2216		all_but_self_ipi(XINVLTLB_OFFSET);
2217#endif  /* APIC_IO */
2218}
2219
2220void
2221invlpg(u_int addr)
2222{
2223	__asm   __volatile("invlpg (%0)"::"r"(addr):"memory");
2224
2225	/* send a message to the other CPUs */
2226	smp_invltlb();
2227}
2228
2229void
2230invltlb(void)
2231{
2232	u_long  temp;
2233
2234	/*
2235	 * This should be implemented as load_cr3(rcr3()) when load_cr3() is
2236	 * inlined.
2237	 */
2238	__asm __volatile("movl %%cr3, %0; movl %0, %%cr3":"=r"(temp) :: "memory");
2239
2240	/* send a message to the other CPUs */
2241	smp_invltlb();
2242}
2243
2244
2245/*
2246 * This is called once the rest of the system is up and running and we're
2247 * ready to let the AP's out of the pen.
2248 */
2249void
2250ap_init(void)
2251{
2252	u_int	apic_id;
2253
2254	/* spin until all the AP's are ready */
2255	while (!aps_ready)
2256		/* spin */ ;
2257
2258	/* lock against other AP's that are waking up */
2259	mtx_enter(&ap_boot_mtx, MTX_SPIN);
2260
2261	/* BSP may have changed PTD while we're waiting for the lock */
2262	cpu_invltlb();
2263
2264	smp_cpus++;
2265
2266#if defined(I586_CPU) && !defined(NO_F00F_HACK)
2267	lidt(&r_idt);
2268#endif
2269
2270	/* Build our map of 'other' CPUs. */
2271	PCPU_SET(other_cpus, all_cpus & ~(1 << PCPU_GET(cpuid)));
2272
2273	printf("SMP: AP CPU #%d Launched!\n", PCPU_GET(cpuid));
2274
2275	/* set up CPU registers and state */
2276	cpu_setregs();
2277
2278	/* set up FPU state on the AP */
2279	npxinit(__INITIAL_NPXCW__);
2280
2281	/* A quick check from sanity claus */
2282	apic_id = (apic_id_to_logical[(lapic.id & 0x0f000000) >> 24]);
2283	if (PCPU_GET(cpuid) != apic_id) {
2284		printf("SMP: cpuid = %d\n", PCPU_GET(cpuid));
2285		printf("SMP: apic_id = %d\n", apic_id);
2286		printf("PTD[MPPTDI] = %p\n", (void *)PTD[MPPTDI]);
2287		panic("cpuid mismatch! boom!!");
2288	}
2289
2290	/* Init local apic for irq's */
2291	apic_initialize();
2292
2293	/* Set memory range attributes for this CPU to match the BSP */
2294	mem_range_AP_init();
2295
2296	/*
2297	 * Activate smp_invltlb although, strictly speaking, this isn't
2298	 * quite correct yet.  We should have a bitfield for cpus willing
2299	 * to accept TLB flush IPI's or something and sync them.
2300	 */
2301	if (smp_cpus == mp_ncpus) {
2302		invltlb_ok = 1;
2303		smp_started = 1; /* enable IPI's, tlb shootdown, freezes etc */
2304		smp_active = 1;	 /* historic */
2305	}
2306
2307	/* let other AP's wake up now */
2308	mtx_exit(&ap_boot_mtx, MTX_SPIN);
2309
2310	/* wait until all the AP's are up */
2311	while (smp_started == 0)
2312		; /* nothing */
2313
2314	/*
2315	 * Set curproc to our per-cpu idleproc so that mutexes have
2316	 * something unique to lock with.
2317	 */
2318	PCPU_SET(curproc, PCPU_GET(idleproc));
2319
2320	microuptime(PCPU_PTR(switchtime));
2321	PCPU_SET(switchticks, ticks);
2322
2323	/* ok, now grab sched_lock and enter the scheduler */
2324	enable_intr();
2325	mtx_enter(&sched_lock, MTX_SPIN);
2326	cpu_throw();	/* doesn't return */
2327
2328	panic("scheduler returned us to ap_init");
2329}
2330
2331#ifdef BETTER_CLOCK
2332
2333#define CHECKSTATE_USER	0
2334#define CHECKSTATE_SYS	1
2335#define CHECKSTATE_INTR	2
2336
2337/* Do not staticize.  Used from apic_vector.s */
2338struct proc*	checkstate_curproc[MAXCPU];
2339int		checkstate_cpustate[MAXCPU];
2340u_long		checkstate_pc[MAXCPU];
2341
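/*
 * Convert a user PC into a byte index into the process's profiling buffer,
 * mirroring the calculation in addupc(): pr_scale is a fixed-point fraction
 * with 16 bits below the binary point, so the 64-bit multiply followed by
 * '>> 16' scales the text offset, and '& ~1' aligns the result to the
 * 2-byte profiling counters.
 */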
2342#define PC_TO_INDEX(pc, prof)				\
2343        ((int)(((u_quad_t)((pc) - (prof)->pr_off) *	\
2344            (u_quad_t)((prof)->pr_scale)) >> 16) & ~1)
2345
2346static void
2347addupc_intr_forwarded(struct proc *p, int id, int *astmap)
2348{
2349	int i;
2350	struct uprof *prof;
2351	u_long pc;
2352
2353	pc = checkstate_pc[id];
2354	prof = &p->p_stats->p_prof;
2355	if (pc >= prof->pr_off &&
2356	    (i = PC_TO_INDEX(pc, prof)) < prof->pr_size) {
2357		mtx_assert(&sched_lock, MA_OWNED);
2358		if ((p->p_sflag & PS_OWEUPC) == 0) {
2359			prof->pr_addr = pc;
2360			prof->pr_ticks = 1;
2361			p->p_sflag |= PS_OWEUPC;
2362		}
2363		*astmap |= (1 << id);
2364	}
2365}
2366
2367static void
2368forwarded_statclock(int id, int pscnt, int *astmap)
2369{
2370	struct pstats *pstats;
2371	long rss;
2372	struct rusage *ru;
2373	struct vmspace *vm;
2374	int cpustate;
2375	struct proc *p;
2376#ifdef GPROF
2377	register struct gmonparam *g;
2378	int i;
2379#endif
2380
2381	mtx_assert(&sched_lock, MA_OWNED);
2382	p = checkstate_curproc[id];
2383	cpustate = checkstate_cpustate[id];
2384
2385	/* XXX */
2386	if (p->p_ithd)
2387		cpustate = CHECKSTATE_INTR;
2388	else if (p == SMP_prvspace[id].globaldata.gd_idleproc)
2389		cpustate = CHECKSTATE_SYS;
2390
2391	switch (cpustate) {
2392	case CHECKSTATE_USER:
2393		if (p->p_sflag & PS_PROFIL)
2394			addupc_intr_forwarded(p, id, astmap);
2395		if (pscnt > 1)
2396			return;
2397		p->p_uticks++;
2398		if (p->p_nice > NZERO)
2399			cp_time[CP_NICE]++;
2400		else
2401			cp_time[CP_USER]++;
2402		break;
2403	case CHECKSTATE_SYS:
2404#ifdef GPROF
2405		/*
2406		 * Kernel statistics are just like addupc_intr, only easier.
2407		 */
2408		g = &_gmonparam;
2409		if (g->state == GMON_PROF_ON) {
2410			i = checkstate_pc[id] - g->lowpc;
2411			if (i < g->textsize) {
2412				i /= HISTFRACTION * sizeof(*g->kcount);
2413				g->kcount[i]++;
2414			}
2415		}
2416#endif
2417		if (pscnt > 1)
2418			return;
2419
2420		p->p_sticks++;
2421		if (p == SMP_prvspace[id].globaldata.gd_idleproc)
2422			cp_time[CP_IDLE]++;
2423		else
2424			cp_time[CP_SYS]++;
2425		break;
2426	case CHECKSTATE_INTR:
2427	default:
2428#ifdef GPROF
2429		/*
2430		 * Kernel statistics are just like addupc_intr, only easier.
2431		 */
2432		g = &_gmonparam;
2433		if (g->state == GMON_PROF_ON) {
2434			i = checkstate_pc[id] - g->lowpc;
2435			if (i < g->textsize) {
2436				i /= HISTFRACTION * sizeof(*g->kcount);
2437				g->kcount[i]++;
2438			}
2439		}
2440#endif
2441		if (pscnt > 1)
2442			return;
2443		KASSERT(p != NULL, ("NULL process in interrupt state"));
2444		p->p_iticks++;
2445		cp_time[CP_INTR]++;
2446	}
2447
2448	schedclock(p);
2449
2450	/* Update resource usage integrals and maximums. */
2451	if ((pstats = p->p_stats) != NULL &&
2452	    (ru = &pstats->p_ru) != NULL &&
2453	    (vm = p->p_vmspace) != NULL) {
2454		ru->ru_ixrss += pgtok(vm->vm_tsize);
2455		ru->ru_idrss += pgtok(vm->vm_dsize);
2456		ru->ru_isrss += pgtok(vm->vm_ssize);
2457		rss = pgtok(vmspace_resident_count(vm));
2458		if (ru->ru_maxrss < rss)
2459			ru->ru_maxrss = rss;
2460	}
2461}
2462
2463void
2464forward_statclock(int pscnt)
2465{
2466	int map;
2467	int id;
2468	int i;
2469
2470	/* Kludge. We don't yet have separate locks for the interrupts
2471	 * and the kernel. This means that we cannot let the other processors
2472	 * handle complex interrupts while inhibiting them from entering
2473	 * the kernel in a non-interrupt context.
2474	 *
2475	 * What we can do, without changing the locking mechanisms yet,
2476	 * is to let the other processors handle a very simple interrupt
2477	 * (which determines the processor states) and do the main
2478	 * work ourselves.
2479	 */
2480
2481	CTR1(KTR_SMP, "forward_statclock(%d)", pscnt);
2482
2483	if (!smp_started || !invltlb_ok || cold || panicstr)
2484		return;
2485
2486	/* Step 1: Probe state   (user, cpu, interrupt, spinlock, idle) */
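	/*
	 * The XCPUCHECKSTATE IPI handler (in apic_vector.s) records each
	 * target CPU's current proc, state and PC in checkstate_curproc[],
	 * checkstate_cpustate[] and checkstate_pc[], then sets that CPU's
	 * bit in checkstate_probed_cpus, which we poll for below.
	 */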
2487
2488	map = PCPU_GET(other_cpus) & ~stopped_cpus ;
2489	checkstate_probed_cpus = 0;
2490	if (map != 0)
2491		selected_apic_ipi(map,
2492				  XCPUCHECKSTATE_OFFSET, APIC_DELMODE_FIXED);
2493
2494	i = 0;
2495	while (checkstate_probed_cpus != map) {
2496		/* spin */
2497		i++;
2498		if (i == 100000) {
2499#ifdef BETTER_CLOCK_DIAGNOSTIC
2500			printf("forward_statclock: checkstate %x\n",
2501			       checkstate_probed_cpus);
2502#endif
2503			break;
2504		}
2505	}
2506
2507	/*
2508	 * Step 2: walk through the other processors' processes, updating
2509	 * ticks and profiling info.
2510	 */
2511
2512	map = 0;
2513	for (id = 0; id < mp_ncpus; id++) {
2514		if (id == PCPU_GET(cpuid))
2515			continue;
2516		if (((1 << id) & checkstate_probed_cpus) == 0)
2517			continue;
2518		forwarded_statclock(id, pscnt, &map);
2519	}
2520	if (map != 0) {
2521		checkstate_need_ast |= map;
2522		selected_apic_ipi(map, XCPUAST_OFFSET, APIC_DELMODE_FIXED);
2523		i = 0;
2524		while ((checkstate_need_ast & map) != 0) {
2525			/* spin */
2526			i++;
2527			if (i > 100000) {
2528#ifdef BETTER_CLOCK_DIAGNOSTIC
2529				printf("forward_statclock: dropped ast 0x%x\n",
2530				       checkstate_need_ast & map);
2531#endif
2532				break;
2533			}
2534		}
2535	}
2536}
2537
2538void
2539forward_hardclock(int pscnt)
2540{
2541	int map;
2542	int id;
2543	struct proc *p;
2544	struct pstats *pstats;
2545	int i;
2546
2547	/* Kludge. We don't yet have separate locks for the interrupts
2548	 * and the kernel. This means that we cannot let the other processors
2549	 * handle complex interrupts while inhibiting them from entering
2550	 * the kernel in a non-interrupt context.
2551	 *
2552	 * What we can do, without changing the locking mechanisms yet,
2553	 * is to let the other processors handle a very simple interrupt
2554	 * (which determines the processor states) and do the main
2555	 * work ourselves.
2556	 */
2557
2558	CTR1(KTR_SMP, "forward_hardclock(%d)", pscnt);
2559
2560	if (!smp_started || !invltlb_ok || cold || panicstr)
2561		return;
2562
2563	/* Step 1: Probe state   (user, cpu, interrupt, spinlock, idle) */
2564
2565	map = PCPU_GET(other_cpus) & ~stopped_cpus ;
2566	checkstate_probed_cpus = 0;
2567	if (map != 0)
2568		selected_apic_ipi(map,
2569				  XCPUCHECKSTATE_OFFSET, APIC_DELMODE_FIXED);
2570
2571	i = 0;
2572	while (checkstate_probed_cpus != map) {
2573		/* spin */
2574		i++;
2575		if (i == 100000) {
2576#ifdef BETTER_CLOCK_DIAGNOSTIC
2577			printf("forward_hardclock: checkstate %x\n",
2578			       checkstate_probed_cpus);
2579#endif
2580			break;
2581		}
2582	}
2583
2584	/*
2585	 * Step 2: walk through the other processors' processes, updating the
2586	 * virtual and profiling timers.  If stathz == 0, also update ticks
2587	 * and profiling info.
2588	 */
2589
2590	map = 0;
2591	for (id = 0; id < mp_ncpus; id++) {
2592		if (id == PCPU_GET(cpuid))
2593			continue;
2594		if (((1 << id) & checkstate_probed_cpus) == 0)
2595			continue;
2596		p = checkstate_curproc[id];
2597		if (p) {
2598			pstats = p->p_stats;
2599			if (checkstate_cpustate[id] == CHECKSTATE_USER &&
2600			    timevalisset(&pstats->p_timer[ITIMER_VIRTUAL].it_value) &&
2601			    itimerdecr(&pstats->p_timer[ITIMER_VIRTUAL], tick) == 0) {
2602				p->p_sflag |= PS_ALRMPEND;
2603				map |= (1 << id);
2604			}
2605			if (timevalisset(&pstats->p_timer[ITIMER_PROF].it_value) &&
2606			    itimerdecr(&pstats->p_timer[ITIMER_PROF], tick) == 0) {
2607				p->p_sflag |= PS_PROFPEND;
2608				map |= (1 << id);
2609			}
2610		}
2611		if (stathz == 0) {
2612			forwarded_statclock( id, pscnt, &map);
2613		}
2614	}
2615	if (map != 0) {
2616		checkstate_need_ast |= map;
2617		selected_apic_ipi(map, XCPUAST_OFFSET, APIC_DELMODE_FIXED);
2618		i = 0;
2619		while ((checkstate_need_ast & map) != 0) {
2620			/* spin */
2621			i++;
2622			if (i > 100000) {
2623#ifdef BETTER_CLOCK_DIAGNOSTIC
2624				printf("forward_hardclock: dropped ast 0x%x\n",
2625				       checkstate_need_ast & map);
2626#endif
2627				break;
2628			}
2629		}
2630	}
2631}
2632
2633#endif /* BETTER_CLOCK */
2634
2635void
2636forward_signal(struct proc *p)
2637{
2638	int map;
2639	int id;
2640	int i;
2641
2642	/* Kludge. We don't yet have separate locks for the interrupts
2643	 * and the kernel. This means that we cannot let the other processors
2644	 * handle complex interrupts while inhibiting them from entering
2645	 * the kernel in a non-interrupt context.
2646	 *
2647	 * What we can do, without changing the locking mechanisms yet,
2648	 * is to let the other processors handle a very simple interrupt
2649	 * (which determines the processor states) and do the main
2650	 * work ourselves.
2651	 */
2652
2653	CTR1(KTR_SMP, "forward_signal(%p)", p);
2654
2655	if (!smp_started || !invltlb_ok || cold || panicstr)
2656		return;
2657	if (!forward_signal_enabled)
2658		return;
2659	mtx_enter(&sched_lock, MTX_SPIN);
2660	while (1) {
2661		if (p->p_stat != SRUN) {
2662			mtx_exit(&sched_lock, MTX_SPIN);
2663			return;
2664		}
2665		id = p->p_oncpu;
2666		mtx_exit(&sched_lock, MTX_SPIN);
2667		if (id == 0xff)
2668			return;
2669		map = (1<<id);
2670		checkstate_need_ast |= map;
2671		selected_apic_ipi(map, XCPUAST_OFFSET, APIC_DELMODE_FIXED);
2672		i = 0;
2673		while ((checkstate_need_ast & map) != 0) {
2674			/* spin */
2675			i++;
2676			if (i > 100000) {
2677#if 0
2678				printf("forward_signal: dropped ast 0x%x\n",
2679				       checkstate_need_ast & map);
2680#endif
2681				break;
2682			}
2683		}
2684		mtx_enter(&sched_lock, MTX_SPIN);
2685		if (id == p->p_oncpu) {
2686			mtx_exit(&sched_lock, MTX_SPIN);
2687			return;
2688		}
2689	}
2690}
2691
2692void
2693forward_roundrobin(void)
2694{
2695	u_int map;
2696	int i;
2697
2698	CTR0(KTR_SMP, "forward_roundrobin()");
2699
2700	if (!smp_started || !invltlb_ok || cold || panicstr)
2701		return;
2702	if (!forward_roundrobin_enabled)
2703		return;
2704	resched_cpus |= PCPU_GET(other_cpus);
2705	map = PCPU_GET(other_cpus) & ~stopped_cpus ;
2706#if 1
2707	selected_apic_ipi(map, XCPUAST_OFFSET, APIC_DELMODE_FIXED);
2708#else
2709	(void) all_but_self_ipi(XCPUAST_OFFSET);
2710#endif
2711	i = 0;
2712	while ((checkstate_need_ast & map) != 0) {
2713		/* spin */
2714		i++;
2715		if (i > 100000) {
2716#if 0
2717			printf("forward_roundrobin: dropped ast 0x%x\n",
2718			       checkstate_need_ast & map);
2719#endif
2720			break;
2721		}
2722	}
2723}
2724
2725/*
2726 * When called, the executing CPU sends an IPI to the CPUs in 'map'
2727 *  requesting that they halt execution.
2728 *
2729 * Usually (but not necessarily) called with 'other_cpus' as its arg.
2730 *
2731 *  - Signals all CPUs in map to stop.
2732 *  - Waits for each to stop.
2733 *
2734 * Returns:
2735 *  -1: error
2736 *   0: NA
2737 *   1: ok
2738 *
2739 * XXX FIXME: this is not MP-safe; it needs a lock to prevent multiple
2740 *            CPUs from executing it at the same time.
2741 */
2742int
2743stop_cpus(u_int map)
2744{
2745	int count = 0;
2746
2747	if (!smp_started)
2748		return 0;
2749
2750	/* send the Xcpustop IPI to all CPUs in map */
2751	selected_apic_ipi(map, XCPUSTOP_OFFSET, APIC_DELMODE_FIXED);
2752
2753	while (count++ < 100000 && (stopped_cpus & map) != map)
2754		/* spin */ ;
2755
2756#ifdef DIAGNOSTIC
2757	if ((stopped_cpus & map) != map)
2758		printf("Warning: CPUs 0x%x did not stop!\n",
2759		    (~(stopped_cpus & map)) & map);
2760#endif
2761
2762	return 1;
2763}
2764
2765
2766/*
2767 * Called by a CPU to restart stopped CPUs.
2768 *
2769 * Usually (but not necessarily) called with 'stopped_cpus' as its arg.
2770 *
2771 *  - Signals all CPUs in map to restart.
2772 *  - Waits for each to restart.
2773 *
2774 * Returns:
2775 *  -1: error
2776 *   0: NA
2777 *   1: ok
2778 */
2779int
2780restart_cpus(u_int map)
2781{
2782	int count = 0;
2783
2784	if (!smp_started)
2785		return 0;
2786
2787	started_cpus = map;		/* signal other cpus to restart */
2788
2789	/* wait for each to clear its bit */
2790	while (count++ < 100000 && (stopped_cpus & map) != 0)
2791		/* spin */ ;
2792
2793#ifdef DIAGNOSTIC
2794	if ((stopped_cpus & map) != 0)
2795		printf("Warning: CPUs 0x%x did not restart!\n",
2796		    (~(stopped_cpus & map)) & map);
2797#endif
2798
2799	return 1;
2800}
2801
2802
2803#ifdef APIC_INTR_REORDER
2804/*
2805 *	Maintain mapping from softintr vector to isr bit in local apic.
2806 */
2807void
2808set_lapic_isrloc(int intr, int vector)
2809{
2810	if (intr < 0 || intr > 32)
2811		panic("set_lapic_isrloc: bad intr argument: %d",intr);
2812	if (vector < ICU_OFFSET || vector > 255)
2813		panic("set_lapic_isrloc: bad vector argument: %d",vector);
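	/*
	 * The local APIC's ISR is spread across eight 32-bit registers on
	 * 16-byte boundaries; 'vector >> 5' selects the register, and since
	 * lapic.isr0 is a 32-bit field the '<< 2' pointer arithmetic steps
	 * in 16-byte units to reach it.  The bit within that register is
	 * 'vector & 31'.
	 */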
2814	apic_isrbit_location[intr].location = &lapic.isr0 + ((vector>>5)<<2);
2815	apic_isrbit_location[intr].bit = (1<<(vector & 31));
2816}
2817#endif
2818
2819/*
2820 * All-CPU rendezvous.  CPUs are signalled, all execute the setup function
2821 * (if specified), rendezvous, execute the action function (if specified),
2822 * rendezvous again, execute the teardown function (if specified), and then
2823 * resume.
2824 *
2825 * Note that the supplied external functions _must_ be reentrant and aware
2826 * that they are running in parallel and in an unknown lock context.
2827 */
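/*
 * A hypothetical example: smp_rendezvous(NULL, do_flush, NULL, arg) would
 * run do_flush(arg) on every CPU, including the caller, with all CPUs
 * entering and leaving the action phase together thanks to the two
 * counters in smp_rv_waiters[].
 */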
2828static void (*smp_rv_setup_func)(void *arg);
2829static void (*smp_rv_action_func)(void *arg);
2830static void (*smp_rv_teardown_func)(void *arg);
2831static void *smp_rv_func_arg;
2832static volatile int smp_rv_waiters[2];
2833
2834void
2835smp_rendezvous_action(void)
2836{
2837	/* setup function */
2838	if (smp_rv_setup_func != NULL)
2839		smp_rv_setup_func(smp_rv_func_arg);
2840	/* spin on entry rendezvous */
2841	atomic_add_int(&smp_rv_waiters[0], 1);
2842	while (smp_rv_waiters[0] < mp_ncpus)
2843		;
2844	/* action function */
2845	if (smp_rv_action_func != NULL)
2846		smp_rv_action_func(smp_rv_func_arg);
2847	/* spin on exit rendezvous */
2848	atomic_add_int(&smp_rv_waiters[1], 1);
2849	while (smp_rv_waiters[1] < mp_ncpus)
2850		;
2851	/* teardown function */
2852	if (smp_rv_teardown_func != NULL)
2853		smp_rv_teardown_func(smp_rv_func_arg);
2854}
2855
2856void
2857smp_rendezvous(void (* setup_func)(void *),
2858	       void (* action_func)(void *),
2859	       void (* teardown_func)(void *),
2860	       void *arg)
2861{
2862
2863	/* obtain rendezvous lock */
2864	mtx_enter(&smp_rv_mtx, MTX_SPIN);
2865
2866	/* set static function pointers */
2867	smp_rv_setup_func = setup_func;
2868	smp_rv_action_func = action_func;
2869	smp_rv_teardown_func = teardown_func;
2870	smp_rv_func_arg = arg;
2871	smp_rv_waiters[0] = 0;
2872	smp_rv_waiters[1] = 0;
2873
2874	/*
2875	 * signal other processors, which will enter the IPI with interrupts off
2876	 */
2877	all_but_self_ipi(XRENDEZVOUS_OFFSET);
2878
2879	/* call executor function */
2880	smp_rendezvous_action();
2881
2882	/* release lock */
2883	mtx_exit(&smp_rv_mtx, MTX_SPIN);
2884}
2885
2886void
2887release_aps(void *dummy __unused)
2888{
2889	atomic_store_rel_int(&aps_ready, 1);
2890}
2891
2892SYSINIT(start_aps, SI_SUB_SMP, SI_ORDER_FIRST, release_aps, NULL);
2893