/*
 * Copyright (c) 1996, by Steve Passe
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. The name of the developer may NOT be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: head/sys/i386/i386/mp_machdep.c 64494 2000-08-10 17:33:24Z tegge $
 */

#include "opt_smp.h"
#include "opt_cpu.h"
#include "opt_user_ldt.h"

#ifdef SMP
#include <machine/smptests.h>
#else
#error
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/sysctl.h>
#include <sys/malloc.h>
#include <sys/memrange.h>
#ifdef BETTER_CLOCK
#include <sys/dkstat.h>
#endif
#include <sys/cons.h>	/* cngetc() */

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#ifdef BETTER_CLOCK
#include <sys/lock.h>
#include <vm/vm_map.h>
#include <sys/user.h>
#ifdef GPROF
#include <sys/gmon.h>
#endif
#endif

#include <machine/smp.h>
#include <machine/apic.h>
#include <machine/atomic.h>
#include <machine/cpufunc.h>
#include <machine/mpapic.h>
#include <machine/psl.h>
#include <machine/segments.h>
#include <machine/smptests.h>	/** TEST_DEFAULT_CONFIG, TEST_TEST1 */
#include <machine/tss.h>
#include <machine/specialreg.h>
#include <machine/globaldata.h>

#if defined(APIC_IO)
#include <machine/md_var.h>		/* setidt() */
#include <i386/isa/icu.h>		/* IPIs */
#include <i386/isa/intr_machdep.h>	/* IPIs */
#endif	/* APIC_IO */

#if defined(TEST_DEFAULT_CONFIG)
#define MPFPS_MPFB1	TEST_DEFAULT_CONFIG
#else
#define MPFPS_MPFB1	mpfps->mpfb1
#endif  /* TEST_DEFAULT_CONFIG */

#define WARMBOOT_TARGET		0
#define WARMBOOT_OFF		(KERNBASE + 0x0467)
#define WARMBOOT_SEG		(KERNBASE + 0x0469)

#ifdef PC98
#define BIOS_BASE		(0xe8000)
#define BIOS_SIZE		(0x18000)
#else
#define BIOS_BASE		(0xf0000)
#define BIOS_SIZE		(0x10000)
#endif
#define BIOS_COUNT		(BIOS_SIZE/4)

#define CMOS_REG		(0x70)
#define CMOS_DATA		(0x71)
#define BIOS_RESET		(0x0f)
#define BIOS_WARM		(0x0a)

#define PROCENTRY_FLAG_EN	0x01
#define PROCENTRY_FLAG_BP	0x02
#define IOAPICENTRY_FLAG_EN	0x01


/* MP Floating Pointer Structure */
typedef struct MPFPS {
	char    signature[4];
	void   *pap;
	u_char  length;
	u_char  spec_rev;
	u_char  checksum;
	u_char  mpfb1;
	u_char  mpfb2;
	u_char  mpfb3;
	u_char  mpfb4;
	u_char  mpfb5;
}      *mpfps_t;

/* MP Configuration Table Header */
typedef struct MPCTH {
	char    signature[4];
	u_short base_table_length;
	u_char  spec_rev;
	u_char  checksum;
	u_char  oem_id[8];
	u_char  product_id[12];
	void   *oem_table_pointer;
	u_short oem_table_size;
	u_short entry_count;
	void   *apic_address;
	u_short extended_table_length;
	u_char  extended_table_checksum;
	u_char  reserved;
}      *mpcth_t;


typedef struct PROCENTRY {
	u_char  type;
	u_char  apic_id;
	u_char  apic_version;
	u_char  cpu_flags;
	u_long  cpu_signature;
	u_long  feature_flags;
	u_long  reserved1;
	u_long  reserved2;
}      *proc_entry_ptr;

typedef struct BUSENTRY {
	u_char  type;
	u_char  bus_id;
	char    bus_type[6];
}      *bus_entry_ptr;

typedef struct IOAPICENTRY {
	u_char  type;
	u_char  apic_id;
	u_char  apic_version;
	u_char  apic_flags;
	void   *apic_address;
}      *io_apic_entry_ptr;

typedef struct INTENTRY {
	u_char  type;
	u_char  int_type;
	u_short int_flags;
	u_char  src_bus_id;
	u_char  src_bus_irq;
	u_char  dst_apic_id;
	u_char  dst_apic_int;
}      *int_entry_ptr;

/* descriptions of MP basetable entries */
typedef struct BASETABLE_ENTRY {
	u_char  type;
	u_char  length;
	char    name[16];
}       basetable_entry;

/*
 * this code MUST be enabled here and in mpboot.s.
 * it follows the very early stages of AP boot by placing values in CMOS ram.
 * it NORMALLY will never be needed and thus the primitive method for enabling.
 *
#define CHECK_POINTS
 */

#if defined(CHECK_POINTS) && !defined(PC98)
#define CHECK_READ(A)	 (outb(CMOS_REG, (A)), inb(CMOS_DATA))
#define CHECK_WRITE(A,D) (outb(CMOS_REG, (A)), outb(CMOS_DATA, (D)))

#define CHECK_INIT(D);				\
	CHECK_WRITE(0x34, (D));			\
	CHECK_WRITE(0x35, (D));			\
	CHECK_WRITE(0x36, (D));			\
	CHECK_WRITE(0x37, (D));			\
	CHECK_WRITE(0x38, (D));			\
	CHECK_WRITE(0x39, (D));

#define CHECK_PRINT(S);				\
	printf("%s: %d, %d, %d, %d, %d, %d\n",	\
	   (S),					\
	   CHECK_READ(0x34),			\
	   CHECK_READ(0x35),			\
	   CHECK_READ(0x36),			\
	   CHECK_READ(0x37),			\
	   CHECK_READ(0x38),			\
	   CHECK_READ(0x39));

#else				/* CHECK_POINTS */

#define CHECK_INIT(D)
#define CHECK_PRINT(S)

#endif				/* CHECK_POINTS */

/*
 * Values to send to the POST hardware.
 */
#define MP_BOOTADDRESS_POST	0x10
#define MP_PROBE_POST		0x11
#define MPTABLE_PASS1_POST	0x12

#define MP_START_POST		0x13
#define MP_ENABLE_POST		0x14
#define MPTABLE_PASS2_POST	0x15

#define START_ALL_APS_POST	0x16
#define INSTALL_AP_TRAMP_POST	0x17
#define START_AP_POST		0x18

#define MP_ANNOUNCE_POST	0x19


/** XXX FIXME: where does this really belong, isa.h/isa.c perhaps? */
int	current_postcode;

/** XXX FIXME: what system files declare these??? */
extern struct region_descriptor r_gdt, r_idt;

int	bsp_apic_ready = 0;	/* flags usability of BSP apic */
int	mp_ncpus;		/* # of CPUs, including BSP */
int	mp_naps;		/* # of Application Processors */
int	mp_nbusses;		/* # of busses */
int	mp_napics;		/* # of IO APICs */
int	boot_cpu_id;		/* designated BSP */
vm_offset_t cpu_apic_address;
vm_offset_t io_apic_address[NAPICID];	/* NAPICID is more than enough */
extern	int nkpt;

u_int32_t cpu_apic_versions[NCPU];
u_int32_t io_apic_versions[NAPIC];

#ifdef APIC_INTR_DIAGNOSTIC
int apic_itrace_enter[32];
int apic_itrace_tryisrlock[32];
int apic_itrace_gotisrlock[32];
int apic_itrace_active[32];
int apic_itrace_masked[32];
int apic_itrace_noisrlock[32];
int apic_itrace_masked2[32];
int apic_itrace_unmask[32];
int apic_itrace_noforward[32];
int apic_itrace_leave[32];
int apic_itrace_enter2[32];
int apic_itrace_doreti[32];
int apic_itrace_splz[32];
int apic_itrace_eoi[32];
#ifdef APIC_INTR_DIAGNOSTIC_IRQ
unsigned short apic_itrace_debugbuffer[32768];
int apic_itrace_debugbuffer_idx;
struct simplelock apic_itrace_debuglock;
#endif
#endif

#ifdef APIC_INTR_REORDER
struct {
	volatile int *location;
	int bit;
} apic_isrbit_location[32];
#endif

struct apic_intmapinfo	int_to_apicintpin[APIC_INTMAPSIZE];

/*
 * APIC ID logical/physical mapping structures.
 * We oversize these to simplify boot-time config.
 */
int     cpu_num_to_apic_id[NAPICID];
int     io_num_to_apic_id[NAPICID];
int     apic_id_to_logical[NAPICID];


/* Bitmap of all available CPUs */
u_int	all_cpus;

/* AP uses this during bootstrap.  Do not staticize.  */
char *bootSTK;
static int bootAP;

/* Hotwire a 0->4MB V==P mapping */
extern pt_entry_t *KPTphys;

/* SMP page table page */
extern pt_entry_t *SMPpt;

struct pcb stoppcbs[NCPU];

int smp_started;		/* has the system started? */

/*
 * Local data and functions.
 */

static int	mp_capable;
static u_int	boot_address;
static u_int	base_memory;

static int	picmode;		/* 0: virtual wire mode, 1: PIC mode */
static mpfps_t	mpfps;
static int	search_for_sig(u_int32_t target, int count);
static void	mp_enable(u_int boot_addr);

static int	mptable_pass1(void);
static int	mptable_pass2(void);
static void	default_mp_table(int type);
static void	fix_mp_table(void);
static void	setup_apic_irq_mapping(void);
static void	init_locks(void);
static int	start_all_aps(u_int boot_addr);
static void	install_ap_tramp(u_int boot_addr);
static int	start_ap(int logicalCpu, u_int boot_addr);
static int	apic_int_is_bus_type(int intr, int bus_type);

/*
 * Calculate usable address in base memory for AP trampoline code.
 */
u_int
mp_bootaddress(u_int basemem)
{
	POSTCODE(MP_BOOTADDRESS_POST);

	base_memory = basemem * 1024;	/* convert to bytes */

	boot_address = base_memory & ~0xfff;	/* round down to 4k boundary */
	if ((base_memory - boot_address) < bootMP_size)
		boot_address -= 4096;	/* not enough, lower by 4k */

	return boot_address;
}


/*
 * Look for an Intel MP spec table (ie, SMP capable hardware).
 */
int
mp_probe(void)
{
	int     x;
	u_long  segment;
	u_int32_t target;

	POSTCODE(MP_PROBE_POST);

	/* see if EBDA exists */
	if ((segment = (u_long) * (u_short *) (KERNBASE + 0x40e)) != 0) {
		/* search first 1K of EBDA */
		target = (u_int32_t) (segment << 4);
		if ((x = search_for_sig(target, 1024 / 4)) >= 0)
			goto found;
	} else {
		/* last 1K of base memory, effective 'top of base' passed in */
		target = (u_int32_t) (base_memory - 0x400);
		if ((x = search_for_sig(target, 1024 / 4)) >= 0)
			goto found;
	}

	/* search the BIOS */
	target = (u_int32_t) BIOS_BASE;
	if ((x = search_for_sig(target, BIOS_COUNT)) >= 0)
		goto found;

	/* nothing found */
	mpfps = (mpfps_t)0;
	mp_capable = 0;
	return 0;

found:
	/* calculate needed resources */
	mpfps = (mpfps_t)x;
	if (mptable_pass1())
		panic("you must reconfigure your kernel");

	/* flag fact that we are running multiple processors */
	mp_capable = 1;
	return 1;
}


/*
 * Startup the SMP processors.
 */
void
mp_start(void)
{
	POSTCODE(MP_START_POST);

	/* look for MP capable motherboard */
	if (mp_capable)
		mp_enable(boot_address);
	else
		panic("MP hardware not found!");
}


/*
 * Print various information about the SMP system hardware and setup.
 */
void
mp_announce(void)
{
	int     x;

	POSTCODE(MP_ANNOUNCE_POST);

	printf("FreeBSD/SMP: Multiprocessor motherboard\n");
	printf(" cpu0 (BSP): apic id: %2d", CPU_TO_ID(0));
	printf(", version: 0x%08x", cpu_apic_versions[0]);
	printf(", at 0x%08x\n", cpu_apic_address);
	for (x = 1; x <= mp_naps; ++x) {
		printf(" cpu%d (AP):  apic id: %2d", x, CPU_TO_ID(x));
		printf(", version: 0x%08x", cpu_apic_versions[x]);
		printf(", at 0x%08x\n", cpu_apic_address);
	}

#if defined(APIC_IO)
	for (x = 0; x < mp_napics; ++x) {
		printf(" io%d (APIC): apic id: %2d", x, IO_TO_ID(x));
		printf(", version: 0x%08x", io_apic_versions[x]);
		printf(", at 0x%08x\n", io_apic_address[x]);
	}
#else
	printf(" Warning: APIC I/O disabled\n");
#endif	/* APIC_IO */
}

/*
 * AP CPUs call this to sync up protected mode.
 */
void
init_secondary(void)
{
	int	gsel_tss;
	int	x, myid = bootAP;

	gdt_segs[GPRIV_SEL].ssd_base = (int) &SMP_prvspace[myid];
	gdt_segs[GPROC0_SEL].ssd_base =
		(int) &SMP_prvspace[myid].globaldata.gd_common_tss;
	SMP_prvspace[myid].globaldata.gd_prvspace = &SMP_prvspace[myid];

	for (x = 0; x < NGDT; x++) {
		ssdtosd(&gdt_segs[x], &gdt[myid * NGDT + x].sd);
	}

	r_gdt.rd_limit = NGDT * sizeof(gdt[0]) - 1;
	r_gdt.rd_base = (int) &gdt[myid * NGDT];
	lgdt(&r_gdt);			/* does magic intra-segment return */

	lidt(&r_idt);

	lldt(_default_ldt);
#ifdef USER_LDT
	currentldt = _default_ldt;
#endif

	gsel_tss = GSEL(GPROC0_SEL, SEL_KPL);
	gdt[myid * NGDT + GPROC0_SEL].sd.sd_type = SDT_SYS386TSS;
	common_tss.tss_esp0 = 0;	/* not used until after switch */
	common_tss.tss_ss0 = GSEL(GDATA_SEL, SEL_KPL);
	common_tss.tss_ioopt = (sizeof common_tss) << 16;
	tss_gdt = &gdt[myid * NGDT + GPROC0_SEL].sd;
	common_tssd = *tss_gdt;
	ltr(gsel_tss);

	load_cr0(0x8005003b);		/* XXX! */

	pmap_set_opt();
}


#if defined(APIC_IO)
/*
 * Final configuration of the BSP's local APIC:
 *  - disable 'pic mode'.
 *  - disable 'virtual wire mode'.
 *  - enable NMI.
 */
void
bsp_apic_configure(void)
{
	u_char		byte;
	u_int32_t	temp;

	/* leave 'pic mode' if necessary */
	if (picmode) {
		outb(0x22, 0x70);	/* select IMCR */
		byte = inb(0x23);	/* current contents */
		byte |= 0x01;		/* mask external INTR */
		outb(0x23, byte);	/* disconnect 8259s/NMI */
	}

	/* mask lint0 (the 8259 'virtual wire' connection) */
	temp = lapic.lvt_lint0;
	temp |= APIC_LVT_M;		/* set the mask */
	lapic.lvt_lint0 = temp;

        /* setup lint1 to handle NMI */
        temp = lapic.lvt_lint1;
        temp &= ~APIC_LVT_M;		/* clear the mask */
        lapic.lvt_lint1 = temp;

	if (bootverbose)
		apic_dump("bsp_apic_configure()");
}
#endif  /* APIC_IO */


/*******************************************************************
 * local functions and data
 */

/*
 * start the SMP system
 */
static void
mp_enable(u_int boot_addr)
{
	int     x;
#if defined(APIC_IO)
	int     apic;
	u_int   ux;
#endif	/* APIC_IO */

	POSTCODE(MP_ENABLE_POST);

	/* turn on 4MB of V == P addressing so we can get to MP table */
	*(int *)PTD = PG_V | PG_RW | ((uintptr_t)(void *)KPTphys & PG_FRAME);
	invltlb();

	/* examine the MP table for needed info, uses physical addresses */
	x = mptable_pass2();

	*(int *)PTD = 0;
	invltlb();

	/* can't process default configs till the CPU APIC is pmapped */
	if (x)
		default_mp_table(x);

	/* post scan cleanup */
	fix_mp_table();
	setup_apic_irq_mapping();

#if defined(APIC_IO)

	/* fill the LOGICAL io_apic_versions table */
	for (apic = 0; apic < mp_napics; ++apic) {
		ux = io_apic_read(apic, IOAPIC_VER);
		io_apic_versions[apic] = ux;
		io_apic_set_id(apic, IO_TO_ID(apic));
	}

	/* program each IO APIC in the system */
	for (apic = 0; apic < mp_napics; ++apic)
		if (io_apic_setup(apic) < 0)
			panic("IO APIC setup failure");

	/* install a 'Spurious INTerrupt' vector */
	setidt(XSPURIOUSINT_OFFSET, Xspuriousint,
	       SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));

	/* install an inter-CPU IPI for TLB invalidation */
	setidt(XINVLTLB_OFFSET, Xinvltlb,
	       SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));

#ifdef BETTER_CLOCK
	/* install an inter-CPU IPI for reading processor state */
	setidt(XCPUCHECKSTATE_OFFSET, Xcpucheckstate,
	       SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
#endif

	/* install an inter-CPU IPI for all-CPU rendezvous */
	setidt(XRENDEZVOUS_OFFSET, Xrendezvous,
	       SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));

	/* install an inter-CPU IPI for forcing an additional software trap */
	setidt(XCPUAST_OFFSET, Xcpuast,
	       SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));

	/* install an inter-CPU IPI for interrupt forwarding */
	setidt(XFORWARD_IRQ_OFFSET, Xforward_irq,
	       SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));

	/* install an inter-CPU IPI for CPU stop/restart */
	setidt(XCPUSTOP_OFFSET, Xcpustop,
	       SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));

#if defined(TEST_TEST1)
	/* install a "fake hardware INTerrupt" vector */
	setidt(XTEST1_OFFSET, Xtest1,
	       SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
#endif  /** TEST_TEST1 */

#endif	/* APIC_IO */

	/* initialize all SMP locks */
	init_locks();

	/* start each Application Processor */
	start_all_aps(boot_addr);

	/*
	 * The init process might be started on a different CPU now,
	 * and the boot CPU might not call prepare_usermode to get
	 * cr0 correctly configured. Thus we initialize cr0 here.
	 */
	load_cr0(rcr0() | CR0_WP | CR0_AM);
}


/*
 * look for the MP spec signature
 */

/* string defined by the Intel MP Spec as identifying the MP table */
#define MP_SIG		0x5f504d5f	/* _MP_ */
#define NEXT(X)		((X) += 4)
static int
search_for_sig(u_int32_t target, int count)
{
	int     x;
	u_int32_t *addr = (u_int32_t *) (KERNBASE + target);

	for (x = 0; x < count; NEXT(x))
		if (addr[x] == MP_SIG)
			/* make array index a byte index */
			return (target + (x * sizeof(u_int32_t)));

	return -1;
}


static basetable_entry basetable_entry_types[] =
{
	{0, 20, "Processor"},
	{1, 8, "Bus"},
	{2, 8, "I/O APIC"},
	{3, 8, "I/O INT"},
	{4, 8, "Local INT"}
};

typedef struct BUSDATA {
	u_char  bus_id;
	enum busTypes bus_type;
}       bus_datum;

typedef struct INTDATA {
	u_char  int_type;
	u_short int_flags;
	u_char  src_bus_id;
	u_char  src_bus_irq;
	u_char  dst_apic_id;
	u_char  dst_apic_int;
	u_char	int_vector;
}       io_int, local_int;

typedef struct BUSTYPENAME {
	u_char  type;
	char    name[7];
}       bus_type_name;

static bus_type_name bus_type_table[] =
{
	{CBUS, "CBUS"},
	{CBUSII, "CBUSII"},
	{EISA, "EISA"},
	{MCA, "MCA"},
	{UNKNOWN_BUSTYPE, "---"},
	{ISA, "ISA"},
	{MCA, "MCA"},
	{UNKNOWN_BUSTYPE, "---"},
	{UNKNOWN_BUSTYPE, "---"},
	{UNKNOWN_BUSTYPE, "---"},
	{UNKNOWN_BUSTYPE, "---"},
	{UNKNOWN_BUSTYPE, "---"},
	{PCI, "PCI"},
	{UNKNOWN_BUSTYPE, "---"},
	{UNKNOWN_BUSTYPE, "---"},
	{UNKNOWN_BUSTYPE, "---"},
	{UNKNOWN_BUSTYPE, "---"},
	{XPRESS, "XPRESS"},
	{UNKNOWN_BUSTYPE, "---"}
};
/* from MP spec v1.4, table 5-1 */
static int default_data[7][5] =
{
/*   nbus, id0, type0, id1, type1 */
	{1, 0, ISA, 255, 255},
	{1, 0, EISA, 255, 255},
	{1, 0, EISA, 255, 255},
	{1, 0, MCA, 255, 255},
	{2, 0, ISA, 1, PCI},
	{2, 0, EISA, 1, PCI},
	{2, 0, MCA, 1, PCI}
};


/* the bus data */
static bus_datum bus_data[NBUS];

/* the IO INT data, one entry per possible APIC INTerrupt */
static io_int  io_apic_ints[NINTR];

static int nintrs;

static int processor_entry	__P((proc_entry_ptr entry, int cpu));
static int bus_entry		__P((bus_entry_ptr entry, int bus));
static int io_apic_entry	__P((io_apic_entry_ptr entry, int apic));
static int int_entry		__P((int_entry_ptr entry, int intr));
static int lookup_bus_type	__P((char *name));


/*
 * 1st pass on motherboard's Intel MP specification table.
 *
 * initializes:
 *	mp_ncpus = 1
 *
 * determines:
 *	cpu_apic_address (common to all CPUs)
 *	io_apic_address[N]
 *	mp_naps
 *	mp_nbusses
 *	mp_napics
 *	nintrs
 */
static int
mptable_pass1(void)
{
	int	x;
	mpcth_t	cth;
	int	totalSize;
	void*	position;
	int	count;
	int	type;
	int	mustpanic;

	POSTCODE(MPTABLE_PASS1_POST);

	mustpanic = 0;

	/* clear various tables */
	for (x = 0; x < NAPICID; ++x) {
		io_apic_address[x] = ~0;	/* IO APIC address table */
	}

	/* init everything to empty */
	mp_naps = 0;
	mp_nbusses = 0;
	mp_napics = 0;
	nintrs = 0;

	/* check for use of 'default' configuration */
	if (MPFPS_MPFB1 != 0) {
		/* use default addresses */
		cpu_apic_address = DEFAULT_APIC_BASE;
		io_apic_address[0] = DEFAULT_IO_APIC_BASE;

		/* fill in with defaults */
		mp_naps = 2;		/* includes BSP */
		mp_nbusses = default_data[MPFPS_MPFB1 - 1][0];
#if defined(APIC_IO)
		mp_napics = 1;
		nintrs = 16;
#endif	/* APIC_IO */
	}
	else {
		if ((cth = mpfps->pap) == 0)
			panic("MP Configuration Table Header MISSING!");

		cpu_apic_address = (vm_offset_t) cth->apic_address;

		/* walk the table, recording info of interest */
		totalSize = cth->base_table_length - sizeof(struct MPCTH);
		position = (u_char *) cth + sizeof(struct MPCTH);
		count = cth->entry_count;

		while (count--) {
			switch (type = *(u_char *) position) {
			case 0: /* processor_entry */
				if (((proc_entry_ptr)position)->cpu_flags
					& PROCENTRY_FLAG_EN)
					++mp_naps;
				break;
			case 1: /* bus_entry */
				++mp_nbusses;
				break;
			case 2: /* io_apic_entry */
				if (((io_apic_entry_ptr)position)->apic_flags
					& IOAPICENTRY_FLAG_EN)
					io_apic_address[mp_napics++] =
					    (vm_offset_t)((io_apic_entry_ptr)
						position)->apic_address;
				break;
			case 3: /* int_entry */
				++nintrs;
				break;
			case 4:	/* int_entry */
				break;
			default:
				panic("mpfps Base Table HOSED!");
				/* NOTREACHED */
			}

			totalSize -= basetable_entry_types[type].length;
			(u_char*)position += basetable_entry_types[type].length;
		}
	}

	/* qualify the numbers */
	if (mp_naps > NCPU) {
		printf("Warning: only using %d of %d available CPUs!\n",
			NCPU, mp_naps);
		mp_naps = NCPU;
	}
	if (mp_nbusses > NBUS) {
		printf("found %d busses, increase NBUS\n", mp_nbusses);
		mustpanic = 1;
	}
	if (mp_napics > NAPIC) {
		printf("found %d apics, increase NAPIC\n", mp_napics);
		mustpanic = 1;
	}
	if (nintrs > NINTR) {
		printf("found %d intrs, increase NINTR\n", nintrs);
		mustpanic = 1;
	}

	/*
	 * Count the BSP.
	 * This is also used as a counter while starting the APs.
	 */
	mp_ncpus = 1;

	--mp_naps;	/* subtract the BSP */

	return mustpanic;
}


/*
 * 2nd pass on motherboard's Intel MP specification table.
 *
 * sets:
 *	boot_cpu_id
 *	ID_TO_IO(N), phy APIC ID to log CPU/IO table
 *	CPU_TO_ID(N), logical CPU to APIC ID table
 *	IO_TO_ID(N), logical IO to APIC ID table
 *	bus_data[N]
 *	io_apic_ints[N]
 */
static int
mptable_pass2(void)
{
	int     x;
	mpcth_t cth;
	int     totalSize;
	void*   position;
	int     count;
	int     type;
	int     apic, bus, cpu, intr;

	POSTCODE(MPTABLE_PASS2_POST);

	/* clear various tables */
	for (x = 0; x < NAPICID; ++x) {
		ID_TO_IO(x) = -1;	/* phy APIC ID to log CPU/IO table */
		CPU_TO_ID(x) = -1;	/* logical CPU to APIC ID table */
		IO_TO_ID(x) = -1;	/* logical IO to APIC ID table */
	}

	/* clear bus data table */
	for (x = 0; x < NBUS; ++x)
		bus_data[x].bus_id = 0xff;

	/* clear IO APIC INT table */
	for (x = 0; x < NINTR; ++x) {
		io_apic_ints[x].int_type = 0xff;
		io_apic_ints[x].int_vector = 0xff;
	}

	/* setup the cpu/apic mapping arrays */
	boot_cpu_id = -1;

	/* record whether PIC or virtual-wire mode */
	picmode = (mpfps->mpfb2 & 0x80) ? 1 : 0;

	/* check for use of 'default' configuration */
	if (MPFPS_MPFB1 != 0)
		return MPFPS_MPFB1;	/* return default configuration type */

	if ((cth = mpfps->pap) == 0)
		panic("MP Configuration Table Header MISSING!");

	/* walk the table, recording info of interest */
	totalSize = cth->base_table_length - sizeof(struct MPCTH);
	position = (u_char *) cth + sizeof(struct MPCTH);
	count = cth->entry_count;
	apic = bus = intr = 0;
	cpu = 1;				/* pre-count the BSP */

	while (count--) {
		switch (type = *(u_char *) position) {
		case 0:
			if (processor_entry(position, cpu))
				++cpu;
			break;
		case 1:
			if (bus_entry(position, bus))
				++bus;
			break;
		case 2:
			if (io_apic_entry(position, apic))
				++apic;
			break;
		case 3:
			if (int_entry(position, intr))
				++intr;
			break;
		case 4:
			/* int_entry(position); */
			break;
		default:
			panic("mpfps Base Table HOSED!");
			/* NOTREACHED */
		}

		totalSize -= basetable_entry_types[type].length;
		(u_char *) position += basetable_entry_types[type].length;
	}

	if (boot_cpu_id == -1)
		panic("NO BSP found!");

	/* report fact that it's NOT a default configuration */
	return 0;
}


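/*
 * record that 'irq' is routed via intpin 'intpin' of logical IO APIC 'apic'
 * and mark every matching MP table INT entry with that vector.
 */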
void
assign_apic_irq(int apic, int intpin, int irq)
{
	int x;

	if (int_to_apicintpin[irq].ioapic != -1)
		panic("assign_apic_irq: inconsistent table");

	int_to_apicintpin[irq].ioapic = apic;
	int_to_apicintpin[irq].int_pin = intpin;
	int_to_apicintpin[irq].apic_address = ioapic[apic];
	int_to_apicintpin[irq].redirindex = IOAPIC_REDTBL + 2 * intpin;

	for (x = 0; x < nintrs; x++) {
		if ((io_apic_ints[x].int_type == 0 ||
		     io_apic_ints[x].int_type == 3) &&
		    io_apic_ints[x].int_vector == 0xff &&
		    io_apic_ints[x].dst_apic_id == IO_TO_ID(apic) &&
		    io_apic_ints[x].dst_apic_int == intpin)
			io_apic_ints[x].int_vector = irq;
	}
}

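/*
 * undo an assign_apic_irq(): clear the int_to_apicintpin[] slot for 'irq'
 * and reset the matching INT entries to unassigned (0xff).
 */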
void
revoke_apic_irq(int irq)
{
	int x;
	int oldapic;
	int oldintpin;

	if (int_to_apicintpin[irq].ioapic == -1)
		panic("revoke_apic_irq: inconsistent table");

	oldapic = int_to_apicintpin[irq].ioapic;
	oldintpin = int_to_apicintpin[irq].int_pin;

	int_to_apicintpin[irq].ioapic = -1;
	int_to_apicintpin[irq].int_pin = 0;
	int_to_apicintpin[irq].apic_address = NULL;
	int_to_apicintpin[irq].redirindex = 0;

	for (x = 0; x < nintrs; x++) {
		if ((io_apic_ints[x].int_type == 0 ||
		     io_apic_ints[x].int_type == 3) &&
		    io_apic_ints[x].int_vector == 0xff &&
		    io_apic_ints[x].dst_apic_id == IO_TO_ID(oldapic) &&
		    io_apic_ints[x].dst_apic_int == oldintpin)
			io_apic_ints[x].int_vector = 0xff;
	}
}



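/*
 * exchange two physical IO APIC IDs in the INT entries and in the
 * IO_TO_ID() map so that logical IO APIC 'apic' ends up with 'newid'.
 */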
static void
swap_apic_id(int apic, int oldid, int newid)
{
	int x;
	int oapic;


	if (oldid == newid)
		return;			/* Nothing to do */

	printf("Changing APIC ID for IO APIC #%d from %d to %d in MP table\n",
	       apic, oldid, newid);

	/* Swap physical APIC IDs in interrupt entries */
	for (x = 0; x < nintrs; x++) {
		if (io_apic_ints[x].dst_apic_id == oldid)
			io_apic_ints[x].dst_apic_id = newid;
		else if (io_apic_ints[x].dst_apic_id == newid)
			io_apic_ints[x].dst_apic_id = oldid;
	}

	/* Swap physical APIC IDs in IO_TO_ID mappings */
	for (oapic = 0; oapic < mp_napics; oapic++)
		if (IO_TO_ID(oapic) == newid)
			break;

	if (oapic < mp_napics) {
		printf("Changing APIC ID for IO APIC #%d from "
		       "%d to %d in MP table\n",
		       oapic, newid, oldid);
		IO_TO_ID(oapic) = oldid;
	}
	IO_TO_ID(apic) = newid;
}


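/*
 * rebuild the ID_TO_IO() table from the CPU_TO_ID() and IO_TO_ID() maps.
 */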
static void
fix_id_to_io_mapping(void)
{
	int x;

	for (x = 0; x < NAPICID; x++)
		ID_TO_IO(x) = -1;

	for (x = 0; x <= mp_naps; x++)
		if (CPU_TO_ID(x) < NAPICID)
			ID_TO_IO(CPU_TO_ID(x)) = x;

	for (x = 0; x < mp_napics; x++)
		if (IO_TO_ID(x) < NAPICID)
			ID_TO_IO(IO_TO_ID(x)) = x;
}


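/*
 * find the lowest physical APIC ID not yet used by any CPU or IO APIC.
 */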
static int
first_free_apic_id(void)
{
	int freeid, x;

	for (freeid = 0; freeid < NAPICID; freeid++) {
		for (x = 0; x <= mp_naps; x++)
			if (CPU_TO_ID(x) == freeid)
				break;
		if (x <= mp_naps)
			continue;
		for (x = 0; x < mp_napics; x++)
			if (IO_TO_ID(x) == freeid)
				break;
		if (x < mp_napics)
			continue;
		return freeid;
	}
	return freeid;
}


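/*
 * check whether physical APIC ID 'id' can be given to logical IO APIC
 * 'apic' without clashing with a CPU or a lower-numbered IO APIC.
 */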
static int
io_apic_id_acceptable(int apic, int id)
{
	int cpu;		/* Logical CPU number */
	int oapic;		/* Logical IO APIC number for other IO APIC */

	if (id >= NAPICID)
		return 0;	/* Out of range */

	for (cpu = 0; cpu <= mp_naps; cpu++)
		if (CPU_TO_ID(cpu) == id)
			return 0;	/* Conflict with CPU */

	for (oapic = 0; oapic < mp_napics && oapic < apic; oapic++)
		if (IO_TO_ID(oapic) == id)
			return 0;	/* Conflict with other APIC */

	return 1;		/* ID is acceptable for IO APIC */
}


/*
 * parse an Intel MP specification table
 */
static void
fix_mp_table(void)
{
	int	x;
	int	id;
	int	bus_0 = 0;	/* Stop GCC warning */
	int	bus_pci = 0;	/* Stop GCC warning */
	int	num_pci_bus;
	int	apic;		/* IO APIC unit number */
	int     freeid;		/* Free physical APIC ID */
	int	physid;		/* Current physical IO APIC ID */

	/*
	 * Fix mis-numbering of the PCI bus and its INT entries if the BIOS
	 * did it wrong.  The MP spec says that when more than 1 PCI bus
	 * exists the BIOS must begin with bus entries for the PCI bus and use
	 * actual PCI bus numbering.  This implies that when only 1 PCI bus
	 * exists the BIOS can choose to ignore this ordering, and indeed many
	 * MP motherboards do ignore it.  This causes a problem when the PCI
	 * sub-system makes requests of the MP sub-system based on PCI bus
	 * numbers.	So here we look for the situation and renumber the
	 * busses and associated INTs in an effort to "make it right".
	 */

	/* find bus 0, PCI bus, count the number of PCI busses */
	for (num_pci_bus = 0, x = 0; x < mp_nbusses; ++x) {
		if (bus_data[x].bus_id == 0) {
			bus_0 = x;
		}
		if (bus_data[x].bus_type == PCI) {
			++num_pci_bus;
			bus_pci = x;
		}
	}
	/*
	 * bus_0 == slot of bus with ID of 0
	 * bus_pci == slot of last PCI bus encountered
	 */

	/* check the 1 PCI bus case for sanity */
	/* if it is number 0 all is well */
	if (num_pci_bus == 1 &&
	    bus_data[bus_pci].bus_id != 0) {

		/* mis-numbered, swap with whichever bus uses slot 0 */

		/* swap the bus entry types */
		bus_data[bus_pci].bus_type = bus_data[bus_0].bus_type;
		bus_data[bus_0].bus_type = PCI;

		/* swap each relevant INTerrupt entry */
		id = bus_data[bus_pci].bus_id;
		for (x = 0; x < nintrs; ++x) {
			if (io_apic_ints[x].src_bus_id == id) {
				io_apic_ints[x].src_bus_id = 0;
			}
			else if (io_apic_ints[x].src_bus_id == 0) {
				io_apic_ints[x].src_bus_id = id;
			}
		}
	}

	/* Assign IO APIC IDs.
	 *
	 * First try the existing ID. If a conflict is detected, try
	 * the ID in the MP table.  If a conflict is still detected, find
	 * a free id.
	 *
	 * We cannot use the ID_TO_IO table before all conflicts have been
	 * resolved and the table has been corrected.
	 */
	for (apic = 0; apic < mp_napics; ++apic) { /* For all IO APICs */

		/* First try to use the value set by the BIOS */
		physid = io_apic_get_id(apic);
		if (io_apic_id_acceptable(apic, physid)) {
			if (IO_TO_ID(apic) != physid)
				swap_apic_id(apic, IO_TO_ID(apic), physid);
			continue;
		}

		/* Then check if the value in the MP table is acceptable */
		if (io_apic_id_acceptable(apic, IO_TO_ID(apic)))
			continue;

		/* Last resort, find a free APIC ID and use it */
		freeid = first_free_apic_id();
		if (freeid >= NAPICID)
			panic("No free physical APIC IDs found");

		if (io_apic_id_acceptable(apic, freeid)) {
			swap_apic_id(apic, IO_TO_ID(apic), freeid);
			continue;
		}
		panic("Free physical APIC ID not usable");
	}
	fix_id_to_io_mapping();
}


/* Assign low level interrupt handlers */
static void
setup_apic_irq_mapping(void)
{
	int	x;
	int	int_vector;

	/* Clear array */
	for (x = 0; x < APIC_INTMAPSIZE; x++) {
		int_to_apicintpin[x].ioapic = -1;
		int_to_apicintpin[x].int_pin = 0;
		int_to_apicintpin[x].apic_address = NULL;
		int_to_apicintpin[x].redirindex = 0;
	}

	/* First assign ISA/EISA interrupts */
	for (x = 0; x < nintrs; x++) {
		int_vector = io_apic_ints[x].src_bus_irq;
		if (int_vector < APIC_INTMAPSIZE &&
		    io_apic_ints[x].int_vector == 0xff &&
		    int_to_apicintpin[int_vector].ioapic == -1 &&
		    (apic_int_is_bus_type(x, ISA) ||
		     apic_int_is_bus_type(x, EISA)) &&
		    io_apic_ints[x].int_type == 0) {
			assign_apic_irq(ID_TO_IO(io_apic_ints[x].dst_apic_id),
					io_apic_ints[x].dst_apic_int,
					int_vector);
		}
	}

	/* Assign interrupts on first 24 intpins on IOAPIC #0 */
	for (x = 0; x < nintrs; x++) {
		int_vector = io_apic_ints[x].dst_apic_int;
		if (int_vector < APIC_INTMAPSIZE &&
		    io_apic_ints[x].dst_apic_id == IO_TO_ID(0) &&
		    io_apic_ints[x].int_vector == 0xff &&
		    int_to_apicintpin[int_vector].ioapic == -1 &&
		    (io_apic_ints[x].int_type == 0 ||
		     io_apic_ints[x].int_type == 3)) {
			assign_apic_irq(0,
					io_apic_ints[x].dst_apic_int,
					int_vector);
		}
	}
	/*
	 * Assign interrupts for remaining intpins.
	 * Skip IOAPIC #0 intpin 0 if the type is ExtInt, since this indicates
	 * that an entry for ISA/EISA irq 0 exists, and a fallback to mixed mode
	 * due to 8254 interrupts not being delivered can reuse that low level
	 * interrupt handler.
	 */
	int_vector = 0;
	while (int_vector < APIC_INTMAPSIZE &&
	       int_to_apicintpin[int_vector].ioapic != -1)
		int_vector++;
	for (x = 0; x < nintrs && int_vector < APIC_INTMAPSIZE; x++) {
		if ((io_apic_ints[x].int_type == 0 ||
		     (io_apic_ints[x].int_type == 3 &&
		      (io_apic_ints[x].dst_apic_id != IO_TO_ID(0) ||
		       io_apic_ints[x].dst_apic_int != 0))) &&
		    io_apic_ints[x].int_vector == 0xff) {
			assign_apic_irq(ID_TO_IO(io_apic_ints[x].dst_apic_id),
					io_apic_ints[x].dst_apic_int,
					int_vector);
			int_vector++;
			while (int_vector < APIC_INTMAPSIZE &&
			       int_to_apicintpin[int_vector].ioapic != -1)
				int_vector++;
		}
	}
}


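/*
 * parse a processor entry: record the BSP or add an AP to the
 * CPU <-> APIC ID maps.  returns 1 if a new logical CPU was added.
 */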
static int
processor_entry(proc_entry_ptr entry, int cpu)
{
	/* check for usability */
	if (!(entry->cpu_flags & PROCENTRY_FLAG_EN))
		return 0;

	if (entry->apic_id >= NAPICID)
		panic("CPU APIC ID out of range (0..%d)", NAPICID - 1);
	/* check for BSP flag */
	if (entry->cpu_flags & PROCENTRY_FLAG_BP) {
		boot_cpu_id = entry->apic_id;
		CPU_TO_ID(0) = entry->apic_id;
		ID_TO_CPU(entry->apic_id) = 0;
		return 0;	/* it's already been counted */
	}

	/* add another AP to list, if less than max number of CPUs */
	else if (cpu < NCPU) {
		CPU_TO_ID(cpu) = entry->apic_id;
		ID_TO_CPU(entry->apic_id) = cpu;
		return 1;
	}

	return 0;
}


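/*
 * parse a bus entry: record its bus ID and decoded bus type.
 */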
static int
bus_entry(bus_entry_ptr entry, int bus)
{
	int     x;
	char    c, name[8];

	/* encode the name into an index */
	for (x = 0; x < 6; ++x) {
		if ((c = entry->bus_type[x]) == ' ')
			break;
		name[x] = c;
	}
	name[x] = '\0';

	if ((x = lookup_bus_type(name)) == UNKNOWN_BUSTYPE)
		panic("unknown bus type: '%s'", name);

	bus_data[bus].bus_id = entry->bus_id;
	bus_data[bus].bus_type = x;

	return 1;
}


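/*
 * parse an IO APIC entry: record its APIC ID if the APIC is enabled.
 */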
static int
io_apic_entry(io_apic_entry_ptr entry, int apic)
{
	if (!(entry->apic_flags & IOAPICENTRY_FLAG_EN))
		return 0;

	IO_TO_ID(apic) = entry->apic_id;
	if (entry->apic_id < NAPICID)
		ID_TO_IO(entry->apic_id) = apic;

	return 1;
}


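/*
 * map a bus type name from the MP table onto a busTypes constant.
 */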
static int
lookup_bus_type(char *name)
{
	int     x;

	for (x = 0; x < MAX_BUSTYPE; ++x)
		if (strcmp(bus_type_table[x].name, name) == 0)
			return bus_type_table[x].type;

	return UNKNOWN_BUSTYPE;
}


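/*
 * parse an interrupt entry into io_apic_ints[].  a destination APIC ID
 * of 255 means "all IO APICs"; pick one with enough redirection entries.
 */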
static int
int_entry(int_entry_ptr entry, int intr)
{
	int apic;

	io_apic_ints[intr].int_type = entry->int_type;
	io_apic_ints[intr].int_flags = entry->int_flags;
	io_apic_ints[intr].src_bus_id = entry->src_bus_id;
	io_apic_ints[intr].src_bus_irq = entry->src_bus_irq;
	if (entry->dst_apic_id == 255) {
		/* This signal goes to all IO APICS.  Select an IO APIC
		   with sufficient number of interrupt pins */
		for (apic = 0; apic < mp_napics; apic++)
			if (((io_apic_read(apic, IOAPIC_VER) &
			      IOART_VER_MAXREDIR) >> MAXREDIRSHIFT) >=
			    entry->dst_apic_int)
				break;
		if (apic < mp_napics)
			io_apic_ints[intr].dst_apic_id = IO_TO_ID(apic);
		else
			io_apic_ints[intr].dst_apic_id = entry->dst_apic_id;
	} else
		io_apic_ints[intr].dst_apic_id = entry->dst_apic_id;
	io_apic_ints[intr].dst_apic_int = entry->dst_apic_int;

	return 1;
}


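/*
 * return 1 if INT entry 'intr' originates from a bus of type 'bus_type'.
 */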
static int
apic_int_is_bus_type(int intr, int bus_type)
{
	int     bus;

	for (bus = 0; bus < mp_nbusses; ++bus)
		if ((bus_data[bus].bus_id == io_apic_ints[intr].src_bus_id)
		    && ((int) bus_data[bus].bus_type == bus_type))
			return 1;

	return 0;
}


/*
 * Given a traditional ISA INT mask, return an APIC mask.
 */
u_int
isa_apic_mask(u_int isa_mask)
{
	int isa_irq;
	int apic_pin;

#if defined(SKIP_IRQ15_REDIRECT)
	if (isa_mask == (1 << 15)) {
		printf("skipping ISA IRQ15 redirect\n");
		return isa_mask;
	}
#endif  /* SKIP_IRQ15_REDIRECT */

	isa_irq = ffs(isa_mask);		/* find its bit position */
	if (isa_irq == 0)			/* doesn't exist */
		return 0;
	--isa_irq;				/* make it zero based */

	apic_pin = isa_apic_irq(isa_irq);	/* look for APIC connection */
	if (apic_pin == -1)
		return 0;

	return (1 << apic_pin);			/* convert pin# to a mask */
}


/*
 * Determine which APIC pin an ISA/EISA INT is attached to.
 */
#define INTTYPE(I)	(io_apic_ints[(I)].int_type)
#define INTPIN(I)	(io_apic_ints[(I)].dst_apic_int)
#define INTIRQ(I)	(io_apic_ints[(I)].int_vector)
#define INTAPIC(I)	(ID_TO_IO(io_apic_ints[(I)].dst_apic_id))

#define SRCBUSIRQ(I)	(io_apic_ints[(I)].src_bus_irq)
int
isa_apic_irq(int isa_irq)
{
	int     intr;

	for (intr = 0; intr < nintrs; ++intr) {		/* check each record */
		if (INTTYPE(intr) == 0) {		/* standard INT */
			if (SRCBUSIRQ(intr) == isa_irq) {
				if (apic_int_is_bus_type(intr, ISA) ||
			            apic_int_is_bus_type(intr, EISA))
					return INTIRQ(intr);	/* found */
			}
		}
	}
	return -1;					/* NOT found */
}


/*
 * Determine which APIC pin a PCI INT is attached to.
 */
#define SRCBUSID(I)	(io_apic_ints[(I)].src_bus_id)
#define SRCBUSDEVICE(I)	((io_apic_ints[(I)].src_bus_irq >> 2) & 0x1f)
#define SRCBUSLINE(I)	(io_apic_ints[(I)].src_bus_irq & 0x03)
int
pci_apic_irq(int pciBus, int pciDevice, int pciInt)
{
	int     intr;

	--pciInt;					/* zero based */

	for (intr = 0; intr < nintrs; ++intr)		/* check each record */
		if ((INTTYPE(intr) == 0)		/* standard INT */
		    && (SRCBUSID(intr) == pciBus)
		    && (SRCBUSDEVICE(intr) == pciDevice)
		    && (SRCBUSLINE(intr) == pciInt))	/* a candidate IRQ */
			if (apic_int_is_bus_type(intr, PCI))
				return INTIRQ(intr);	/* exact match */

	return -1;					/* NOT found */
}

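/*
 * given an IRQ, find another IO APIC pin wired to the same source
 * (same bus/device/line) and return its IRQ, or -1 if there is none.
 */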
int
next_apic_irq(int irq)
{
	int intr, ointr;
	int bus, bustype;

	bus = 0;
	bustype = 0;
	for (intr = 0; intr < nintrs; intr++) {
		if (INTIRQ(intr) != irq || INTTYPE(intr) != 0)
			continue;
		bus = SRCBUSID(intr);
		bustype = apic_bus_type(bus);
		if (bustype != ISA &&
		    bustype != EISA &&
		    bustype != PCI)
			continue;
		break;
	}
	if (intr >= nintrs) {
		return -1;
	}
	for (ointr = intr + 1; ointr < nintrs; ointr++) {
		if (INTTYPE(ointr) != 0)
			continue;
		if (bus != SRCBUSID(ointr))
			continue;
		if (bustype == PCI) {
			if (SRCBUSDEVICE(intr) != SRCBUSDEVICE(ointr))
				continue;
			if (SRCBUSLINE(intr) != SRCBUSLINE(ointr))
				continue;
		}
		if (bustype == ISA || bustype == EISA) {
			if (SRCBUSIRQ(intr) != SRCBUSIRQ(ointr))
				continue;
		}
		if (INTPIN(intr) == INTPIN(ointr))
			continue;
		break;
	}
	if (ointr >= nintrs) {
		return -1;
	}
	return INTIRQ(ointr);
}
#undef SRCBUSLINE
#undef SRCBUSDEVICE
#undef SRCBUSID
#undef SRCBUSIRQ

#undef INTPIN
#undef INTIRQ
#undef INTAPIC
#undef INTTYPE


/*
 * Reprogram the MB chipset to NOT redirect an ISA INTerrupt.
 *
 * XXX FIXME:
 *  Exactly what this means is unclear at this point.  It is a solution
 *  for motherboards that redirect the MBIRQ0 pin.  Generically a motherboard
 *  could route any of the ISA INTs to upper (>15) IRQ values.  But most would
 *  NOT be redirected via MBIRQ0, thus "undirect()ing" them would NOT be an
 *  option.
 */
int
undirect_isa_irq(int rirq)
{
#if defined(READY)
	if (bootverbose)
	    printf("Freeing redirected ISA irq %d.\n", rirq);
	/** FIXME: tickle the MB redirector chip */
	return ???;
#else
	if (bootverbose)
	    printf("Freeing (NOT implemented) redirected ISA irq %d.\n", rirq);
	return 0;
#endif  /* READY */
}


/*
 * Reprogram the MB chipset to NOT redirect a PCI INTerrupt
 */
int
undirect_pci_irq(int rirq)
{
#if defined(READY)
	if (bootverbose)
		printf("Freeing redirected PCI irq %d.\n", rirq);

	/** FIXME: tickle the MB redirector chip */
	return ???;
#else
	if (bootverbose)
		printf("Freeing (NOT implemented) redirected PCI irq %d.\n",
		       rirq);
	return 0;
#endif  /* READY */
}


/*
 * given a bus ID, return:
 *  the bus type if found
 *  -1 if NOT found
 */
int
apic_bus_type(int id)
{
	int     x;

	for (x = 0; x < mp_nbusses; ++x)
		if (bus_data[x].bus_id == id)
			return bus_data[x].bus_type;

	return -1;
}


/*
 * given a LOGICAL APIC# and pin#, return:
 *  the associated src bus ID if found
 *  -1 if NOT found
 */
int
apic_src_bus_id(int apic, int pin)
{
	int     x;

	/* search each of the possible INTerrupt sources */
	for (x = 0; x < nintrs; ++x)
		if ((apic == ID_TO_IO(io_apic_ints[x].dst_apic_id)) &&
		    (pin == io_apic_ints[x].dst_apic_int))
			return (io_apic_ints[x].src_bus_id);

	return -1;		/* NOT found */
}


/*
 * given a LOGICAL APIC# and pin#, return:
 *  the associated src bus IRQ if found
 *  -1 if NOT found
 */
int
apic_src_bus_irq(int apic, int pin)
{
	int     x;

	for (x = 0; x < nintrs; x++)
		if ((apic == ID_TO_IO(io_apic_ints[x].dst_apic_id)) &&
		    (pin == io_apic_ints[x].dst_apic_int))
			return (io_apic_ints[x].src_bus_irq);

	return -1;		/* NOT found */
}


/*
 * given a LOGICAL APIC# and pin#, return:
 *  the associated INTerrupt type if found
 *  -1 if NOT found
 */
int
apic_int_type(int apic, int pin)
{
	int     x;

	/* search each of the possible INTerrupt sources */
	for (x = 0; x < nintrs; ++x)
		if ((apic == ID_TO_IO(io_apic_ints[x].dst_apic_id)) &&
		    (pin == io_apic_ints[x].dst_apic_int))
			return (io_apic_ints[x].int_type);

	return -1;		/* NOT found */
}

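/*
 * given a LOGICAL APIC# and pin#, return:
 *  the assigned IRQ (int_vector) if found
 *  -1 if NOT found
 */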
1673int
1674apic_irq(int apic, int pin)
1675{
1676	int x;
1677	int res;
1678
1679	for (x = 0; x < nintrs; ++x)
1680		if ((apic == ID_TO_IO(io_apic_ints[x].dst_apic_id)) &&
1681		    (pin == io_apic_ints[x].dst_apic_int)) {
1682			res = io_apic_ints[x].int_vector;
1683			if (res == 0xff)
1684				return -1;
1685			if (apic != int_to_apicintpin[res].ioapic)
1686				panic("apic_irq: inconsistent table");
1687			if (pin != int_to_apicintpin[res].int_pin)
1688				panic("apic_irq inconsistent table (2)");
1689			return res;
1690		}
1691	return -1;
1692}
1693
1694
1695/*
1696 * given a LOGICAL APIC# and pin#, return:
1697 *  the associated trigger mode if found
1698 *  -1 if NOT found
1699 */
1700int
1701apic_trigger(int apic, int pin)
1702{
1703	int     x;
1704
1705	/* search each of the possible INTerrupt sources */
1706	for (x = 0; x < nintrs; ++x)
1707		if ((apic == ID_TO_IO(io_apic_ints[x].dst_apic_id)) &&
1708		    (pin == io_apic_ints[x].dst_apic_int))
1709			return ((io_apic_ints[x].int_flags >> 2) & 0x03);
1710
1711	return -1;		/* NOT found */
1712}
1713
1714
1715/*
1716 * given a LOGICAL APIC# and pin#, return:
1717 *  the associated 'active' level if found
1718 *  -1 if NOT found
1719 */
1720int
1721apic_polarity(int apic, int pin)
1722{
1723	int     x;
1724
1725	/* search each of the possible INTerrupt sources */
1726	for (x = 0; x < nintrs; ++x)
1727		if ((apic == ID_TO_IO(io_apic_ints[x].dst_apic_id)) &&
1728		    (pin == io_apic_ints[x].dst_apic_int))
1729			return (io_apic_ints[x].int_flags & 0x03);
1730
1731	return -1;		/* NOT found */
1732}
1733
1734
1735/*
1736 * set data according to MP defaults
1737 * FIXME: probably not complete yet...
1738 */
1739static void
1740default_mp_table(int type)
1741{
1742	int     ap_cpu_id;
1743#if defined(APIC_IO)
1744	int     io_apic_id;
1745	int     pin;
1746#endif	/* APIC_IO */
1747
1748#if 0
1749	printf("  MP default config type: %d\n", type);
1750	switch (type) {
1751	case 1:
1752		printf("   bus: ISA, APIC: 82489DX\n");
1753		break;
1754	case 2:
1755		printf("   bus: EISA, APIC: 82489DX\n");
1756		break;
1757	case 3:
1758		printf("   bus: EISA, APIC: 82489DX\n");
1759		break;
1760	case 4:
1761		printf("   bus: MCA, APIC: 82489DX\n");
1762		break;
1763	case 5:
1764		printf("   bus: ISA+PCI, APIC: Integrated\n");
1765		break;
1766	case 6:
1767		printf("   bus: EISA+PCI, APIC: Integrated\n");
1768		break;
1769	case 7:
1770		printf("   bus: MCA+PCI, APIC: Integrated\n");
1771		break;
1772	default:
1773		printf("   future type\n");
1774		break;
1775		/* NOTREACHED */
1776	}
1777#endif	/* 0 */
1778
1779	boot_cpu_id = (lapic.id & APIC_ID_MASK) >> 24;
1780	ap_cpu_id = (boot_cpu_id == 0) ? 1 : 0;
1781
1782	/* BSP */
1783	CPU_TO_ID(0) = boot_cpu_id;
1784	ID_TO_CPU(boot_cpu_id) = 0;
1785
1786	/* one and only AP */
1787	CPU_TO_ID(1) = ap_cpu_id;
1788	ID_TO_CPU(ap_cpu_id) = 1;
1789
1790#if defined(APIC_IO)
1791	/* one and only IO APIC */
1792	io_apic_id = (io_apic_read(0, IOAPIC_ID) & APIC_ID_MASK) >> 24;
1793
1794	/*
1795	 * sanity check, refer to MP spec section 3.6.6, last paragraph
1796	 * necessary as some hardware isn't properly setting up the IO APIC
1797	 */
1798#if defined(REALLY_ANAL_IOAPICID_VALUE)
1799	if (io_apic_id != 2) {
1800#else
1801	if ((io_apic_id == 0) || (io_apic_id == 1) || (io_apic_id == 15)) {
1802#endif	/* REALLY_ANAL_IOAPICID_VALUE */
1803		io_apic_set_id(0, 2);
1804		io_apic_id = 2;
1805	}
1806	IO_TO_ID(0) = io_apic_id;
1807	ID_TO_IO(io_apic_id) = 0;
1808#endif	/* APIC_IO */
1809
1810	/* fill out bus entries */
1811	switch (type) {
1812	case 1:
1813	case 2:
1814	case 3:
1815	case 4:
1816	case 5:
1817	case 6:
1818	case 7:
1819		bus_data[0].bus_id = default_data[type - 1][1];
1820		bus_data[0].bus_type = default_data[type - 1][2];
1821		bus_data[1].bus_id = default_data[type - 1][3];
1822		bus_data[1].bus_type = default_data[type - 1][4];
1823		break;
1824
1825	/* case 4: case 7:		   MCA NOT supported */
1826	default:		/* illegal/reserved */
1827		panic("BAD default MP config: %d", type);
1828		/* NOTREACHED */
1829	}
1830
1831#if defined(APIC_IO)
1832	/* general cases from MP v1.4, table 5-2 */
1833	for (pin = 0; pin < 16; ++pin) {
1834		io_apic_ints[pin].int_type = 0;
1835		io_apic_ints[pin].int_flags = 0x05;	/* edge/active-hi */
1836		io_apic_ints[pin].src_bus_id = 0;
1837		io_apic_ints[pin].src_bus_irq = pin;	/* IRQ2 caught below */
1838		io_apic_ints[pin].dst_apic_id = io_apic_id;
1839		io_apic_ints[pin].dst_apic_int = pin;	/* 1-to-1 */
1840	}
1841
1842	/* special cases from MP v1.4, table 5-2 */
1843	if (type == 2) {
1844		io_apic_ints[2].int_type = 0xff;	/* N/C */
1845		io_apic_ints[13].int_type = 0xff;	/* N/C */
1846#if !defined(APIC_MIXED_MODE)
1847		/** FIXME: ??? */
1848		panic("sorry, can't support type 2 default yet");
1849#endif	/* APIC_MIXED_MODE */
1850	}
1851	else
1852		io_apic_ints[2].src_bus_irq = 0;	/* ISA IRQ0 is on APIC INT 2 */
1853
1854	if (type == 7)
1855		io_apic_ints[0].int_type = 0xff;	/* N/C */
1856	else
1857		io_apic_ints[0].int_type = 3;	/* vectored 8259 */
1858#endif	/* APIC_IO */
1859}
1860
1861
1862/*
1863 * initialize all the SMP locks
1864 */
1865
1866/* critical region around IO APIC, apic_imen */
1867struct simplelock	imen_lock;
1868
1869/* critical region around splxx(), cpl, cml, cil, ipending */
1870struct simplelock	cpl_lock;
1871
1872/* Make FAST_INTR() routines sequential */
1873struct simplelock	fast_intr_lock;
1874
1875/* critical region around INTR() routines */
1876struct simplelock	intr_lock;
1877
1878/* lock regions protected in UP kernel via cli/sti */
1879struct simplelock	mpintr_lock;
1880
1881/* lock region used by kernel profiling */
1882struct simplelock	mcount_lock;
1883
1884#ifdef USE_COMLOCK
1885/* locks com (tty) data/hardware accesses: a FASTINTR() */
1886struct simplelock	com_lock;
1887#endif /* USE_COMLOCK */
1888
1889#ifdef USE_CLOCKLOCK
1890/* lock regions around the clock hardware */
1891struct simplelock	clock_lock;
1892#endif /* USE_CLOCKLOCK */
1893
1894/* lock around the MP rendezvous */
1895static struct simplelock smp_rv_lock;
1896
1897static void
1898init_locks(void)
1899{
1900	/*
1901	 * Get the initial mp_lock with a count of 1 for the BSP.
1902	 * This uses a LOGICAL cpu ID, ie BSP == 0.
1903	 */
1904	mp_lock = 0x00000001;
1905
1906#if 0
1907	/* ISR uses its own "giant lock" */
1908	isr_lock = FREE_LOCK;
1909#endif
1910
1911#if defined(APIC_INTR_DIAGNOSTIC) && defined(APIC_INTR_DIAGNOSTIC_IRQ)
1912	s_lock_init((struct simplelock*)&apic_itrace_debuglock);
1913#endif
1914
1915	s_lock_init((struct simplelock*)&mpintr_lock);
1916
1917	s_lock_init((struct simplelock*)&mcount_lock);
1918
1919	s_lock_init((struct simplelock*)&fast_intr_lock);
1920	s_lock_init((struct simplelock*)&intr_lock);
1921	s_lock_init((struct simplelock*)&imen_lock);
1922	s_lock_init((struct simplelock*)&cpl_lock);
1923	s_lock_init(&smp_rv_lock);
1924
1925#ifdef USE_COMLOCK
1926	s_lock_init((struct simplelock*)&com_lock);
1927#endif /* USE_COMLOCK */
1928#ifdef USE_CLOCKLOCK
1929	s_lock_init((struct simplelock*)&clock_lock);
1930#endif /* USE_CLOCKLOCK */
1931}
1932
1933
1934/* Wait for all APs to be fully initialized */
1935extern int wait_ap(unsigned int);
1936
1937/*
1938 * start each AP in our list
1939 */
1940static int
1941start_all_aps(u_int boot_addr)
1942{
1943	int     x, i, pg;
1944	u_char  mpbiosreason;
1945	u_long  mpbioswarmvec;
1946	struct globaldata *gd;
1947	char *stack;
1948
1949	POSTCODE(START_ALL_APS_POST);
1950
1951	/* initialize BSP's local APIC */
1952	apic_initialize();
1953	bsp_apic_ready = 1;
1954
1955	/* install the AP 1st level boot code */
1956	install_ap_tramp(boot_addr);
1957
1958
1959	/* save the current value of the warm-start vector */
1960	mpbioswarmvec = *((u_long *) WARMBOOT_OFF);
1961#ifndef PC98
1962	outb(CMOS_REG, BIOS_RESET);
1963	mpbiosreason = inb(CMOS_DATA);
1964#endif
1965
1966	/* record BSP in CPU map */
1967	all_cpus = 1;
1968
1969	/* set up 0 -> 4MB P==V mapping for AP boot */
1970	*(int *)PTD = PG_V | PG_RW | ((uintptr_t)(void *)KPTphys & PG_FRAME);
1971	invltlb();
1972
1973	/* start each AP */
1974	for (x = 1; x <= mp_naps; ++x) {
1975
1976		/* This is a bit verbose, it will go away soon.  */
1977
1978		/* first page of AP's private space */
1979		pg = x * i386_btop(sizeof(struct privatespace));
1980
1981		/* allocate a new private data page */
1982		gd = (struct globaldata *)kmem_alloc(kernel_map, PAGE_SIZE);
1983
1984		/* wire it into the private page table page */
1985		SMPpt[pg] = (pt_entry_t)(PG_V | PG_RW | vtophys(gd));
1986
1987		/* allocate and set up an idle stack data page */
1988		stack = (char *)kmem_alloc(kernel_map, UPAGES*PAGE_SIZE);
1989		for (i = 0; i < UPAGES; i++)
1990			SMPpt[pg + 5 + i] = (pt_entry_t)
1991			    (PG_V | PG_RW | vtophys(PAGE_SIZE * i + stack));
1992
1993		SMPpt[pg + 1] = 0;		/* *prv_CMAP1 */
1994		SMPpt[pg + 2] = 0;		/* *prv_CMAP2 */
1995		SMPpt[pg + 3] = 0;		/* *prv_CMAP3 */
1996		SMPpt[pg + 4] = 0;		/* *prv_PMAP1 */
1997
1998		/* prime data page for it to use */
1999		gd->gd_cpuid = x;
2000		gd->gd_cpu_lockid = x << 24;
2001		gd->gd_prv_CMAP1 = &SMPpt[pg + 1];
2002		gd->gd_prv_CMAP2 = &SMPpt[pg + 2];
2003		gd->gd_prv_CMAP3 = &SMPpt[pg + 3];
2004		gd->gd_prv_PMAP1 = &SMPpt[pg + 4];
2005		gd->gd_prv_CADDR1 = SMP_prvspace[x].CPAGE1;
2006		gd->gd_prv_CADDR2 = SMP_prvspace[x].CPAGE2;
2007		gd->gd_prv_CADDR3 = SMP_prvspace[x].CPAGE3;
2008		gd->gd_prv_PADDR1 = (unsigned *)SMP_prvspace[x].PPAGE1;
2009
2010		/* setup a vector to our boot code */
2011		*((volatile u_short *) WARMBOOT_OFF) = WARMBOOT_TARGET;
2012		*((volatile u_short *) WARMBOOT_SEG) = (boot_addr >> 4);
2013#ifndef PC98
2014		outb(CMOS_REG, BIOS_RESET);
2015		outb(CMOS_DATA, BIOS_WARM);	/* 'warm-start' */
2016#endif
2017
2018		bootSTK = &SMP_prvspace[x].idlestack[UPAGES*PAGE_SIZE];
2019		bootAP = x;
2020
2021		/* attempt to start the Application Processor */
2022		CHECK_INIT(99);	/* setup checkpoints */
2023		if (!start_ap(x, boot_addr)) {
2024			printf("AP #%d (PHY# %d) failed!\n", x, CPU_TO_ID(x));
2025			CHECK_PRINT("trace");	/* show checkpoints */
2026			/* better panic as the AP may be running loose */
2027			printf("panic y/n? [y] ");
2028			if (cngetc() != 'n')
2029				panic("bye-bye");
2030		}
2031		CHECK_PRINT("trace");		/* show checkpoints */
2032
2033		/* record its version info */
2034		cpu_apic_versions[x] = cpu_apic_versions[0];
2035
2036		all_cpus |= (1 << x);		/* record AP in CPU map */
2037	}
2038
2039	/* build our map of 'other' CPUs */
2040	other_cpus = all_cpus & ~(1 << cpuid);
2041
2042	/* fill in our (BSP) APIC version */
2043	cpu_apic_versions[0] = lapic.version;
2044
2045	/* restore the warmstart vector */
2046	*(u_long *) WARMBOOT_OFF = mpbioswarmvec;
2047#ifndef PC98
2048	outb(CMOS_REG, BIOS_RESET);
2049	outb(CMOS_DATA, mpbiosreason);
2050#endif
2051
2052	/*
2053	 * Set up the idle context for the BSP.  Similar to the above, except
2054	 * that some of it was done by locore, some by pmap.c, and some is
2055	 * implicit because the BSP is cpu#0, the page is initially zero, and
2056	 * we can refer to variables by name on the BSP.
2057	 */
2058
2059	/* Allocate and setup BSP idle stack */
2060	stack = (char *)kmem_alloc(kernel_map, UPAGES * PAGE_SIZE);
2061	for (i = 0; i < UPAGES; i++)
2062		SMPpt[5 + i] = (pt_entry_t)
2063		    (PG_V | PG_RW | vtophys(PAGE_SIZE * i + stack));
2064
2065	*(int *)PTD = 0;
2066	pmap_set_opt();
2067
2068	/* number of APs actually started */
2069	return mp_ncpus - 1;
2070}
2071
2072
2073/*
2074 * load the 1st level AP boot code into base memory.
2075 */
2076
2077/* targets for relocation */
2078extern void bigJump(void);
2079extern void bootCodeSeg(void);
2080extern void bootDataSeg(void);
2081extern void MPentry(void);
2082extern u_int MP_GDT;
2083extern u_int mp_gdtbase;
2084
2085static void
2086install_ap_tramp(u_int boot_addr)
2087{
2088	int     x;
2089	int     size = *(int *) ((u_long) & bootMP_size);
2090	u_char *src = (u_char *) ((u_long) bootMP);
2091	u_char *dst = (u_char *) boot_addr + KERNBASE;
2092	u_int   boot_base = (u_int) bootMP;
2093	u_int8_t *dst8;
2094	u_int16_t *dst16;
2095	u_int32_t *dst32;
2096
2097	POSTCODE(INSTALL_AP_TRAMP_POST);
2098
2099	for (x = 0; x < size; ++x)
2100		*dst++ = *src++;
2101
2102	/*
2103	 * modify addresses in code we just moved to basemem. unfortunately we
2104	 * need fairly detailed info about mpboot.s for this to work.  changes
2105	 * to mpboot.s might require changes here.
2106	 */
2107
2108	/* boot code is located in KERNEL space */
2109	dst = (u_char *) boot_addr + KERNBASE;
2110
2111	/* modify the lgdt arg */
2112	dst32 = (u_int32_t *) (dst + ((u_int) & mp_gdtbase - boot_base));
2113	*dst32 = boot_addr + ((u_int) & MP_GDT - boot_base);
2114
2115	/* modify the ljmp target for MPentry() */
2116	dst32 = (u_int32_t *) (dst + ((u_int) bigJump - boot_base) + 1);
2117	*dst32 = ((u_int) MPentry - KERNBASE);
2118
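	/*
	 * The next two patches appear to rewrite the low 24 bits of the base
	 * address (base 15:0, then base 23:16) in the trampoline's GDT code
	 * and data descriptors so the segments are based at boot_addr; this
	 * assumes mpboot.s places the bootCodeSeg/bootDataSeg labels at
	 * exactly those descriptor fields.
	 */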
2119	/* modify the target for boot code segment */
2120	dst16 = (u_int16_t *) (dst + ((u_int) bootCodeSeg - boot_base));
2121	dst8 = (u_int8_t *) (dst16 + 1);
2122	*dst16 = (u_int) boot_addr & 0xffff;
2123	*dst8 = ((u_int) boot_addr >> 16) & 0xff;
2124
2125	/* modify the target for boot data segment */
2126	dst16 = (u_int16_t *) (dst + ((u_int) bootDataSeg - boot_base));
2127	dst8 = (u_int8_t *) (dst16 + 1);
2128	*dst16 = (u_int) boot_addr & 0xffff;
2129	*dst8 = ((u_int) boot_addr >> 16) & 0xff;
2130}
2131
2132
2133/*
2134 * This function starts the AP (application processor) given by
2135 * 'logical_cpu'; its physical APIC ID is looked up via CPU_TO_ID().
2136 * It does quite a "song and dance" to accomplish this, which is
2137 * necessary because of the nuances of the different hardware we might
2138 * encounter.  It ain't pretty, but it seems to work.
2139 */
2140static int
2141start_ap(int logical_cpu, u_int boot_addr)
2142{
2143	int     physical_cpu;
2144	int     vector;
2145	int     cpus;
2146	u_long  icr_lo, icr_hi;
2147
2148	POSTCODE(START_AP_POST);
2149
2150	/* get the PHYSICAL APIC ID# */
2151	physical_cpu = CPU_TO_ID(logical_cpu);
2152
2153	/* calculate the vector */
2154	vector = (boot_addr >> 12) & 0xff;
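	/*
	 * (The STARTUP IPI vector field is the physical page number of the
	 * trampoline: the AP begins in real mode at CS:IP = (vector << 8):0,
	 * i.e. physical address vector * 0x1000, so boot_addr must be a
	 * page-aligned address below 1MB.)
	 */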
2155
2156	/* used as a watchpoint to signal AP startup */
2157	cpus = mp_ncpus;
2158
2159	/*
2160	 * first we do an INIT/RESET IPI this INIT IPI might be run, reseting
2161	 * First we do an INIT/RESET IPI.  This INIT IPI might be run,
2162	 * resetting and starting the target CPU; OR it might be latched
2163	 * (P5 bug), leaving the CPU waiting for a STARTUP IPI; OR it might
2164	 * simply be ignored.
2165
2166	/* setup the address for the target AP */
2167	icr_hi = lapic.icr_hi & ~APIC_ID_MASK;
2168	icr_hi |= (physical_cpu << 24);
2169	lapic.icr_hi = icr_hi;
2170
2171	/* do an INIT IPI: assert RESET */
2172	icr_lo = lapic.icr_lo & 0xfff00000;
2173	lapic.icr_lo = icr_lo | 0x0000c500;
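	/*
	 * (0x0000c500 = trigger mode: level, level: assert, delivery mode:
	 * INIT, vector: 0.  The 0x00008500 written below is the same with
	 * the level bit cleared, i.e. the de-assert half of the INIT
	 * sequence.)
	 */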
2174
2175	/* wait for pending status end */
2176	while (lapic.icr_lo & APIC_DELSTAT_MASK)
2177		 /* spin */ ;
2178
2179	/* do an INIT IPI: deassert RESET */
2180	lapic.icr_lo = icr_lo | 0x00008500;
2181
2182	/* wait for pending status end */
2183	u_sleep(10000);		/* wait ~10ms */
2184	while (lapic.icr_lo & APIC_DELSTAT_MASK)
2185		 /* spin */ ;
2186
2187	/*
2188	 * Next we do a STARTUP IPI.  The previous INIT IPI might still be
2189	 * latched (P5 bug); this 1st STARTUP would then terminate
2190	 * immediately and the previously started INIT IPI would continue.
2191	 * OR the previous INIT IPI has already run, and this STARTUP IPI
2192	 * will run.  OR the previous INIT IPI was ignored, and this STARTUP
2193	 * IPI will run.
2194	 */
2195
2196	/* do a STARTUP IPI */
2197	lapic.icr_lo = icr_lo | 0x00000600 | vector;
2198	while (lapic.icr_lo & APIC_DELSTAT_MASK)
2199		 /* spin */ ;
2200	u_sleep(200);		/* wait ~200us */
2201
2202	/*
2203	 * Finally we do a 2nd STARTUP IPI.  This 2nd STARTUP IPI should run
2204	 * IF the previous STARTUP IPI was cancelled by a latched INIT IPI;
2205	 * otherwise it will be ignored, as only ONE STARTUP IPI is recognized
2206	 * after a hardware RESET or INIT IPI.
2207	 */
2208
2209	lapic.icr_lo = icr_lo | 0x00000600 | vector;
2210	while (lapic.icr_lo & APIC_DELSTAT_MASK)
2211		 /* spin */ ;
2212	u_sleep(200);		/* wait ~200us */
2213
2214	/* wait for it to start */
2215	set_apic_timer(5000000);	/* == 5 seconds */
2216	while (read_apic_timer())
2217		if (mp_ncpus > cpus)
2218			return 1;	/* return SUCCESS */
2219
2220	return 0;		/* return FAILURE */
2221}
2222
2223
2224/*
2225 * Flush the TLB on all other CPUs.
2226 *
2227 * XXX: Needs to handshake and wait for completion before proceeding.
2228 */
2229void
2230smp_invltlb(void)
2231{
2232#if defined(APIC_IO)
2233	if (smp_started && invltlb_ok)
2234		all_but_self_ipi(XINVLTLB_OFFSET);
2235#endif  /* APIC_IO */
2236}
2237
2238void
2239invlpg(u_int addr)
2240{
2241	__asm   __volatile("invlpg (%0)"::"r"(addr):"memory");
2242
2243	/* send a message to the other CPUs */
2244	smp_invltlb();
2245}
2246
2247void
2248invltlb(void)
2249{
2250	u_long  temp;
2251
2252	/*
2253	 * This should be implemented as load_cr3(rcr3()) when load_cr3() is
2254	 * inlined.
2255	 */
2256	__asm __volatile("movl %%cr3, %0; movl %0, %%cr3":"=r"(temp) :: "memory");
2257
2258	/* send a message to the other CPUs */
2259	smp_invltlb();
2260}
2261
2262
2263/*
2264 * When called the executing CPU will send an IPI to all other CPUs
2265 *  requesting that they halt execution.
2266 *
2267 * Usually (but not necessarily) called with 'other_cpus' as its arg.
2268 *
2269 *  - Signals all CPUs in map to stop.
2270 *  - Waits for each to stop.
2271 *
2272 * Returns:
2273 *  -1: error
2274 *   0: NA
2275 *   1: ok
2276 *
2277 * XXX FIXME: this is not MP-safe, needs a lock to prevent multiple CPUs
2278 *            from executing at same time.
2279 */
2280int
2281stop_cpus(u_int map)
2282{
2283	if (!smp_started)
2284		return 0;
2285
2286	/* send the Xcpustop IPI to all CPUs in map */
2287	selected_apic_ipi(map, XCPUSTOP_OFFSET, APIC_DELMODE_FIXED);
2288
2289	while ((stopped_cpus & map) != map)
2290		/* spin */ ;
2291
2292	return 1;
2293}
2294
2295
2296/*
2297 * Called by a CPU to restart stopped CPUs.
2298 *
2299 * Usually (but not necessarily) called with 'stopped_cpus' as its arg.
2300 *
2301 *  - Signals all CPUs in map to restart.
2302 *  - Waits for each to restart.
2303 *
2304 * Returns:
2305 *  -1: error
2306 *   0: NA
2307 *   1: ok
2308 */
2309int
2310restart_cpus(u_int map)
2311{
2312	if (!smp_started)
2313		return 0;
2314
2315	started_cpus = map;		/* signal other cpus to restart */
2316
2317	while ((stopped_cpus & map) != 0) /* wait for each to clear its bit */
2318		/* spin */ ;
2319
2320	return 1;
2321}
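/*
 * Hypothetical usage sketch of the pair above, e.g. around a short critical
 * section that must not race with the other CPUs:
 *
 *	stop_cpus(other_cpus);
 *	... touch the shared state ...
 *	restart_cpus(stopped_cpus);
 */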
2322
2323int smp_active = 0;	/* are the APs allowed to run? */
2324SYSCTL_INT(_machdep, OID_AUTO, smp_active, CTLFLAG_RW, &smp_active, 0, "");
2325
2326/* XXX maybe should be hw.ncpu */
2327static int smp_cpus = 1;	/* how many cpu's running */
2328SYSCTL_INT(_machdep, OID_AUTO, smp_cpus, CTLFLAG_RD, &smp_cpus, 0, "");
2329
2330int invltlb_ok = 0;	/* throttle smp_invltlb() till safe */
2331SYSCTL_INT(_machdep, OID_AUTO, invltlb_ok, CTLFLAG_RW, &invltlb_ok, 0, "");
2332
2333/* Warning: Do not staticize.  Used from swtch.s */
2334int do_page_zero_idle = 1; /* bzero pages for fun and profit in idleloop */
2335SYSCTL_INT(_machdep, OID_AUTO, do_page_zero_idle, CTLFLAG_RW,
2336	   &do_page_zero_idle, 0, "");
2337
2338/* Is forwarding of an interrupt to the CPU holding the ISR lock enabled? */
2339int forward_irq_enabled = 1;
2340SYSCTL_INT(_machdep, OID_AUTO, forward_irq_enabled, CTLFLAG_RW,
2341	   &forward_irq_enabled, 0, "");
2342
2343/* Enable forwarding of a signal to a process running on a different CPU */
2344static int forward_signal_enabled = 1;
2345SYSCTL_INT(_machdep, OID_AUTO, forward_signal_enabled, CTLFLAG_RW,
2346	   &forward_signal_enabled, 0, "");
2347
2348/* Enable forwarding of roundrobin to all other cpus */
2349static int forward_roundrobin_enabled = 1;
2350SYSCTL_INT(_machdep, OID_AUTO, forward_roundrobin_enabled, CTLFLAG_RW,
2351	   &forward_roundrobin_enabled, 0, "");
2352
2353/*
2354 * This is called once the rest of the system is up and running and we're
2355 * ready to let the APs out of the pen.
2356 */
2357void ap_init(void);
2358
2359void
2360ap_init()
2361{
2362	u_int	apic_id;
2363
2364	/* BSP may have changed PTD while we're waiting for the lock */
2365	cpu_invltlb();
2366
2367	smp_cpus++;
2368
2369#if defined(I586_CPU) && !defined(NO_F00F_HACK)
2370	lidt(&r_idt);
2371#endif
2372
2373	/* Build our map of 'other' CPUs. */
2374	other_cpus = all_cpus & ~(1 << cpuid);
2375
2376	printf("SMP: AP CPU #%d Launched!\n", cpuid);
2377
2378	/* XXX FIXME: i386 specific, and redundant: Setup the FPU. */
2379	load_cr0((rcr0() & ~CR0_EM) | CR0_MP | CR0_NE | CR0_TS);
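	/*
	 * (i.e. clear CR0_EM so FPU instructions are not emulated, set
	 * CR0_NE for native FPU exception reporting, set CR0_MP so WAIT
	 * honors TS, and set CR0_TS so the first FPU use traps for a lazy
	 * state load.)
	 */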
2380
2381	/* set up FPU state on the AP */
2382	npxinit(__INITIAL_NPXCW__);
2383
2384	/* A quick check from sanity claus */
2385	apic_id = (apic_id_to_logical[(lapic.id & 0x0f000000) >> 24]);
2386	if (cpuid != apic_id) {
2387		printf("SMP: cpuid = %d\n", cpuid);
2388		printf("SMP: apic_id = %d\n", apic_id);
2389		printf("PTD[MPPTDI] = %p\n", (void *)PTD[MPPTDI]);
2390		panic("cpuid mismatch! boom!!");
2391	}
2392
2393	/* Init local apic for irq's */
2394	apic_initialize();
2395
2396	/* Set memory range attributes for this CPU to match the BSP */
2397	mem_range_AP_init();
2398
2399	/*
2400	 * Activate smp_invltlb, although strictly speaking, this isn't
2401	 * quite correct yet.  We should have a bitfield for cpus willing
2402	 * to accept TLB flush IPI's or something and sync them.
2403	 */
2404	if (smp_cpus == mp_ncpus) {
2405		invltlb_ok = 1;
2406		smp_started = 1; /* enable IPI's, tlb shootdown, freezes etc */
2407		smp_active = 1;	 /* historic */
2408	}
2409}
2410
2411#ifdef BETTER_CLOCK
2412
2413#define CHECKSTATE_USER	0
2414#define CHECKSTATE_SYS	1
2415#define CHECKSTATE_INTR	2
2416
2417/* Do not staticize.  Used from apic_vector.s */
2418struct proc*	checkstate_curproc[NCPU];
2419int		checkstate_cpustate[NCPU];
2420u_long		checkstate_pc[NCPU];
2421
2422extern long	cp_time[CPUSTATES];
2423
2424#define PC_TO_INDEX(pc, prof)				\
2425        ((int)(((u_quad_t)((pc) - (prof)->pr_off) *	\
2426            (u_quad_t)((prof)->pr_scale)) >> 16) & ~1)
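/*
 * (pr_scale is the profil(2)-style 16.16 fixed-point scale factor, so the
 * product shifted right by 16 is a byte offset into the profiling buffer;
 * the "& ~1" rounds it down to a 16-bit counter boundary.)
 */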
2427
2428static void
2429addupc_intr_forwarded(struct proc *p, int id, int *astmap)
2430{
2431	int i;
2432	struct uprof *prof;
2433	u_long pc;
2434
2435	pc = checkstate_pc[id];
2436	prof = &p->p_stats->p_prof;
2437	if (pc >= prof->pr_off &&
2438	    (i = PC_TO_INDEX(pc, prof)) < prof->pr_size) {
2439		if ((p->p_flag & P_OWEUPC) == 0) {
2440			prof->pr_addr = pc;
2441			prof->pr_ticks = 1;
2442			p->p_flag |= P_OWEUPC;
2443		}
2444		*astmap |= (1 << id);
2445	}
2446}
2447
2448static void
2449forwarded_statclock(int id, int pscnt, int *astmap)
2450{
2451	struct pstats *pstats;
2452	long rss;
2453	struct rusage *ru;
2454	struct vmspace *vm;
2455	int cpustate;
2456	struct proc *p;
2457#ifdef GPROF
2458	register struct gmonparam *g;
2459	int i;
2460#endif
2461
2462	p = checkstate_curproc[id];
2463	cpustate = checkstate_cpustate[id];
2464
2465	switch (cpustate) {
2466	case CHECKSTATE_USER:
2467		if (p->p_flag & P_PROFIL)
2468			addupc_intr_forwarded(p, id, astmap);
2469		if (pscnt > 1)
2470			return;
2471		p->p_uticks++;
2472		if (p->p_nice > NZERO)
2473			cp_time[CP_NICE]++;
2474		else
2475			cp_time[CP_USER]++;
2476		break;
2477	case CHECKSTATE_SYS:
2478#ifdef GPROF
2479		/*
2480		 * Kernel statistics are just like addupc_intr, only easier.
2481		 */
2482		g = &_gmonparam;
2483		if (g->state == GMON_PROF_ON) {
2484			i = checkstate_pc[id] - g->lowpc;
2485			if (i < g->textsize) {
2486				i /= HISTFRACTION * sizeof(*g->kcount);
2487				g->kcount[i]++;
2488			}
2489		}
2490#endif
2491		if (pscnt > 1)
2492			return;
2493
2494		if (!p)
2495			cp_time[CP_IDLE]++;
2496		else {
2497			p->p_sticks++;
2498			cp_time[CP_SYS]++;
2499		}
2500		break;
2501	case CHECKSTATE_INTR:
2502	default:
2503#ifdef GPROF
2504		/*
2505		 * Kernel statistics are just like addupc_intr, only easier.
2506		 */
2507		g = &_gmonparam;
2508		if (g->state == GMON_PROF_ON) {
2509			i = checkstate_pc[id] - g->lowpc;
2510			if (i < g->textsize) {
2511				i /= HISTFRACTION * sizeof(*g->kcount);
2512				g->kcount[i]++;
2513			}
2514		}
2515#endif
2516		if (pscnt > 1)
2517			return;
2518		if (p)
2519			p->p_iticks++;
2520		cp_time[CP_INTR]++;
2521	}
2522	if (p != NULL) {
2523		schedclock(p);
2524
2525		/* Update resource usage integrals and maximums. */
2526		if ((pstats = p->p_stats) != NULL &&
2527		    (ru = &pstats->p_ru) != NULL &&
2528		    (vm = p->p_vmspace) != NULL) {
2529			ru->ru_ixrss += pgtok(vm->vm_tsize);
2530			ru->ru_idrss += pgtok(vm->vm_dsize);
2531			ru->ru_isrss += pgtok(vm->vm_ssize);
2532			rss = pgtok(vmspace_resident_count(vm));
2533			if (ru->ru_maxrss < rss)
2534				ru->ru_maxrss = rss;
2535        	}
2536	}
2537}
2538
2539void
2540forward_statclock(int pscnt)
2541{
2542	int map;
2543	int id;
2544	int i;
2545
2546	/* Kludge. We don't yet have separate locks for the interrupts
2547	 * and the kernel. This means that we cannot let the other processors
2548	 * handle complex interrupts while inhibiting them from entering
2549	 * the kernel in a non-interrupt context.
2550	 *
2551	 * What we can do, without changing the locking mechanisms yet,
2552	 * is to let the other processors handle a very simple interrupt
2553	 * (which determines the processor states) and to do the main
2554	 * work ourselves.
2555	 */
2556
2557	if (!smp_started || !invltlb_ok || cold || panicstr)
2558		return;
2559
2560	/* Step 1: Probe state   (user, cpu, interrupt, spinlock, idle ) */
2561
2562	map = other_cpus & ~stopped_cpus ;
2563	checkstate_probed_cpus = 0;
2564	if (map != 0)
2565		selected_apic_ipi(map,
2566				  XCPUCHECKSTATE_OFFSET, APIC_DELMODE_FIXED);
2567
2568	i = 0;
2569	while (checkstate_probed_cpus != map) {
2570		/* spin */
2571		i++;
2572		if (i == 100000) {
2573#ifdef BETTER_CLOCK_DIAGNOSTIC
2574			printf("forward_statclock: checkstate %x\n",
2575			       checkstate_probed_cpus);
2576#endif
2577			break;
2578		}
2579	}
2580
2581	/*
2582	 * Step 2: walk through other processors processes, update ticks and
2583	 * Step 2: walk through other processors' processes, update ticks and
2584	 */
2585
2586	map = 0;
2587	for (id = 0; id < mp_ncpus; id++) {
2588		if (id == cpuid)
2589			continue;
2590		if (((1 << id) & checkstate_probed_cpus) == 0)
2591			continue;
2592		forwarded_statclock(id, pscnt, &map);
2593	}
2594	if (map != 0) {
2595		checkstate_need_ast |= map;
2596		selected_apic_ipi(map, XCPUAST_OFFSET, APIC_DELMODE_FIXED);
2597		i = 0;
2598		while ((checkstate_need_ast & map) != 0) {
2599			/* spin */
2600			i++;
2601			if (i > 100000) {
2602#ifdef BETTER_CLOCK_DIAGNOSTIC
2603				printf("forward_statclock: dropped ast 0x%x\n",
2604				       checkstate_need_ast & map);
2605#endif
2606				break;
2607			}
2608		}
2609	}
2610}
2611
2612void
2613forward_hardclock(int pscnt)
2614{
2615	int map;
2616	int id;
2617	struct proc *p;
2618	struct pstats *pstats;
2619	int i;
2620
2621	/* Kludge. We don't yet have separate locks for the interrupts
2622	 * and the kernel. This means that we cannot let the other processors
2623	 * handle complex interrupts while inhibiting them from entering
2624	 * the kernel in a non-interrupt context.
2625	 *
2626	 * What we can do, without changing the locking mechanisms yet,
2627	 * is to let the other processors handle a very simple interrupt
2628	 * (which determines the processor states) and to do the main
2629	 * work ourselves.
2630	 */
2631
2632	if (!smp_started || !invltlb_ok || cold || panicstr)
2633		return;
2634
2635	/* Step 1: Probe state   (user, cpu, interrupt, spinlock, idle) */
2636
2637	map = other_cpus & ~stopped_cpus ;
2638	checkstate_probed_cpus = 0;
2639	if (map != 0)
2640		selected_apic_ipi(map,
2641				  XCPUCHECKSTATE_OFFSET, APIC_DELMODE_FIXED);
2642
2643	i = 0;
2644	while (checkstate_probed_cpus != map) {
2645		/* spin */
2646		i++;
2647		if (i == 100000) {
2648#ifdef BETTER_CLOCK_DIAGNOSTIC
2649			printf("forward_hardclock: checkstate %x\n",
2650			       checkstate_probed_cpus);
2651#endif
2652			break;
2653		}
2654	}
2655
2656	/*
2657	 * Step 2: walk through other processors' processes, update virtual
2658	 * timer and profiling timer. If stathz == 0, also update ticks and
2659	 * profiling info.
2660	 */
2661
2662	map = 0;
2663	for (id = 0; id < mp_ncpus; id++) {
2664		if (id == cpuid)
2665			continue;
2666		if (((1 << id) & checkstate_probed_cpus) == 0)
2667			continue;
2668		p = checkstate_curproc[id];
2669		if (p) {
2670			pstats = p->p_stats;
2671			if (checkstate_cpustate[id] == CHECKSTATE_USER &&
2672			    timevalisset(&pstats->p_timer[ITIMER_VIRTUAL].it_value) &&
2673			    itimerdecr(&pstats->p_timer[ITIMER_VIRTUAL], tick) == 0) {
2674				psignal(p, SIGVTALRM);
2675				map |= (1 << id);
2676			}
2677			if (timevalisset(&pstats->p_timer[ITIMER_PROF].it_value) &&
2678			    itimerdecr(&pstats->p_timer[ITIMER_PROF], tick) == 0) {
2679				psignal(p, SIGPROF);
2680				map |= (1 << id);
2681			}
2682		}
2683		if (stathz == 0) {
2684			forwarded_statclock( id, pscnt, &map);
2685		}
2686	}
2687	if (map != 0) {
2688		checkstate_need_ast |= map;
2689		selected_apic_ipi(map, XCPUAST_OFFSET, APIC_DELMODE_FIXED);
2690		i = 0;
2691		while ((checkstate_need_ast & map) != 0) {
2692			/* spin */
2693			i++;
2694			if (i > 100000) {
2695#ifdef BETTER_CLOCK_DIAGNOSTIC
2696				printf("forward_hardclock: dropped ast 0x%x\n",
2697				       checkstate_need_ast & map);
2698#endif
2699				break;
2700			}
2701		}
2702	}
2703}
2704
2705#endif /* BETTER_CLOCK */
2706
2707void
2708forward_signal(struct proc *p)
2709{
2710	int map;
2711	int id;
2712	int i;
2713
2714	/* Kludge. We don't yet have separate locks for the interrupts
2715	 * and the kernel. This means that we cannot let the other processors
2716	 * handle complex interrupts while inhibiting them from entering
2717	 * the kernel in a non-interrupt context.
2718	 *
2719	 * What we can do, without changing the locking mechanisms yet,
2720	 * is to let the other processors handle a very simple interrupt
2721	 * (which determines the processor states) and to do the main
2722	 * work ourselves.
2723	 */
2724
2725	if (!smp_started || !invltlb_ok || cold || panicstr)
2726		return;
2727	if (!forward_signal_enabled)
2728		return;
2729	while (1) {
2730		if (p->p_stat != SRUN)
2731			return;
2732		id = p->p_oncpu;
2733		if (id == 0xff)
2734			return;
2735		map = (1<<id);
2736		checkstate_need_ast |= map;
2737		selected_apic_ipi(map, XCPUAST_OFFSET, APIC_DELMODE_FIXED);
2738		i = 0;
2739		while ((checkstate_need_ast & map) != 0) {
2740			/* spin */
2741			i++;
2742			if (i > 100000) {
2743#if 0
2744				printf("forward_signal: dropped ast 0x%x\n",
2745				       checkstate_need_ast & map);
2746#endif
2747				break;
2748			}
2749		}
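		/*
		 * If the process is still on the CPU we targeted, the AST we
		 * just posted will be handled there; if it has migrated in
		 * the meantime, loop and re-target its new CPU.
		 */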
2750		if (id == p->p_oncpu)
2751			return;
2752	}
2753}
2754
2755void
2756forward_roundrobin(void)
2757{
2758	u_int map;
2759	int i;
2760
2761	if (!smp_started || !invltlb_ok || cold || panicstr)
2762		return;
2763	if (!forward_roundrobin_enabled)
2764		return;
2765	resched_cpus |= other_cpus;
2766	map = other_cpus & ~stopped_cpus ;
2767#if 1
2768	selected_apic_ipi(map, XCPUAST_OFFSET, APIC_DELMODE_FIXED);
2769#else
2770	(void) all_but_self_ipi(XCPUAST_OFFSET);
2771#endif
2772	i = 0;
2773	while ((checkstate_need_ast & map) != 0) {
2774		/* spin */
2775		i++;
2776		if (i > 100000) {
2777#if 0
2778			printf("forward_roundrobin: dropped ast 0x%x\n",
2779			       checkstate_need_ast & map);
2780#endif
2781			break;
2782		}
2783	}
2784}
2785
2786
2787#ifdef APIC_INTR_REORDER
2788/*
2789 *	Maintain mapping from softintr vector to isr bit in local apic.
2790 */
2791void
2792set_lapic_isrloc(int intr, int vector)
2793{
2794	if (intr < 0 || intr > 32)
2795		panic("set_lapic_isrloc: bad intr argument: %d",intr);
2796	if (vector < ICU_OFFSET || vector > 255)
2797		panic("set_lapic_isrloc: bad vector argument: %d",vector);
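	/*
	 * (Each ISR register covers 32 vectors, and the ISR registers are
	 * spaced 16 bytes (four u_ints) apart in the local APIC register
	 * page, hence isr0 + (vector>>5)*4; the bit within that register is
	 * vector mod 32.  This assumes the struct LAPIC layout keeps that
	 * 16-byte spacing.)
	 */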
2798	apic_isrbit_location[intr].location = &lapic.isr0 + ((vector>>5)<<2);
2799	apic_isrbit_location[intr].bit = (1<<(vector & 31));
2800}
2801#endif
2802
2803/*
2804 * All-CPU rendezvous.  CPUs are signalled, all execute the setup function
2805 * (if specified), rendezvous, execute the action function (if specified),
2806 * rendezvous again, execute the teardown function (if specified), and then
2807 * resume.
2808 *
2809 * Note that the supplied external functions _must_ be reentrant and aware
2810 * that they are running in parallel and in an unknown lock context.
2811 */
2812static void (*smp_rv_setup_func)(void *arg);
2813static void (*smp_rv_action_func)(void *arg);
2814static void (*smp_rv_teardown_func)(void *arg);
2815static void *smp_rv_func_arg;
2816static volatile int smp_rv_waiters[2];
2817
2818void
2819smp_rendezvous_action(void)
2820{
2821	/* setup function */
2822	if (smp_rv_setup_func != NULL)
2823		smp_rv_setup_func(smp_rv_func_arg);
2824	/* spin on entry rendezvous */
2825	atomic_add_int(&smp_rv_waiters[0], 1);
2826	while (smp_rv_waiters[0] < mp_ncpus)
2827		;
2828	/* action function */
2829	if (smp_rv_action_func != NULL)
2830		smp_rv_action_func(smp_rv_func_arg);
2831	/* spin on exit rendezvous */
2832	atomic_add_int(&smp_rv_waiters[1], 1);
2833	while (smp_rv_waiters[1] < mp_ncpus)
2834		;
2835	/* teardown function */
2836	if (smp_rv_teardown_func != NULL)
2837		smp_rv_teardown_func(smp_rv_func_arg);
2838}
2839
2840void
2841smp_rendezvous(void (* setup_func)(void *),
2842	       void (* action_func)(void *),
2843	       void (* teardown_func)(void *),
2844	       void *arg)
2845{
2846	u_int	efl;
2847
2848	/* obtain rendezvous lock */
2849	s_lock(&smp_rv_lock);		/* XXX sleep here? NOWAIT flag? */
2850
2851	/* set static function pointers */
2852	smp_rv_setup_func = setup_func;
2853	smp_rv_action_func = action_func;
2854	smp_rv_teardown_func = teardown_func;
2855	smp_rv_func_arg = arg;
2856	smp_rv_waiters[0] = 0;
2857	smp_rv_waiters[1] = 0;
2858
2859	/* disable interrupts on this CPU, save interrupt status */
2860	efl = read_eflags();
2861	write_eflags(efl & ~PSL_I);
2862
2863	/* signal other processors, which will enter the IPI with interrupts off */
2864	all_but_self_ipi(XRENDEZVOUS_OFFSET);
2865
2866	/* call executor function */
2867	smp_rendezvous_action();
2868
2869	/* restore interrupt flag */
2870	write_eflags(efl);
2871
2872	/* release lock */
2873	s_unlock(&smp_rv_lock);
2874}
2875