mptable.h (r71818 -> r72200)
1/*
2 * Copyright (c) 1996, by Steve Passe
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. The name of the developer may NOT be used to endorse or promote products
11 * derived from this software without specific prior written permission.
12 *
13 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
14 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
16 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
17 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
18 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
19 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
20 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
21 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
22 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
23 * SUCH DAMAGE.
24 *
25 * $FreeBSD: head/sys/i386/include/mptable.h 72200 2001-02-09 06:11:45Z bmilekic $
26 */
27
28#include "opt_cpu.h"
29#include "opt_user_ldt.h"
30
31#ifdef SMP
32#include <machine/smptests.h>
33#else
34#error
35#endif
36
37#include <sys/param.h>
38#include <sys/bus.h>
39#include <sys/systm.h>
40#include <sys/kernel.h>
41#include <sys/proc.h>
42#include <sys/sysctl.h>
43#include <sys/malloc.h>
44#include <sys/memrange.h>
45#include <sys/mutex.h>
46#ifdef BETTER_CLOCK
47#include <sys/dkstat.h>
48#endif
49#include <sys/cons.h> /* cngetc() */
50
51#include <vm/vm.h>
52#include <vm/vm_param.h>
53#include <vm/pmap.h>
54#include <vm/vm_kern.h>
55#include <vm/vm_extern.h>
56#ifdef BETTER_CLOCK
57#include <sys/lock.h>
58#include <vm/vm_map.h>
59#include <sys/user.h>
60#ifdef GPROF
61#include <sys/gmon.h>
62#endif
63#endif
64
65#include <machine/smp.h>
66#include <machine/apic.h>
67#include <machine/atomic.h>
68#include <machine/cpufunc.h>
69#include <machine/mpapic.h>
70#include <machine/psl.h>
71#include <machine/segments.h>
72#include <machine/smptests.h> /** TEST_DEFAULT_CONFIG, TEST_TEST1 */
73#include <machine/tss.h>
74#include <machine/specialreg.h>
75#include <machine/globaldata.h>
76
77#if defined(APIC_IO)
78#include <machine/md_var.h> /* setidt() */
79#include <i386/isa/icu.h> /* IPIs */
80#include <i386/isa/intr_machdep.h> /* IPIs */
81#endif /* APIC_IO */
82
83#if defined(TEST_DEFAULT_CONFIG)
84#define MPFPS_MPFB1 TEST_DEFAULT_CONFIG
85#else
86#define MPFPS_MPFB1 mpfps->mpfb1
87#endif /* TEST_DEFAULT_CONFIG */
88
89#define WARMBOOT_TARGET 0
90#define WARMBOOT_OFF (KERNBASE + 0x0467)
91#define WARMBOOT_SEG (KERNBASE + 0x0469)
92
93#ifdef PC98
94#define BIOS_BASE (0xe8000)
95#define BIOS_SIZE (0x18000)
96#else
97#define BIOS_BASE (0xf0000)
98#define BIOS_SIZE (0x10000)
99#endif
100#define BIOS_COUNT (BIOS_SIZE/4)
101
102#define CMOS_REG (0x70)
103#define CMOS_DATA (0x71)
104#define BIOS_RESET (0x0f)
105#define BIOS_WARM (0x0a)
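/*
 * Warm-boot plumbing used by start_all_aps()/start_ap() below, following
 * the MP spec's universal start-up algorithm: the CMOS shutdown-status
 * byte (register BIOS_RESET, 0x0f) is set to BIOS_WARM (0x0a), meaning
 * "warm start: jump via the vector at 0040:0067 without EOI", and that
 * warm-reset vector (offset word at 0x467, segment word at 0x469, mapped
 * here through KERNBASE) is pointed at the AP trampoline in base memory.
 * The vector matters mostly for external 82489DX-style APICs; integrated
 * local APICs are kicked with INIT/STARTUP IPIs in start_ap().
 */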
106
107#define PROCENTRY_FLAG_EN 0x01
108#define PROCENTRY_FLAG_BP 0x02
109#define IOAPICENTRY_FLAG_EN 0x01
110
111
112/* MP Floating Pointer Structure */
113typedef struct MPFPS {
114 char signature[4];
115 void *pap;
116 u_char length;
117 u_char spec_rev;
118 u_char checksum;
119 u_char mpfb1;
120 u_char mpfb2;
121 u_char mpfb3;
122 u_char mpfb4;
123 u_char mpfb5;
124} *mpfps_t;
125
126/* MP Configuration Table Header */
127typedef struct MPCTH {
128 char signature[4];
129 u_short base_table_length;
130 u_char spec_rev;
131 u_char checksum;
132 u_char oem_id[8];
133 u_char product_id[12];
134 void *oem_table_pointer;
135 u_short oem_table_size;
136 u_short entry_count;
137 void *apic_address;
138 u_short extended_table_length;
139 u_char extended_table_checksum;
140 u_char reserved;
141} *mpcth_t;
142
143
144typedef struct PROCENTRY {
145 u_char type;
146 u_char apic_id;
147 u_char apic_version;
148 u_char cpu_flags;
149 u_long cpu_signature;
150 u_long feature_flags;
151 u_long reserved1;
152 u_long reserved2;
153} *proc_entry_ptr;
154
155typedef struct BUSENTRY {
156 u_char type;
157 u_char bus_id;
158 char bus_type[6];
159} *bus_entry_ptr;
160
161typedef struct IOAPICENTRY {
162 u_char type;
163 u_char apic_id;
164 u_char apic_version;
165 u_char apic_flags;
166 void *apic_address;
167} *io_apic_entry_ptr;
168
169typedef struct INTENTRY {
170 u_char type;
171 u_char int_type;
172 u_short int_flags;
173 u_char src_bus_id;
174 u_char src_bus_irq;
175 u_char dst_apic_id;
176 u_char dst_apic_int;
177} *int_entry_ptr;
178
179/* descriptions of MP basetable entries */
180typedef struct BASETABLE_ENTRY {
181 u_char type;
182 u_char length;
183 char name[16];
184} basetable_entry;
185
186/*
187 * This code MUST be enabled both here and in mpboot.s.
188 * It tracks the very early stages of AP boot by placing values in CMOS RAM.
189 * It is NORMALLY never needed, hence the primitive method of enabling it.
190 *
191#define CHECK_POINTS
192 */
193
194#if defined(CHECK_POINTS) && !defined(PC98)
195#define CHECK_READ(A) (outb(CMOS_REG, (A)), inb(CMOS_DATA))
196#define CHECK_WRITE(A,D) (outb(CMOS_REG, (A)), outb(CMOS_DATA, (D)))
197
198#define CHECK_INIT(D); \
199 CHECK_WRITE(0x34, (D)); \
200 CHECK_WRITE(0x35, (D)); \
201 CHECK_WRITE(0x36, (D)); \
202 CHECK_WRITE(0x37, (D)); \
203 CHECK_WRITE(0x38, (D)); \
204 CHECK_WRITE(0x39, (D));
205
206#define CHECK_PRINT(S); \
207 printf("%s: %d, %d, %d, %d, %d, %d\n", \
208 (S), \
209 CHECK_READ(0x34), \
210 CHECK_READ(0x35), \
211 CHECK_READ(0x36), \
212 CHECK_READ(0x37), \
213 CHECK_READ(0x38), \
214 CHECK_READ(0x39));
215
216#else /* CHECK_POINTS */
217
218#define CHECK_INIT(D)
219#define CHECK_PRINT(S)
220
221#endif /* CHECK_POINTS */
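/*
 * Typical use (see start_all_aps() below): bracket an AP start attempt
 * with checkpoints so a wedged AP still leaves a trail in CMOS RAM:
 *
 *	CHECK_INIT(99);
 *	if (!start_ap(x, boot_addr))
 *		CHECK_PRINT("trace");
 */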
222
223/*
224 * Values to send to the POST hardware.
225 */
226#define MP_BOOTADDRESS_POST 0x10
227#define MP_PROBE_POST 0x11
228#define MPTABLE_PASS1_POST 0x12
229
230#define MP_START_POST 0x13
231#define MP_ENABLE_POST 0x14
232#define MPTABLE_PASS2_POST 0x15
233
234#define START_ALL_APS_POST 0x16
235#define INSTALL_AP_TRAMP_POST 0x17
236#define START_AP_POST 0x18
237
238#define MP_ANNOUNCE_POST 0x19
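/*
 * These values are emitted via the POSTCODE() macro, which conventionally
 * writes to the diagnostic POST port (0x80) so SMP start-up progress can
 * be followed on a POST card if the machine hangs during boot.
 */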
239
240/* used to hold the APs until we are ready to release them */
241struct mtx ap_boot_mtx;
242
243/** XXX FIXME: where does this really belong, isa.h/isa.c perhaps? */
244int current_postcode;
245
246/** XXX FIXME: what system files declare these??? */
247extern struct region_descriptor r_gdt, r_idt;
248
249int bsp_apic_ready = 0;	/* flags usability of the BSP APIC */
250int mp_ncpus; /* # of CPUs, including BSP */
251int mp_naps;		/* # of Application Processors (APs) */
252int mp_nbusses; /* # of busses */
253int mp_napics; /* # of IO APICs */
254int boot_cpu_id; /* designated BSP */
255vm_offset_t cpu_apic_address;
256vm_offset_t io_apic_address[NAPICID]; /* NAPICID is more than enough */
257extern int nkpt;
258
259u_int32_t cpu_apic_versions[MAXCPU];
260u_int32_t *io_apic_versions;
261
262#ifdef APIC_INTR_REORDER
263struct {
264 volatile int *location;
265 int bit;
266} apic_isrbit_location[32];
267#endif
268
269struct apic_intmapinfo int_to_apicintpin[APIC_INTMAPSIZE];
270
271/*
272 * APIC ID logical/physical mapping structures.
273 * We oversize these to simplify boot-time config.
274 */
275int cpu_num_to_apic_id[NAPICID];
276int io_num_to_apic_id[NAPICID];
277int apic_id_to_logical[NAPICID];
278
279
280/* Bitmap of all available CPUs */
281u_int all_cpus;
282
283/* AP uses this during bootstrap. Do not staticize. */
284char *bootSTK;
285static int bootAP;
286
287/* Hotwire a 0->4MB V==P mapping */
288extern pt_entry_t *KPTphys;
289
290/* SMP page table page */
291extern pt_entry_t *SMPpt;
292
293struct pcb stoppcbs[MAXCPU];
294
295int smp_started; /* has the system started? */
296int smp_active = 0; /* are the APs allowed to run? */
297SYSCTL_INT(_machdep, OID_AUTO, smp_active, CTLFLAG_RW, &smp_active, 0, "");
298
299/* XXX maybe should be hw.ncpu */
300static int smp_cpus = 1; /* how many cpu's running */
301SYSCTL_INT(_machdep, OID_AUTO, smp_cpus, CTLFLAG_RD, &smp_cpus, 0, "");
302
303int invltlb_ok = 0; /* throttle smp_invltlb() till safe */
304SYSCTL_INT(_machdep, OID_AUTO, invltlb_ok, CTLFLAG_RW, &invltlb_ok, 0, "");
305
306/* Enable forwarding of a signal to a process running on a different CPU */
307static int forward_signal_enabled = 1;
308SYSCTL_INT(_machdep, OID_AUTO, forward_signal_enabled, CTLFLAG_RW,
309 &forward_signal_enabled, 0, "");
310
311/* Enable forwarding of roundrobin to all other cpus */
312static int forward_roundrobin_enabled = 1;
313SYSCTL_INT(_machdep, OID_AUTO, forward_roundrobin_enabled, CTLFLAG_RW,
314 &forward_roundrobin_enabled, 0, "");
315
316
317/*
318 * Local data and functions.
319 */
320
321/* Set to 1 once we're ready to let the APs out of the pen. */
322static volatile int aps_ready = 0;
323
324static int mp_capable;
325static u_int boot_address;
326static u_int base_memory;
327
328static int picmode; /* 0: virtual wire mode, 1: PIC mode */
329static mpfps_t mpfps;
330static int search_for_sig(u_int32_t target, int count);
331static void mp_enable(u_int boot_addr);
332
333static void mptable_pass1(void);
334static int mptable_pass2(void);
335static void default_mp_table(int type);
336static void fix_mp_table(void);
337static void setup_apic_irq_mapping(void);
338static void init_locks(void);
339static int start_all_aps(u_int boot_addr);
340static void install_ap_tramp(u_int boot_addr);
341static int start_ap(int logicalCpu, u_int boot_addr);
342void ap_init(void);
343static int apic_int_is_bus_type(int intr, int bus_type);
344static void release_aps(void *dummy);
345
346/*
347 * initialize all the SMP locks
348 */
349
350/* critical region around IO APIC, apic_imen */
351struct mtx imen_mtx;
352
353/* lock region used by kernel profiling */
354struct mtx mcount_mtx;
355
356#ifdef USE_COMLOCK
357/* locks com (tty) data/hardware accesses: a FASTINTR() */
358struct mtx com_mtx;
359#endif /* USE_COMLOCK */
360
361/* lock around the MP rendezvous */
362static struct mtx smp_rv_mtx;
363
364/* only 1 CPU can panic at a time :) */
365struct mtx panic_mtx;
366
367static void
368init_locks(void)
369{
370 /*
371 * XXX The mcount mutex probably needs to be statically initialized,
372 * since it will be used even in the function calls that get us to this
373 * point.
374 */
375 mtx_init(&mcount_mtx, "mcount", MTX_DEF);
376
377 mtx_init(&smp_rv_mtx, "smp rendezvous", MTX_SPIN);
378 mtx_init(&panic_mtx, "panic", MTX_DEF);
379
380#ifdef USE_COMLOCK
381 mtx_init(&com_mtx, "com", MTX_SPIN);
382#endif /* USE_COMLOCK */
383
384 mtx_init(&ap_boot_mtx, "ap boot", MTX_SPIN);
385}
386
387/*
388 * Calculate usable address in base memory for AP trampoline code.
389 */
390u_int
391mp_bootaddress(u_int basemem)
392{
393 POSTCODE(MP_BOOTADDRESS_POST);
394
395 base_memory = basemem * 1024; /* convert to bytes */
396
397 boot_address = base_memory & ~0xfff; /* round down to 4k boundary */
398 if ((base_memory - boot_address) < bootMP_size)
399 boot_address -= 4096; /* not enough, lower by 4k */
400
401 return boot_address;
402}
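/*
 * The trampoline must live in base memory, below 1MB and page-aligned:
 * the APs begin executing it in real mode, and the STARTUP IPI can only
 * encode a 4K page number (see "vector = (boot_addr >> 12)" in start_ap()).
 */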
403
404
405/*
406 * Look for an Intel MP spec table (ie, SMP capable hardware).
407 */
408int
409mp_probe(void)
410{
411 int x;
412 u_long segment;
413 u_int32_t target;
414
415 POSTCODE(MP_PROBE_POST);
416
417 /* see if EBDA exists */
418 if ((segment = (u_long) * (u_short *) (KERNBASE + 0x40e)) != 0) {
419 /* search first 1K of EBDA */
420 target = (u_int32_t) (segment << 4);
421 if ((x = search_for_sig(target, 1024 / 4)) >= 0)
422 goto found;
423 } else {
424 /* last 1K of base memory, effective 'top of base' passed in */
425 target = (u_int32_t) (base_memory - 0x400);
426 if ((x = search_for_sig(target, 1024 / 4)) >= 0)
427 goto found;
428 }
429
430 /* search the BIOS */
431 target = (u_int32_t) BIOS_BASE;
432 if ((x = search_for_sig(target, BIOS_COUNT)) >= 0)
433 goto found;
434
435 /* nothing found */
436 mpfps = (mpfps_t)0;
437 mp_capable = 0;
438 return 0;
439
440found:
441 /* calculate needed resources */
442 mpfps = (mpfps_t)x;
443 mptable_pass1();
444
445 /* flag fact that we are running multiple processors */
446 mp_capable = 1;
447 return 1;
448}
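/*
 * The probe order above follows MP spec v1.4, chapter 4: the MP floating
 * pointer structure may live in the first kilobyte of the EBDA, in the
 * last kilobyte of system base memory, or in the BIOS ROM address space
 * (0xf0000-0xfffff on standard PCs; see BIOS_BASE above for PC98).
 */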
449
450
451/*
452 * Initialize the SMP hardware and the APIC and start up the AP's.
453 */
454void
455mp_start(void)
456{
457 POSTCODE(MP_START_POST);
458
459 /* look for MP capable motherboard */
460 if (mp_capable)
461 mp_enable(boot_address);
462 else
463 panic("MP hardware not found!");
464}
465
466
467/*
468 * Print various information about the SMP system hardware and setup.
469 */
470void
471mp_announce(void)
472{
473 int x;
474
475 POSTCODE(MP_ANNOUNCE_POST);
476
477 printf("FreeBSD/SMP: Multiprocessor motherboard\n");
478 printf(" cpu0 (BSP): apic id: %2d", CPU_TO_ID(0));
479 printf(", version: 0x%08x", cpu_apic_versions[0]);
480 printf(", at 0x%08x\n", cpu_apic_address);
481 for (x = 1; x <= mp_naps; ++x) {
482 printf(" cpu%d (AP): apic id: %2d", x, CPU_TO_ID(x));
483 printf(", version: 0x%08x", cpu_apic_versions[x]);
484 printf(", at 0x%08x\n", cpu_apic_address);
485 }
486
487#if defined(APIC_IO)
488 for (x = 0; x < mp_napics; ++x) {
489 printf(" io%d (APIC): apic id: %2d", x, IO_TO_ID(x));
490 printf(", version: 0x%08x", io_apic_versions[x]);
491 printf(", at 0x%08x\n", io_apic_address[x]);
492 }
493#else
494 printf(" Warning: APIC I/O disabled\n");
495#endif /* APIC_IO */
496}
497
498/*
499 * AP CPUs call this to sync up protected mode.
500 */
501void
502init_secondary(void)
503{
504 int gsel_tss;
505 int x, myid = bootAP;
506
507 gdt_segs[GPRIV_SEL].ssd_base = (int) &SMP_prvspace[myid];
508 gdt_segs[GPROC0_SEL].ssd_base =
509 (int) &SMP_prvspace[myid].globaldata.gd_common_tss;
510 SMP_prvspace[myid].globaldata.gd_prvspace =
511 &SMP_prvspace[myid].globaldata;
512
513 for (x = 0; x < NGDT; x++) {
514 ssdtosd(&gdt_segs[x], &gdt[myid * NGDT + x].sd);
515 }
516
517 r_gdt.rd_limit = NGDT * sizeof(gdt[0]) - 1;
518 r_gdt.rd_base = (int) &gdt[myid * NGDT];
519 lgdt(&r_gdt); /* does magic intra-segment return */
520
521 lidt(&r_idt);
522
523 lldt(_default_ldt);
524#ifdef USER_LDT
525 PCPU_SET(currentldt, _default_ldt);
526#endif
527
528 gsel_tss = GSEL(GPROC0_SEL, SEL_KPL);
529 gdt[myid * NGDT + GPROC0_SEL].sd.sd_type = SDT_SYS386TSS;
530 PCPU_SET(common_tss.tss_esp0, 0); /* not used until after switch */
531 PCPU_SET(common_tss.tss_ss0, GSEL(GDATA_SEL, SEL_KPL));
532 PCPU_SET(common_tss.tss_ioopt, (sizeof (struct i386tss)) << 16);
533 PCPU_SET(tss_gdt, &gdt[myid * NGDT + GPROC0_SEL].sd);
534 PCPU_SET(common_tssd, *PCPU_GET(tss_gdt));
535 ltr(gsel_tss);
536
537 pmap_set_opt();
538}
539
540
541#if defined(APIC_IO)
542/*
543 * Final configuration of the BSP's local APIC:
544 * - disable 'pic mode'.
545 * - disable 'virtual wire mode'.
546 * - enable NMI.
547 */
548void
549bsp_apic_configure(void)
550{
551 u_char byte;
552 u_int32_t temp;
553
554 /* leave 'pic mode' if necessary */
555 if (picmode) {
556 outb(0x22, 0x70); /* select IMCR */
557 byte = inb(0x23); /* current contents */
558 byte |= 0x01; /* mask external INTR */
559 outb(0x23, byte); /* disconnect 8259s/NMI */
560 }
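	/*
	 * The sequence above programs the Interrupt Mode Configuration
	 * Register (IMCR): writing 0x70 to port 0x22 selects the IMCR,
	 * and setting bit 0 via port 0x23 reroutes INTR/NMI through the
	 * local APIC instead of directly to the BSP, leaving PIC mode.
	 */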
561
562 /* mask lint0 (the 8259 'virtual wire' connection) */
563 temp = lapic.lvt_lint0;
564 temp |= APIC_LVT_M; /* set the mask */
565 lapic.lvt_lint0 = temp;
566
567 /* setup lint1 to handle NMI */
568 temp = lapic.lvt_lint1;
569 temp &= ~APIC_LVT_M; /* clear the mask */
570 lapic.lvt_lint1 = temp;
571
572 if (bootverbose)
573 apic_dump("bsp_apic_configure()");
574}
575#endif /* APIC_IO */
576
577
578/*******************************************************************
579 * local functions and data
580 */
581
582/*
583 * start the SMP system
584 */
585static void
586mp_enable(u_int boot_addr)
587{
588 int x;
589#if defined(APIC_IO)
590 int apic;
591 u_int ux;
592#endif /* APIC_IO */
593
594 POSTCODE(MP_ENABLE_POST);
595
596 /* turn on 4MB of V == P addressing so we can get to MP table */
597 *(int *)PTD = PG_V | PG_RW | ((uintptr_t)(void *)KPTphys & PG_FRAME);
598 invltlb();
599
600 /* examine the MP table for needed info, uses physical addresses */
601 x = mptable_pass2();
602
603 *(int *)PTD = 0;
604 invltlb();
605
606 /* can't process default configs till the CPU APIC is pmapped */
607 if (x)
608 default_mp_table(x);
609
610 /* post scan cleanup */
611 fix_mp_table();
612 setup_apic_irq_mapping();
613
614#if defined(APIC_IO)
615
616 /* fill the LOGICAL io_apic_versions table */
617 for (apic = 0; apic < mp_napics; ++apic) {
618 ux = io_apic_read(apic, IOAPIC_VER);
619 io_apic_versions[apic] = ux;
620 io_apic_set_id(apic, IO_TO_ID(apic));
621 }
622
623 /* program each IO APIC in the system */
624 for (apic = 0; apic < mp_napics; ++apic)
625 if (io_apic_setup(apic) < 0)
626 panic("IO APIC setup failure");
627
628 /* install a 'Spurious INTerrupt' vector */
629 setidt(XSPURIOUSINT_OFFSET, Xspuriousint,
630 SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
631
632 /* install an inter-CPU IPI for TLB invalidation */
633 setidt(XINVLTLB_OFFSET, Xinvltlb,
634 SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
635
636#ifdef BETTER_CLOCK
637 /* install an inter-CPU IPI for reading processor state */
638 setidt(XCPUCHECKSTATE_OFFSET, Xcpucheckstate,
639 SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
640#endif
641
642 /* install an inter-CPU IPI for all-CPU rendezvous */
643 setidt(XRENDEZVOUS_OFFSET, Xrendezvous,
644 SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
645
646 /* install an inter-CPU IPI for forcing an additional software trap */
647 setidt(XCPUAST_OFFSET, Xcpuast,
648 SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
649
650 /* install an inter-CPU IPI for CPU stop/restart */
651 setidt(XCPUSTOP_OFFSET, Xcpustop,
652 SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
653
654#if defined(TEST_TEST1)
655 /* install a "fake hardware INTerrupt" vector */
656 setidt(XTEST1_OFFSET, Xtest1,
657 SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
658#endif /** TEST_TEST1 */
659
660#endif /* APIC_IO */
661
662 /* initialize all SMP locks */
663 init_locks();
664
665 /* start each Application Processor */
666 start_all_aps(boot_addr);
667}
668
669
670/*
671 * look for the MP spec signature
672 */
673
674/* string defined by the Intel MP Spec as identifying the MP table */
675#define MP_SIG 0x5f504d5f /* _MP_ */
676#define NEXT(X) ((X) += 4)
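/*
 * MP_SIG is the ASCII bytes '_', 'M', 'P', '_' read as a little-endian
 * 32-bit word, so the scan below can compare whole dwords.  The floating
 * pointer structure is defined to start on a 16-byte boundary, which is
 * why NEXT() advances the dword index by 4 (i.e. 16 bytes) per step.
 */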
677static int
678search_for_sig(u_int32_t target, int count)
679{
680 int x;
681 u_int32_t *addr = (u_int32_t *) (KERNBASE + target);
682
683 for (x = 0; x < count; NEXT(x))
684 if (addr[x] == MP_SIG)
685 /* make array index a byte index */
686 return (target + (x * sizeof(u_int32_t)));
687
688 return -1;
689}
690
691
692static basetable_entry basetable_entry_types[] =
693{
694 {0, 20, "Processor"},
695 {1, 8, "Bus"},
696 {2, 8, "I/O APIC"},
697 {3, 8, "I/O INT"},
698 {4, 8, "Local INT"}
699};
700
701typedef struct BUSDATA {
702 u_char bus_id;
703 enum busTypes bus_type;
704} bus_datum;
705
706typedef struct INTDATA {
707 u_char int_type;
708 u_short int_flags;
709 u_char src_bus_id;
710 u_char src_bus_irq;
711 u_char dst_apic_id;
712 u_char dst_apic_int;
713 u_char int_vector;
714} io_int, local_int;
715
716typedef struct BUSTYPENAME {
717 u_char type;
718 char name[7];
719} bus_type_name;
720
721static bus_type_name bus_type_table[] =
722{
723 {CBUS, "CBUS"},
724 {CBUSII, "CBUSII"},
725 {EISA, "EISA"},
726 {MCA, "MCA"},
727 {UNKNOWN_BUSTYPE, "---"},
728 {ISA, "ISA"},
729 {MCA, "MCA"},
730 {UNKNOWN_BUSTYPE, "---"},
731 {UNKNOWN_BUSTYPE, "---"},
732 {UNKNOWN_BUSTYPE, "---"},
733 {UNKNOWN_BUSTYPE, "---"},
734 {UNKNOWN_BUSTYPE, "---"},
735 {PCI, "PCI"},
736 {UNKNOWN_BUSTYPE, "---"},
737 {UNKNOWN_BUSTYPE, "---"},
738 {UNKNOWN_BUSTYPE, "---"},
739 {UNKNOWN_BUSTYPE, "---"},
740 {XPRESS, "XPRESS"},
741 {UNKNOWN_BUSTYPE, "---"}
742};
743/* from MP spec v1.4, table 5-1 */
744static int default_data[7][5] =
745{
746/* nbus, id0, type0, id1, type1 */
747 {1, 0, ISA, 255, 255},
748 {1, 0, EISA, 255, 255},
749 {1, 0, EISA, 255, 255},
750 {1, 0, MCA, 255, 255},
751 {2, 0, ISA, 1, PCI},
752 {2, 0, EISA, 1, PCI},
753 {2, 0, MCA, 1, PCI}
754};
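/*
 * Rows are indexed by (default configuration type - 1), the type coming
 * from MP feature byte 1 (MPFPS_MPFB1); mptable_pass1() and
 * default_mp_table() pull the bus count and bus id/type pairs from here.
 */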
755
756
757/* the bus data */
758static bus_datum *bus_data;
759
760/* the IO INT data, one entry per possible APIC INTerrupt */
761static io_int *io_apic_ints;
762
763static int nintrs;
764
765static int processor_entry __P((proc_entry_ptr entry, int cpu));
766static int bus_entry __P((bus_entry_ptr entry, int bus));
767static int io_apic_entry __P((io_apic_entry_ptr entry, int apic));
768static int int_entry __P((int_entry_ptr entry, int intr));
769static int lookup_bus_type __P((char *name));
770
771
772/*
773 * 1st pass on motherboard's Intel MP specification table.
774 *
775 * initializes:
776 * mp_ncpus = 1
777 *
778 * determines:
779 * cpu_apic_address (common to all CPUs)
780 * io_apic_address[N]
781 * mp_naps
782 * mp_nbusses
783 * mp_napics
784 * nintrs
785 */
786static void
787mptable_pass1(void)
788{
789 int x;
790 mpcth_t cth;
791 int totalSize;
792 void* position;
793 int count;
794 int type;
795
796 POSTCODE(MPTABLE_PASS1_POST);
797
798 /* clear various tables */
799 for (x = 0; x < NAPICID; ++x) {
800 io_apic_address[x] = ~0; /* IO APIC address table */
801 }
802
803 /* init everything to empty */
804 mp_naps = 0;
805 mp_nbusses = 0;
806 mp_napics = 0;
807 nintrs = 0;
808
809 /* check for use of 'default' configuration */
810 if (MPFPS_MPFB1 != 0) {
811 /* use default addresses */
812 cpu_apic_address = DEFAULT_APIC_BASE;
813 io_apic_address[0] = DEFAULT_IO_APIC_BASE;
814
815 /* fill in with defaults */
816 mp_naps = 2; /* includes BSP */
817 mp_nbusses = default_data[MPFPS_MPFB1 - 1][0];
818#if defined(APIC_IO)
819 mp_napics = 1;
820 nintrs = 16;
821#endif /* APIC_IO */
822 }
823 else {
824 if ((cth = mpfps->pap) == 0)
825 panic("MP Configuration Table Header MISSING!");
826
827 cpu_apic_address = (vm_offset_t) cth->apic_address;
828
829 /* walk the table, recording info of interest */
830 totalSize = cth->base_table_length - sizeof(struct MPCTH);
831 position = (u_char *) cth + sizeof(struct MPCTH);
832 count = cth->entry_count;
833
834 while (count--) {
835 switch (type = *(u_char *) position) {
836 case 0: /* processor_entry */
837 if (((proc_entry_ptr)position)->cpu_flags
838 & PROCENTRY_FLAG_EN)
839 ++mp_naps;
840 break;
841 case 1: /* bus_entry */
842 ++mp_nbusses;
843 break;
844 case 2: /* io_apic_entry */
845 if (((io_apic_entry_ptr)position)->apic_flags
846 & IOAPICENTRY_FLAG_EN)
847 io_apic_address[mp_napics++] =
848 (vm_offset_t)((io_apic_entry_ptr)
849 position)->apic_address;
850 break;
851 case 3: /* int_entry */
852 ++nintrs;
853 break;
854			case 4: /* local_int_entry */
855 break;
856 default:
857 panic("mpfps Base Table HOSED!");
858 /* NOTREACHED */
859 }
860
861 totalSize -= basetable_entry_types[type].length;
862 (u_char*)position += basetable_entry_types[type].length;
863 }
864 }
865
866 /* qualify the numbers */
867 if (mp_naps > MAXCPU) {
868 printf("Warning: only using %d of %d available CPUs!\n",
869 MAXCPU, mp_naps);
870 mp_naps = MAXCPU;
871 }
872
873 /*
874 * Count the BSP.
875 * This is also used as a counter while starting the APs.
876 */
877 mp_ncpus = 1;
878
879 --mp_naps; /* subtract the BSP */
880}
881
882
883/*
884 * 2nd pass on motherboard's Intel MP specification table.
885 *
886 * sets:
887 * boot_cpu_id
888 * ID_TO_IO(N), phy APIC ID to log CPU/IO table
889 * CPU_TO_ID(N), logical CPU to APIC ID table
890 * IO_TO_ID(N), logical IO to APIC ID table
891 * bus_data[N]
892 * io_apic_ints[N]
893 */
894static int
895mptable_pass2(void)
896{
897 int x;
898 mpcth_t cth;
899 int totalSize;
900 void* position;
901 int count;
902 int type;
903 int apic, bus, cpu, intr;
904 int i, j;
905 int pgeflag;
906
907 POSTCODE(MPTABLE_PASS2_POST);
908
909 pgeflag = 0; /* XXX - Not used under SMP yet. */
910
911 MALLOC(io_apic_versions, u_int32_t *, sizeof(u_int32_t) * mp_napics,
912 M_DEVBUF, M_WAITOK);
913 MALLOC(ioapic, volatile ioapic_t **, sizeof(ioapic_t *) * mp_napics,
914 M_DEVBUF, M_WAITOK);
915 MALLOC(io_apic_ints, io_int *, sizeof(io_int) * (nintrs + 1),
916 M_DEVBUF, M_WAITOK);
917 MALLOC(bus_data, bus_datum *, sizeof(bus_datum) * mp_nbusses,
918 M_DEVBUF, M_WAITOK);
919
920 bzero(ioapic, sizeof(ioapic_t *) * mp_napics);
921
922 for (i = 0; i < mp_napics; i++) {
923 for (j = 0; j < mp_napics; j++) {
924 /* same page frame as a previous IO apic? */
925 if (((vm_offset_t)SMPpt[NPTEPG-2-j] & PG_FRAME) ==
926 (io_apic_address[i] & PG_FRAME)) {
927 ioapic[i] = (ioapic_t *)((u_int)SMP_prvspace
928 + (NPTEPG-2-j) * PAGE_SIZE
929 + (io_apic_address[i] & PAGE_MASK));
930 break;
931 }
932 /* use this slot if available */
933 if (((vm_offset_t)SMPpt[NPTEPG-2-j] & PG_FRAME) == 0) {
934 SMPpt[NPTEPG-2-j] = (pt_entry_t)(PG_V | PG_RW |
935 pgeflag | (io_apic_address[i] & PG_FRAME));
936 ioapic[i] = (ioapic_t *)((u_int)SMP_prvspace
937 + (NPTEPG-2-j) * PAGE_SIZE
938 + (io_apic_address[i] & PAGE_MASK));
939 break;
940 }
941 }
942 }
943
944 /* clear various tables */
945 for (x = 0; x < NAPICID; ++x) {
946 ID_TO_IO(x) = -1; /* phy APIC ID to log CPU/IO table */
947 CPU_TO_ID(x) = -1; /* logical CPU to APIC ID table */
948 IO_TO_ID(x) = -1; /* logical IO to APIC ID table */
949 }
950
951 /* clear bus data table */
952 for (x = 0; x < mp_nbusses; ++x)
953 bus_data[x].bus_id = 0xff;
954
955 /* clear IO APIC INT table */
956 for (x = 0; x < (nintrs + 1); ++x) {
957 io_apic_ints[x].int_type = 0xff;
958 io_apic_ints[x].int_vector = 0xff;
959 }
960
961 /* setup the cpu/apic mapping arrays */
962 boot_cpu_id = -1;
963
964 /* record whether PIC or virtual-wire mode */
965 picmode = (mpfps->mpfb2 & 0x80) ? 1 : 0;
966
967 /* check for use of 'default' configuration */
968 if (MPFPS_MPFB1 != 0)
969 return MPFPS_MPFB1; /* return default configuration type */
970
971 if ((cth = mpfps->pap) == 0)
972 panic("MP Configuration Table Header MISSING!");
973
974 /* walk the table, recording info of interest */
975 totalSize = cth->base_table_length - sizeof(struct MPCTH);
976 position = (u_char *) cth + sizeof(struct MPCTH);
977 count = cth->entry_count;
978 apic = bus = intr = 0;
979 cpu = 1; /* pre-count the BSP */
980
981 while (count--) {
982 switch (type = *(u_char *) position) {
983 case 0:
984 if (processor_entry(position, cpu))
985 ++cpu;
986 break;
987 case 1:
988 if (bus_entry(position, bus))
989 ++bus;
990 break;
991 case 2:
992 if (io_apic_entry(position, apic))
993 ++apic;
994 break;
995 case 3:
996 if (int_entry(position, intr))
997 ++intr;
998 break;
999 case 4:
1000 /* int_entry(position); */
1001 break;
1002 default:
1003 panic("mpfps Base Table HOSED!");
1004 /* NOTREACHED */
1005 }
1006
1007 totalSize -= basetable_entry_types[type].length;
1008 (u_char *) position += basetable_entry_types[type].length;
1009 }
1010
1011 if (boot_cpu_id == -1)
1012 panic("NO BSP found!");
1013
1014	/* report the fact that it's NOT a default configuration */
1015 return 0;
1016}
1017
1018
1019void
1020assign_apic_irq(int apic, int intpin, int irq)
1021{
1022 int x;
1023
1024 if (int_to_apicintpin[irq].ioapic != -1)
1025 panic("assign_apic_irq: inconsistent table");
1026
1027 int_to_apicintpin[irq].ioapic = apic;
1028 int_to_apicintpin[irq].int_pin = intpin;
1029 int_to_apicintpin[irq].apic_address = ioapic[apic];
1030 int_to_apicintpin[irq].redirindex = IOAPIC_REDTBL + 2 * intpin;
1031
1032 for (x = 0; x < nintrs; x++) {
1033 if ((io_apic_ints[x].int_type == 0 ||
1034 io_apic_ints[x].int_type == 3) &&
1035 io_apic_ints[x].int_vector == 0xff &&
1036 io_apic_ints[x].dst_apic_id == IO_TO_ID(apic) &&
1037 io_apic_ints[x].dst_apic_int == intpin)
1038 io_apic_ints[x].int_vector = irq;
1039 }
1040}
1041
1042void
1043revoke_apic_irq(int irq)
1044{
1045 int x;
1046 int oldapic;
1047 int oldintpin;
1048
1049 if (int_to_apicintpin[irq].ioapic == -1)
1050		panic("revoke_apic_irq: inconsistent table");
1051
1052 oldapic = int_to_apicintpin[irq].ioapic;
1053 oldintpin = int_to_apicintpin[irq].int_pin;
1054
1055 int_to_apicintpin[irq].ioapic = -1;
1056 int_to_apicintpin[irq].int_pin = 0;
1057 int_to_apicintpin[irq].apic_address = NULL;
1058 int_to_apicintpin[irq].redirindex = 0;
1059
1060 for (x = 0; x < nintrs; x++) {
1061 if ((io_apic_ints[x].int_type == 0 ||
1062 io_apic_ints[x].int_type == 3) &&
1063 io_apic_ints[x].int_vector == 0xff &&
1064 io_apic_ints[x].dst_apic_id == IO_TO_ID(oldapic) &&
1065 io_apic_ints[x].dst_apic_int == oldintpin)
1066 io_apic_ints[x].int_vector = 0xff;
1067 }
1068}
1069
1070
1071static void
1072allocate_apic_irq(int intr)
1073{
1074 int apic;
1075 int intpin;
1076 int irq;
1077
1078 if (io_apic_ints[intr].int_vector != 0xff)
1079 return; /* Interrupt handler already assigned */
1080
1081 if (io_apic_ints[intr].int_type != 0 &&
1082 (io_apic_ints[intr].int_type != 3 ||
1083 (io_apic_ints[intr].dst_apic_id == IO_TO_ID(0) &&
1084 io_apic_ints[intr].dst_apic_int == 0)))
1085 return; /* Not INT or ExtInt on != (0, 0) */
1086
1087 irq = 0;
1088 while (irq < APIC_INTMAPSIZE &&
1089 int_to_apicintpin[irq].ioapic != -1)
1090 irq++;
1091
1092 if (irq >= APIC_INTMAPSIZE)
1093 return; /* No free interrupt handlers */
1094
1095 apic = ID_TO_IO(io_apic_ints[intr].dst_apic_id);
1096 intpin = io_apic_ints[intr].dst_apic_int;
1097
1098 assign_apic_irq(apic, intpin, irq);
1099 io_apic_setup_intpin(apic, intpin);
1100}
1101
1102
1103static void
1104swap_apic_id(int apic, int oldid, int newid)
1105{
1106 int x;
1107 int oapic;
1108
1109
1110 if (oldid == newid)
1111 return; /* Nothing to do */
1112
1113 printf("Changing APIC ID for IO APIC #%d from %d to %d in MP table\n",
1114 apic, oldid, newid);
1115
1116 /* Swap physical APIC IDs in interrupt entries */
1117 for (x = 0; x < nintrs; x++) {
1118 if (io_apic_ints[x].dst_apic_id == oldid)
1119 io_apic_ints[x].dst_apic_id = newid;
1120 else if (io_apic_ints[x].dst_apic_id == newid)
1121 io_apic_ints[x].dst_apic_id = oldid;
1122 }
1123
1124 /* Swap physical APIC IDs in IO_TO_ID mappings */
1125 for (oapic = 0; oapic < mp_napics; oapic++)
1126 if (IO_TO_ID(oapic) == newid)
1127 break;
1128
1129 if (oapic < mp_napics) {
1130 printf("Changing APIC ID for IO APIC #%d from "
1131 "%d to %d in MP table\n",
1132 oapic, newid, oldid);
1133 IO_TO_ID(oapic) = oldid;
1134 }
1135 IO_TO_ID(apic) = newid;
1136}
1137
1138
1139static void
1140fix_id_to_io_mapping(void)
1141{
1142 int x;
1143
1144 for (x = 0; x < NAPICID; x++)
1145 ID_TO_IO(x) = -1;
1146
1147 for (x = 0; x <= mp_naps; x++)
1148 if (CPU_TO_ID(x) < NAPICID)
1149 ID_TO_IO(CPU_TO_ID(x)) = x;
1150
1151 for (x = 0; x < mp_napics; x++)
1152 if (IO_TO_ID(x) < NAPICID)
1153 ID_TO_IO(IO_TO_ID(x)) = x;
1154}
1155
1156
1157static int
1158first_free_apic_id(void)
1159{
1160 int freeid, x;
1161
1162 for (freeid = 0; freeid < NAPICID; freeid++) {
1163 for (x = 0; x <= mp_naps; x++)
1164 if (CPU_TO_ID(x) == freeid)
1165 break;
1166 if (x <= mp_naps)
1167 continue;
1168 for (x = 0; x < mp_napics; x++)
1169 if (IO_TO_ID(x) == freeid)
1170 break;
1171 if (x < mp_napics)
1172 continue;
1173 return freeid;
1174 }
1175 return freeid;
1176}
1177
1178
1179static int
1180io_apic_id_acceptable(int apic, int id)
1181{
1182 int cpu; /* Logical CPU number */
1183 int oapic; /* Logical IO APIC number for other IO APIC */
1184
1185 if (id >= NAPICID)
1186 return 0; /* Out of range */
1187
1188 for (cpu = 0; cpu <= mp_naps; cpu++)
1189 if (CPU_TO_ID(cpu) == id)
1190 return 0; /* Conflict with CPU */
1191
1192 for (oapic = 0; oapic < mp_napics && oapic < apic; oapic++)
1193 if (IO_TO_ID(oapic) == id)
1194 return 0; /* Conflict with other APIC */
1195
1196 return 1; /* ID is acceptable for IO APIC */
1197}
1198
1199
1200/*
1201 * parse an Intel MP specification table
1202 */
1203static void
1204fix_mp_table(void)
1205{
1206 int x;
1207 int id;
1208 int bus_0 = 0; /* Stop GCC warning */
1209 int bus_pci = 0; /* Stop GCC warning */
1210 int num_pci_bus;
1211 int apic; /* IO APIC unit number */
1212 int freeid; /* Free physical APIC ID */
1213 int physid; /* Current physical IO APIC ID */
1214
1215 /*
1216 * Fix mis-numbering of the PCI bus and its INT entries if the BIOS
1217 * did it wrong. The MP spec says that when more than 1 PCI bus
1218 * exists the BIOS must begin with bus entries for the PCI bus and use
1219 * actual PCI bus numbering. This implies that when only 1 PCI bus
1220 * exists the BIOS can choose to ignore this ordering, and indeed many
1221 * MP motherboards do ignore it. This causes a problem when the PCI
1222 * sub-system makes requests of the MP sub-system based on PCI bus
1223 * numbers. So here we look for the situation and renumber the
1224 * busses and associated INTs in an effort to "make it right".
1225 */
1226
1227 /* find bus 0, PCI bus, count the number of PCI busses */
1228 for (num_pci_bus = 0, x = 0; x < mp_nbusses; ++x) {
1229 if (bus_data[x].bus_id == 0) {
1230 bus_0 = x;
1231 }
1232 if (bus_data[x].bus_type == PCI) {
1233 ++num_pci_bus;
1234 bus_pci = x;
1235 }
1236 }
1237 /*
1238 * bus_0 == slot of bus with ID of 0
1239 * bus_pci == slot of last PCI bus encountered
1240 */
1241
1242 /* check the 1 PCI bus case for sanity */
1243 /* if it is number 0 all is well */
1244 if (num_pci_bus == 1 &&
1245 bus_data[bus_pci].bus_id != 0) {
1246
1247 /* mis-numbered, swap with whichever bus uses slot 0 */
1248
1249 /* swap the bus entry types */
1250 bus_data[bus_pci].bus_type = bus_data[bus_0].bus_type;
1251 bus_data[bus_0].bus_type = PCI;
1252
1253		/* swap each relevant INTerrupt entry */
1254 id = bus_data[bus_pci].bus_id;
1255 for (x = 0; x < nintrs; ++x) {
1256 if (io_apic_ints[x].src_bus_id == id) {
1257 io_apic_ints[x].src_bus_id = 0;
1258 }
1259 else if (io_apic_ints[x].src_bus_id == 0) {
1260 io_apic_ints[x].src_bus_id = id;
1261 }
1262 }
1263 }
1264
1265 /* Assign IO APIC IDs.
1266 *
1267 * First try the existing ID. If a conflict is detected, try
1268 * the ID in the MP table. If a conflict is still detected, find
1269 * a free id.
1270 *
1271	 * We cannot use the ID_TO_IO table before all conflicts have been
1272 * resolved and the table has been corrected.
1273 */
1274 for (apic = 0; apic < mp_napics; ++apic) { /* For all IO APICs */
1275
1276 /* First try to use the value set by the BIOS */
1277 physid = io_apic_get_id(apic);
1278 if (io_apic_id_acceptable(apic, physid)) {
1279 if (IO_TO_ID(apic) != physid)
1280 swap_apic_id(apic, IO_TO_ID(apic), physid);
1281 continue;
1282 }
1283
1284 /* Then check if the value in the MP table is acceptable */
1285 if (io_apic_id_acceptable(apic, IO_TO_ID(apic)))
1286 continue;
1287
1288 /* Last resort, find a free APIC ID and use it */
1289 freeid = first_free_apic_id();
1290 if (freeid >= NAPICID)
1291 panic("No free physical APIC IDs found");
1292
1293 if (io_apic_id_acceptable(apic, freeid)) {
1294 swap_apic_id(apic, IO_TO_ID(apic), freeid);
1295 continue;
1296 }
1297 panic("Free physical APIC ID not usable");
1298 }
1299 fix_id_to_io_mapping();
1300
1301 /* detect and fix broken Compaq MP table */
1302 if (apic_int_type(0, 0) == -1) {
1303 printf("APIC_IO: MP table broken: 8259->APIC entry missing!\n");
1304 io_apic_ints[nintrs].int_type = 3; /* ExtInt */
1305 io_apic_ints[nintrs].int_vector = 0xff; /* Unassigned */
1306 /* XXX fixme, set src bus id etc, but it doesn't seem to hurt */
1307 io_apic_ints[nintrs].dst_apic_id = IO_TO_ID(0);
1308 io_apic_ints[nintrs].dst_apic_int = 0; /* Pin 0 */
1309 nintrs++;
1310 }
1311}
1312
1313
1314/* Assign low level interrupt handlers */
1315static void
1316setup_apic_irq_mapping(void)
1317{
1318 int x;
1319 int int_vector;
1320
1321 /* Clear array */
1322 for (x = 0; x < APIC_INTMAPSIZE; x++) {
1323 int_to_apicintpin[x].ioapic = -1;
1324 int_to_apicintpin[x].int_pin = 0;
1325 int_to_apicintpin[x].apic_address = NULL;
1326 int_to_apicintpin[x].redirindex = 0;
1327 }
1328
1329 /* First assign ISA/EISA interrupts */
1330 for (x = 0; x < nintrs; x++) {
1331 int_vector = io_apic_ints[x].src_bus_irq;
1332 if (int_vector < APIC_INTMAPSIZE &&
1333 io_apic_ints[x].int_vector == 0xff &&
1334 int_to_apicintpin[int_vector].ioapic == -1 &&
1335 (apic_int_is_bus_type(x, ISA) ||
1336 apic_int_is_bus_type(x, EISA)) &&
1337 io_apic_ints[x].int_type == 0) {
1338 assign_apic_irq(ID_TO_IO(io_apic_ints[x].dst_apic_id),
1339 io_apic_ints[x].dst_apic_int,
1340 int_vector);
1341 }
1342 }
1343
1344 /* Assign ExtInt entry if no ISA/EISA interrupt 0 entry */
1345 for (x = 0; x < nintrs; x++) {
1346 if (io_apic_ints[x].dst_apic_int == 0 &&
1347 io_apic_ints[x].dst_apic_id == IO_TO_ID(0) &&
1348 io_apic_ints[x].int_vector == 0xff &&
1349 int_to_apicintpin[0].ioapic == -1 &&
1350 io_apic_ints[x].int_type == 3) {
1351 assign_apic_irq(0, 0, 0);
1352 break;
1353 }
1354 }
1355 /* PCI interrupt assignment is deferred */
1356}
1357
1358
1359static int
1360processor_entry(proc_entry_ptr entry, int cpu)
1361{
1362 /* check for usability */
1363 if (!(entry->cpu_flags & PROCENTRY_FLAG_EN))
1364 return 0;
1365
1366 if(entry->apic_id >= NAPICID)
1367 panic("CPU APIC ID out of range (0..%d)", NAPICID - 1);
1368 /* check for BSP flag */
1369 if (entry->cpu_flags & PROCENTRY_FLAG_BP) {
1370 boot_cpu_id = entry->apic_id;
1371 CPU_TO_ID(0) = entry->apic_id;
1372 ID_TO_CPU(entry->apic_id) = 0;
1373 return 0; /* its already been counted */
1374 }
1375
1376 /* add another AP to list, if less than max number of CPUs */
1377 else if (cpu < MAXCPU) {
1378 CPU_TO_ID(cpu) = entry->apic_id;
1379 ID_TO_CPU(entry->apic_id) = cpu;
1380 return 1;
1381 }
1382
1383 return 0;
1384}
1385
1386
1387static int
1388bus_entry(bus_entry_ptr entry, int bus)
1389{
1390 int x;
1391 char c, name[8];
1392
1393 /* encode the name into an index */
1394 for (x = 0; x < 6; ++x) {
1395 if ((c = entry->bus_type[x]) == ' ')
1396 break;
1397 name[x] = c;
1398 }
1399 name[x] = '\0';
1400
1401 if ((x = lookup_bus_type(name)) == UNKNOWN_BUSTYPE)
1402 panic("unknown bus type: '%s'", name);
1403
1404 bus_data[bus].bus_id = entry->bus_id;
1405 bus_data[bus].bus_type = x;
1406
1407 return 1;
1408}
1409
1410
1411static int
1412io_apic_entry(io_apic_entry_ptr entry, int apic)
1413{
1414 if (!(entry->apic_flags & IOAPICENTRY_FLAG_EN))
1415 return 0;
1416
1417 IO_TO_ID(apic) = entry->apic_id;
1418 if (entry->apic_id < NAPICID)
1419 ID_TO_IO(entry->apic_id) = apic;
1420
1421 return 1;
1422}
1423
1424
1425static int
1426lookup_bus_type(char *name)
1427{
1428 int x;
1429
1430 for (x = 0; x < MAX_BUSTYPE; ++x)
1431 if (strcmp(bus_type_table[x].name, name) == 0)
1432 return bus_type_table[x].type;
1433
1434 return UNKNOWN_BUSTYPE;
1435}
1436
1437
1438static int
1439int_entry(int_entry_ptr entry, int intr)
1440{
1441 int apic;
1442
1443 io_apic_ints[intr].int_type = entry->int_type;
1444 io_apic_ints[intr].int_flags = entry->int_flags;
1445 io_apic_ints[intr].src_bus_id = entry->src_bus_id;
1446 io_apic_ints[intr].src_bus_irq = entry->src_bus_irq;
1447 if (entry->dst_apic_id == 255) {
1448		/* This signal goes to all IO APICs. Select an IO APIC
1449		   with a sufficient number of interrupt pins */
1450 for (apic = 0; apic < mp_napics; apic++)
1451 if (((io_apic_read(apic, IOAPIC_VER) &
1452 IOART_VER_MAXREDIR) >> MAXREDIRSHIFT) >=
1453 entry->dst_apic_int)
1454 break;
1455 if (apic < mp_napics)
1456 io_apic_ints[intr].dst_apic_id = IO_TO_ID(apic);
1457 else
1458 io_apic_ints[intr].dst_apic_id = entry->dst_apic_id;
1459 } else
1460 io_apic_ints[intr].dst_apic_id = entry->dst_apic_id;
1461 io_apic_ints[intr].dst_apic_int = entry->dst_apic_int;
1462
1463 return 1;
1464}
1465
1466
1467static int
1468apic_int_is_bus_type(int intr, int bus_type)
1469{
1470 int bus;
1471
1472 for (bus = 0; bus < mp_nbusses; ++bus)
1473 if ((bus_data[bus].bus_id == io_apic_ints[intr].src_bus_id)
1474 && ((int) bus_data[bus].bus_type == bus_type))
1475 return 1;
1476
1477 return 0;
1478}
1479
1480
1481/*
1482 * Given a traditional ISA INT mask, return an APIC mask.
1483 */
1484u_int
1485isa_apic_mask(u_int isa_mask)
1486{
1487 int isa_irq;
1488 int apic_pin;
1489
1490#if defined(SKIP_IRQ15_REDIRECT)
1491 if (isa_mask == (1 << 15)) {
1492 printf("skipping ISA IRQ15 redirect\n");
1493 return isa_mask;
1494 }
1495#endif /* SKIP_IRQ15_REDIRECT */
1496
1497 isa_irq = ffs(isa_mask); /* find its bit position */
1498 if (isa_irq == 0) /* doesn't exist */
1499 return 0;
1500 --isa_irq; /* make it zero based */
1501
1502 apic_pin = isa_apic_irq(isa_irq); /* look for APIC connection */
1503 if (apic_pin == -1)
1504 return 0;
1505
1506 return (1 << apic_pin); /* convert pin# to a mask */
1507}
1508
1509
1510/*
1511 * Determine which APIC pin an ISA/EISA INT is attached to.
1512 */
1513#define INTTYPE(I) (io_apic_ints[(I)].int_type)
1514#define INTPIN(I) (io_apic_ints[(I)].dst_apic_int)
1515#define INTIRQ(I) (io_apic_ints[(I)].int_vector)
1516#define INTAPIC(I) (ID_TO_IO(io_apic_ints[(I)].dst_apic_id))
1517
1518#define SRCBUSIRQ(I) (io_apic_ints[(I)].src_bus_irq)
1519int
1520isa_apic_irq(int isa_irq)
1521{
1522 int intr;
1523
1524 for (intr = 0; intr < nintrs; ++intr) { /* check each record */
1525 if (INTTYPE(intr) == 0) { /* standard INT */
1526 if (SRCBUSIRQ(intr) == isa_irq) {
1527 if (apic_int_is_bus_type(intr, ISA) ||
1528 apic_int_is_bus_type(intr, EISA)) {
1529 if (INTIRQ(intr) == 0xff)
1530 return -1; /* unassigned */
1531 return INTIRQ(intr); /* found */
1532 }
1533 }
1534 }
1535 }
1536 return -1; /* NOT found */
1537}
1538
1539
1540/*
1541 * Determine which APIC pin a PCI INT is attached to.
1542 */
1543#define SRCBUSID(I) (io_apic_ints[(I)].src_bus_id)
1544#define SRCBUSDEVICE(I) ((io_apic_ints[(I)].src_bus_irq >> 2) & 0x1f)
1545#define SRCBUSLINE(I) (io_apic_ints[(I)].src_bus_irq & 0x03)
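/*
 * For PCI sources the MP spec packs the "source bus IRQ" field as:
 * bits 0-1 = PCI interrupt pin (0 = INT_A# ... 3 = INT_D#) and
 * bits 2-6 = PCI device number, which is what the two macros above decode.
 */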
1546int
1547pci_apic_irq(int pciBus, int pciDevice, int pciInt)
1548{
1549 int intr;
1550
1551 --pciInt; /* zero based */
1552
1553 for (intr = 0; intr < nintrs; ++intr) /* check each record */
1554 if ((INTTYPE(intr) == 0) /* standard INT */
1555 && (SRCBUSID(intr) == pciBus)
1556 && (SRCBUSDEVICE(intr) == pciDevice)
1557 && (SRCBUSLINE(intr) == pciInt)) /* a candidate IRQ */
1558 if (apic_int_is_bus_type(intr, PCI)) {
1559 if (INTIRQ(intr) == 0xff)
1560 allocate_apic_irq(intr);
1561 if (INTIRQ(intr) == 0xff)
1562 return -1; /* unassigned */
1563 return INTIRQ(intr); /* exact match */
1564 }
1565
1566 return -1; /* NOT found */
1567}
1568
1569int
1570next_apic_irq(int irq)
1571{
1572 int intr, ointr;
1573 int bus, bustype;
1574
1575 bus = 0;
1576 bustype = 0;
1577 for (intr = 0; intr < nintrs; intr++) {
1578 if (INTIRQ(intr) != irq || INTTYPE(intr) != 0)
1579 continue;
1580 bus = SRCBUSID(intr);
1581 bustype = apic_bus_type(bus);
1582 if (bustype != ISA &&
1583 bustype != EISA &&
1584 bustype != PCI)
1585 continue;
1586 break;
1587 }
1588 if (intr >= nintrs) {
1589 return -1;
1590 }
1591 for (ointr = intr + 1; ointr < nintrs; ointr++) {
1592 if (INTTYPE(ointr) != 0)
1593 continue;
1594 if (bus != SRCBUSID(ointr))
1595 continue;
1596 if (bustype == PCI) {
1597 if (SRCBUSDEVICE(intr) != SRCBUSDEVICE(ointr))
1598 continue;
1599 if (SRCBUSLINE(intr) != SRCBUSLINE(ointr))
1600 continue;
1601 }
1602 if (bustype == ISA || bustype == EISA) {
1603 if (SRCBUSIRQ(intr) != SRCBUSIRQ(ointr))
1604 continue;
1605 }
1606 if (INTPIN(intr) == INTPIN(ointr))
1607 continue;
1608 break;
1609 }
1610 if (ointr >= nintrs) {
1611 return -1;
1612 }
1613 return INTIRQ(ointr);
1614}
1615#undef SRCBUSLINE
1616#undef SRCBUSDEVICE
1617#undef SRCBUSID
1618#undef SRCBUSIRQ
1619
1620#undef INTPIN
1621#undef INTIRQ
1622#undef INTAPIC
1623#undef INTTYPE
1624
1625
1626/*
1627 * Reprogram the MB chipset to NOT redirect an ISA INTerrupt.
1628 *
1629 * XXX FIXME:
1630 * Exactly what this means is unclear at this point. It is a solution
1631 * for motherboards that redirect the MBIRQ0 pin. Generically a motherboard
1632 * could route any of the ISA INTs to upper (>15) IRQ values. But most would
1633 * NOT be redirected via MBIRQ0, thus "undirect()ing" them would NOT be an
1634 * option.
1635 */
1636int
1637undirect_isa_irq(int rirq)
1638{
1639#if defined(READY)
1640 if (bootverbose)
1641 printf("Freeing redirected ISA irq %d.\n", rirq);
1642 /** FIXME: tickle the MB redirector chip */
1643 return -1;
1644#else
1645 if (bootverbose)
1646 printf("Freeing (NOT implemented) redirected ISA irq %d.\n", rirq);
1647 return 0;
1648#endif /* READY */
1649}
1650
1651
1652/*
1653 * Reprogram the MB chipset to NOT redirect a PCI INTerrupt
1654 */
1655int
1656undirect_pci_irq(int rirq)
1657{
1658#if defined(READY)
1659 if (bootverbose)
1660 printf("Freeing redirected PCI irq %d.\n", rirq);
1661
1662 /** FIXME: tickle the MB redirector chip */
1663 return -1;
1664#else
1665 if (bootverbose)
1666 printf("Freeing (NOT implemented) redirected PCI irq %d.\n",
1667 rirq);
1668 return 0;
1669#endif /* READY */
1670}
1671
1672
1673/*
1674 * given a bus ID, return:
1675 * the bus type if found
1676 * -1 if NOT found
1677 */
1678int
1679apic_bus_type(int id)
1680{
1681 int x;
1682
1683 for (x = 0; x < mp_nbusses; ++x)
1684 if (bus_data[x].bus_id == id)
1685 return bus_data[x].bus_type;
1686
1687 return -1;
1688}
1689
1690
1691/*
1692 * given a LOGICAL APIC# and pin#, return:
1693 * the associated src bus ID if found
1694 * -1 if NOT found
1695 */
1696int
1697apic_src_bus_id(int apic, int pin)
1698{
1699 int x;
1700
1701 /* search each of the possible INTerrupt sources */
1702 for (x = 0; x < nintrs; ++x)
1703 if ((apic == ID_TO_IO(io_apic_ints[x].dst_apic_id)) &&
1704 (pin == io_apic_ints[x].dst_apic_int))
1705 return (io_apic_ints[x].src_bus_id);
1706
1707 return -1; /* NOT found */
1708}
1709
1710
1711/*
1712 * given a LOGICAL APIC# and pin#, return:
1713 * the associated src bus IRQ if found
1714 * -1 if NOT found
1715 */
1716int
1717apic_src_bus_irq(int apic, int pin)
1718{
1719 int x;
1720
1721 for (x = 0; x < nintrs; x++)
1722 if ((apic == ID_TO_IO(io_apic_ints[x].dst_apic_id)) &&
1723 (pin == io_apic_ints[x].dst_apic_int))
1724 return (io_apic_ints[x].src_bus_irq);
1725
1726 return -1; /* NOT found */
1727}
1728
1729
1730/*
1731 * given a LOGICAL APIC# and pin#, return:
1732 * the associated INTerrupt type if found
1733 * -1 if NOT found
1734 */
1735int
1736apic_int_type(int apic, int pin)
1737{
1738 int x;
1739
1740 /* search each of the possible INTerrupt sources */
1741 for (x = 0; x < nintrs; ++x)
1742 if ((apic == ID_TO_IO(io_apic_ints[x].dst_apic_id)) &&
1743 (pin == io_apic_ints[x].dst_apic_int))
1744 return (io_apic_ints[x].int_type);
1745
1746 return -1; /* NOT found */
1747}
1748
1749int
1750apic_irq(int apic, int pin)
1751{
1752 int x;
1753 int res;
1754
1755 for (x = 0; x < nintrs; ++x)
1756 if ((apic == ID_TO_IO(io_apic_ints[x].dst_apic_id)) &&
1757 (pin == io_apic_ints[x].dst_apic_int)) {
1758 res = io_apic_ints[x].int_vector;
1759 if (res == 0xff)
1760 return -1;
1761 if (apic != int_to_apicintpin[res].ioapic)
1762 panic("apic_irq: inconsistent table");
1763 if (pin != int_to_apicintpin[res].int_pin)
1764 panic("apic_irq inconsistent table (2)");
1765 return res;
1766 }
1767 return -1;
1768}
1769
1770
1771/*
1772 * given a LOGICAL APIC# and pin#, return:
1773 * the associated trigger mode if found
1774 * -1 if NOT found
1775 */
1776int
1777apic_trigger(int apic, int pin)
1778{
1779 int x;
1780
1781 /* search each of the possible INTerrupt sources */
1782 for (x = 0; x < nintrs; ++x)
1783 if ((apic == ID_TO_IO(io_apic_ints[x].dst_apic_id)) &&
1784 (pin == io_apic_ints[x].dst_apic_int))
1785 return ((io_apic_ints[x].int_flags >> 2) & 0x03);
1786
1787 return -1; /* NOT found */
1788}
1789
1790
1791/*
1792 * given a LOGICAL APIC# and pin#, return:
1793 * the associated 'active' level if found
1794 * -1 if NOT found
1795 */
1796int
1797apic_polarity(int apic, int pin)
1798{
1799 int x;
1800
1801 /* search each of the possible INTerrupt sources */
1802 for (x = 0; x < nintrs; ++x)
1803 if ((apic == ID_TO_IO(io_apic_ints[x].dst_apic_id)) &&
1804 (pin == io_apic_ints[x].dst_apic_int))
1805 return (io_apic_ints[x].int_flags & 0x03);
1806
1807 return -1; /* NOT found */
1808}
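/*
 * Both lookups above decode the MP table interrupt-flags word:
 * bits 0-1 (PO) give the polarity (00 = conforms to bus, 01 = active high,
 * 11 = active low) and bits 2-3 (EL) give the trigger mode (00 = conforms
 * to bus, 01 = edge, 11 = level).
 */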
1809
1810
1811/*
1812 * set data according to MP defaults
1813 * FIXME: probably not complete yet...
1814 */
1815static void
1816default_mp_table(int type)
1817{
1818 int ap_cpu_id;
1819#if defined(APIC_IO)
1820 int io_apic_id;
1821 int pin;
1822#endif /* APIC_IO */
1823
1824#if 0
1825 printf(" MP default config type: %d\n", type);
1826 switch (type) {
1827 case 1:
1828 printf(" bus: ISA, APIC: 82489DX\n");
1829 break;
1830 case 2:
1831 printf(" bus: EISA, APIC: 82489DX\n");
1832 break;
1833 case 3:
1834 printf(" bus: EISA, APIC: 82489DX\n");
1835 break;
1836 case 4:
1837 printf(" bus: MCA, APIC: 82489DX\n");
1838 break;
1839 case 5:
1840 printf(" bus: ISA+PCI, APIC: Integrated\n");
1841 break;
1842 case 6:
1843 printf(" bus: EISA+PCI, APIC: Integrated\n");
1844 break;
1845 case 7:
1846 printf(" bus: MCA+PCI, APIC: Integrated\n");
1847 break;
1848 default:
1849 printf(" future type\n");
1850 break;
1851 /* NOTREACHED */
1852 }
1853#endif /* 0 */
1854
1855 boot_cpu_id = (lapic.id & APIC_ID_MASK) >> 24;
1856 ap_cpu_id = (boot_cpu_id == 0) ? 1 : 0;
1857
1858 /* BSP */
1859 CPU_TO_ID(0) = boot_cpu_id;
1860 ID_TO_CPU(boot_cpu_id) = 0;
1861
1862 /* one and only AP */
1863 CPU_TO_ID(1) = ap_cpu_id;
1864 ID_TO_CPU(ap_cpu_id) = 1;
1865
1866#if defined(APIC_IO)
1867 /* one and only IO APIC */
1868 io_apic_id = (io_apic_read(0, IOAPIC_ID) & APIC_ID_MASK) >> 24;
1869
1870 /*
1871	 * Sanity check; refer to MP spec section 3.6.6, last paragraph.
1872	 * This is necessary as some hardware doesn't properly set up the IO APIC.
1873 */
1874#if defined(REALLY_ANAL_IOAPICID_VALUE)
1875 if (io_apic_id != 2) {
1876#else
1877 if ((io_apic_id == 0) || (io_apic_id == 1) || (io_apic_id == 15)) {
1878#endif /* REALLY_ANAL_IOAPICID_VALUE */
1879 io_apic_set_id(0, 2);
1880 io_apic_id = 2;
1881 }
1882 IO_TO_ID(0) = io_apic_id;
1883 ID_TO_IO(io_apic_id) = 0;
1884#endif /* APIC_IO */
1885
1886 /* fill out bus entries */
1887 switch (type) {
1888 case 1:
1889 case 2:
1890 case 3:
1891 case 4:
1892 case 5:
1893 case 6:
1894 case 7:
1895 bus_data[0].bus_id = default_data[type - 1][1];
1896 bus_data[0].bus_type = default_data[type - 1][2];
1897 bus_data[1].bus_id = default_data[type - 1][3];
1898 bus_data[1].bus_type = default_data[type - 1][4];
1899 break;
1900
1901 /* case 4: case 7: MCA NOT supported */
1902 default: /* illegal/reserved */
1903 panic("BAD default MP config: %d", type);
1904 /* NOTREACHED */
1905 }
1906
1907#if defined(APIC_IO)
1908 /* general cases from MP v1.4, table 5-2 */
1909 for (pin = 0; pin < 16; ++pin) {
1910 io_apic_ints[pin].int_type = 0;
1911 io_apic_ints[pin].int_flags = 0x05; /* edge/active-hi */
1912 io_apic_ints[pin].src_bus_id = 0;
1913 io_apic_ints[pin].src_bus_irq = pin; /* IRQ2 caught below */
1914 io_apic_ints[pin].dst_apic_id = io_apic_id;
1915 io_apic_ints[pin].dst_apic_int = pin; /* 1-to-1 */
1916 }
1917
1918 /* special cases from MP v1.4, table 5-2 */
1919 if (type == 2) {
1920 io_apic_ints[2].int_type = 0xff; /* N/C */
1921 io_apic_ints[13].int_type = 0xff; /* N/C */
1922#if !defined(APIC_MIXED_MODE)
1923 /** FIXME: ??? */
1924 panic("sorry, can't support type 2 default yet");
1925#endif /* APIC_MIXED_MODE */
1926 }
1927 else
1928 io_apic_ints[2].src_bus_irq = 0; /* ISA IRQ0 is on APIC INT 2 */
1929
1930 if (type == 7)
1931 io_apic_ints[0].int_type = 0xff; /* N/C */
1932 else
1933 io_apic_ints[0].int_type = 3; /* vectored 8259 */
1934#endif /* APIC_IO */
1935}
1936
1937
1938/*
1939 * start each AP in our list
1940 */
1941static int
1942start_all_aps(u_int boot_addr)
1943{
1944 int x, i, pg;
1945 u_char mpbiosreason;
1946 u_long mpbioswarmvec;
1947 struct globaldata *gd;
1948 char *stack;
1949
1950 POSTCODE(START_ALL_APS_POST);
1951
1952 /* initialize BSP's local APIC */
1953 apic_initialize();
1954 bsp_apic_ready = 1;
1955
1956 /* install the AP 1st level boot code */
1957 install_ap_tramp(boot_addr);
1958
1959
1960 /* save the current value of the warm-start vector */
1961 mpbioswarmvec = *((u_long *) WARMBOOT_OFF);
1962#ifndef PC98
1963 outb(CMOS_REG, BIOS_RESET);
1964 mpbiosreason = inb(CMOS_DATA);
1965#endif
1966
1967 /* record BSP in CPU map */
1968 all_cpus = 1;
1969
1970 /* set up 0 -> 4MB P==V mapping for AP boot */
1971 *(int *)PTD = PG_V | PG_RW | ((uintptr_t)(void *)KPTphys & PG_FRAME);
1972 invltlb();
1973
1974 /* start each AP */
1975 for (x = 1; x <= mp_naps; ++x) {
1976
1977 /* This is a bit verbose, it will go away soon. */
1978
1979 /* first page of AP's private space */
1980 pg = x * i386_btop(sizeof(struct privatespace));
1981
1982 /* allocate a new private data page */
1983 gd = (struct globaldata *)kmem_alloc(kernel_map, PAGE_SIZE);
1984
1985 /* wire it into the private page table page */
1986 SMPpt[pg] = (pt_entry_t)(PG_V | PG_RW | vtophys(gd));
1987
1988 /* allocate and set up an idle stack data page */
1989 stack = (char *)kmem_alloc(kernel_map, UPAGES*PAGE_SIZE);
1990 for (i = 0; i < UPAGES; i++)
1991 SMPpt[pg + 1 + i] = (pt_entry_t)
1992 (PG_V | PG_RW | vtophys(PAGE_SIZE * i + stack));
1993
1994 /* prime data page for it to use */
1995 SLIST_INSERT_HEAD(&cpuhead, gd, gd_allcpu);
1996 gd->gd_cpuid = x;
1997 gd->gd_cpu_lockid = x << 24;
1998
1999 /* setup a vector to our boot code */
2000 *((volatile u_short *) WARMBOOT_OFF) = WARMBOOT_TARGET;
2001 *((volatile u_short *) WARMBOOT_SEG) = (boot_addr >> 4);
2002#ifndef PC98
2003 outb(CMOS_REG, BIOS_RESET);
2004 outb(CMOS_DATA, BIOS_WARM); /* 'warm-start' */
2005#endif
2006
2007 bootSTK = &SMP_prvspace[x].idlestack[UPAGES*PAGE_SIZE];
2008 bootAP = x;
2009
2010 /* attempt to start the Application Processor */
2011 CHECK_INIT(99); /* setup checkpoints */
2012 if (!start_ap(x, boot_addr)) {
2013 printf("AP #%d (PHY# %d) failed!\n", x, CPU_TO_ID(x));
2014 CHECK_PRINT("trace"); /* show checkpoints */
2015 /* better panic as the AP may be running loose */
2016 printf("panic y/n? [y] ");
2017 if (cngetc() != 'n')
2018 panic("bye-bye");
2019 }
2020 CHECK_PRINT("trace"); /* show checkpoints */
2021
2022 /* record its version info */
2023 cpu_apic_versions[x] = cpu_apic_versions[0];
2024
2025 all_cpus |= (1 << x); /* record AP in CPU map */
2026 }
2027
2028 /* build our map of 'other' CPUs */
2029 PCPU_SET(other_cpus, all_cpus & ~(1 << PCPU_GET(cpuid)));
2030
2031 /* fill in our (BSP) APIC version */
2032 cpu_apic_versions[0] = lapic.version;
2033
2034 /* restore the warmstart vector */
2035 *(u_long *) WARMBOOT_OFF = mpbioswarmvec;
2036#ifndef PC98
2037 outb(CMOS_REG, BIOS_RESET);
2038 outb(CMOS_DATA, mpbiosreason);
2039#endif
2040
2041 /*
2042 * Set up the idle context for the BSP. Similar to above except
2043 * that some was done by locore, some by pmap.c and some is implicit
2044 * because the BSP is cpu#0 and the page is initially zero, and also
2045	 * because we can refer to variables by name on the BSP.
2046 */
2047
2048 /* Allocate and setup BSP idle stack */
2049 stack = (char *)kmem_alloc(kernel_map, UPAGES * PAGE_SIZE);
2050 for (i = 0; i < UPAGES; i++)
2051 SMPpt[1 + i] = (pt_entry_t)
2052 (PG_V | PG_RW | vtophys(PAGE_SIZE * i + stack));
2053
2054 *(int *)PTD = 0;
2055 pmap_set_opt();
2056
2057 /* number of APs actually started */
2058 return mp_ncpus - 1;
2059}
2060
2061
2062/*
2063 * load the 1st level AP boot code into base memory.
2064 */
2065
2066/* targets for relocation */
2067extern void bigJump(void);
2068extern void bootCodeSeg(void);
2069extern void bootDataSeg(void);
2070extern void MPentry(void);
2071extern u_int MP_GDT;
2072extern u_int mp_gdtbase;
2073
2074static void
2075install_ap_tramp(u_int boot_addr)
2076{
2077 int x;
2078 int size = *(int *) ((u_long) & bootMP_size);
2079 u_char *src = (u_char *) ((u_long) bootMP);
2080 u_char *dst = (u_char *) boot_addr + KERNBASE;
2081 u_int boot_base = (u_int) bootMP;
2082 u_int8_t *dst8;
2083 u_int16_t *dst16;
2084 u_int32_t *dst32;
2085
2086 POSTCODE(INSTALL_AP_TRAMP_POST);
2087
2088 for (x = 0; x < size; ++x)
2089 *dst++ = *src++;
2090
2091 /*
2092 * modify addresses in code we just moved to basemem. unfortunately we
2093 * need fairly detailed info about mpboot.s for this to work. changes
2094 * to mpboot.s might require changes here.
2095 */
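	/*
	 * What gets patched: the lgdt operand (so it points at the copy of
	 * MP_GDT now sitting at boot_addr), the ljmp target (the physical
	 * address of MPentry()), and the low 24 bits of the base address in
	 * the boot code and data segment descriptors, which have to track
	 * boot_addr for the trampoline's offsets to resolve correctly.
	 */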
2096
2097 /* boot code is located in KERNEL space */
2098 dst = (u_char *) boot_addr + KERNBASE;
2099
2100 /* modify the lgdt arg */
2101 dst32 = (u_int32_t *) (dst + ((u_int) & mp_gdtbase - boot_base));
2102 *dst32 = boot_addr + ((u_int) & MP_GDT - boot_base);
2103
2104 /* modify the ljmp target for MPentry() */
2105 dst32 = (u_int32_t *) (dst + ((u_int) bigJump - boot_base) + 1);
2106 *dst32 = ((u_int) MPentry - KERNBASE);
2107
2108 /* modify the target for boot code segment */
2109 dst16 = (u_int16_t *) (dst + ((u_int) bootCodeSeg - boot_base));
2110 dst8 = (u_int8_t *) (dst16 + 1);
2111 *dst16 = (u_int) boot_addr & 0xffff;
2112 *dst8 = ((u_int) boot_addr >> 16) & 0xff;
2113
2114 /* modify the target for boot data segment */
2115 dst16 = (u_int16_t *) (dst + ((u_int) bootDataSeg - boot_base));
2116 dst8 = (u_int8_t *) (dst16 + 1);
2117 *dst16 = (u_int) boot_addr & 0xffff;
2118 *dst8 = ((u_int) boot_addr >> 16) & 0xff;
2119}
2120
2121
2122/*
2123 * this function starts the AP (application processor) identified
2124 * by the APIC ID 'physicalCpu'. It does quite a "song and dance"
2125 * to accomplish this. This is necessary because of the nuances
2126 * of the different hardware we might encounter. It ain't pretty,
2127 * but it seems to work.
2128 */
2129static int
2130start_ap(int logical_cpu, u_int boot_addr)
2131{
2132 int physical_cpu;
2133 int vector;
2134 int cpus;
2135 u_long icr_lo, icr_hi;
2136
2137 POSTCODE(START_AP_POST);
2138
2139 /* get the PHYSICAL APIC ID# */
2140 physical_cpu = CPU_TO_ID(logical_cpu);
2141
2142 /* calculate the vector */
2143 vector = (boot_addr >> 12) & 0xff;
2144
2145 /* used as a watchpoint to signal AP startup */
2146 cpus = mp_ncpus;
2147
2148 /*
2149	 * First we do an INIT/RESET IPI. This INIT IPI might be run, resetting
2150	 * and running the target CPU. OR this INIT IPI might be latched (P5
2151	 * bug), leaving the CPU waiting for a STARTUP IPI. OR this INIT IPI
2152	 * might simply be ignored.
2153 */
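	/*
	 * The ICR low-word constants used below decode as: delivery mode
	 * INIT (0x500) or STARTUP (0x600), level assert (0x4000) vs.
	 * de-assert, and level trigger (0x8000).  A STARTUP IPI carries the
	 * trampoline's 4K page number in the vector field.
	 */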
2154
2155 /* setup the address for the target AP */
2156 icr_hi = lapic.icr_hi & ~APIC_ID_MASK;
2157 icr_hi |= (physical_cpu << 24);
2158 lapic.icr_hi = icr_hi;
2159
2160 /* do an INIT IPI: assert RESET */
2161 icr_lo = lapic.icr_lo & 0xfff00000;
2162 lapic.icr_lo = icr_lo | 0x0000c500;
2163
2164 /* wait for pending status end */
2165 while (lapic.icr_lo & APIC_DELSTAT_MASK)
2166 /* spin */ ;
2167
2168 /* do an INIT IPI: deassert RESET */
2169 lapic.icr_lo = icr_lo | 0x00008500;
2170
2171 /* wait for pending status end */
2172 u_sleep(10000); /* wait ~10mS */
2173 while (lapic.icr_lo & APIC_DELSTAT_MASK)
2174 /* spin */ ;
2175
2176 /*
2177	 * Next we do a STARTUP IPI: the previous INIT IPI might still be
2178	 * latched (P5 bug), in which case this 1st STARTUP would terminate
2179	 * immediately and the previously started INIT IPI would continue. OR
2180	 * the previous INIT IPI has already run, and this STARTUP IPI will
2181	 * run. OR the previous INIT IPI was ignored, and this STARTUP IPI
2182	 * will run.
2183 */
2184
2185 /* do a STARTUP IPI */
2186 lapic.icr_lo = icr_lo | 0x00000600 | vector;
2187 while (lapic.icr_lo & APIC_DELSTAT_MASK)
2188 /* spin */ ;
2189 u_sleep(200); /* wait ~200uS */
2190
2191 /*
2192	 * Finally we do a 2nd STARTUP IPI: it should run IF the previous
2193	 * STARTUP IPI was cancelled by a latched INIT IPI. Otherwise this
2194	 * STARTUP IPI will be ignored, as only ONE STARTUP IPI is recognized
2195	 * after a hardware RESET or INIT IPI.
2196 */
2197
2198 lapic.icr_lo = icr_lo | 0x00000600 | vector;
2199 while (lapic.icr_lo & APIC_DELSTAT_MASK)
2200 /* spin */ ;
2201 u_sleep(200); /* wait ~200uS */
2202
2203 /* wait for it to start */
2204 set_apic_timer(5000000);/* == 5 seconds */
2205 while (read_apic_timer())
2206 if (mp_ncpus > cpus)
2207 return 1; /* return SUCCESS */
2208
2209 return 0; /* return FAILURE */
2210}
2211
2212/*
2213 * Flush the TLB on all other CPU's
2214 *
2215	 * XXX: Needs to handshake and wait for completion before proceeding.
2216 */
2217void
2218smp_invltlb(void)
2219{
2220#if defined(APIC_IO)
2221 if (smp_started && invltlb_ok)
2222 all_but_self_ipi(XINVLTLB_OFFSET);
2223#endif /* APIC_IO */
2224}
2225
2226void
2227invlpg(u_int addr)
2228{
2229 __asm __volatile("invlpg (%0)"::"r"(addr):"memory");
2230
2231 /* send a message to the other CPUs */
2232 smp_invltlb();
2233}
2234
2235void
2236invltlb(void)
2237{
2238 u_long temp;
2239
2240 /*
2241 * This should be implemented as load_cr3(rcr3()) when load_cr3() is
2242 * inlined.
2243 */
2244 __asm __volatile("movl %%cr3, %0; movl %0, %%cr3":"=r"(temp) :: "memory");
2245
2246 /* send a message to the other CPUs */
2247 smp_invltlb();
2248}
2249
2250
2251/*
2252 * This is called once the rest of the system is up and running and we're
2253 * ready to let the AP's out of the pen.
2254 */
2255void
2256ap_init(void)
2257{
2258 u_int apic_id;
2259
2260 /* spin until all the AP's are ready */
2261 while (!aps_ready)
2262 /* spin */ ;
2263
2264 /*
2265 * Set curproc to our per-cpu idleproc so that mutexes have
2266 * something unique to lock with.
2267 */
2268 PCPU_SET(curproc, PCPU_GET(idleproc));
2269
2270 /* lock against other AP's that are waking up */
26 */
27
28#include "opt_cpu.h"
29#include "opt_user_ldt.h"
30
31#ifdef SMP
32#include <machine/smptests.h>
33#else
34#error
35#endif
36
37#include <sys/param.h>
38#include <sys/bus.h>
39#include <sys/systm.h>
40#include <sys/kernel.h>
41#include <sys/proc.h>
42#include <sys/sysctl.h>
43#include <sys/malloc.h>
44#include <sys/memrange.h>
45#include <sys/mutex.h>
46#ifdef BETTER_CLOCK
47#include <sys/dkstat.h>
48#endif
49#include <sys/cons.h> /* cngetc() */
50
51#include <vm/vm.h>
52#include <vm/vm_param.h>
53#include <vm/pmap.h>
54#include <vm/vm_kern.h>
55#include <vm/vm_extern.h>
56#ifdef BETTER_CLOCK
57#include <sys/lock.h>
58#include <vm/vm_map.h>
59#include <sys/user.h>
60#ifdef GPROF
61#include <sys/gmon.h>
62#endif
63#endif
64
65#include <machine/smp.h>
66#include <machine/apic.h>
67#include <machine/atomic.h>
68#include <machine/cpufunc.h>
69#include <machine/mpapic.h>
70#include <machine/psl.h>
71#include <machine/segments.h>
72#include <machine/smptests.h> /** TEST_DEFAULT_CONFIG, TEST_TEST1 */
73#include <machine/tss.h>
74#include <machine/specialreg.h>
75#include <machine/globaldata.h>
76
77#if defined(APIC_IO)
78#include <machine/md_var.h> /* setidt() */
79#include <i386/isa/icu.h> /* IPIs */
80#include <i386/isa/intr_machdep.h> /* IPIs */
81#endif /* APIC_IO */
82
83#if defined(TEST_DEFAULT_CONFIG)
84#define MPFPS_MPFB1 TEST_DEFAULT_CONFIG
85#else
86#define MPFPS_MPFB1 mpfps->mpfb1
87#endif /* TEST_DEFAULT_CONFIG */
88
89#define WARMBOOT_TARGET 0
90#define WARMBOOT_OFF (KERNBASE + 0x0467)
91#define WARMBOOT_SEG (KERNBASE + 0x0469)
92
93#ifdef PC98
94#define BIOS_BASE (0xe8000)
95#define BIOS_SIZE (0x18000)
96#else
97#define BIOS_BASE (0xf0000)
98#define BIOS_SIZE (0x10000)
99#endif
100#define BIOS_COUNT (BIOS_SIZE/4)
101
102#define CMOS_REG (0x70)
103#define CMOS_DATA (0x71)
104#define BIOS_RESET (0x0f)
105#define BIOS_WARM (0x0a)
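/*
 * Writing BIOS_WARM into the CMOS shutdown status byte (BIOS_RESET) makes
 * the BIOS treat the next processor reset as a warm start and jump through
 * the real-mode vector stored at 0x467/0x469 (WARMBOOT_OFF/WARMBOOT_SEG),
 * which start_all_aps() temporarily points at the AP trampoline.
 */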
106
107#define PROCENTRY_FLAG_EN 0x01
108#define PROCENTRY_FLAG_BP 0x02
109#define IOAPICENTRY_FLAG_EN 0x01
110
111
112/* MP Floating Pointer Structure */
113typedef struct MPFPS {
114 char signature[4];
115 void *pap;
116 u_char length;
117 u_char spec_rev;
118 u_char checksum;
119 u_char mpfb1;
120 u_char mpfb2;
121 u_char mpfb3;
122 u_char mpfb4;
123 u_char mpfb5;
124} *mpfps_t;
125
126/* MP Configuration Table Header */
127typedef struct MPCTH {
128 char signature[4];
129 u_short base_table_length;
130 u_char spec_rev;
131 u_char checksum;
132 u_char oem_id[8];
133 u_char product_id[12];
134 void *oem_table_pointer;
135 u_short oem_table_size;
136 u_short entry_count;
137 void *apic_address;
138 u_short extended_table_length;
139 u_char extended_table_checksum;
140 u_char reserved;
141} *mpcth_t;
142
143
144typedef struct PROCENTRY {
145 u_char type;
146 u_char apic_id;
147 u_char apic_version;
148 u_char cpu_flags;
149 u_long cpu_signature;
150 u_long feature_flags;
151 u_long reserved1;
152 u_long reserved2;
153} *proc_entry_ptr;
154
155typedef struct BUSENTRY {
156 u_char type;
157 u_char bus_id;
158 char bus_type[6];
159} *bus_entry_ptr;
160
161typedef struct IOAPICENTRY {
162 u_char type;
163 u_char apic_id;
164 u_char apic_version;
165 u_char apic_flags;
166 void *apic_address;
167} *io_apic_entry_ptr;
168
169typedef struct INTENTRY {
170 u_char type;
171 u_char int_type;
172 u_short int_flags;
173 u_char src_bus_id;
174 u_char src_bus_irq;
175 u_char dst_apic_id;
176 u_char dst_apic_int;
177} *int_entry_ptr;
178
179/* descriptions of MP basetable entries */
180typedef struct BASETABLE_ENTRY {
181 u_char type;
182 u_char length;
183 char name[16];
184} basetable_entry;
185
186/*
187 * this code MUST be enabled here and in mpboot.s.
188 * it follows the very early stages of AP boot by placing values in CMOS ram.
189	 * it NORMALLY will never be needed, hence the primitive method of enabling it.
190 *
191#define CHECK_POINTS
192 */
193
194#if defined(CHECK_POINTS) && !defined(PC98)
195#define CHECK_READ(A) (outb(CMOS_REG, (A)), inb(CMOS_DATA))
196#define CHECK_WRITE(A,D) (outb(CMOS_REG, (A)), outb(CMOS_DATA, (D)))
197
198#define CHECK_INIT(D); \
199 CHECK_WRITE(0x34, (D)); \
200 CHECK_WRITE(0x35, (D)); \
201 CHECK_WRITE(0x36, (D)); \
202 CHECK_WRITE(0x37, (D)); \
203 CHECK_WRITE(0x38, (D)); \
204 CHECK_WRITE(0x39, (D));
205
206#define CHECK_PRINT(S); \
207 printf("%s: %d, %d, %d, %d, %d, %d\n", \
208 (S), \
209 CHECK_READ(0x34), \
210 CHECK_READ(0x35), \
211 CHECK_READ(0x36), \
212 CHECK_READ(0x37), \
213 CHECK_READ(0x38), \
214 CHECK_READ(0x39));
215
216#else /* CHECK_POINTS */
217
218#define CHECK_INIT(D)
219#define CHECK_PRINT(S)
220
221#endif /* CHECK_POINTS */
222
223/*
224 * Values to send to the POST hardware.
225 */
226#define MP_BOOTADDRESS_POST 0x10
227#define MP_PROBE_POST 0x11
228#define MPTABLE_PASS1_POST 0x12
229
230#define MP_START_POST 0x13
231#define MP_ENABLE_POST 0x14
232#define MPTABLE_PASS2_POST 0x15
233
234#define START_ALL_APS_POST 0x16
235#define INSTALL_AP_TRAMP_POST 0x17
236#define START_AP_POST 0x18
237
238#define MP_ANNOUNCE_POST 0x19
239
240/* used to hold the AP's until we are ready to release them */
241struct mtx ap_boot_mtx;
242
243/** XXX FIXME: where does this really belong, isa.h/isa.c perhaps? */
244int current_postcode;
245
246/** XXX FIXME: what system files declare these??? */
247extern struct region_descriptor r_gdt, r_idt;
248
249	int	bsp_apic_ready = 0;	/* flags usability of BSP apic */
250int mp_ncpus; /* # of CPUs, including BSP */
251	int	mp_naps;		/* # of Application Processors */
252int mp_nbusses; /* # of busses */
253int mp_napics; /* # of IO APICs */
254int boot_cpu_id; /* designated BSP */
255vm_offset_t cpu_apic_address;
256vm_offset_t io_apic_address[NAPICID]; /* NAPICID is more than enough */
257extern int nkpt;
258
259u_int32_t cpu_apic_versions[MAXCPU];
260u_int32_t *io_apic_versions;
261
262#ifdef APIC_INTR_REORDER
263struct {
264 volatile int *location;
265 int bit;
266} apic_isrbit_location[32];
267#endif
268
269struct apic_intmapinfo int_to_apicintpin[APIC_INTMAPSIZE];
270
271/*
272 * APIC ID logical/physical mapping structures.
273 * We oversize these to simplify boot-time config.
274 */
275int cpu_num_to_apic_id[NAPICID];
276int io_num_to_apic_id[NAPICID];
277int apic_id_to_logical[NAPICID];
278
279
280/* Bitmap of all available CPUs */
281u_int all_cpus;
282
283/* AP uses this during bootstrap. Do not staticize. */
284char *bootSTK;
285static int bootAP;
286
287/* Hotwire a 0->4MB V==P mapping */
288extern pt_entry_t *KPTphys;
289
290/* SMP page table page */
291extern pt_entry_t *SMPpt;
292
293struct pcb stoppcbs[MAXCPU];
294
295int smp_started; /* has the system started? */
296int smp_active = 0; /* are the APs allowed to run? */
297SYSCTL_INT(_machdep, OID_AUTO, smp_active, CTLFLAG_RW, &smp_active, 0, "");
298
299/* XXX maybe should be hw.ncpu */
300static int smp_cpus = 1; /* how many cpu's running */
301SYSCTL_INT(_machdep, OID_AUTO, smp_cpus, CTLFLAG_RD, &smp_cpus, 0, "");
302
303int invltlb_ok = 0; /* throttle smp_invltlb() till safe */
304SYSCTL_INT(_machdep, OID_AUTO, invltlb_ok, CTLFLAG_RW, &invltlb_ok, 0, "");
305
306/* Enable forwarding of a signal to a process running on a different CPU */
307static int forward_signal_enabled = 1;
308SYSCTL_INT(_machdep, OID_AUTO, forward_signal_enabled, CTLFLAG_RW,
309 &forward_signal_enabled, 0, "");
310
311/* Enable forwarding of roundrobin to all other cpus */
312static int forward_roundrobin_enabled = 1;
313SYSCTL_INT(_machdep, OID_AUTO, forward_roundrobin_enabled, CTLFLAG_RW,
314 &forward_roundrobin_enabled, 0, "");
315
316
317/*
318 * Local data and functions.
319 */
320
321/* Set to 1 once we're ready to let the APs out of the pen. */
322static volatile int aps_ready = 0;
323
324static int mp_capable;
325static u_int boot_address;
326static u_int base_memory;
327
328static int picmode; /* 0: virtual wire mode, 1: PIC mode */
329static mpfps_t mpfps;
330static int search_for_sig(u_int32_t target, int count);
331static void mp_enable(u_int boot_addr);
332
333static void mptable_pass1(void);
334static int mptable_pass2(void);
335static void default_mp_table(int type);
336static void fix_mp_table(void);
337static void setup_apic_irq_mapping(void);
338static void init_locks(void);
339static int start_all_aps(u_int boot_addr);
340static void install_ap_tramp(u_int boot_addr);
341static int start_ap(int logicalCpu, u_int boot_addr);
342void ap_init(void);
343static int apic_int_is_bus_type(int intr, int bus_type);
344static void release_aps(void *dummy);
345
346/*
347 * initialize all the SMP locks
348 */
349
350/* critical region around IO APIC, apic_imen */
351struct mtx imen_mtx;
352
353/* lock region used by kernel profiling */
354struct mtx mcount_mtx;
355
356#ifdef USE_COMLOCK
357/* locks com (tty) data/hardware accesses: a FASTINTR() */
358struct mtx com_mtx;
359#endif /* USE_COMLOCK */
360
361/* lock around the MP rendezvous */
362static struct mtx smp_rv_mtx;
363
364/* only 1 CPU can panic at a time :) */
365struct mtx panic_mtx;
366
367static void
368init_locks(void)
369{
370 /*
371 * XXX The mcount mutex probably needs to be statically initialized,
372 * since it will be used even in the function calls that get us to this
373 * point.
374 */
375 mtx_init(&mcount_mtx, "mcount", MTX_DEF);
376
377 mtx_init(&smp_rv_mtx, "smp rendezvous", MTX_SPIN);
378 mtx_init(&panic_mtx, "panic", MTX_DEF);
379
380#ifdef USE_COMLOCK
381 mtx_init(&com_mtx, "com", MTX_SPIN);
382#endif /* USE_COMLOCK */
383
384 mtx_init(&ap_boot_mtx, "ap boot", MTX_SPIN);
385}
386
387/*
388 * Calculate usable address in base memory for AP trampoline code.
389 */
390u_int
391mp_bootaddress(u_int basemem)
392{
393 POSTCODE(MP_BOOTADDRESS_POST);
394
395 base_memory = basemem * 1024; /* convert to bytes */
396
397 boot_address = base_memory & ~0xfff; /* round down to 4k boundary */
398 if ((base_memory - boot_address) < bootMP_size)
399 boot_address -= 4096; /* not enough, lower by 4k */
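	/*
	 * For example, with 639K of base memory: base_memory = 0x9fc00, so
	 * boot_address rounds down to 0x9f000; if the trampoline needs more
	 * than the remaining 0xc00 bytes we drop another 4K to 0x9e000.
	 */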
400
401 return boot_address;
402}
403
404
405/*
406 * Look for an Intel MP spec table (ie, SMP capable hardware).
407 */
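/*
 * Per the MP 1.4 spec the floating pointer structure can live in the first
 * kilobyte of the EBDA, the last kilobyte of base memory, or the BIOS ROM
 * area (BIOS_BASE); search_for_sig() scans each candidate range for the
 * "_MP_" signature.
 */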
408int
409mp_probe(void)
410{
411 int x;
412 u_long segment;
413 u_int32_t target;
414
415 POSTCODE(MP_PROBE_POST);
416
417 /* see if EBDA exists */
418 if ((segment = (u_long) * (u_short *) (KERNBASE + 0x40e)) != 0) {
419 /* search first 1K of EBDA */
420 target = (u_int32_t) (segment << 4);
421 if ((x = search_for_sig(target, 1024 / 4)) >= 0)
422 goto found;
423 } else {
424 /* last 1K of base memory, effective 'top of base' passed in */
425 target = (u_int32_t) (base_memory - 0x400);
426 if ((x = search_for_sig(target, 1024 / 4)) >= 0)
427 goto found;
428 }
429
430 /* search the BIOS */
431 target = (u_int32_t) BIOS_BASE;
432 if ((x = search_for_sig(target, BIOS_COUNT)) >= 0)
433 goto found;
434
435 /* nothing found */
436 mpfps = (mpfps_t)0;
437 mp_capable = 0;
438 return 0;
439
440found:
441 /* calculate needed resources */
442 mpfps = (mpfps_t)x;
443 mptable_pass1();
444
445 /* flag fact that we are running multiple processors */
446 mp_capable = 1;
447 return 1;
448}
449
450
451/*
452 * Initialize the SMP hardware and the APIC and start up the AP's.
453 */
454void
455mp_start(void)
456{
457 POSTCODE(MP_START_POST);
458
459 /* look for MP capable motherboard */
460 if (mp_capable)
461 mp_enable(boot_address);
462 else
463 panic("MP hardware not found!");
464}
465
466
467/*
468 * Print various information about the SMP system hardware and setup.
469 */
470void
471mp_announce(void)
472{
473 int x;
474
475 POSTCODE(MP_ANNOUNCE_POST);
476
477 printf("FreeBSD/SMP: Multiprocessor motherboard\n");
478 printf(" cpu0 (BSP): apic id: %2d", CPU_TO_ID(0));
479 printf(", version: 0x%08x", cpu_apic_versions[0]);
480 printf(", at 0x%08x\n", cpu_apic_address);
481 for (x = 1; x <= mp_naps; ++x) {
482 printf(" cpu%d (AP): apic id: %2d", x, CPU_TO_ID(x));
483 printf(", version: 0x%08x", cpu_apic_versions[x]);
484 printf(", at 0x%08x\n", cpu_apic_address);
485 }
486
487#if defined(APIC_IO)
488 for (x = 0; x < mp_napics; ++x) {
489 printf(" io%d (APIC): apic id: %2d", x, IO_TO_ID(x));
490 printf(", version: 0x%08x", io_apic_versions[x]);
491 printf(", at 0x%08x\n", io_apic_address[x]);
492 }
493#else
494 printf(" Warning: APIC I/O disabled\n");
495#endif /* APIC_IO */
496}
497
498/*
499 * AP cpu's call this to sync up protected mode.
500 */
501void
502init_secondary(void)
503{
504 int gsel_tss;
505 int x, myid = bootAP;
506
507 gdt_segs[GPRIV_SEL].ssd_base = (int) &SMP_prvspace[myid];
508 gdt_segs[GPROC0_SEL].ssd_base =
509 (int) &SMP_prvspace[myid].globaldata.gd_common_tss;
510 SMP_prvspace[myid].globaldata.gd_prvspace =
511 &SMP_prvspace[myid].globaldata;
512
513 for (x = 0; x < NGDT; x++) {
514 ssdtosd(&gdt_segs[x], &gdt[myid * NGDT + x].sd);
515 }
516
517 r_gdt.rd_limit = NGDT * sizeof(gdt[0]) - 1;
518 r_gdt.rd_base = (int) &gdt[myid * NGDT];
519 lgdt(&r_gdt); /* does magic intra-segment return */
520
521 lidt(&r_idt);
522
523 lldt(_default_ldt);
524#ifdef USER_LDT
525 PCPU_SET(currentldt, _default_ldt);
526#endif
527
528 gsel_tss = GSEL(GPROC0_SEL, SEL_KPL);
529 gdt[myid * NGDT + GPROC0_SEL].sd.sd_type = SDT_SYS386TSS;
530 PCPU_SET(common_tss.tss_esp0, 0); /* not used until after switch */
531 PCPU_SET(common_tss.tss_ss0, GSEL(GDATA_SEL, SEL_KPL));
532 PCPU_SET(common_tss.tss_ioopt, (sizeof (struct i386tss)) << 16);
533 PCPU_SET(tss_gdt, &gdt[myid * NGDT + GPROC0_SEL].sd);
534 PCPU_SET(common_tssd, *PCPU_GET(tss_gdt));
535 ltr(gsel_tss);
536
537 pmap_set_opt();
538}
539
540
541#if defined(APIC_IO)
542/*
543 * Final configuration of the BSP's local APIC:
544 * - disable 'pic mode'.
545 * - disable 'virtual wire mode'.
546 * - enable NMI.
547 */
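/*
 * In 'PIC mode' the 8259s are wired straight to the BSP and the IMCR
 * (select it by writing 0x70 to port 0x22, then access port 0x23) is used
 * to re-route those signals through the APIC.  'Virtual wire mode' feeds
 * the 8259 INTR line in via LINT0 instead, which is why lint0 is masked
 * below.
 */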
548void
549bsp_apic_configure(void)
550{
551 u_char byte;
552 u_int32_t temp;
553
554 /* leave 'pic mode' if necessary */
555 if (picmode) {
556 outb(0x22, 0x70); /* select IMCR */
557 byte = inb(0x23); /* current contents */
558 byte |= 0x01; /* mask external INTR */
559 outb(0x23, byte); /* disconnect 8259s/NMI */
560 }
561
562 /* mask lint0 (the 8259 'virtual wire' connection) */
563 temp = lapic.lvt_lint0;
564 temp |= APIC_LVT_M; /* set the mask */
565 lapic.lvt_lint0 = temp;
566
567 /* setup lint1 to handle NMI */
568 temp = lapic.lvt_lint1;
569 temp &= ~APIC_LVT_M; /* clear the mask */
570 lapic.lvt_lint1 = temp;
571
572 if (bootverbose)
573 apic_dump("bsp_apic_configure()");
574}
575#endif /* APIC_IO */
576
577
578/*******************************************************************
579 * local functions and data
580 */
581
582/*
583 * start the SMP system
584 */
585static void
586mp_enable(u_int boot_addr)
587{
588 int x;
589#if defined(APIC_IO)
590 int apic;
591 u_int ux;
592#endif /* APIC_IO */
593
594 POSTCODE(MP_ENABLE_POST);
595
596 /* turn on 4MB of V == P addressing so we can get to MP table */
597 *(int *)PTD = PG_V | PG_RW | ((uintptr_t)(void *)KPTphys & PG_FRAME);
598 invltlb();
599
600 /* examine the MP table for needed info, uses physical addresses */
601 x = mptable_pass2();
602
603 *(int *)PTD = 0;
604 invltlb();
605
606 /* can't process default configs till the CPU APIC is pmapped */
607 if (x)
608 default_mp_table(x);
609
610 /* post scan cleanup */
611 fix_mp_table();
612 setup_apic_irq_mapping();
613
614#if defined(APIC_IO)
615
616 /* fill the LOGICAL io_apic_versions table */
617 for (apic = 0; apic < mp_napics; ++apic) {
618 ux = io_apic_read(apic, IOAPIC_VER);
619 io_apic_versions[apic] = ux;
620 io_apic_set_id(apic, IO_TO_ID(apic));
621 }
622
623 /* program each IO APIC in the system */
624 for (apic = 0; apic < mp_napics; ++apic)
625 if (io_apic_setup(apic) < 0)
626 panic("IO APIC setup failure");
627
628 /* install a 'Spurious INTerrupt' vector */
629 setidt(XSPURIOUSINT_OFFSET, Xspuriousint,
630 SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
631
632 /* install an inter-CPU IPI for TLB invalidation */
633 setidt(XINVLTLB_OFFSET, Xinvltlb,
634 SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
635
636#ifdef BETTER_CLOCK
637 /* install an inter-CPU IPI for reading processor state */
638 setidt(XCPUCHECKSTATE_OFFSET, Xcpucheckstate,
639 SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
640#endif
641
642 /* install an inter-CPU IPI for all-CPU rendezvous */
643 setidt(XRENDEZVOUS_OFFSET, Xrendezvous,
644 SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
645
646 /* install an inter-CPU IPI for forcing an additional software trap */
647 setidt(XCPUAST_OFFSET, Xcpuast,
648 SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
649
650 /* install an inter-CPU IPI for CPU stop/restart */
651 setidt(XCPUSTOP_OFFSET, Xcpustop,
652 SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
653
654#if defined(TEST_TEST1)
655 /* install a "fake hardware INTerrupt" vector */
656 setidt(XTEST1_OFFSET, Xtest1,
657 SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
658#endif /** TEST_TEST1 */
659
660#endif /* APIC_IO */
661
662 /* initialize all SMP locks */
663 init_locks();
664
665 /* start each Application Processor */
666 start_all_aps(boot_addr);
667}
668
669
670/*
671 * look for the MP spec signature
672 */
673
674/* string defined by the Intel MP Spec as identifying the MP table */
675#define MP_SIG 0x5f504d5f /* _MP_ */
676#define NEXT(X) ((X) += 4)
677static int
678search_for_sig(u_int32_t target, int count)
679{
680 int x;
681 u_int32_t *addr = (u_int32_t *) (KERNBASE + target);
682
683 for (x = 0; x < count; NEXT(x))
684 if (addr[x] == MP_SIG)
685 /* make array index a byte index */
686 return (target + (x * sizeof(u_int32_t)));
687
688 return -1;
689}
690
691
692static basetable_entry basetable_entry_types[] =
693{
694 {0, 20, "Processor"},
695 {1, 8, "Bus"},
696 {2, 8, "I/O APIC"},
697 {3, 8, "I/O INT"},
698 {4, 8, "Local INT"}
699};
700
701typedef struct BUSDATA {
702 u_char bus_id;
703 enum busTypes bus_type;
704} bus_datum;
705
706typedef struct INTDATA {
707 u_char int_type;
708 u_short int_flags;
709 u_char src_bus_id;
710 u_char src_bus_irq;
711 u_char dst_apic_id;
712 u_char dst_apic_int;
713 u_char int_vector;
714} io_int, local_int;
715
716typedef struct BUSTYPENAME {
717 u_char type;
718 char name[7];
719} bus_type_name;
720
721static bus_type_name bus_type_table[] =
722{
723 {CBUS, "CBUS"},
724 {CBUSII, "CBUSII"},
725 {EISA, "EISA"},
726 {MCA, "MCA"},
727 {UNKNOWN_BUSTYPE, "---"},
728 {ISA, "ISA"},
729 {MCA, "MCA"},
730 {UNKNOWN_BUSTYPE, "---"},
731 {UNKNOWN_BUSTYPE, "---"},
732 {UNKNOWN_BUSTYPE, "---"},
733 {UNKNOWN_BUSTYPE, "---"},
734 {UNKNOWN_BUSTYPE, "---"},
735 {PCI, "PCI"},
736 {UNKNOWN_BUSTYPE, "---"},
737 {UNKNOWN_BUSTYPE, "---"},
738 {UNKNOWN_BUSTYPE, "---"},
739 {UNKNOWN_BUSTYPE, "---"},
740 {XPRESS, "XPRESS"},
741 {UNKNOWN_BUSTYPE, "---"}
742};
743/* from MP spec v1.4, table 5-1 */
744static int default_data[7][5] =
745{
746/* nbus, id0, type0, id1, type1 */
747 {1, 0, ISA, 255, 255},
748 {1, 0, EISA, 255, 255},
749 {1, 0, EISA, 255, 255},
750 {1, 0, MCA, 255, 255},
751 {2, 0, ISA, 1, PCI},
752 {2, 0, EISA, 1, PCI},
753 {2, 0, MCA, 1, PCI}
754};
755
756
757/* the bus data */
758static bus_datum *bus_data;
759
760/* the IO INT data, one entry per possible APIC INTerrupt */
761static io_int *io_apic_ints;
762
763static int nintrs;
764
765static int processor_entry __P((proc_entry_ptr entry, int cpu));
766static int bus_entry __P((bus_entry_ptr entry, int bus));
767static int io_apic_entry __P((io_apic_entry_ptr entry, int apic));
768static int int_entry __P((int_entry_ptr entry, int intr));
769static int lookup_bus_type __P((char *name));
770
771
772/*
773 * 1st pass on motherboard's Intel MP specification table.
774 *
775 * initializes:
776 * mp_ncpus = 1
777 *
778 * determines:
779 * cpu_apic_address (common to all CPUs)
780 * io_apic_address[N]
781 * mp_naps
782 * mp_nbusses
783 * mp_napics
784 * nintrs
785 */
786static void
787mptable_pass1(void)
788{
789 int x;
790 mpcth_t cth;
791 int totalSize;
792 void* position;
793 int count;
794 int type;
795
796 POSTCODE(MPTABLE_PASS1_POST);
797
798 /* clear various tables */
799 for (x = 0; x < NAPICID; ++x) {
800 io_apic_address[x] = ~0; /* IO APIC address table */
801 }
802
803 /* init everything to empty */
804 mp_naps = 0;
805 mp_nbusses = 0;
806 mp_napics = 0;
807 nintrs = 0;
808
809 /* check for use of 'default' configuration */
810 if (MPFPS_MPFB1 != 0) {
811 /* use default addresses */
812 cpu_apic_address = DEFAULT_APIC_BASE;
813 io_apic_address[0] = DEFAULT_IO_APIC_BASE;
814
815 /* fill in with defaults */
816 mp_naps = 2; /* includes BSP */
817 mp_nbusses = default_data[MPFPS_MPFB1 - 1][0];
818#if defined(APIC_IO)
819 mp_napics = 1;
820 nintrs = 16;
821#endif /* APIC_IO */
822 }
823 else {
824 if ((cth = mpfps->pap) == 0)
825 panic("MP Configuration Table Header MISSING!");
826
827 cpu_apic_address = (vm_offset_t) cth->apic_address;
828
829 /* walk the table, recording info of interest */
830 totalSize = cth->base_table_length - sizeof(struct MPCTH);
831 position = (u_char *) cth + sizeof(struct MPCTH);
832 count = cth->entry_count;
833
834 while (count--) {
835 switch (type = *(u_char *) position) {
836 case 0: /* processor_entry */
837 if (((proc_entry_ptr)position)->cpu_flags
838 & PROCENTRY_FLAG_EN)
839 ++mp_naps;
840 break;
841 case 1: /* bus_entry */
842 ++mp_nbusses;
843 break;
844 case 2: /* io_apic_entry */
845 if (((io_apic_entry_ptr)position)->apic_flags
846 & IOAPICENTRY_FLAG_EN)
847 io_apic_address[mp_napics++] =
848 (vm_offset_t)((io_apic_entry_ptr)
849 position)->apic_address;
850 break;
851 case 3: /* int_entry */
852 ++nintrs;
853 break;
854			case 4: /* local_int_entry */
855 break;
856 default:
857 panic("mpfps Base Table HOSED!");
858 /* NOTREACHED */
859 }
860
861 totalSize -= basetable_entry_types[type].length;
862 (u_char*)position += basetable_entry_types[type].length;
863 }
864 }
865
866 /* qualify the numbers */
867 if (mp_naps > MAXCPU) {
868 printf("Warning: only using %d of %d available CPUs!\n",
869 MAXCPU, mp_naps);
870 mp_naps = MAXCPU;
871 }
872
873 /*
874 * Count the BSP.
875 * This is also used as a counter while starting the APs.
876 */
877 mp_ncpus = 1;
878
879 --mp_naps; /* subtract the BSP */
880}
881
882
883/*
884 * 2nd pass on motherboard's Intel MP specification table.
885 *
886 * sets:
887 * boot_cpu_id
888 * ID_TO_IO(N), phy APIC ID to log CPU/IO table
889 * CPU_TO_ID(N), logical CPU to APIC ID table
890 * IO_TO_ID(N), logical IO to APIC ID table
891 * bus_data[N]
892 * io_apic_ints[N]
893 */
894static int
895mptable_pass2(void)
896{
897 int x;
898 mpcth_t cth;
899 int totalSize;
900 void* position;
901 int count;
902 int type;
903 int apic, bus, cpu, intr;
904 int i, j;
905 int pgeflag;
906
907 POSTCODE(MPTABLE_PASS2_POST);
908
909 pgeflag = 0; /* XXX - Not used under SMP yet. */
910
911 MALLOC(io_apic_versions, u_int32_t *, sizeof(u_int32_t) * mp_napics,
912 M_DEVBUF, M_WAITOK);
913 MALLOC(ioapic, volatile ioapic_t **, sizeof(ioapic_t *) * mp_napics,
914 M_DEVBUF, M_WAITOK);
915 MALLOC(io_apic_ints, io_int *, sizeof(io_int) * (nintrs + 1),
916 M_DEVBUF, M_WAITOK);
917 MALLOC(bus_data, bus_datum *, sizeof(bus_datum) * mp_nbusses,
918 M_DEVBUF, M_WAITOK);
919
920 bzero(ioapic, sizeof(ioapic_t *) * mp_napics);
921
922 for (i = 0; i < mp_napics; i++) {
923 for (j = 0; j < mp_napics; j++) {
924 /* same page frame as a previous IO apic? */
925 if (((vm_offset_t)SMPpt[NPTEPG-2-j] & PG_FRAME) ==
926 (io_apic_address[i] & PG_FRAME)) {
927 ioapic[i] = (ioapic_t *)((u_int)SMP_prvspace
928 + (NPTEPG-2-j) * PAGE_SIZE
929 + (io_apic_address[i] & PAGE_MASK));
930 break;
931 }
932 /* use this slot if available */
933 if (((vm_offset_t)SMPpt[NPTEPG-2-j] & PG_FRAME) == 0) {
934 SMPpt[NPTEPG-2-j] = (pt_entry_t)(PG_V | PG_RW |
935 pgeflag | (io_apic_address[i] & PG_FRAME));
936 ioapic[i] = (ioapic_t *)((u_int)SMP_prvspace
937 + (NPTEPG-2-j) * PAGE_SIZE
938 + (io_apic_address[i] & PAGE_MASK));
939 break;
940 }
941 }
942 }
943
944 /* clear various tables */
945 for (x = 0; x < NAPICID; ++x) {
946 ID_TO_IO(x) = -1; /* phy APIC ID to log CPU/IO table */
947 CPU_TO_ID(x) = -1; /* logical CPU to APIC ID table */
948 IO_TO_ID(x) = -1; /* logical IO to APIC ID table */
949 }
950
951 /* clear bus data table */
952 for (x = 0; x < mp_nbusses; ++x)
953 bus_data[x].bus_id = 0xff;
954
955 /* clear IO APIC INT table */
956 for (x = 0; x < (nintrs + 1); ++x) {
957 io_apic_ints[x].int_type = 0xff;
958 io_apic_ints[x].int_vector = 0xff;
959 }
960
961 /* setup the cpu/apic mapping arrays */
962 boot_cpu_id = -1;
963
964 /* record whether PIC or virtual-wire mode */
965 picmode = (mpfps->mpfb2 & 0x80) ? 1 : 0;
966
967 /* check for use of 'default' configuration */
968 if (MPFPS_MPFB1 != 0)
969 return MPFPS_MPFB1; /* return default configuration type */
970
971 if ((cth = mpfps->pap) == 0)
972 panic("MP Configuration Table Header MISSING!");
973
974 /* walk the table, recording info of interest */
975 totalSize = cth->base_table_length - sizeof(struct MPCTH);
976 position = (u_char *) cth + sizeof(struct MPCTH);
977 count = cth->entry_count;
978 apic = bus = intr = 0;
979 cpu = 1; /* pre-count the BSP */
980
981 while (count--) {
982 switch (type = *(u_char *) position) {
983 case 0:
984 if (processor_entry(position, cpu))
985 ++cpu;
986 break;
987 case 1:
988 if (bus_entry(position, bus))
989 ++bus;
990 break;
991 case 2:
992 if (io_apic_entry(position, apic))
993 ++apic;
994 break;
995 case 3:
996 if (int_entry(position, intr))
997 ++intr;
998 break;
999 case 4:
1000 /* int_entry(position); */
1001 break;
1002 default:
1003 panic("mpfps Base Table HOSED!");
1004 /* NOTREACHED */
1005 }
1006
1007 totalSize -= basetable_entry_types[type].length;
1008 (u_char *) position += basetable_entry_types[type].length;
1009 }
1010
1011 if (boot_cpu_id == -1)
1012 panic("NO BSP found!");
1013
1014	/* report fact that it's NOT a default configuration */
1015 return 0;
1016}
1017
1018
1019void
1020assign_apic_irq(int apic, int intpin, int irq)
1021{
1022 int x;
1023
1024 if (int_to_apicintpin[irq].ioapic != -1)
1025 panic("assign_apic_irq: inconsistent table");
1026
1027 int_to_apicintpin[irq].ioapic = apic;
1028 int_to_apicintpin[irq].int_pin = intpin;
1029 int_to_apicintpin[irq].apic_address = ioapic[apic];
1030 int_to_apicintpin[irq].redirindex = IOAPIC_REDTBL + 2 * intpin;
1031
1032 for (x = 0; x < nintrs; x++) {
1033 if ((io_apic_ints[x].int_type == 0 ||
1034 io_apic_ints[x].int_type == 3) &&
1035 io_apic_ints[x].int_vector == 0xff &&
1036 io_apic_ints[x].dst_apic_id == IO_TO_ID(apic) &&
1037 io_apic_ints[x].dst_apic_int == intpin)
1038 io_apic_ints[x].int_vector = irq;
1039 }
1040}
1041
1042void
1043revoke_apic_irq(int irq)
1044{
1045 int x;
1046 int oldapic;
1047 int oldintpin;
1048
1049 if (int_to_apicintpin[irq].ioapic == -1)
1050		panic("revoke_apic_irq: inconsistent table");
1051
1052 oldapic = int_to_apicintpin[irq].ioapic;
1053 oldintpin = int_to_apicintpin[irq].int_pin;
1054
1055 int_to_apicintpin[irq].ioapic = -1;
1056 int_to_apicintpin[irq].int_pin = 0;
1057 int_to_apicintpin[irq].apic_address = NULL;
1058 int_to_apicintpin[irq].redirindex = 0;
1059
1060 for (x = 0; x < nintrs; x++) {
1061 if ((io_apic_ints[x].int_type == 0 ||
1062 io_apic_ints[x].int_type == 3) &&
1063 io_apic_ints[x].int_vector == 0xff &&
1064 io_apic_ints[x].dst_apic_id == IO_TO_ID(oldapic) &&
1065 io_apic_ints[x].dst_apic_int == oldintpin)
1066 io_apic_ints[x].int_vector = 0xff;
1067 }
1068}
1069
1070
1071static void
1072allocate_apic_irq(int intr)
1073{
1074 int apic;
1075 int intpin;
1076 int irq;
1077
1078 if (io_apic_ints[intr].int_vector != 0xff)
1079 return; /* Interrupt handler already assigned */
1080
1081 if (io_apic_ints[intr].int_type != 0 &&
1082 (io_apic_ints[intr].int_type != 3 ||
1083 (io_apic_ints[intr].dst_apic_id == IO_TO_ID(0) &&
1084 io_apic_ints[intr].dst_apic_int == 0)))
1085 return; /* Not INT or ExtInt on != (0, 0) */
1086
1087 irq = 0;
1088 while (irq < APIC_INTMAPSIZE &&
1089 int_to_apicintpin[irq].ioapic != -1)
1090 irq++;
1091
1092 if (irq >= APIC_INTMAPSIZE)
1093 return; /* No free interrupt handlers */
1094
1095 apic = ID_TO_IO(io_apic_ints[intr].dst_apic_id);
1096 intpin = io_apic_ints[intr].dst_apic_int;
1097
1098 assign_apic_irq(apic, intpin, irq);
1099 io_apic_setup_intpin(apic, intpin);
1100}
1101
1102
1103static void
1104swap_apic_id(int apic, int oldid, int newid)
1105{
1106 int x;
1107 int oapic;
1108
1109
1110 if (oldid == newid)
1111 return; /* Nothing to do */
1112
1113 printf("Changing APIC ID for IO APIC #%d from %d to %d in MP table\n",
1114 apic, oldid, newid);
1115
1116 /* Swap physical APIC IDs in interrupt entries */
1117 for (x = 0; x < nintrs; x++) {
1118 if (io_apic_ints[x].dst_apic_id == oldid)
1119 io_apic_ints[x].dst_apic_id = newid;
1120 else if (io_apic_ints[x].dst_apic_id == newid)
1121 io_apic_ints[x].dst_apic_id = oldid;
1122 }
1123
1124 /* Swap physical APIC IDs in IO_TO_ID mappings */
1125 for (oapic = 0; oapic < mp_napics; oapic++)
1126 if (IO_TO_ID(oapic) == newid)
1127 break;
1128
1129 if (oapic < mp_napics) {
1130 printf("Changing APIC ID for IO APIC #%d from "
1131 "%d to %d in MP table\n",
1132 oapic, newid, oldid);
1133 IO_TO_ID(oapic) = oldid;
1134 }
1135 IO_TO_ID(apic) = newid;
1136}
1137
1138
1139static void
1140fix_id_to_io_mapping(void)
1141{
1142 int x;
1143
1144 for (x = 0; x < NAPICID; x++)
1145 ID_TO_IO(x) = -1;
1146
1147 for (x = 0; x <= mp_naps; x++)
1148 if (CPU_TO_ID(x) < NAPICID)
1149 ID_TO_IO(CPU_TO_ID(x)) = x;
1150
1151 for (x = 0; x < mp_napics; x++)
1152 if (IO_TO_ID(x) < NAPICID)
1153 ID_TO_IO(IO_TO_ID(x)) = x;
1154}
1155
1156
1157static int
1158first_free_apic_id(void)
1159{
1160 int freeid, x;
1161
1162 for (freeid = 0; freeid < NAPICID; freeid++) {
1163 for (x = 0; x <= mp_naps; x++)
1164 if (CPU_TO_ID(x) == freeid)
1165 break;
1166 if (x <= mp_naps)
1167 continue;
1168 for (x = 0; x < mp_napics; x++)
1169 if (IO_TO_ID(x) == freeid)
1170 break;
1171 if (x < mp_napics)
1172 continue;
1173 return freeid;
1174 }
1175 return freeid;
1176}
1177
1178
1179static int
1180io_apic_id_acceptable(int apic, int id)
1181{
1182 int cpu; /* Logical CPU number */
1183 int oapic; /* Logical IO APIC number for other IO APIC */
1184
1185 if (id >= NAPICID)
1186 return 0; /* Out of range */
1187
1188 for (cpu = 0; cpu <= mp_naps; cpu++)
1189 if (CPU_TO_ID(cpu) == id)
1190 return 0; /* Conflict with CPU */
1191
1192 for (oapic = 0; oapic < mp_napics && oapic < apic; oapic++)
1193 if (IO_TO_ID(oapic) == id)
1194 return 0; /* Conflict with other APIC */
1195
1196 return 1; /* ID is acceptable for IO APIC */
1197}
1198
1199
1200/*
1201 * parse an Intel MP specification table
1202 */
1203static void
1204fix_mp_table(void)
1205{
1206 int x;
1207 int id;
1208 int bus_0 = 0; /* Stop GCC warning */
1209 int bus_pci = 0; /* Stop GCC warning */
1210 int num_pci_bus;
1211 int apic; /* IO APIC unit number */
1212 int freeid; /* Free physical APIC ID */
1213 int physid; /* Current physical IO APIC ID */
1214
1215 /*
1216 * Fix mis-numbering of the PCI bus and its INT entries if the BIOS
1217 * did it wrong. The MP spec says that when more than 1 PCI bus
1218 * exists the BIOS must begin with bus entries for the PCI bus and use
1219 * actual PCI bus numbering. This implies that when only 1 PCI bus
1220 * exists the BIOS can choose to ignore this ordering, and indeed many
1221 * MP motherboards do ignore it. This causes a problem when the PCI
1222 * sub-system makes requests of the MP sub-system based on PCI bus
1223 * numbers. So here we look for the situation and renumber the
1224 * busses and associated INTs in an effort to "make it right".
1225 */
1226
1227 /* find bus 0, PCI bus, count the number of PCI busses */
1228 for (num_pci_bus = 0, x = 0; x < mp_nbusses; ++x) {
1229 if (bus_data[x].bus_id == 0) {
1230 bus_0 = x;
1231 }
1232 if (bus_data[x].bus_type == PCI) {
1233 ++num_pci_bus;
1234 bus_pci = x;
1235 }
1236 }
1237 /*
1238 * bus_0 == slot of bus with ID of 0
1239 * bus_pci == slot of last PCI bus encountered
1240 */
1241
1242 /* check the 1 PCI bus case for sanity */
1243 /* if it is number 0 all is well */
1244 if (num_pci_bus == 1 &&
1245 bus_data[bus_pci].bus_id != 0) {
1246
1247 /* mis-numbered, swap with whichever bus uses slot 0 */
1248
1249 /* swap the bus entry types */
1250 bus_data[bus_pci].bus_type = bus_data[bus_0].bus_type;
1251 bus_data[bus_0].bus_type = PCI;
1252
1253		/* swap each relevant INTerrupt entry */
1254 id = bus_data[bus_pci].bus_id;
1255 for (x = 0; x < nintrs; ++x) {
1256 if (io_apic_ints[x].src_bus_id == id) {
1257 io_apic_ints[x].src_bus_id = 0;
1258 }
1259 else if (io_apic_ints[x].src_bus_id == 0) {
1260 io_apic_ints[x].src_bus_id = id;
1261 }
1262 }
1263 }
1264
1265 /* Assign IO APIC IDs.
1266 *
1267 * First try the existing ID. If a conflict is detected, try
1268 * the ID in the MP table. If a conflict is still detected, find
1269 * a free id.
1270 *
1271	 * We cannot use the ID_TO_IO table before all conflicts have been
1272 * resolved and the table has been corrected.
1273 */
1274 for (apic = 0; apic < mp_napics; ++apic) { /* For all IO APICs */
1275
1276 /* First try to use the value set by the BIOS */
1277 physid = io_apic_get_id(apic);
1278 if (io_apic_id_acceptable(apic, physid)) {
1279 if (IO_TO_ID(apic) != physid)
1280 swap_apic_id(apic, IO_TO_ID(apic), physid);
1281 continue;
1282 }
1283
1284 /* Then check if the value in the MP table is acceptable */
1285 if (io_apic_id_acceptable(apic, IO_TO_ID(apic)))
1286 continue;
1287
1288 /* Last resort, find a free APIC ID and use it */
1289 freeid = first_free_apic_id();
1290 if (freeid >= NAPICID)
1291 panic("No free physical APIC IDs found");
1292
1293 if (io_apic_id_acceptable(apic, freeid)) {
1294 swap_apic_id(apic, IO_TO_ID(apic), freeid);
1295 continue;
1296 }
1297 panic("Free physical APIC ID not usable");
1298 }
1299 fix_id_to_io_mapping();
1300
1301 /* detect and fix broken Compaq MP table */
1302 if (apic_int_type(0, 0) == -1) {
1303 printf("APIC_IO: MP table broken: 8259->APIC entry missing!\n");
1304 io_apic_ints[nintrs].int_type = 3; /* ExtInt */
1305 io_apic_ints[nintrs].int_vector = 0xff; /* Unassigned */
1306 /* XXX fixme, set src bus id etc, but it doesn't seem to hurt */
1307 io_apic_ints[nintrs].dst_apic_id = IO_TO_ID(0);
1308 io_apic_ints[nintrs].dst_apic_int = 0; /* Pin 0 */
1309 nintrs++;
1310 }
1311}
1312
1313
1314/* Assign low level interrupt handlers */
1315static void
1316setup_apic_irq_mapping(void)
1317{
1318 int x;
1319 int int_vector;
1320
1321 /* Clear array */
1322 for (x = 0; x < APIC_INTMAPSIZE; x++) {
1323 int_to_apicintpin[x].ioapic = -1;
1324 int_to_apicintpin[x].int_pin = 0;
1325 int_to_apicintpin[x].apic_address = NULL;
1326 int_to_apicintpin[x].redirindex = 0;
1327 }
1328
1329 /* First assign ISA/EISA interrupts */
1330 for (x = 0; x < nintrs; x++) {
1331 int_vector = io_apic_ints[x].src_bus_irq;
1332 if (int_vector < APIC_INTMAPSIZE &&
1333 io_apic_ints[x].int_vector == 0xff &&
1334 int_to_apicintpin[int_vector].ioapic == -1 &&
1335 (apic_int_is_bus_type(x, ISA) ||
1336 apic_int_is_bus_type(x, EISA)) &&
1337 io_apic_ints[x].int_type == 0) {
1338 assign_apic_irq(ID_TO_IO(io_apic_ints[x].dst_apic_id),
1339 io_apic_ints[x].dst_apic_int,
1340 int_vector);
1341 }
1342 }
1343
1344 /* Assign ExtInt entry if no ISA/EISA interrupt 0 entry */
1345 for (x = 0; x < nintrs; x++) {
1346 if (io_apic_ints[x].dst_apic_int == 0 &&
1347 io_apic_ints[x].dst_apic_id == IO_TO_ID(0) &&
1348 io_apic_ints[x].int_vector == 0xff &&
1349 int_to_apicintpin[0].ioapic == -1 &&
1350 io_apic_ints[x].int_type == 3) {
1351 assign_apic_irq(0, 0, 0);
1352 break;
1353 }
1354 }
1355 /* PCI interrupt assignment is deferred */
1356}
1357
1358
1359static int
1360processor_entry(proc_entry_ptr entry, int cpu)
1361{
1362 /* check for usability */
1363 if (!(entry->cpu_flags & PROCENTRY_FLAG_EN))
1364 return 0;
1365
1366 if(entry->apic_id >= NAPICID)
1367 panic("CPU APIC ID out of range (0..%d)", NAPICID - 1);
1368 /* check for BSP flag */
1369 if (entry->cpu_flags & PROCENTRY_FLAG_BP) {
1370 boot_cpu_id = entry->apic_id;
1371 CPU_TO_ID(0) = entry->apic_id;
1372 ID_TO_CPU(entry->apic_id) = 0;
1373 return 0; /* its already been counted */
1374 }
1375
1376 /* add another AP to list, if less than max number of CPUs */
1377 else if (cpu < MAXCPU) {
1378 CPU_TO_ID(cpu) = entry->apic_id;
1379 ID_TO_CPU(entry->apic_id) = cpu;
1380 return 1;
1381 }
1382
1383 return 0;
1384}
1385
1386
1387static int
1388bus_entry(bus_entry_ptr entry, int bus)
1389{
1390 int x;
1391 char c, name[8];
1392
1393 /* encode the name into an index */
1394 for (x = 0; x < 6; ++x) {
1395 if ((c = entry->bus_type[x]) == ' ')
1396 break;
1397 name[x] = c;
1398 }
1399 name[x] = '\0';
1400
1401 if ((x = lookup_bus_type(name)) == UNKNOWN_BUSTYPE)
1402 panic("unknown bus type: '%s'", name);
1403
1404 bus_data[bus].bus_id = entry->bus_id;
1405 bus_data[bus].bus_type = x;
1406
1407 return 1;
1408}
1409
1410
1411static int
1412io_apic_entry(io_apic_entry_ptr entry, int apic)
1413{
1414 if (!(entry->apic_flags & IOAPICENTRY_FLAG_EN))
1415 return 0;
1416
1417 IO_TO_ID(apic) = entry->apic_id;
1418 if (entry->apic_id < NAPICID)
1419 ID_TO_IO(entry->apic_id) = apic;
1420
1421 return 1;
1422}
1423
1424
1425static int
1426lookup_bus_type(char *name)
1427{
1428 int x;
1429
1430 for (x = 0; x < MAX_BUSTYPE; ++x)
1431 if (strcmp(bus_type_table[x].name, name) == 0)
1432 return bus_type_table[x].type;
1433
1434 return UNKNOWN_BUSTYPE;
1435}
1436
1437
1438static int
1439int_entry(int_entry_ptr entry, int intr)
1440{
1441 int apic;
1442
1443 io_apic_ints[intr].int_type = entry->int_type;
1444 io_apic_ints[intr].int_flags = entry->int_flags;
1445 io_apic_ints[intr].src_bus_id = entry->src_bus_id;
1446 io_apic_ints[intr].src_bus_irq = entry->src_bus_irq;
1447 if (entry->dst_apic_id == 255) {
1448		/* This signal goes to all IO APICs.  Select an IO APIC
1449		   with a sufficient number of interrupt pins */
1450 for (apic = 0; apic < mp_napics; apic++)
1451 if (((io_apic_read(apic, IOAPIC_VER) &
1452 IOART_VER_MAXREDIR) >> MAXREDIRSHIFT) >=
1453 entry->dst_apic_int)
1454 break;
1455 if (apic < mp_napics)
1456 io_apic_ints[intr].dst_apic_id = IO_TO_ID(apic);
1457 else
1458 io_apic_ints[intr].dst_apic_id = entry->dst_apic_id;
1459 } else
1460 io_apic_ints[intr].dst_apic_id = entry->dst_apic_id;
1461 io_apic_ints[intr].dst_apic_int = entry->dst_apic_int;
1462
1463 return 1;
1464}
1465
1466
1467static int
1468apic_int_is_bus_type(int intr, int bus_type)
1469{
1470 int bus;
1471
1472 for (bus = 0; bus < mp_nbusses; ++bus)
1473 if ((bus_data[bus].bus_id == io_apic_ints[intr].src_bus_id)
1474 && ((int) bus_data[bus].bus_type == bus_type))
1475 return 1;
1476
1477 return 0;
1478}
1479
1480
1481/*
1482 * Given a traditional ISA INT mask, return an APIC mask.
1483 */
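/*
 * e.g. isa_apic_mask(1 << 14) looks up ISA IRQ 14's APIC connection via
 * isa_apic_irq() and returns a mask with that single bit set, or 0 when
 * the IRQ has no APIC routing.
 */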
1484u_int
1485isa_apic_mask(u_int isa_mask)
1486{
1487 int isa_irq;
1488 int apic_pin;
1489
1490#if defined(SKIP_IRQ15_REDIRECT)
1491 if (isa_mask == (1 << 15)) {
1492 printf("skipping ISA IRQ15 redirect\n");
1493 return isa_mask;
1494 }
1495#endif /* SKIP_IRQ15_REDIRECT */
1496
1497 isa_irq = ffs(isa_mask); /* find its bit position */
1498 if (isa_irq == 0) /* doesn't exist */
1499 return 0;
1500 --isa_irq; /* make it zero based */
1501
1502 apic_pin = isa_apic_irq(isa_irq); /* look for APIC connection */
1503 if (apic_pin == -1)
1504 return 0;
1505
1506 return (1 << apic_pin); /* convert pin# to a mask */
1507}
1508
1509
1510/*
1511 * Determine which APIC pin an ISA/EISA INT is attached to.
1512 */
1513#define INTTYPE(I) (io_apic_ints[(I)].int_type)
1514#define INTPIN(I) (io_apic_ints[(I)].dst_apic_int)
1515#define INTIRQ(I) (io_apic_ints[(I)].int_vector)
1516#define INTAPIC(I) (ID_TO_IO(io_apic_ints[(I)].dst_apic_id))
1517
1518#define SRCBUSIRQ(I) (io_apic_ints[(I)].src_bus_irq)
1519int
1520isa_apic_irq(int isa_irq)
1521{
1522 int intr;
1523
1524 for (intr = 0; intr < nintrs; ++intr) { /* check each record */
1525 if (INTTYPE(intr) == 0) { /* standard INT */
1526 if (SRCBUSIRQ(intr) == isa_irq) {
1527 if (apic_int_is_bus_type(intr, ISA) ||
1528 apic_int_is_bus_type(intr, EISA)) {
1529 if (INTIRQ(intr) == 0xff)
1530 return -1; /* unassigned */
1531 return INTIRQ(intr); /* found */
1532 }
1533 }
1534 }
1535 }
1536 return -1; /* NOT found */
1537}
1538
1539
1540/*
1541 * Determine which APIC pin a PCI INT is attached to.
1542 */
1543#define SRCBUSID(I) (io_apic_ints[(I)].src_bus_id)
1544#define SRCBUSDEVICE(I) ((io_apic_ints[(I)].src_bus_irq >> 2) & 0x1f)
1545#define SRCBUSLINE(I) (io_apic_ints[(I)].src_bus_irq & 0x03)
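/*
 * For PCI buses the MP table packs the interrupt source into src_bus_irq:
 * bits 6:2 hold the PCI device number and bits 1:0 the interrupt line
 * (0 = INTA# .. 3 = INTD#), which is what these macros unpack.
 */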
1546int
1547pci_apic_irq(int pciBus, int pciDevice, int pciInt)
1548{
1549 int intr;
1550
1551 --pciInt; /* zero based */
1552
1553 for (intr = 0; intr < nintrs; ++intr) /* check each record */
1554 if ((INTTYPE(intr) == 0) /* standard INT */
1555 && (SRCBUSID(intr) == pciBus)
1556 && (SRCBUSDEVICE(intr) == pciDevice)
1557 && (SRCBUSLINE(intr) == pciInt)) /* a candidate IRQ */
1558 if (apic_int_is_bus_type(intr, PCI)) {
1559 if (INTIRQ(intr) == 0xff)
1560 allocate_apic_irq(intr);
1561 if (INTIRQ(intr) == 0xff)
1562 return -1; /* unassigned */
1563 return INTIRQ(intr); /* exact match */
1564 }
1565
1566 return -1; /* NOT found */
1567}
1568
1569int
1570next_apic_irq(int irq)
1571{
1572 int intr, ointr;
1573 int bus, bustype;
1574
1575 bus = 0;
1576 bustype = 0;
1577 for (intr = 0; intr < nintrs; intr++) {
1578 if (INTIRQ(intr) != irq || INTTYPE(intr) != 0)
1579 continue;
1580 bus = SRCBUSID(intr);
1581 bustype = apic_bus_type(bus);
1582 if (bustype != ISA &&
1583 bustype != EISA &&
1584 bustype != PCI)
1585 continue;
1586 break;
1587 }
1588 if (intr >= nintrs) {
1589 return -1;
1590 }
1591 for (ointr = intr + 1; ointr < nintrs; ointr++) {
1592 if (INTTYPE(ointr) != 0)
1593 continue;
1594 if (bus != SRCBUSID(ointr))
1595 continue;
1596 if (bustype == PCI) {
1597 if (SRCBUSDEVICE(intr) != SRCBUSDEVICE(ointr))
1598 continue;
1599 if (SRCBUSLINE(intr) != SRCBUSLINE(ointr))
1600 continue;
1601 }
1602 if (bustype == ISA || bustype == EISA) {
1603 if (SRCBUSIRQ(intr) != SRCBUSIRQ(ointr))
1604 continue;
1605 }
1606 if (INTPIN(intr) == INTPIN(ointr))
1607 continue;
1608 break;
1609 }
1610 if (ointr >= nintrs) {
1611 return -1;
1612 }
1613 return INTIRQ(ointr);
1614}
1615#undef SRCBUSLINE
1616#undef SRCBUSDEVICE
1617#undef SRCBUSID
1618#undef SRCBUSIRQ
1619
1620#undef INTPIN
1621#undef INTIRQ
1622#undef INTAPIC
1623#undef INTTYPE
1624
1625
1626/*
1627 * Reprogram the MB chipset to NOT redirect an ISA INTerrupt.
1628 *
1629 * XXX FIXME:
1630 * Exactly what this means is unclear at this point. It is a solution
1631 * for motherboards that redirect the MBIRQ0 pin. Generically a motherboard
1632 * could route any of the ISA INTs to upper (>15) IRQ values. But most would
1633 * NOT be redirected via MBIRQ0, thus "undirect()ing" them would NOT be an
1634 * option.
1635 */
1636int
1637undirect_isa_irq(int rirq)
1638{
1639#if defined(READY)
1640 if (bootverbose)
1641 printf("Freeing redirected ISA irq %d.\n", rirq);
1642 /** FIXME: tickle the MB redirector chip */
1643 return -1;
1644#else
1645 if (bootverbose)
1646 printf("Freeing (NOT implemented) redirected ISA irq %d.\n", rirq);
1647 return 0;
1648#endif /* READY */
1649}
1650
1651
1652/*
1653 * Reprogram the MB chipset to NOT redirect a PCI INTerrupt
1654 */
1655int
1656undirect_pci_irq(int rirq)
1657{
1658#if defined(READY)
1659 if (bootverbose)
1660 printf("Freeing redirected PCI irq %d.\n", rirq);
1661
1662 /** FIXME: tickle the MB redirector chip */
1663 return -1;
1664#else
1665 if (bootverbose)
1666 printf("Freeing (NOT implemented) redirected PCI irq %d.\n",
1667 rirq);
1668 return 0;
1669#endif /* READY */
1670}
1671
1672
1673/*
1674 * given a bus ID, return:
1675 * the bus type if found
1676 * -1 if NOT found
1677 */
1678int
1679apic_bus_type(int id)
1680{
1681 int x;
1682
1683 for (x = 0; x < mp_nbusses; ++x)
1684 if (bus_data[x].bus_id == id)
1685 return bus_data[x].bus_type;
1686
1687 return -1;
1688}
1689
1690
1691/*
1692 * given a LOGICAL APIC# and pin#, return:
1693 * the associated src bus ID if found
1694 * -1 if NOT found
1695 */
1696int
1697apic_src_bus_id(int apic, int pin)
1698{
1699 int x;
1700
1701 /* search each of the possible INTerrupt sources */
1702 for (x = 0; x < nintrs; ++x)
1703 if ((apic == ID_TO_IO(io_apic_ints[x].dst_apic_id)) &&
1704 (pin == io_apic_ints[x].dst_apic_int))
1705 return (io_apic_ints[x].src_bus_id);
1706
1707 return -1; /* NOT found */
1708}
1709
1710
1711/*
1712 * given a LOGICAL APIC# and pin#, return:
1713 * the associated src bus IRQ if found
1714 * -1 if NOT found
1715 */
1716int
1717apic_src_bus_irq(int apic, int pin)
1718{
1719 int x;
1720
1721 for (x = 0; x < nintrs; x++)
1722 if ((apic == ID_TO_IO(io_apic_ints[x].dst_apic_id)) &&
1723 (pin == io_apic_ints[x].dst_apic_int))
1724 return (io_apic_ints[x].src_bus_irq);
1725
1726 return -1; /* NOT found */
1727}
1728
1729
1730/*
1731 * given a LOGICAL APIC# and pin#, return:
1732 * the associated INTerrupt type if found
1733 * -1 if NOT found
1734 */
1735int
1736apic_int_type(int apic, int pin)
1737{
1738 int x;
1739
1740 /* search each of the possible INTerrupt sources */
1741 for (x = 0; x < nintrs; ++x)
1742 if ((apic == ID_TO_IO(io_apic_ints[x].dst_apic_id)) &&
1743 (pin == io_apic_ints[x].dst_apic_int))
1744 return (io_apic_ints[x].int_type);
1745
1746 return -1; /* NOT found */
1747}
1748
1749int
1750apic_irq(int apic, int pin)
1751{
1752 int x;
1753 int res;
1754
1755 for (x = 0; x < nintrs; ++x)
1756 if ((apic == ID_TO_IO(io_apic_ints[x].dst_apic_id)) &&
1757 (pin == io_apic_ints[x].dst_apic_int)) {
1758 res = io_apic_ints[x].int_vector;
1759 if (res == 0xff)
1760 return -1;
1761 if (apic != int_to_apicintpin[res].ioapic)
1762 panic("apic_irq: inconsistent table");
1763 if (pin != int_to_apicintpin[res].int_pin)
1764 panic("apic_irq inconsistent table (2)");
1765 return res;
1766 }
1767 return -1;
1768}
1769
1770
1771/*
1772 * given a LOGICAL APIC# and pin#, return:
1773 * the associated trigger mode if found
1774 * -1 if NOT found
1775 */
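/*
 * int_flags comes straight from the MP table interrupt entry: bits 1:0 are
 * the polarity (00 = bus default, 01 = active high, 11 = active low) and
 * bits 3:2 the trigger mode (00 = bus default, 01 = edge, 11 = level).
 */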
1776int
1777apic_trigger(int apic, int pin)
1778{
1779 int x;
1780
1781 /* search each of the possible INTerrupt sources */
1782 for (x = 0; x < nintrs; ++x)
1783 if ((apic == ID_TO_IO(io_apic_ints[x].dst_apic_id)) &&
1784 (pin == io_apic_ints[x].dst_apic_int))
1785 return ((io_apic_ints[x].int_flags >> 2) & 0x03);
1786
1787 return -1; /* NOT found */
1788}
1789
1790
1791/*
1792 * given a LOGICAL APIC# and pin#, return:
1793 * the associated 'active' level if found
1794 * -1 if NOT found
1795 */
1796int
1797apic_polarity(int apic, int pin)
1798{
1799 int x;
1800
1801 /* search each of the possible INTerrupt sources */
1802 for (x = 0; x < nintrs; ++x)
1803 if ((apic == ID_TO_IO(io_apic_ints[x].dst_apic_id)) &&
1804 (pin == io_apic_ints[x].dst_apic_int))
1805 return (io_apic_ints[x].int_flags & 0x03);
1806
1807 return -1; /* NOT found */
1808}
1809
1810
1811/*
1812 * set data according to MP defaults
1813 * FIXME: probably not complete yet...
1814 */
1815static void
1816default_mp_table(int type)
1817{
1818 int ap_cpu_id;
1819#if defined(APIC_IO)
1820 int io_apic_id;
1821 int pin;
1822#endif /* APIC_IO */
1823
1824#if 0
1825 printf(" MP default config type: %d\n", type);
1826 switch (type) {
1827 case 1:
1828 printf(" bus: ISA, APIC: 82489DX\n");
1829 break;
1830 case 2:
1831 printf(" bus: EISA, APIC: 82489DX\n");
1832 break;
1833 case 3:
1834 printf(" bus: EISA, APIC: 82489DX\n");
1835 break;
1836 case 4:
1837 printf(" bus: MCA, APIC: 82489DX\n");
1838 break;
1839 case 5:
1840 printf(" bus: ISA+PCI, APIC: Integrated\n");
1841 break;
1842 case 6:
1843 printf(" bus: EISA+PCI, APIC: Integrated\n");
1844 break;
1845 case 7:
1846 printf(" bus: MCA+PCI, APIC: Integrated\n");
1847 break;
1848 default:
1849 printf(" future type\n");
1850 break;
1851 /* NOTREACHED */
1852 }
1853#endif /* 0 */
1854
1855 boot_cpu_id = (lapic.id & APIC_ID_MASK) >> 24;
1856 ap_cpu_id = (boot_cpu_id == 0) ? 1 : 0;
1857
1858 /* BSP */
1859 CPU_TO_ID(0) = boot_cpu_id;
1860 ID_TO_CPU(boot_cpu_id) = 0;
1861
1862 /* one and only AP */
1863 CPU_TO_ID(1) = ap_cpu_id;
1864 ID_TO_CPU(ap_cpu_id) = 1;
1865
1866#if defined(APIC_IO)
1867 /* one and only IO APIC */
1868 io_apic_id = (io_apic_read(0, IOAPIC_ID) & APIC_ID_MASK) >> 24;
1869
1870 /*
1871 * sanity check, refer to MP spec section 3.6.6, last paragraph
1872 * necessary as some hardware isn't properly setting up the IO APIC
1873 */
1874#if defined(REALLY_ANAL_IOAPICID_VALUE)
1875 if (io_apic_id != 2) {
1876#else
1877 if ((io_apic_id == 0) || (io_apic_id == 1) || (io_apic_id == 15)) {
1878#endif /* REALLY_ANAL_IOAPICID_VALUE */
1879 io_apic_set_id(0, 2);
1880 io_apic_id = 2;
1881 }
1882 IO_TO_ID(0) = io_apic_id;
1883 ID_TO_IO(io_apic_id) = 0;
1884#endif /* APIC_IO */
1885
1886 /* fill out bus entries */
1887 switch (type) {
1888 case 1:
1889 case 2:
1890 case 3:
1891 case 4:
1892 case 5:
1893 case 6:
1894 case 7:
1895 bus_data[0].bus_id = default_data[type - 1][1];
1896 bus_data[0].bus_type = default_data[type - 1][2];
1897 bus_data[1].bus_id = default_data[type - 1][3];
1898 bus_data[1].bus_type = default_data[type - 1][4];
1899 break;
1900
1901 /* case 4: case 7: MCA NOT supported */
1902 default: /* illegal/reserved */
1903 panic("BAD default MP config: %d", type);
1904 /* NOTREACHED */
1905 }
1906
1907#if defined(APIC_IO)
1908 /* general cases from MP v1.4, table 5-2 */
1909 for (pin = 0; pin < 16; ++pin) {
1910 io_apic_ints[pin].int_type = 0;
1911 io_apic_ints[pin].int_flags = 0x05; /* edge/active-hi */
1912 io_apic_ints[pin].src_bus_id = 0;
1913 io_apic_ints[pin].src_bus_irq = pin; /* IRQ2 caught below */
1914 io_apic_ints[pin].dst_apic_id = io_apic_id;
1915 io_apic_ints[pin].dst_apic_int = pin; /* 1-to-1 */
1916 }
1917
1918 /* special cases from MP v1.4, table 5-2 */
1919 if (type == 2) {
1920 io_apic_ints[2].int_type = 0xff; /* N/C */
1921 io_apic_ints[13].int_type = 0xff; /* N/C */
1922#if !defined(APIC_MIXED_MODE)
1923 /** FIXME: ??? */
1924 panic("sorry, can't support type 2 default yet");
1925#endif /* APIC_MIXED_MODE */
1926 }
1927 else
1928 io_apic_ints[2].src_bus_irq = 0; /* ISA IRQ0 is on APIC INT 2 */
1929
1930 if (type == 7)
1931 io_apic_ints[0].int_type = 0xff; /* N/C */
1932 else
1933 io_apic_ints[0].int_type = 3; /* vectored 8259 */
1934#endif /* APIC_IO */
1935}
1936
1937
1938/*
1939 * start each AP in our list
1940 */
1941static int
1942start_all_aps(u_int boot_addr)
1943{
1944 int x, i, pg;
1945 u_char mpbiosreason;
1946 u_long mpbioswarmvec;
1947 struct globaldata *gd;
1948 char *stack;
1949
1950 POSTCODE(START_ALL_APS_POST);
1951
1952 /* initialize BSP's local APIC */
1953 apic_initialize();
1954 bsp_apic_ready = 1;
1955
1956 /* install the AP 1st level boot code */
1957 install_ap_tramp(boot_addr);
1958
1959
1960 /* save the current value of the warm-start vector */
1961 mpbioswarmvec = *((u_long *) WARMBOOT_OFF);
1962#ifndef PC98
1963 outb(CMOS_REG, BIOS_RESET);
1964 mpbiosreason = inb(CMOS_DATA);
1965#endif
1966
1967 /* record BSP in CPU map */
1968 all_cpus = 1;
1969
1970 /* set up 0 -> 4MB P==V mapping for AP boot */
1971 *(int *)PTD = PG_V | PG_RW | ((uintptr_t)(void *)KPTphys & PG_FRAME);
1972 invltlb();
1973
1974 /* start each AP */
1975 for (x = 1; x <= mp_naps; ++x) {
1976
1977 /* This is a bit verbose, it will go away soon. */
1978
1979 /* first page of AP's private space */
1980 pg = x * i386_btop(sizeof(struct privatespace));
1981
1982 /* allocate a new private data page */
1983 gd = (struct globaldata *)kmem_alloc(kernel_map, PAGE_SIZE);
1984
1985 /* wire it into the private page table page */
1986 SMPpt[pg] = (pt_entry_t)(PG_V | PG_RW | vtophys(gd));
1987
1988 /* allocate and set up an idle stack data page */
1989 stack = (char *)kmem_alloc(kernel_map, UPAGES*PAGE_SIZE);
1990 for (i = 0; i < UPAGES; i++)
1991 SMPpt[pg + 1 + i] = (pt_entry_t)
1992 (PG_V | PG_RW | vtophys(PAGE_SIZE * i + stack));
1993
1994 /* prime data page for it to use */
1995 SLIST_INSERT_HEAD(&cpuhead, gd, gd_allcpu);
1996 gd->gd_cpuid = x;
1997 gd->gd_cpu_lockid = x << 24;
1998
1999 /* setup a vector to our boot code */
2000 *((volatile u_short *) WARMBOOT_OFF) = WARMBOOT_TARGET;
2001 *((volatile u_short *) WARMBOOT_SEG) = (boot_addr >> 4);
2002#ifndef PC98
2003 outb(CMOS_REG, BIOS_RESET);
2004 outb(CMOS_DATA, BIOS_WARM); /* 'warm-start' */
2005#endif
2006
2007 bootSTK = &SMP_prvspace[x].idlestack[UPAGES*PAGE_SIZE];
2008 bootAP = x;
2009
2010 /* attempt to start the Application Processor */
2011 CHECK_INIT(99); /* setup checkpoints */
2012 if (!start_ap(x, boot_addr)) {
2013 printf("AP #%d (PHY# %d) failed!\n", x, CPU_TO_ID(x));
2014 CHECK_PRINT("trace"); /* show checkpoints */
2015 /* better panic as the AP may be running loose */
2016 printf("panic y/n? [y] ");
2017 if (cngetc() != 'n')
2018 panic("bye-bye");
2019 }
2020 CHECK_PRINT("trace"); /* show checkpoints */
2021
2022 /* record its version info */
2023 cpu_apic_versions[x] = cpu_apic_versions[0];
2024
2025 all_cpus |= (1 << x); /* record AP in CPU map */
2026 }
2027
2028 /* build our map of 'other' CPUs */
2029 PCPU_SET(other_cpus, all_cpus & ~(1 << PCPU_GET(cpuid)));
2030
2031 /* fill in our (BSP) APIC version */
2032 cpu_apic_versions[0] = lapic.version;
2033
2034 /* restore the warmstart vector */
2035 *(u_long *) WARMBOOT_OFF = mpbioswarmvec;
2036#ifndef PC98
2037 outb(CMOS_REG, BIOS_RESET);
2038 outb(CMOS_DATA, mpbiosreason);
2039#endif
2040
2041 /*
2042 * Set up the idle context for the BSP. Similar to above except
2043 * that some was done by locore, some by pmap.c and some is implicit
2044 * because the BSP is cpu#0 and the page is initially zero, and also
2045	 * because we can refer to variables by name on the BSP.
2046 */
2047
2048 /* Allocate and setup BSP idle stack */
2049 stack = (char *)kmem_alloc(kernel_map, UPAGES * PAGE_SIZE);
2050 for (i = 0; i < UPAGES; i++)
2051 SMPpt[1 + i] = (pt_entry_t)
2052 (PG_V | PG_RW | vtophys(PAGE_SIZE * i + stack));
2053
2054 *(int *)PTD = 0;
2055 pmap_set_opt();
2056
2057 /* number of APs actually started */
2058 return mp_ncpus - 1;
2059}
2060
2061
2062/*
2063 * load the 1st level AP boot code into base memory.
2064 */
2065
2066/* targets for relocation */
2067extern void bigJump(void);
2068extern void bootCodeSeg(void);
2069extern void bootDataSeg(void);
2070extern void MPentry(void);
2071extern u_int MP_GDT;
2072extern u_int mp_gdtbase;
2073
2074static void
2075install_ap_tramp(u_int boot_addr)
2076{
2077 int x;
2078 int size = *(int *) ((u_long) & bootMP_size);
2079 u_char *src = (u_char *) ((u_long) bootMP);
2080 u_char *dst = (u_char *) boot_addr + KERNBASE;
2081 u_int boot_base = (u_int) bootMP;
2082 u_int8_t *dst8;
2083 u_int16_t *dst16;
2084 u_int32_t *dst32;
2085
2086 POSTCODE(INSTALL_AP_TRAMP_POST);
2087
2088 for (x = 0; x < size; ++x)
2089 *dst++ = *src++;
2090
2091 /*
2092 * modify addresses in code we just moved to basemem. unfortunately we
2093 * need fairly detailed info about mpboot.s for this to work. changes
2094 * to mpboot.s might require changes here.
2095 */
2096
2097 /* boot code is located in KERNEL space */
2098 dst = (u_char *) boot_addr + KERNBASE;
2099
2100 /* modify the lgdt arg */
2101 dst32 = (u_int32_t *) (dst + ((u_int) & mp_gdtbase - boot_base));
2102 *dst32 = boot_addr + ((u_int) & MP_GDT - boot_base);
2103
2104 /* modify the ljmp target for MPentry() */
2105 dst32 = (u_int32_t *) (dst + ((u_int) bigJump - boot_base) + 1);
2106 *dst32 = ((u_int) MPentry - KERNBASE);
2107
2108 /* modify the target for boot code segment */
2109 dst16 = (u_int16_t *) (dst + ((u_int) bootCodeSeg - boot_base));
2110 dst8 = (u_int8_t *) (dst16 + 1);
2111 *dst16 = (u_int) boot_addr & 0xffff;
2112 *dst8 = ((u_int) boot_addr >> 16) & 0xff;
2113
2114 /* modify the target for boot data segment */
2115 dst16 = (u_int16_t *) (dst + ((u_int) bootDataSeg - boot_base));
2116 dst8 = (u_int8_t *) (dst16 + 1);
2117 *dst16 = (u_int) boot_addr & 0xffff;
2118 *dst8 = ((u_int) boot_addr >> 16) & 0xff;
2119}
2120
2121
2122/*
2123 * this function starts the AP (application processor) identified
2124 * by the APIC ID 'physicalCpu'. It does quite a "song and dance"
2125 * to accomplish this. This is necessary because of the nuances
2126 * of the different hardware we might encounter. It ain't pretty,
2127 * but it seems to work.
2128 */
2129static int
2130start_ap(int logical_cpu, u_int boot_addr)
2131{
2132 int physical_cpu;
2133 int vector;
2134 int cpus;
2135 u_long icr_lo, icr_hi;
2136
2137 POSTCODE(START_AP_POST);
2138
2139 /* get the PHYSICAL APIC ID# */
2140 physical_cpu = CPU_TO_ID(logical_cpu);
2141
2142 /* calculate the vector */
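	/*
	 * The STARTUP IPI vector is a 4 KB page number: the AP will begin
	 * executing in real mode at physical address vector << 12, i.e. at
	 * boot_addr (which must be page-aligned and below 1 MB).
	 */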
2143 vector = (boot_addr >> 12) & 0xff;
2144
2145 /* used as a watchpoint to signal AP startup */
2146 cpus = mp_ncpus;
2147
2148	/*
2149	 * first we do an INIT/RESET IPI. This INIT IPI might be run, resetting
2150	 * and running the target CPU. OR this INIT IPI might be latched (P5
2151	 * bug), leaving the CPU waiting for a STARTUP IPI. OR this INIT IPI
2152	 * might be ignored.
2153	 */
2154
2155 /* setup the address for the target AP */
2156 icr_hi = lapic.icr_hi & ~APIC_ID_MASK;
2157 icr_hi |= (physical_cpu << 24);
2158 lapic.icr_hi = icr_hi;
2159
2160 /* do an INIT IPI: assert RESET */
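	/*
	 * 0x0000c500 = trigger mode LEVEL | level ASSERT | delivery mode
	 * INIT (the masked icr_lo keeps only the reserved upper bits).
	 */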
2161 icr_lo = lapic.icr_lo & 0xfff00000;
2162 lapic.icr_lo = icr_lo | 0x0000c500;
2163
2164 /* wait for pending status end */
2165 while (lapic.icr_lo & APIC_DELSTAT_MASK)
2166 /* spin */ ;
2167
2168 /* do an INIT IPI: deassert RESET */
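	/* 0x00008500 = trigger mode LEVEL | level DEASSERT | delivery mode INIT */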
2169 lapic.icr_lo = icr_lo | 0x00008500;
2170
2171 /* wait for pending status end */
2172 u_sleep(10000); /* wait ~10mS */
2173 while (lapic.icr_lo & APIC_DELSTAT_MASK)
2174 /* spin */ ;
2175
2176	/*
2177	 * next we do a STARTUP IPI: the previous INIT IPI might still be
2178	 * latched (P5 bug); this 1st STARTUP would then terminate
2179	 * immediately, and the previously started INIT IPI would continue. OR
2180	 * the previous INIT IPI has already run, and this STARTUP IPI will
2181	 * run. OR the previous INIT IPI was ignored, and this STARTUP IPI
2182	 * will run.
2183	 */
2184
2185 /* do a STARTUP IPI */
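	/*
	 * 0x00000600 = delivery mode STARTUP; the low byte carries the
	 * page-number vector computed above.
	 */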
2186 lapic.icr_lo = icr_lo | 0x00000600 | vector;
2187 while (lapic.icr_lo & APIC_DELSTAT_MASK)
2188 /* spin */ ;
2189 u_sleep(200); /* wait ~200uS */
2190
2191 /*
2192 * finally we do a 2nd STARTUP IPI: this 2nd STARTUP IPI should run IF
2193 * the previous STARTUP IPI was cancelled by a latched INIT IPI. OR
2194 * this STARTUP IPI will be ignored, as only ONE STARTUP IPI is
2195 * recognized after hardware RESET or INIT IPI.
2196 */
2197
2198 lapic.icr_lo = icr_lo | 0x00000600 | vector;
2199 while (lapic.icr_lo & APIC_DELSTAT_MASK)
2200 /* spin */ ;
2201 u_sleep(200); /* wait ~200uS */
2202
2203 /* wait for it to start */
2204 set_apic_timer(5000000);/* == 5 seconds */
2205 while (read_apic_timer())
2206 if (mp_ncpus > cpus)
2207 return 1; /* return SUCCESS */
2208
2209 return 0; /* return FAILURE */
2210}
2211
2212/*
2213 * Flush the TLB on all other CPUs.
2214 *
2215 * XXX: Needs to handshake and wait for completion before proceeding.
2216 */
2217void
2218smp_invltlb(void)
2219{
2220#if defined(APIC_IO)
2221 if (smp_started && invltlb_ok)
2222 all_but_self_ipi(XINVLTLB_OFFSET);
2223#endif /* APIC_IO */
2224}
2225
2226void
2227invlpg(u_int addr)
2228{
2229 __asm __volatile("invlpg (%0)"::"r"(addr):"memory");
2230
2231 /* send a message to the other CPUs */
2232 smp_invltlb();
2233}
2234
2235void
2236invltlb(void)
2237{
2238 u_long temp;
2239
2240 /*
2241 * This should be implemented as load_cr3(rcr3()) when load_cr3() is
2242 * inlined.
2243 */
2244 __asm __volatile("movl %%cr3, %0; movl %0, %%cr3":"=r"(temp) :: "memory");
2245
2246 /* send a message to the other CPUs */
2247 smp_invltlb();
2248}
2249
2250
2251/*
2252 * This is called once the rest of the system is up and running and we're
2253 * ready to let the AP's out of the pen.
2254 */
2255void
2256ap_init(void)
2257{
2258 u_int apic_id;
2259
2260 /* spin until all the AP's are ready */
2261 while (!aps_ready)
2262 /* spin */ ;
2263
2264 /*
2265 * Set curproc to our per-cpu idleproc so that mutexes have
2266 * something unique to lock with.
2267 */
2268 PCPU_SET(curproc, PCPU_GET(idleproc));
2269
2270 /* lock against other AP's that are waking up */
2271 mtx_enter(&ap_boot_mtx, MTX_SPIN);
2271 mtx_lock_spin(&ap_boot_mtx);
2272
2273 /* BSP may have changed PTD while we're waiting for the lock */
2274 cpu_invltlb();
2275
2276 smp_cpus++;
2277
2278#if defined(I586_CPU) && !defined(NO_F00F_HACK)
2279 lidt(&r_idt);
2280#endif
2281
2282 /* Build our map of 'other' CPUs. */
2283 PCPU_SET(other_cpus, all_cpus & ~(1 << PCPU_GET(cpuid)));
2284
2285 printf("SMP: AP CPU #%d Launched!\n", PCPU_GET(cpuid));
2286
2287 /* set up CPU registers and state */
2288 cpu_setregs();
2289
2290 /* set up FPU state on the AP */
2291 npxinit(__INITIAL_NPXCW__);
2292
2293 /* A quick check from sanity claus */
2294 apic_id = (apic_id_to_logical[(lapic.id & 0x0f000000) >> 24]);
2295 if (PCPU_GET(cpuid) != apic_id) {
2296 printf("SMP: cpuid = %d\n", PCPU_GET(cpuid));
2297 printf("SMP: apic_id = %d\n", apic_id);
2298 printf("PTD[MPPTDI] = %p\n", (void *)PTD[MPPTDI]);
2299 panic("cpuid mismatch! boom!!");
2300 }
2301
2302 /* Init local apic for irq's */
2303 apic_initialize();
2304
2305 /* Set memory range attributes for this CPU to match the BSP */
2306 mem_range_AP_init();
2307
2308 /*
2309 * Activate smp_invltlb, although strictly speaking, this isn't
2310 * quite correct yet. We should have a bitfield for cpus willing
2311 * to accept TLB flush IPI's or something and sync them.
2312 */
2313 if (smp_cpus == mp_ncpus) {
2314 invltlb_ok = 1;
2315 smp_started = 1; /* enable IPI's, tlb shootdown, freezes etc */
2316 smp_active = 1; /* historic */
2317 }
2318
2319 /* let other AP's wake up now */
2320 mtx_exit(&ap_boot_mtx, MTX_SPIN);
2320 mtx_unlock_spin(&ap_boot_mtx);
2321
2322 /* wait until all the AP's are up */
2323 while (smp_started == 0)
2324 ; /* nothing */
2325
2326 microuptime(PCPU_PTR(switchtime));
2327 PCPU_SET(switchticks, ticks);
2328
2329 /* ok, now grab sched_lock and enter the scheduler */
2330 enable_intr();
2331 mtx_enter(&sched_lock, MTX_SPIN);
2331 mtx_lock_spin(&sched_lock);
2332 cpu_throw(); /* doesn't return */
2333
2334 panic("scheduler returned us to ap_init");
2335}
2336
2337#ifdef BETTER_CLOCK
2338
2339#define CHECKSTATE_USER 0
2340#define CHECKSTATE_SYS 1
2341#define CHECKSTATE_INTR 2
2342
2343/* Do not staticize. Used from apic_vector.s */
2344struct proc* checkstate_curproc[MAXCPU];
2345int checkstate_cpustate[MAXCPU];
2346u_long checkstate_pc[MAXCPU];
2347
2348#define PC_TO_INDEX(pc, prof) \
2349 ((int)(((u_quad_t)((pc) - (prof)->pr_off) * \
2350 (u_quad_t)((prof)->pr_scale)) >> 16) & ~1)
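/*
 * PC_TO_INDEX turns a profiled pc into a byte offset into the profil(2)
 * sample buffer: the offset from pr_off is scaled by pr_scale (a fixed
 * point fraction with 16 fractional bits) and truncated to an even value
 * so that it addresses one of the buffer's 16-bit counters.
 */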
2351
2352static void
2353addupc_intr_forwarded(struct proc *p, int id, int *astmap)
2354{
2355 int i;
2356 struct uprof *prof;
2357 u_long pc;
2358
2359 pc = checkstate_pc[id];
2360 prof = &p->p_stats->p_prof;
2361 if (pc >= prof->pr_off &&
2362 (i = PC_TO_INDEX(pc, prof)) < prof->pr_size) {
2363 mtx_assert(&sched_lock, MA_OWNED);
2364 if ((p->p_sflag & PS_OWEUPC) == 0) {
2365 prof->pr_addr = pc;
2366 prof->pr_ticks = 1;
2367 p->p_sflag |= PS_OWEUPC;
2368 }
2369 *astmap |= (1 << id);
2370 }
2371}
2372
2373static void
2374forwarded_statclock(int id, int pscnt, int *astmap)
2375{
2376 struct pstats *pstats;
2377 long rss;
2378 struct rusage *ru;
2379 struct vmspace *vm;
2380 int cpustate;
2381 struct proc *p;
2382#ifdef GPROF
2383 register struct gmonparam *g;
2384 int i;
2385#endif
2386
2387 mtx_assert(&sched_lock, MA_OWNED);
2388 p = checkstate_curproc[id];
2389 cpustate = checkstate_cpustate[id];
2390
2391 /* XXX */
2392 if (p->p_ithd)
2393 cpustate = CHECKSTATE_INTR;
2394 else if (p == SMP_prvspace[id].globaldata.gd_idleproc)
2395 cpustate = CHECKSTATE_SYS;
2396
2397 switch (cpustate) {
2398 case CHECKSTATE_USER:
2399 if (p->p_sflag & PS_PROFIL)
2400 addupc_intr_forwarded(p, id, astmap);
2401 if (pscnt > 1)
2402 return;
2403 p->p_uticks++;
2404 if (p->p_nice > NZERO)
2405 cp_time[CP_NICE]++;
2406 else
2407 cp_time[CP_USER]++;
2408 break;
2409 case CHECKSTATE_SYS:
2410#ifdef GPROF
2411 /*
2412 * Kernel statistics are just like addupc_intr, only easier.
2413 */
2414 g = &_gmonparam;
2415 if (g->state == GMON_PROF_ON) {
2416 i = checkstate_pc[id] - g->lowpc;
2417 if (i < g->textsize) {
2418 i /= HISTFRACTION * sizeof(*g->kcount);
2419 g->kcount[i]++;
2420 }
2421 }
2422#endif
2423 if (pscnt > 1)
2424 return;
2425
2426 p->p_sticks++;
2427 if (p == SMP_prvspace[id].globaldata.gd_idleproc)
2428 cp_time[CP_IDLE]++;
2429 else
2430 cp_time[CP_SYS]++;
2431 break;
2432 case CHECKSTATE_INTR:
2433 default:
2434#ifdef GPROF
2435 /*
2436 * Kernel statistics are just like addupc_intr, only easier.
2437 */
2438 g = &_gmonparam;
2439 if (g->state == GMON_PROF_ON) {
2440 i = checkstate_pc[id] - g->lowpc;
2441 if (i < g->textsize) {
2442 i /= HISTFRACTION * sizeof(*g->kcount);
2443 g->kcount[i]++;
2444 }
2445 }
2446#endif
2447 if (pscnt > 1)
2448 return;
2449 KASSERT(p != NULL, ("NULL process in interrupt state"));
2450 p->p_iticks++;
2451 cp_time[CP_INTR]++;
2452 }
2453
2454 schedclock(p);
2455
2456 /* Update resource usage integrals and maximums. */
2457 if ((pstats = p->p_stats) != NULL &&
2458 (ru = &pstats->p_ru) != NULL &&
2459 (vm = p->p_vmspace) != NULL) {
2460 ru->ru_ixrss += pgtok(vm->vm_tsize);
2461 ru->ru_idrss += pgtok(vm->vm_dsize);
2462 ru->ru_isrss += pgtok(vm->vm_ssize);
2463 rss = pgtok(vmspace_resident_count(vm));
2464 if (ru->ru_maxrss < rss)
2465 ru->ru_maxrss = rss;
2466 }
2467}
2468
2469void
2470forward_statclock(int pscnt)
2471{
2472 int map;
2473 int id;
2474 int i;
2475
2476 /* Kludge. We don't yet have separate locks for the interrupts
2477 * and the kernel. This means that we cannot let the other processors
2478 * handle complex interrupts while inhibiting them from entering
2479 * the kernel in a non-interrupt context.
2480 *
2481 * What we can do, without changing the locking mechanisms yet,
2482	 * is to let the other processors handle a very simple interrupt
2483	 * (which determines the processor states), and do the main
2484	 * work ourselves.
2485 */
2486
2487 CTR1(KTR_SMP, "forward_statclock(%d)", pscnt);
2488
2489 if (!smp_started || !invltlb_ok || cold || panicstr)
2490 return;
2491
2492 /* Step 1: Probe state (user, cpu, interrupt, spinlock, idle ) */
2493
2494 map = PCPU_GET(other_cpus) & ~stopped_cpus ;
2495 checkstate_probed_cpus = 0;
2496 if (map != 0)
2497 selected_apic_ipi(map,
2498 XCPUCHECKSTATE_OFFSET, APIC_DELMODE_FIXED);
2499
2500 i = 0;
2501 while (checkstate_probed_cpus != map) {
2502 /* spin */
2503 i++;
2504 if (i == 100000) {
2505#ifdef BETTER_CLOCK_DIAGNOSTIC
2506 printf("forward_statclock: checkstate %x\n",
2507 checkstate_probed_cpus);
2508#endif
2509 break;
2510 }
2511 }
2512
2513 /*
2514	 * Step 2: walk through other processors' processes, update ticks and
2515 * profiling info.
2516 */
2517
2518 map = 0;
2519 for (id = 0; id < mp_ncpus; id++) {
2520 if (id == PCPU_GET(cpuid))
2521 continue;
2522 if (((1 << id) & checkstate_probed_cpus) == 0)
2523 continue;
2524 forwarded_statclock(id, pscnt, &map);
2525 }
2526 if (map != 0) {
2527 checkstate_need_ast |= map;
2528 selected_apic_ipi(map, XCPUAST_OFFSET, APIC_DELMODE_FIXED);
2529 i = 0;
2530 while ((checkstate_need_ast & map) != 0) {
2531 /* spin */
2532 i++;
2533 if (i > 100000) {
2534#ifdef BETTER_CLOCK_DIAGNOSTIC
2535 printf("forward_statclock: dropped ast 0x%x\n",
2536 checkstate_need_ast & map);
2537#endif
2538 break;
2539 }
2540 }
2541 }
2542}
2543
2544void
2545forward_hardclock(int pscnt)
2546{
2547 int map;
2548 int id;
2549 struct proc *p;
2550 struct pstats *pstats;
2551 int i;
2552
2553 /* Kludge. We don't yet have separate locks for the interrupts
2554 * and the kernel. This means that we cannot let the other processors
2555 * handle complex interrupts while inhibiting them from entering
2556 * the kernel in a non-interrupt context.
2557 *
2558 * What we can do, without changing the locking mechanisms yet,
2559	 * is to let the other processors handle a very simple interrupt
2560	 * (which determines the processor states), and do the main
2561	 * work ourselves.
2562 */
2563
2564 CTR1(KTR_SMP, "forward_hardclock(%d)", pscnt);
2565
2566 if (!smp_started || !invltlb_ok || cold || panicstr)
2567 return;
2568
2569 /* Step 1: Probe state (user, cpu, interrupt, spinlock, idle) */
2570
2571 map = PCPU_GET(other_cpus) & ~stopped_cpus ;
2572 checkstate_probed_cpus = 0;
2573 if (map != 0)
2574 selected_apic_ipi(map,
2575 XCPUCHECKSTATE_OFFSET, APIC_DELMODE_FIXED);
2576
2577 i = 0;
2578 while (checkstate_probed_cpus != map) {
2579 /* spin */
2580 i++;
2581 if (i == 100000) {
2582#ifdef BETTER_CLOCK_DIAGNOSTIC
2583 printf("forward_hardclock: checkstate %x\n",
2584 checkstate_probed_cpus);
2585#endif
2586 break;
2587 }
2588 }
2589
2590 /*
2591	 * Step 2: walk through other processors' processes, update virtual
2592 * timer and profiling timer. If stathz == 0, also update ticks and
2593 * profiling info.
2594 */
2595
2596 map = 0;
2597 for (id = 0; id < mp_ncpus; id++) {
2598 if (id == PCPU_GET(cpuid))
2599 continue;
2600 if (((1 << id) & checkstate_probed_cpus) == 0)
2601 continue;
2602 p = checkstate_curproc[id];
2603 if (p) {
2604 pstats = p->p_stats;
2605 if (checkstate_cpustate[id] == CHECKSTATE_USER &&
2606 timevalisset(&pstats->p_timer[ITIMER_VIRTUAL].it_value) &&
2607 itimerdecr(&pstats->p_timer[ITIMER_VIRTUAL], tick) == 0) {
2608 p->p_sflag |= PS_ALRMPEND;
2609 map |= (1 << id);
2610 }
2611 if (timevalisset(&pstats->p_timer[ITIMER_PROF].it_value) &&
2612 itimerdecr(&pstats->p_timer[ITIMER_PROF], tick) == 0) {
2613 p->p_sflag |= PS_PROFPEND;
2614 map |= (1 << id);
2615 }
2616 }
2617 if (stathz == 0) {
2618 forwarded_statclock( id, pscnt, &map);
2619 }
2620 }
2621 if (map != 0) {
2622 checkstate_need_ast |= map;
2623 selected_apic_ipi(map, XCPUAST_OFFSET, APIC_DELMODE_FIXED);
2624 i = 0;
2625 while ((checkstate_need_ast & map) != 0) {
2626 /* spin */
2627 i++;
2628 if (i > 100000) {
2629#ifdef BETTER_CLOCK_DIAGNOSTIC
2630 printf("forward_hardclock: dropped ast 0x%x\n",
2631 checkstate_need_ast & map);
2632#endif
2633 break;
2634 }
2635 }
2636 }
2637}
2638
2639#endif /* BETTER_CLOCK */
2640
2641void
2642forward_signal(struct proc *p)
2643{
2644 int map;
2645 int id;
2646 int i;
2647
2648 /* Kludge. We don't yet have separate locks for the interrupts
2649 * and the kernel. This means that we cannot let the other processors
2650 * handle complex interrupts while inhibiting them from entering
2651 * the kernel in a non-interrupt context.
2652 *
2653 * What we can do, without changing the locking mechanisms yet,
2654	 * is to let the other processors handle a very simple interrupt
2655	 * (which determines the processor states), and do the main
2656	 * work ourselves.
2657 */
2658
2659 CTR1(KTR_SMP, "forward_signal(%p)", p);
2660
2661 if (!smp_started || !invltlb_ok || cold || panicstr)
2662 return;
2663 if (!forward_signal_enabled)
2664 return;
2665 mtx_enter(&sched_lock, MTX_SPIN);
2665 mtx_lock_spin(&sched_lock);
2666 while (1) {
2667 if (p->p_stat != SRUN) {
2668 mtx_exit(&sched_lock, MTX_SPIN);
2668 mtx_unlock_spin(&sched_lock);
2669 return;
2670 }
2671 id = p->p_oncpu;
2672 mtx_exit(&sched_lock, MTX_SPIN);
2672 mtx_unlock_spin(&sched_lock);
2673 if (id == 0xff)
2674 return;
2675 map = (1<<id);
2676 checkstate_need_ast |= map;
2677 selected_apic_ipi(map, XCPUAST_OFFSET, APIC_DELMODE_FIXED);
2678 i = 0;
2679 while ((checkstate_need_ast & map) != 0) {
2680 /* spin */
2681 i++;
2682 if (i > 100000) {
2683#if 0
2684 printf("forward_signal: dropped ast 0x%x\n",
2685 checkstate_need_ast & map);
2686#endif
2687 break;
2688 }
2689 }
2690 mtx_enter(&sched_lock, MTX_SPIN);
2690 mtx_lock_spin(&sched_lock);
2691 if (id == p->p_oncpu) {
2692 mtx_exit(&sched_lock, MTX_SPIN);
2692 mtx_unlock_spin(&sched_lock);
2693 return;
2694 }
2695 }
2696}
2697
2698void
2699forward_roundrobin(void)
2700{
2701 u_int map;
2702 int i;
2703
2704 CTR0(KTR_SMP, "forward_roundrobin()");
2705
2706 if (!smp_started || !invltlb_ok || cold || panicstr)
2707 return;
2708 if (!forward_roundrobin_enabled)
2709 return;
2710 resched_cpus |= PCPU_GET(other_cpus);
2711 map = PCPU_GET(other_cpus) & ~stopped_cpus ;
2712#if 1
2713 selected_apic_ipi(map, XCPUAST_OFFSET, APIC_DELMODE_FIXED);
2714#else
2715 (void) all_but_self_ipi(XCPUAST_OFFSET);
2716#endif
2717 i = 0;
2718 while ((checkstate_need_ast & map) != 0) {
2719 /* spin */
2720 i++;
2721 if (i > 100000) {
2722#if 0
2723 printf("forward_roundrobin: dropped ast 0x%x\n",
2724 checkstate_need_ast & map);
2725#endif
2726 break;
2727 }
2728 }
2729}
2730
2731/*
2732 * When called the executing CPU will send an IPI to all other CPUs
2733 * requesting that they halt execution.
2734 *
2735 * Usually (but not necessarily) called with 'other_cpus' as its arg.
2736 *
2737 * - Signals all CPUs in map to stop.
2738 * - Waits for each to stop.
2739 *
2740 * Returns:
2741 * -1: error
2742 * 0: NA
2743 * 1: ok
2744 *
2745 * XXX FIXME: this is not MP-safe, needs a lock to prevent multiple CPUs
2746 * from executing at same time.
2747 */
2748int
2749stop_cpus(u_int map)
2750{
2751 int count = 0;
2752
2753 if (!smp_started)
2754 return 0;
2755
2756 /* send the Xcpustop IPI to all CPUs in map */
2757 selected_apic_ipi(map, XCPUSTOP_OFFSET, APIC_DELMODE_FIXED);
2758
2759 while (count++ < 100000 && (stopped_cpus & map) != map)
2760 /* spin */ ;
2761
2762#ifdef DIAGNOSTIC
2763 if ((stopped_cpus & map) != map)
2764 printf("Warning: CPUs 0x%x did not stop!\n",
2765 (~(stopped_cpus & map)) & map);
2766#endif
2767
2768 return 1;
2769}
2770
2771
2772/*
2773 * Called by a CPU to restart stopped CPUs.
2774 *
2775 * Usually (but not necessarily) called with 'stopped_cpus' as its arg.
2776 *
2777 * - Signals all CPUs in map to restart.
2778 * - Waits for each to restart.
2779 *
2780 * Returns:
2781 * -1: error
2782 * 0: NA
2783 * 1: ok
2784 */
2785int
2786restart_cpus(u_int map)
2787{
2788 int count = 0;
2789
2790 if (!smp_started)
2791 return 0;
2792
2793 started_cpus = map; /* signal other cpus to restart */
2794
2795 /* wait for each to clear its bit */
2796 while (count++ < 100000 && (stopped_cpus & map) != 0)
2797 /* spin */ ;
2798
2799#ifdef DIAGNOSTIC
2800 if ((stopped_cpus & map) != 0)
2801 printf("Warning: CPUs 0x%x did not restart!\n",
2802 (~(stopped_cpus & map)) & map);
2803#endif
2804
2805 return 1;
2806}
2807
2808
2809#ifdef APIC_INTR_REORDER
2810/*
2811 * Maintain mapping from softintr vector to isr bit in local apic.
2812 */
2813void
2814set_lapic_isrloc(int intr, int vector)
2815{
2816 if (intr < 0 || intr > 32)
2817 panic("set_apic_isrloc: bad intr argument: %d",intr);
2818 if (vector < ICU_OFFSET || vector > 255)
2819 panic("set_apic_isrloc: bad vector argument: %d",vector);
2820 apic_isrbit_location[intr].location = &lapic.isr0 + ((vector>>5)<<2);
2821 apic_isrbit_location[intr].bit = (1<<(vector & 31));
2822}
2823#endif
2824
2825/*
2826 * All-CPU rendezvous. CPUs are signalled, all execute the setup function
2827 * (if specified), rendezvous, execute the action function (if specified),
2828 * rendezvous again, execute the teardown function (if specified), and then
2829 * resume.
2830 *
2831 * Note that the supplied external functions _must_ be reentrant and aware
2832 * that they are running in parallel and in an unknown lock context.
2833 */
2834static void (*smp_rv_setup_func)(void *arg);
2835static void (*smp_rv_action_func)(void *arg);
2836static void (*smp_rv_teardown_func)(void *arg);
2837static void *smp_rv_func_arg;
2838static volatile int smp_rv_waiters[2];
2839
2840void
2841smp_rendezvous_action(void)
2842{
2843 /* setup function */
2844 if (smp_rv_setup_func != NULL)
2845 smp_rv_setup_func(smp_rv_func_arg);
2846 /* spin on entry rendezvous */
2847 atomic_add_int(&smp_rv_waiters[0], 1);
2848 while (smp_rv_waiters[0] < mp_ncpus)
2849 ;
2850 /* action function */
2851 if (smp_rv_action_func != NULL)
2852 smp_rv_action_func(smp_rv_func_arg);
2853 /* spin on exit rendezvous */
2854 atomic_add_int(&smp_rv_waiters[1], 1);
2855 while (smp_rv_waiters[1] < mp_ncpus)
2856 ;
2857 /* teardown function */
2858 if (smp_rv_teardown_func != NULL)
2859 smp_rv_teardown_func(smp_rv_func_arg);
2860}
2861
2862void
2863smp_rendezvous(void (* setup_func)(void *),
2864 void (* action_func)(void *),
2865 void (* teardown_func)(void *),
2866 void *arg)
2867{
2868
2869 /* obtain rendezvous lock */
2870 mtx_enter(&smp_rv_mtx, MTX_SPIN);
2870 mtx_lock_spin(&smp_rv_mtx);
2871
2872 /* set static function pointers */
2873 smp_rv_setup_func = setup_func;
2874 smp_rv_action_func = action_func;
2875 smp_rv_teardown_func = teardown_func;
2876 smp_rv_func_arg = arg;
2877 smp_rv_waiters[0] = 0;
2878 smp_rv_waiters[1] = 0;
2879
2880 /*
2881 * signal other processors, which will enter the IPI with interrupts off
2882 */
2883 all_but_self_ipi(XRENDEZVOUS_OFFSET);
2884
2885 /* call executor function */
2886 smp_rendezvous_action();
2887
2888 /* release lock */
2889 mtx_exit(&smp_rv_mtx, MTX_SPIN);
2889 mtx_unlock_spin(&smp_rv_mtx);
2890}
2891
2892void
2893release_aps(void *dummy __unused)
2894{
2895 atomic_store_rel_int(&aps_ready, 1);
2896}
2897
2898SYSINIT(start_aps, SI_SUB_SMP, SI_ORDER_FIRST, release_aps, NULL);
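
#if 0
/*
 * Usage sketch (illustrative only; flush_action() and flush_all_cpus() are
 * hypothetical names, not part of this file): a caller that needs some
 * action run on every CPU at the same time, e.g. flushing and invalidating
 * all caches, can wrap the work in a rendezvous.
 */
static void
flush_action(void *arg __unused)
{
	wbinvd();		/* runs on every CPU, between the two barriers */
}

static void
flush_all_cpus(void)
{
	/* no setup or teardown hook, no argument */
	smp_rendezvous(NULL, flush_action, NULL, NULL);
}
#endif /* 0 */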