/*
 * apic_vector.s (FreeBSD revision 71321)
 *	from: vector.s, 386BSD 0.1 unknown origin
 * $FreeBSD: head/sys/i386/i386/apic_vector.s 71321 2001-01-21 07:54:10Z peter $
 */


#include <machine/apic.h>
#include <machine/smp.h>

#include "i386/isa/intr_machdep.h"

/* convert an absolute IRQ# into a bitmask */
#define IRQ_BIT(irq_num)	(1 << (irq_num))

/* make an index into the IO APIC from the IRQ# (2 registers per pin) */
#define REDTBL_IDX(irq_num)	(0x10 + ((irq_num) * 2))

/*
 * Trap-frame helpers.  PUSH_FRAME builds an interrupt frame: dummy
 * error code and trap type (so the layout matches a CPU trap frame),
 * all general registers, then the %ds/%es/%fs segment registers.
 * POP_FRAME undoes it, finally discarding the two dummy words.
 */
#define PUSH_FRAME							\
	pushl	$0 ;		/* dummy error code */			\
	pushl	$0 ;		/* dummy trap type */			\
	pushal ;							\
	pushl	%ds ;		/* save data and extra segments ... */	\
	pushl	%es ;							\
	pushl	%fs

#define POP_FRAME							\
	popl	%fs ;							\
	popl	%es ;							\
	popl	%ds ;							\
	popal ;								\
	addl	$4+4,%esp	/* dummy trap type and error code */

/*
 * Macros for interrupt entry, call to handler, and exit.
 *
 * FAST_INTR: lightweight interrupt — load kernel segments, call the
 * unit's handler directly with interrupts still masked, EOI the local
 * APIC, then account and return through _doreti.
 */

#define	FAST_INTR(irq_num, vec_name)					\
	.text ;								\
	SUPERALIGN_TEXT ;						\
IDTVEC(vec_name) ;							\
	PUSH_FRAME ;							\
	movl	$KDSEL,%eax ;						\
	mov	%ax,%ds ;						\
	mov	%ax,%es ;						\
	movl	$KPSEL,%eax ;						\
	mov	%ax,%fs ;						\
	FAKE_MCOUNT(13*4(%esp)) ;					\
	incb	PCPU(INTR_NESTING_LEVEL) ;				\
	pushl	_intr_unit + (irq_num) * 4 ;				\
	call	*_intr_handler + (irq_num) * 4 ; /* do the work ASAP */ \
	addl	$4, %esp ;						\
	movl	$0, _lapic+LA_EOI ;					\
	lock ; 								\
	incl	_cnt+V_INTR ;	/* book-keeping can wait */		\
	movl	_intr_countp + (irq_num) * 4, %eax ;			\
	lock ; 								\
	incl	(%eax) ;						\
	MEXITCOUNT ;							\
	jmp	_doreti

/* Offsets of the ioapic address / redirection index within
 * int_to_apicintpin[] (16-byte entries). */
#define IOAPICADDR(irq_num) CNAME(int_to_apicintpin) + 16 * (irq_num) + 8
#define REDIRIDX(irq_num) CNAME(int_to_apicintpin) + 16 * (irq_num) + 12

/* Mask an IRQ at the IO APIC if it is not already masked in apic_imen. */
#define MASK_IRQ(irq_num)						\
	IMASK_LOCK ;				/* into critical reg */	\
	testl	$IRQ_BIT(irq_num), _apic_imen ;				\
	jne	7f ;			/* masked, don't mask */	\
	orl	$IRQ_BIT(irq_num), _apic_imen ;	/* set the mask bit */	\
	movl	IOAPICADDR(irq_num), %ecx ;	/* ioapic addr */	\
	movl	REDIRIDX(irq_num), %eax ;	/* get the index */	\
	movl	%eax, (%ecx) ;			/* write the index */	\
	movl	IOAPIC_WINDOW(%ecx), %eax ;	/* current value */	\
	orl	$IOART_INTMASK, %eax ;		/* set the mask */	\
	movl	%eax, IOAPIC_WINDOW(%ecx) ;	/* new value */		\
7: ;						/* already masked */	\
	IMASK_UNLOCK
/*
 * Test to see whether we are handling an edge or level triggered INT.
 *  Level-triggered INTs must still be masked as we don't clear the source,
 *  and the EOI cycle would cause redundant INTs to occur.
 */
#define MASK_LEVEL_IRQ(irq_num)						\
	testl	$IRQ_BIT(irq_num), _apic_pin_trigger ;			\
	jz	9f ;				/* edge, don't mask */	\
	MASK_IRQ(irq_num) ;						\
9:

/*
 * Issue an EOI to the local APIC, but only if the IRQ's in-service
 * bit is actually set (otherwise the EOI would acknowledge some other
 * interrupt).
 */
#ifdef APIC_INTR_REORDER
#define EOI_IRQ(irq_num)						\
	movl	_apic_isrbit_location + 8 * (irq_num), %eax ;		\
	movl	(%eax), %eax ;						\
	testl	_apic_isrbit_location + 4 + 8 * (irq_num), %eax ;	\
	jz	9f ;				/* not active */	\
	movl	$0, _lapic+LA_EOI ;					\
9:

#else
#define EOI_IRQ(irq_num)						\
	testl	$IRQ_BIT(irq_num), _lapic+LA_ISR1;			\
	jz	9f	;			/* not active */	\
	movl	$0, _lapic+LA_EOI;					\
9:
#endif


/*
 * Test to see if the source is currently masked, clear if so.
 */
#define UNMASK_IRQ(irq_num)					\
	IMASK_LOCK ;				/* into critical reg */	\
	testl	$IRQ_BIT(irq_num), _apic_imen ;				\
	je	7f ;			/* bit clear, not masked */	\
	andl	$~IRQ_BIT(irq_num), _apic_imen ;/* clear mask bit */	\
	movl	IOAPICADDR(irq_num), %ecx ;	/* ioapic addr */	\
	movl	REDIRIDX(irq_num), %eax ;	/* get the index */	\
	movl	%eax, (%ecx) ;			/* write the index */	\
	movl	IOAPIC_WINDOW(%ecx), %eax ;	/* current value */	\
	andl	$~IOART_INTMASK, %eax ;		/* clear the mask */	\
	movl	%eax, IOAPIC_WINDOW(%ecx) ;	/* new value */		\
7: ;						/* already unmasked */	\
	IMASK_UNLOCK

/*
 * Slow, threaded interrupts.
 *
 * XXX Most of the parameters here are obsolete.  Fix this when we're
 * done.
 * XXX we really shouldn't return via doreti if we just schedule the
 * interrupt handler and don't run anything.  We could just do an
 * iret.  FIXME.
 */
#define	INTR(irq_num, vec_name, maybe_extra_ipending)			\
	.text ;								\
	SUPERALIGN_TEXT ;						\
/* _XintrNN: entry point used by IDT/HWIs via _vec[]. */		\
IDTVEC(vec_name) ;							\
	PUSH_FRAME ;							\
	movl	$KDSEL, %eax ;	/* reload with kernel's data segment */	\
	mov	%ax, %ds ;						\
	mov	%ax, %es ;						\
	movl	$KPSEL, %eax ;						\
	mov	%ax, %fs ;						\
;									\
	maybe_extra_ipending ;						\
;									\
	MASK_LEVEL_IRQ(irq_num) ;					\
	EOI_IRQ(irq_num) ;						\
0: ;									\
	incb	PCPU(INTR_NESTING_LEVEL) ;				\
;	 								\
  /* entry point used by doreti_unpend for HWIs. */			\
__CONCAT(Xresume,irq_num): ;						\
	FAKE_MCOUNT(13*4(%esp)) ;		/* XXX avoid dbl cnt */ \
	pushl	$irq_num;			/* pass the IRQ */	\
	sti ;								\
	call	_sched_ithd ;						\
	addl	$4, %esp ;		/* discard the parameter */	\
;									\
	MEXITCOUNT ;							\
	jmp	_doreti

/*
 * Handle "spurious INTerrupts".
 * Notes:
 *  This is different than the "spurious INTerrupt" generated by an
 *   8259 PIC for missing INTs.  See the APIC documentation for details.
 *  This routine should NOT do an 'EOI' cycle.
 */
	.text
	SUPERALIGN_TEXT
	.globl _Xspuriousint
_Xspuriousint:

	/* No EOI cycle used here */

	iret


/*
 * Handle TLB shootdowns.
 */
	.text
	SUPERALIGN_TEXT
	.globl	_Xinvltlb
_Xinvltlb:
	pushl	%eax

#ifdef COUNT_XINVLTLB_HITS
	pushl	%fs
	movl	$KPSEL, %eax
	mov	%ax, %fs
	movl	PCPU(CPUID), %eax
	popl	%fs
	ss
	incl	_xhits(,%eax,4)
#endif /* COUNT_XINVLTLB_HITS */

	movl	%cr3, %eax		/* invalidate the TLB */
	movl	%eax, %cr3

	ss				/* stack segment, avoid %ds load */
	movl	$0, _lapic+LA_EOI	/* End Of Interrupt to APIC */

	popl	%eax
	iret


#ifdef BETTER_CLOCK

/*
 * Executed by a CPU when it receives an Xcpucheckstate IPI from another CPU,
 *
 *  - Stores current cpu state in checkstate_cpustate[cpuid]
 *      0 == user, 1 == sys, 2 == intr
 *  - Stores current process in checkstate_curproc[cpuid]
 *
 *  - Signals its receipt by setting bit cpuid in checkstate_probed_cpus.
 *
 * stack: 0->ds, 4->fs, 8->ebx, 12->eax, 16->eip, 20->cs, 24->eflags
 */

	.text
	SUPERALIGN_TEXT
	.globl _Xcpucheckstate
	.globl _checkstate_cpustate
	.globl _checkstate_curproc
	.globl _checkstate_pc
_Xcpucheckstate:
	pushl	%eax
	pushl	%ebx
	pushl	%ds			/* save current data segment */
	pushl	%fs

	movl	$KDSEL, %eax
	mov	%ax, %ds		/* use KERNEL data segment */
	movl	$KPSEL, %eax
	mov	%ax, %fs

	movl	$0, _lapic+LA_EOI	/* End Of Interrupt to APIC */

	movl	$0, %ebx
	movl	20(%esp), %eax		/* interrupted %cs */
	andl	$3, %eax		/* RPL bits -> privilege level */
	cmpl	$3, %eax
	je	1f			/* user mode: state stays 0 */
	testl	$PSL_VM, 24(%esp)	/* vm86 counts as user too */
	jne	1f
	incl	%ebx			/* system or interrupt */
1:
	movl	PCPU(CPUID), %eax
	movl	%ebx, _checkstate_cpustate(,%eax,4)
	movl	PCPU(CURPROC), %ebx
	movl	%ebx, _checkstate_curproc(,%eax,4)

	movl	16(%esp), %ebx		/* interrupted %eip */
	movl	%ebx, _checkstate_pc(,%eax,4)

	lock				/* checkstate_probed_cpus |= (1<<id) */
	btsl	%eax, _checkstate_probed_cpus

	popl	%fs
	popl	%ds			/* restore previous data segment */
	popl	%ebx
	popl	%eax
	iret

#endif /* BETTER_CLOCK */

/*
 * Executed by a CPU when it receives an Xcpuast IPI from another CPU,
 *
 *  - Signals its receipt by clearing bit cpuid in checkstate_need_ast.
 *
 *  - We need a better method of triggering asts on other cpus.
 */

	.text
	SUPERALIGN_TEXT
	.globl _Xcpuast
_Xcpuast:
	PUSH_FRAME
	movl	$KDSEL, %eax
	mov	%ax, %ds		/* use KERNEL data segment */
	mov	%ax, %es
	movl	$KPSEL, %eax
	mov	%ax, %fs

	movl	PCPU(CPUID), %eax
	lock				/* checkstate_need_ast &= ~(1<<id) */
	btrl	%eax, _checkstate_need_ast
	movl	$0, _lapic+LA_EOI	/* End Of Interrupt to APIC */

	lock
	btsl	%eax, _checkstate_pending_ast
	jc	1f			/* already delivering an ast */

	FAKE_MCOUNT(13*4(%esp))

	orl	$AST_PENDING, PCPU(ASTPENDING)	/* XXX */
	incb	PCPU(INTR_NESTING_LEVEL)
	sti

	movl	PCPU(CPUID), %eax
	lock
	btrl	%eax, _checkstate_pending_ast
	lock
	btrl	%eax, CNAME(resched_cpus)
	jnc	2f
	orl	$AST_PENDING+AST_RESCHED, PCPU(ASTPENDING)
	lock
	incl	CNAME(want_resched_cnt)
2:
	lock
	incl	CNAME(cpuast_cnt)
	MEXITCOUNT
	jmp	_doreti
1:
	/* We are already in the process of delivering an ast for this CPU */
	POP_FRAME
	iret


/*
 *	 Executed by a CPU when it receives an XFORWARD_IRQ IPI.
 */

	.text
	SUPERALIGN_TEXT
	.globl _Xforward_irq
_Xforward_irq:
	PUSH_FRAME
	movl	$KDSEL, %eax
	mov	%ax, %ds		/* use KERNEL data segment */
	mov	%ax, %es
	movl	$KPSEL, %eax
	mov	%ax, %fs

	movl	$0, _lapic+LA_EOI	/* End Of Interrupt to APIC */

	FAKE_MCOUNT(13*4(%esp))

	lock
	incl	CNAME(forward_irq_hitcnt)
	cmpb	$4, PCPU(INTR_NESTING_LEVEL)	/* too deeply nested? */
	jae	1f

	incb	PCPU(INTR_NESTING_LEVEL)
	sti

	MEXITCOUNT
	jmp	doreti_next		/* Handle forwarded interrupt */
1:
	lock
	incl	CNAME(forward_irq_toodeepcnt)
	MEXITCOUNT
	POP_FRAME
	iret

#if 0
/*
 * forward_irq: send an XFORWARD_IRQ IPI to the CPU holding the kernel
 * lock (currently disabled; see the XXX about mp_lock below).
 */
forward_irq:
	MCOUNT
	cmpl	$0,_invltlb_ok
	jz	4f

	cmpl	$0, CNAME(forward_irq_enabled)
	jz	4f

/* XXX - this is broken now, because mp_lock doesn't exist
	movl	_mp_lock,%eax
	cmpl	$FREE_LOCK,%eax
	jne	1f
 */
	movl	$0, %eax		/* Pick CPU #0 if noone has lock */
1:
	shrl	$24,%eax
	movl	_cpu_num_to_apic_id(,%eax,4),%ecx
	shll	$24,%ecx
	movl	_lapic+LA_ICR_HI, %eax
	andl	$~APIC_ID_MASK, %eax
	orl	%ecx, %eax
	movl	%eax, _lapic+LA_ICR_HI

2:
	movl	_lapic+LA_ICR_LO, %eax	/* wait for idle delivery status */
	andl	$APIC_DELSTAT_MASK,%eax
	jnz	2b
	movl	_lapic+LA_ICR_LO, %eax
	andl	$APIC_RESV2_MASK, %eax
	orl	$(APIC_DEST_DESTFLD|APIC_DELMODE_FIXED|XFORWARD_IRQ_OFFSET), %eax
	movl	%eax, _lapic+LA_ICR_LO
3:
	movl	_lapic+LA_ICR_LO, %eax	/* wait for the IPI to be sent */
	andl	$APIC_DELSTAT_MASK,%eax
	jnz	3b
4:
	ret
#endif

/*
 * Executed by a CPU when it receives an Xcpustop IPI from another CPU,
 *
 *  - Signals its receipt.
 *  - Waits for permission to restart.
 *  - Signals its restart.
 */

	.text
	SUPERALIGN_TEXT
	.globl _Xcpustop
_Xcpustop:
	pushl	%ebp
	movl	%esp, %ebp
	pushl	%eax
	pushl	%ecx
	pushl	%edx
	pushl	%ds			/* save current data segment */
	pushl	%fs

	movl	$KDSEL, %eax
	mov	%ax, %ds		/* use KERNEL data segment */
	movl	$KPSEL, %eax
	mov	%ax, %fs

	movl	$0, _lapic+LA_EOI	/* End Of Interrupt to APIC */

	movl	PCPU(CPUID), %eax
	imull	$PCB_SIZE, %eax
	leal	CNAME(stoppcbs)(%eax), %eax
	pushl	%eax
	call	CNAME(savectx)		/* Save process context */
	addl	$4, %esp


	movl	PCPU(CPUID), %eax

	lock
	btsl	%eax, _stopped_cpus	/* stopped_cpus |= (1<<id) */
1:
	btl	%eax, _started_cpus	/* while (!(started_cpus & (1<<id))) */
	jnc	1b

	lock
	btrl	%eax, _started_cpus	/* started_cpus &= ~(1<<id) */
	lock
	btrl	%eax, _stopped_cpus	/* stopped_cpus &= ~(1<<id) */

	test	%eax, %eax		/* only CPU 0 runs restartfunc */
	jnz	2f

	movl	CNAME(cpustop_restartfunc), %eax
	test	%eax, %eax
	jz	2f
	movl	$0, CNAME(cpustop_restartfunc)	/* One-shot */

	call	*%eax
2:
	popl	%fs
	popl	%ds			/* restore previous data segment */
	popl	%edx
	popl	%ecx
	popl	%eax
	movl	%ebp, %esp
	popl	%ebp
	iret

MCOUNT_LABEL(bintr)
	FAST_INTR(0,fastintr0)
	FAST_INTR(1,fastintr1)
	FAST_INTR(2,fastintr2)
	FAST_INTR(3,fastintr3)
	FAST_INTR(4,fastintr4)
	FAST_INTR(5,fastintr5)
	FAST_INTR(6,fastintr6)
	FAST_INTR(7,fastintr7)
	FAST_INTR(8,fastintr8)
	FAST_INTR(9,fastintr9)
	FAST_INTR(10,fastintr10)
	FAST_INTR(11,fastintr11)
	FAST_INTR(12,fastintr12)
	FAST_INTR(13,fastintr13)
	FAST_INTR(14,fastintr14)
	FAST_INTR(15,fastintr15)
	FAST_INTR(16,fastintr16)
	FAST_INTR(17,fastintr17)
	FAST_INTR(18,fastintr18)
	FAST_INTR(19,fastintr19)
	FAST_INTR(20,fastintr20)
	FAST_INTR(21,fastintr21)
	FAST_INTR(22,fastintr22)
	FAST_INTR(23,fastintr23)
	FAST_INTR(24,fastintr24)
	FAST_INTR(25,fastintr25)
	FAST_INTR(26,fastintr26)
	FAST_INTR(27,fastintr27)
	FAST_INTR(28,fastintr28)
	FAST_INTR(29,fastintr29)
	FAST_INTR(30,fastintr30)
	FAST_INTR(31,fastintr31)
#define	CLKINTR_PENDING	movl $1,CNAME(clkintr_pending)
/* Threaded interrupts */
	INTR(0,intr0, CLKINTR_PENDING)
	INTR(1,intr1,)
	INTR(2,intr2,)
	INTR(3,intr3,)
	INTR(4,intr4,)
	INTR(5,intr5,)
	INTR(6,intr6,)
	INTR(7,intr7,)
	INTR(8,intr8,)
	INTR(9,intr9,)
	INTR(10,intr10,)
	INTR(11,intr11,)
	INTR(12,intr12,)
	INTR(13,intr13,)
	INTR(14,intr14,)
	INTR(15,intr15,)
	INTR(16,intr16,)
	INTR(17,intr17,)
	INTR(18,intr18,)
	INTR(19,intr19,)
	INTR(20,intr20,)
	INTR(21,intr21,)
	INTR(22,intr22,)
	INTR(23,intr23,)
	INTR(24,intr24,)
	INTR(25,intr25,)
	INTR(26,intr26,)
	INTR(27,intr27,)
	INTR(28,intr28,)
	INTR(29,intr29,)
	INTR(30,intr30,)
	INTR(31,intr31,)
MCOUNT_LABEL(eintr)

/*
 * Executed by a CPU when it receives a RENDEZVOUS IPI from another CPU.
 *
 * - Calls the generic rendezvous action function.
 */
	.text
	SUPERALIGN_TEXT
	.globl	_Xrendezvous
_Xrendezvous:
	PUSH_FRAME
	movl	$KDSEL, %eax
	mov	%ax, %ds		/* use KERNEL data segment */
	mov	%ax, %es
	movl	$KPSEL, %eax
	mov	%ax, %fs

	call	_smp_rendezvous_action

	movl	$0, _lapic+LA_EOI	/* End Of Interrupt to APIC */
	POP_FRAME
	iret


566	.data
567#if 0
568/* active flag for lazy masking */
569iactive:
570	.long	0
571#endif
572
573#ifdef COUNT_XINVLTLB_HITS
574	.globl	_xhits
575_xhits:
576	.space	(NCPU * 4), 0
577#endif /* COUNT_XINVLTLB_HITS */
578
579/* variables used by stop_cpus()/restart_cpus()/Xcpustop */
580	.globl _stopped_cpus, _started_cpus
581_stopped_cpus:
582	.long	0
583_started_cpus:
584	.long	0
585
586#ifdef BETTER_CLOCK
587	.globl _checkstate_probed_cpus
588_checkstate_probed_cpus:
589	.long	0
590#endif /* BETTER_CLOCK */
591	.globl _checkstate_need_ast
592_checkstate_need_ast:
593	.long	0
594_checkstate_pending_ast:
595	.long	0
596	.globl CNAME(forward_irq_misscnt)
597	.globl CNAME(forward_irq_toodeepcnt)
598	.globl CNAME(forward_irq_hitcnt)
599	.globl CNAME(resched_cpus)
600	.globl CNAME(want_resched_cnt)
601	.globl CNAME(cpuast_cnt)
602	.globl CNAME(cpustop_restartfunc)
603CNAME(forward_irq_misscnt):
604	.long 0
605CNAME(forward_irq_hitcnt):
606	.long 0
607CNAME(forward_irq_toodeepcnt):
608	.long 0
609CNAME(resched_cpus):
610	.long 0
611CNAME(want_resched_cnt):
612	.long 0
613CNAME(cpuast_cnt):
614	.long 0
615CNAME(cpustop_restartfunc):
616	.long 0
617
618	.globl	_apic_pin_trigger
619_apic_pin_trigger:
620	.long	0
621
622	.text
623