/*
 *	from: vector.s, 386BSD 0.1 unknown origin
 * $FreeBSD: head/sys/i386/i386/apic_vector.s 61130 2000-05-31 16:36:20Z bde $
 */


#include <machine/apic.h>
#include <machine/smp.h>

#include "i386/isa/intr_machdep.h"

/*
 * Interrupts must be enabled while waiting for the MP lock.
 */

#define GET_FAST_INTR_LOCK						\
	sti; call _get_mplock; cli

#define REL_FAST_INTR_LOCK						\
	movl	$_mp_lock, %edx ; /* GIANT_LOCK */			\
	call	_MPrellock_edx

/* convert an absolute IRQ# into a bitmask */
#define IRQ_BIT(irq_num)	(1 << (irq_num))

/* make an index into the IO APIC from the IRQ# */
#define REDTBL_IDX(irq_num)	(0x10 + ((irq_num) * 2))
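
/*
 * Example: for irq_num 10, IRQ_BIT gives 0x00000400 and REDTBL_IDX gives
 * 0x24, the low 32 bits of redirection table entry 10 (the table starts
 * at IO APIC register 0x10 and each entry is a pair of 32-bit registers).
 */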

/*
 * Macros for interrupt entry, call to handler, and exit.
 */

#define	FAST_INTR(irq_num, vec_name)					\
	.text ;								\
	SUPERALIGN_TEXT ;						\
IDTVEC(vec_name) ;							\
	pushl	%eax ;		/* save only call-used registers */	\
	pushl	%ecx ;							\
	pushl	%edx ;							\
	pushl	%ds ;							\
	MAYBE_PUSHL_ES ;						\
	pushl	%fs ;							\
	movl	$KDSEL,%eax ;						\
	mov	%ax,%ds ;						\
	MAYBE_MOVW_AX_ES ;						\
	movl	$KPSEL,%eax ;						\
	mov	%ax,%fs ;						\
	FAKE_MCOUNT((5+ACTUALLY_PUSHED)*4(%esp)) ;			\
	pushl	_intr_unit + (irq_num) * 4 ;				\
	GET_FAST_INTR_LOCK ;						\
	call	*_intr_handler + (irq_num) * 4 ; /* do the work ASAP */ \
	REL_FAST_INTR_LOCK ;						\
	addl	$4, %esp ;						\
	movl	$0, lapic_eoi ;						\
	lock ;								\
	incl	_cnt+V_INTR ;	/* book-keeping can wait */		\
	movl	_intr_countp + (irq_num) * 4, %eax ;			\
	lock ;								\
	incl	(%eax) ;						\
	MEXITCOUNT ;							\
	popl	%fs ;							\
	MAYBE_POPL_ES ;							\
	popl	%ds ;							\
	popl	%edx ;							\
	popl	%ecx ;							\
	popl	%eax ;							\
	iret
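
/*
 * In rough C terms (a sketch only; the real stub must also save and
 * restore the call-used registers and segment selectors as above), a
 * FAST_INTR vector does:
 *
 *	get_mplock();			// interrupts enabled while spinning
 *	(*intr_handler[irq_num])(intr_unit[irq_num]);
 *	rel_mplock();
 *	lapic_eoi = 0;			// ack the local APIC
 *	cnt.v_intr++;			// plus the per-handler count
 */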

/*
 * Create and tear down the trap frame used by the (non-fast) interrupt
 * and IPI entry points below.
 */
#define PUSH_FRAME							\
	pushl	$0 ;		/* dummy error code */			\
	pushl	$0 ;		/* dummy trap type */			\
	pushal ;							\
	pushl	%ds ;		/* save data and extra segments ... */	\
	pushl	%es ;							\
	pushl	%fs

#define POP_FRAME							\
	popl	%fs ;							\
	popl	%es ;							\
	popl	%ds ;							\
	popal ;								\
	addl	$4+4,%esp	/* pop dummy trap type and error code */

#define IOAPICADDR(irq_num) CNAME(int_to_apicintpin) + 16 * (irq_num) + 8
#define REDIRIDX(irq_num) CNAME(int_to_apicintpin) + 16 * (irq_num) + 12

#define MASK_IRQ(irq_num)						\
	IMASK_LOCK ;				/* into critical reg */	\
	testl	$IRQ_BIT(irq_num), _apic_imen ;				\
	jne	7f ;			/* masked, don't mask */	\
	orl	$IRQ_BIT(irq_num), _apic_imen ;	/* set the mask bit */	\
	movl	IOAPICADDR(irq_num), %ecx ;	/* ioapic addr */	\
	movl	REDIRIDX(irq_num), %eax ;	/* get the index */	\
	movl	%eax, (%ecx) ;			/* write the index */	\
	movl	IOAPIC_WINDOW(%ecx), %eax ;	/* current value */	\
	orl	$IOART_INTMASK, %eax ;		/* set the mask */	\
	movl	%eax, IOAPIC_WINDOW(%ecx) ;	/* new value */		\
7: ;						/* already masked */	\
	IMASK_UNLOCK
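
/*
 * A C sketch of MASK_IRQ (the ioapic struct here is hypothetical; the
 * real code uses the address and index stashed in int_to_apicintpin):
 *
 *	if ((apic_imen & IRQ_BIT(irq)) == 0) {	// not already masked
 *		apic_imen |= IRQ_BIT(irq);
 *		ioapic->select = redir_index;	// from REDIRIDX()
 *		ioapic->window |= IOART_INTMASK; // mask the redir entry
 *	}
 */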
/*
 * Test to see whether we are handling an edge or level triggered INT.
 *  Level-triggered INTs must still be masked as we don't clear the source,
 *  and the EOI cycle would cause redundant INTs to occur.
 */
#define MASK_LEVEL_IRQ(irq_num)						\
	testl	$IRQ_BIT(irq_num), _apic_pin_trigger ;			\
	jz	9f ;				/* edge, don't mask */	\
	MASK_IRQ(irq_num) ;						\
9:


#ifdef APIC_INTR_REORDER
#define EOI_IRQ(irq_num)						\
	movl	_apic_isrbit_location + 8 * (irq_num), %eax ;		\
	movl	(%eax), %eax ;						\
	testl	_apic_isrbit_location + 4 + 8 * (irq_num), %eax ;	\
	jz	9f ;				/* not active */	\
	movl	$0, lapic_eoi ;						\
	APIC_ITRACE(apic_itrace_eoi, irq_num, APIC_ITRACE_EOI) ;	\
9:

#else
#define EOI_IRQ(irq_num)						\
	testl	$IRQ_BIT(irq_num), lapic_isr1 ;				\
	jz	9f ;				/* not active */	\
	movl	$0, lapic_eoi ;						\
	APIC_ITRACE(apic_itrace_eoi, irq_num, APIC_ITRACE_EOI) ;	\
9:
#endif
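
/*
 * The non-reordered EOI_IRQ amounts to (a sketch; lapic_isr1 and
 * lapic_eoi are the local APIC in-service and EOI registers used above):
 *
 *	if (lapic_isr1 & IRQ_BIT(irq))
 *		lapic_eoi = 0;		// only EOI if actually in service
 */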


/*
 * Test to see if the source is currently masked, clear if so.
 */
#define UNMASK_IRQ(irq_num)						\
	IMASK_LOCK ;				/* into critical reg */	\
	testl	$IRQ_BIT(irq_num), _apic_imen ;				\
	je	7f ;			/* bit clear, not masked */	\
	andl	$~IRQ_BIT(irq_num), _apic_imen ;/* clear mask bit */	\
	movl	IOAPICADDR(irq_num),%ecx ;	/* ioapic addr */	\
	movl	REDIRIDX(irq_num), %eax ;	/* get the index */	\
	movl	%eax,(%ecx) ;			/* write the index */	\
	movl	IOAPIC_WINDOW(%ecx),%eax ;	/* current value */	\
	andl	$~IOART_INTMASK,%eax ;		/* clear the mask */	\
	movl	%eax,IOAPIC_WINDOW(%ecx) ;	/* new value */		\
7: ;									\
	IMASK_UNLOCK

#ifdef APIC_INTR_DIAGNOSTIC
#ifdef APIC_INTR_DIAGNOSTIC_IRQ
/* Record (cpuid << 8 | event id) in a 32K-entry circular trace buffer. */
log_intr_event:
	pushf
	cli
	pushl	$CNAME(apic_itrace_debuglock)
	call	CNAME(s_lock_np)
	addl	$4, %esp
	movl	CNAME(apic_itrace_debugbuffer_idx), %ecx
	andl	$32767, %ecx
	movl	_cpuid, %eax
	shll	$8,	%eax
	orl	8(%esp), %eax
	movw	%ax,	CNAME(apic_itrace_debugbuffer)(,%ecx,2)
	incl	%ecx
	andl	$32767, %ecx
	movl	%ecx,	CNAME(apic_itrace_debugbuffer_idx)
	pushl	$CNAME(apic_itrace_debuglock)
	call	CNAME(s_unlock_np)
	addl	$4, %esp
	popf
	ret


#define APIC_ITRACE(name, irq_num, id)					\
	lock ;					/* MP-safe */		\
	incl	CNAME(name) + (irq_num) * 4 ;				\
	pushl	%eax ;							\
	pushl	%ecx ;							\
	pushl	%edx ;							\
	movl	$(irq_num), %eax ;					\
	cmpl	$APIC_INTR_DIAGNOSTIC_IRQ, %eax ;			\
	jne	7f ;							\
	pushl	$id ;							\
	call	log_intr_event ;					\
	addl	$4, %esp ;						\
7: ;									\
	popl	%edx ;							\
	popl	%ecx ;							\
	popl	%eax
#else
#define APIC_ITRACE(name, irq_num, id)					\
	lock ;					/* MP-safe */		\
	incl	CNAME(name) + (irq_num) * 4
#endif

#define APIC_ITRACE_ENTER 1
#define APIC_ITRACE_EOI 2
#define APIC_ITRACE_TRYISRLOCK 3
#define APIC_ITRACE_GOTISRLOCK 4
#define APIC_ITRACE_ENTER2 5
#define APIC_ITRACE_LEAVE 6
#define APIC_ITRACE_UNMASK 7
#define APIC_ITRACE_ACTIVE 8
#define APIC_ITRACE_MASKED 9
#define APIC_ITRACE_NOISRLOCK 10
#define APIC_ITRACE_MASKED2 11
#define APIC_ITRACE_SPLZ 12
#define APIC_ITRACE_DORETI 13

#else
#define APIC_ITRACE(name, irq_num, id)
#endif

#define	INTR(irq_num, vec_name, maybe_extra_ipending)			\
	.text ;								\
	SUPERALIGN_TEXT ;						\
/* _XintrNN: entry point used by IDT/HWIs & splz_unpend via _vec[]. */	\
IDTVEC(vec_name) ;							\
	PUSH_FRAME ;							\
	movl	$KDSEL, %eax ;	/* reload with kernel's data segment */	\
	mov	%ax, %ds ;						\
	mov	%ax, %es ;						\
	movl	$KPSEL, %eax ;						\
	mov	%ax, %fs ;						\
;									\
	maybe_extra_ipending ;						\
;									\
	APIC_ITRACE(apic_itrace_enter, irq_num, APIC_ITRACE_ENTER) ;	\
	lock ;					/* MP-safe */		\
	btsl	$(irq_num), iactive ;		/* lazy masking */	\
	jc	1f ;				/* already active */	\
;									\
	MASK_LEVEL_IRQ(irq_num) ;					\
	EOI_IRQ(irq_num) ;						\
0: ;									\
	APIC_ITRACE(apic_itrace_tryisrlock, irq_num, APIC_ITRACE_TRYISRLOCK) ;\
	MP_TRYLOCK ;		/* XXX this is going away... */		\
	testl	%eax, %eax ;			/* did we get it? */	\
	jz	3f ;				/* no */		\
;									\
	APIC_ITRACE(apic_itrace_gotisrlock, irq_num, APIC_ITRACE_GOTISRLOCK) ;\
	testl	$IRQ_BIT(irq_num), _cpl ;				\
	jne	2f ;				/* this INT masked */	\
;									\
	incb	_intr_nesting_level ;					\
;									\
  /* entry point used by doreti_unpend for HWIs. */			\
__CONCAT(Xresume,irq_num): ;						\
	FAKE_MCOUNT(13*4(%esp)) ;		/* XXX avoid dbl cnt */ \
	lock ;	incl	_cnt+V_INTR ;		/* tally interrupts */	\
	movl	_intr_countp + (irq_num) * 4, %eax ;			\
	lock ;	incl	(%eax) ;					\
;									\
	movl	_cpl, %eax ;						\
	pushl	%eax ;							\
	orl	_intr_mask + (irq_num) * 4, %eax ;			\
	movl	%eax, _cpl ;						\
	lock ;								\
	andl	$~IRQ_BIT(irq_num), _ipending ;				\
;									\
	pushl	_intr_unit + (irq_num) * 4 ;				\
	APIC_ITRACE(apic_itrace_enter2, irq_num, APIC_ITRACE_ENTER2) ;	\
	sti ;								\
	call	*_intr_handler + (irq_num) * 4 ;			\
	cli ;								\
	APIC_ITRACE(apic_itrace_leave, irq_num, APIC_ITRACE_LEAVE) ;	\
;									\
	lock ;	andl	$~IRQ_BIT(irq_num), iactive ;			\
	UNMASK_IRQ(irq_num) ;						\
	APIC_ITRACE(apic_itrace_unmask, irq_num, APIC_ITRACE_UNMASK) ;	\
	sti ;				/* doreti repeats cli/sti */	\
	MEXITCOUNT ;							\
	jmp	_doreti ;						\
;									\
	ALIGN_TEXT ;							\
1: ;						/* active  */		\
	APIC_ITRACE(apic_itrace_active, irq_num, APIC_ITRACE_ACTIVE) ;	\
	MASK_IRQ(irq_num) ;						\
	EOI_IRQ(irq_num) ;						\
	lock ;								\
	orl	$IRQ_BIT(irq_num), _ipending ;				\
	lock ;								\
	btsl	$(irq_num), iactive ;		/* still active */	\
	jnc	0b ;				/* retry */		\
	POP_FRAME ;							\
	iret ;		/* XXX:	 iactive bit might be 0 now */		\
	ALIGN_TEXT ;							\
2: ;				/* masked by cpl, leave iactive set */	\
	APIC_ITRACE(apic_itrace_masked, irq_num, APIC_ITRACE_MASKED) ;	\
	lock ;								\
	orl	$IRQ_BIT(irq_num), _ipending ;				\
	MP_RELLOCK ;							\
	POP_FRAME ;							\
	iret ;								\
	ALIGN_TEXT ;							\
3: ;			/* other cpu has isr lock */			\
	APIC_ITRACE(apic_itrace_noisrlock, irq_num, APIC_ITRACE_NOISRLOCK) ;\
	lock ;								\
	orl	$IRQ_BIT(irq_num), _ipending ;				\
	testl	$IRQ_BIT(irq_num), _cpl ;				\
	jne	4f ;				/* this INT masked */	\
	call	forward_irq ;	/* forward irq to lock holder */	\
	POP_FRAME ;				/* and return */	\
	iret ;								\
	ALIGN_TEXT ;							\
4: ;						/* blocked */		\
	APIC_ITRACE(apic_itrace_masked2, irq_num, APIC_ITRACE_MASKED2) ;\
	POP_FRAME ;				/* and return */	\
	iret
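
/*
 * The control flow of INTR() above, approximately, in C (a sketch only;
 * the real code also builds the trap frame and exits through _doreti):
 *
 *	if (test_and_set(&iactive, irq)) {	// 1: already being handled
 *		MASK_IRQ(irq); EOI_IRQ(irq);
 *		ipending |= IRQ_BIT(irq);	// replay it later
 *		return;
 *	}
 *	MASK_LEVEL_IRQ(irq); EOI_IRQ(irq);
 *	if (!MP_TRYLOCK()) {			// 3: other cpu has isr lock
 *		ipending |= IRQ_BIT(irq);
 *		if ((cpl & IRQ_BIT(irq)) == 0)
 *			forward_irq();		// let the lock holder run it
 *		return;
 *	}
 *	if (cpl & IRQ_BIT(irq)) {		// 2: masked by current spl
 *		ipending |= IRQ_BIT(irq);
 *		MP_RELLOCK();
 *		return;
 *	}
 *	cpl |= intr_mask[irq];			// Xresume: run the handler
 *	(*intr_handler[irq])(intr_unit[irq]);
 *	iactive &= ~IRQ_BIT(irq);
 *	UNMASK_IRQ(irq);
 */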

/*
 * Handle "spurious INTerrupts".
 * Notes:
 *  This is different from the "spurious INTerrupt" generated by an
 *   8259 PIC for missing INTs.  See the APIC documentation for details.
 *  This routine should NOT do an 'EOI' cycle.
 */
	.text
	SUPERALIGN_TEXT
	.globl _Xspuriousint
_Xspuriousint:

	/* No EOI cycle used here */

	iret


/*
 * Handle TLB shootdowns.
 */
	.text
	SUPERALIGN_TEXT
	.globl	_Xinvltlb
_Xinvltlb:
	pushl	%eax

#ifdef COUNT_XINVLTLB_HITS
	pushl	%fs
	movl	$KPSEL, %eax
	mov	%ax, %fs
	movl	_cpuid, %eax
	popl	%fs
	ss
	incl	_xhits(,%eax,4)
#endif /* COUNT_XINVLTLB_HITS */

	movl	%cr3, %eax		/* invalidate the TLB */
	movl	%eax, %cr3
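	/*
	 * Writing %cr3 back to itself flushes all non-global TLB
	 * entries; in C this is roughly load_cr3(rcr3()).
	 */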

	ss				/* stack segment, avoid %ds load */
	movl	$0, lapic_eoi		/* End Of Interrupt to APIC */

	popl	%eax
	iret


#ifdef BETTER_CLOCK

/*
 * Executed by a CPU when it receives an Xcpucheckstate IPI from another CPU,
 *
 *  - Stores current cpu state in checkstate_cpustate[cpuid]
 *      0 == user, 1 == sys, 2 == intr
 *  - Stores current process in checkstate_curproc[cpuid]
 *
 *  - Signals its receipt by setting bit cpuid in checkstate_probed_cpus.
 *
 * stack: 0->ds, 4->fs, 8->ebx, 12->eax, 16->eip, 20->cs, 24->eflags
 */

	.text
	SUPERALIGN_TEXT
	.globl _Xcpucheckstate
	.globl _checkstate_cpustate
	.globl _checkstate_curproc
	.globl _checkstate_pc
_Xcpucheckstate:
	pushl	%eax
	pushl	%ebx
	pushl	%ds			/* save current data segment */
	pushl	%fs

	movl	$KDSEL, %eax
	mov	%ax, %ds		/* use KERNEL data segment */
	movl	$KPSEL, %eax
	mov	%ax, %fs

	movl	$0, lapic_eoi		/* End Of Interrupt to APIC */

	movl	$0, %ebx
	movl	20(%esp), %eax
	andl	$3, %eax
	cmpl	$3, %eax
	je	1f
	testl	$PSL_VM, 24(%esp)
	jne	1f
	incl	%ebx			/* system or interrupt */
1:
	movl	_cpuid, %eax
	movl	%ebx, _checkstate_cpustate(,%eax,4)
	movl	_curproc, %ebx
	movl	%ebx, _checkstate_curproc(,%eax,4)
	movl	16(%esp), %ebx
	movl	%ebx, _checkstate_pc(,%eax,4)

	lock				/* checkstate_probed_cpus |= (1<<id) */
	btsl	%eax, _checkstate_probed_cpus

	popl	%fs
	popl	%ds			/* restore previous data segment */
	popl	%ebx
	popl	%eax
	iret
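
/*
 * The state classification above, in rough C (a sketch; the frame
 * offsets follow the stack layout in the comment before _Xcpucheckstate):
 *
 *	state = 0;				// user
 *	if ((frame_cs & 3) != 3 && (frame_eflags & PSL_VM) == 0)
 *		state = 1;			// system (or interrupt)
 *	checkstate_cpustate[cpuid] = state;
 *	checkstate_curproc[cpuid] = curproc;
 *	checkstate_pc[cpuid] = frame_eip;
 *	checkstate_probed_cpus |= 1 << cpuid;	// signal receipt
 */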

#endif /* BETTER_CLOCK */

/*
 * Executed by a CPU when it receives an Xcpuast IPI from another CPU,
 *
 *  - Signals its receipt by clearing bit cpuid in checkstate_need_ast.
 *
 *  - We need a better method of triggering asts on other cpus.
 */

	.text
	SUPERALIGN_TEXT
	.globl _Xcpuast
_Xcpuast:
	PUSH_FRAME
	movl	$KDSEL, %eax
	mov	%ax, %ds		/* use KERNEL data segment */
	mov	%ax, %es
	movl	$KPSEL, %eax
	mov	%ax, %fs

	movl	_cpuid, %eax
	lock				/* checkstate_need_ast &= ~(1<<id) */
	btrl	%eax, _checkstate_need_ast
	movl	$0, lapic_eoi		/* End Of Interrupt to APIC */

	lock
	btsl	%eax, _checkstate_pending_ast
	jc	1f

	FAKE_MCOUNT(13*4(%esp))

	/*
	 * Giant locks do not come cheap.
	 * A lot of cycles are going to be wasted here.
	 */
	call	_get_mplock

	movl	_cpl, %eax
	pushl	%eax
	orl	$AST_PENDING, _astpending	/* XXX */
	incb	_intr_nesting_level
	sti

	pushl	$0

	movl	_cpuid, %eax
	lock
	btrl	%eax, _checkstate_pending_ast
	lock
	btrl	%eax, CNAME(resched_cpus)
	jnc	2f
	orl	$AST_PENDING+AST_RESCHED,_astpending
	lock
	incl	CNAME(want_resched_cnt)
2:
	lock
	incl	CNAME(cpuast_cnt)
	MEXITCOUNT
	jmp	_doreti
1:
	/* We are already in the process of delivering an ast for this CPU */
	POP_FRAME
	iret


/*
 * Executed by a CPU when it receives an XFORWARD_IRQ IPI.
 */

	.text
	SUPERALIGN_TEXT
	.globl _Xforward_irq
_Xforward_irq:
	PUSH_FRAME
	movl	$KDSEL, %eax
	mov	%ax, %ds		/* use KERNEL data segment */
	mov	%ax, %es
	movl	$KPSEL, %eax
	mov	%ax, %fs

	movl	$0, lapic_eoi		/* End Of Interrupt to APIC */

	FAKE_MCOUNT(13*4(%esp))

	MP_TRYLOCK
	testl	%eax,%eax		/* Did we get the lock ? */
	jz	1f			/* No */

	lock
	incl	CNAME(forward_irq_hitcnt)
	cmpb	$4, _intr_nesting_level
	jae	2f

	movl	_cpl, %eax
	pushl	%eax
	incb	_intr_nesting_level
	sti

	pushl	$0

	MEXITCOUNT
	jmp	_doreti			/* Handle forwarded interrupt */
1:
	lock
	incl	CNAME(forward_irq_misscnt)
	call	forward_irq	/* Oops, we've lost the isr lock */
	MEXITCOUNT
	POP_FRAME
	iret
2:
	lock
	incl	CNAME(forward_irq_toodeepcnt)
3:
	MP_RELLOCK
	MEXITCOUNT
	POP_FRAME
	iret

/*
 * Forward a hardware interrupt to the CPU holding the ISR (MP) lock by
 * sending it an XFORWARD_IRQ IPI.
 */
forward_irq:
	MCOUNT
	cmpl	$0,_invltlb_ok
	jz	4f

	cmpl	$0, CNAME(forward_irq_enabled)
	jz	4f

	movl	_mp_lock,%eax
	cmpl	$FREE_LOCK,%eax
	jne	1f
	movl	$0, %eax		/* Pick CPU #0 if no one has lock */
1:
	shrl	$24,%eax
	movl	_cpu_num_to_apic_id(,%eax,4),%ecx
	shll	$24,%ecx
	movl	lapic_icr_hi, %eax
	andl	$~APIC_ID_MASK, %eax
	orl	%ecx, %eax
	movl	%eax, lapic_icr_hi

2:
	movl	lapic_icr_lo, %eax
	andl	$APIC_DELSTAT_MASK,%eax
	jnz	2b
	movl	lapic_icr_lo, %eax
	andl	$APIC_RESV2_MASK, %eax
	orl	$(APIC_DEST_DESTFLD|APIC_DELMODE_FIXED|XFORWARD_IRQ_OFFSET), %eax
	movl	%eax, lapic_icr_lo
3:
	movl	lapic_icr_lo, %eax
	andl	$APIC_DELSTAT_MASK,%eax
	jnz	3b
4:
	ret
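
/*
 * The ICR dance above, sketched in C (a sketch only; register and
 * constant names are the lapic definitions used in this file):
 *
 *	cpu = (mp_lock == FREE_LOCK) ? 0 : mp_lock >> 24;  // lock holder
 *	lapic_icr_hi = (lapic_icr_hi & ~APIC_ID_MASK) |
 *	    (cpu_num_to_apic_id[cpu] << 24);
 *	while (lapic_icr_lo & APIC_DELSTAT_MASK)	// wait until idle
 *		;
 *	lapic_icr_lo = (lapic_icr_lo & APIC_RESV2_MASK) |
 *	    APIC_DEST_DESTFLD | APIC_DELMODE_FIXED | XFORWARD_IRQ_OFFSET;
 *	while (lapic_icr_lo & APIC_DELSTAT_MASK)	// wait for delivery
 *		;
 */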

/*
 * Executed by a CPU when it receives an Xcpustop IPI from another CPU,
 *
 *  - Signals its receipt.
 *  - Waits for permission to restart.
 *  - Signals its restart.
 */

	.text
	SUPERALIGN_TEXT
	.globl _Xcpustop
_Xcpustop:
	pushl	%ebp
	movl	%esp, %ebp
	pushl	%eax
	pushl	%ecx
	pushl	%edx
	pushl	%ds			/* save current data segment */
	pushl	%fs

	movl	$KDSEL, %eax
	mov	%ax, %ds		/* use KERNEL data segment */
	movl	$KPSEL, %eax
	mov	%ax, %fs

	movl	$0, lapic_eoi		/* End Of Interrupt to APIC */

	movl	_cpuid, %eax
	imull	$PCB_SIZE, %eax
	leal	CNAME(stoppcbs)(%eax), %eax
	pushl	%eax
	call	CNAME(savectx)		/* Save process context */
	addl	$4, %esp

	movl	_cpuid, %eax

	lock
	btsl	%eax, _stopped_cpus	/* stopped_cpus |= (1<<id) */
1:
	btl	%eax, _started_cpus	/* while (!(started_cpus & (1<<id))) */
	jnc	1b

	lock
	btrl	%eax, _started_cpus	/* started_cpus &= ~(1<<id) */
	lock
	btrl	%eax, _stopped_cpus	/* stopped_cpus &= ~(1<<id) */

	test	%eax, %eax
	jnz	2f

	movl	CNAME(cpustop_restartfunc), %eax
	test	%eax, %eax
	jz	2f
	movl	$0, CNAME(cpustop_restartfunc)	/* One-shot */

	call	*%eax
2:
	popl	%fs
	popl	%ds			/* restore previous data segment */
	popl	%edx
	popl	%ecx
	popl	%eax
	movl	%ebp, %esp
	popl	%ebp
	iret
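
/*
 * The stop/restart handshake, in rough C (a sketch; savectx() has
 * already saved this cpu's context into stoppcbs[cpuid]):
 *
 *	stopped_cpus |= 1 << cpuid;		// signal receipt
 *	while ((started_cpus & (1 << cpuid)) == 0)
 *		;				// wait for permission
 *	started_cpus &= ~(1 << cpuid);
 *	stopped_cpus &= ~(1 << cpuid);		// signal restart
 *	if (cpuid == 0 && cpustop_restartfunc != NULL) {
 *		f = cpustop_restartfunc;
 *		cpustop_restartfunc = NULL;	// one-shot
 *		f();
 *	}
 */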


MCOUNT_LABEL(bintr)
	FAST_INTR(0,fastintr0)
	FAST_INTR(1,fastintr1)
	FAST_INTR(2,fastintr2)
	FAST_INTR(3,fastintr3)
	FAST_INTR(4,fastintr4)
	FAST_INTR(5,fastintr5)
	FAST_INTR(6,fastintr6)
	FAST_INTR(7,fastintr7)
	FAST_INTR(8,fastintr8)
	FAST_INTR(9,fastintr9)
	FAST_INTR(10,fastintr10)
	FAST_INTR(11,fastintr11)
	FAST_INTR(12,fastintr12)
	FAST_INTR(13,fastintr13)
	FAST_INTR(14,fastintr14)
	FAST_INTR(15,fastintr15)
	FAST_INTR(16,fastintr16)
	FAST_INTR(17,fastintr17)
	FAST_INTR(18,fastintr18)
	FAST_INTR(19,fastintr19)
	FAST_INTR(20,fastintr20)
	FAST_INTR(21,fastintr21)
	FAST_INTR(22,fastintr22)
	FAST_INTR(23,fastintr23)
#define	CLKINTR_PENDING	movl $1,CNAME(clkintr_pending)
	INTR(0,intr0, CLKINTR_PENDING)
	INTR(1,intr1,)
	INTR(2,intr2,)
	INTR(3,intr3,)
	INTR(4,intr4,)
	INTR(5,intr5,)
	INTR(6,intr6,)
	INTR(7,intr7,)
	INTR(8,intr8,)
	INTR(9,intr9,)
	INTR(10,intr10,)
	INTR(11,intr11,)
	INTR(12,intr12,)
	INTR(13,intr13,)
	INTR(14,intr14,)
	INTR(15,intr15,)
	INTR(16,intr16,)
	INTR(17,intr17,)
	INTR(18,intr18,)
	INTR(19,intr19,)
	INTR(20,intr20,)
	INTR(21,intr21,)
	INTR(22,intr22,)
	INTR(23,intr23,)
MCOUNT_LABEL(eintr)

/*
 * Executed by a CPU when it receives a RENDEZVOUS IPI from another CPU.
 *
 * - Calls the generic rendezvous action function.
 */
	.text
	SUPERALIGN_TEXT
	.globl	_Xrendezvous
_Xrendezvous:
	PUSH_FRAME
	movl	$KDSEL, %eax
	mov	%ax, %ds		/* use KERNEL data segment */
	mov	%ax, %es
	movl	$KPSEL, %eax
	mov	%ax, %fs

	call	_smp_rendezvous_action

	movl	$0, lapic_eoi		/* End Of Interrupt to APIC */
	POP_FRAME
	iret


	.data
/*
 * Addresses of interrupt handlers.
 *  XresumeNN: Resumption addresses for HWIs.
 */
	.globl _ihandlers
_ihandlers:
/*
 * used by:
 *  ipl.s:	doreti_unpend
 */
	.long	Xresume0,  Xresume1,  Xresume2,  Xresume3
	.long	Xresume4,  Xresume5,  Xresume6,  Xresume7
	.long	Xresume8,  Xresume9,  Xresume10, Xresume11
	.long	Xresume12, Xresume13, Xresume14, Xresume15
	.long	Xresume16, Xresume17, Xresume18, Xresume19
	.long	Xresume20, Xresume21, Xresume22, Xresume23
/*
 * used by:
 *  ipl.s:	doreti_unpend
 *  apic_ipl.s:	splz_unpend
 */
	.long	_swi_null, swi_net, _swi_null, _swi_null
	.long	_swi_vm, _swi_null, _softclock

imasks:				/* masks for interrupt handlers */
	.space	NHWI*4		/* padding; HWI masks are elsewhere */

	.long	SWI_TTY_MASK, SWI_NET_MASK, SWI_CAMNET_MASK, SWI_CAMBIO_MASK
	.long	SWI_VM_MASK, SWI_TQ_MASK, SWI_CLOCK_MASK

/* active flag for lazy masking */
iactive:
	.long	0

#ifdef COUNT_XINVLTLB_HITS
	.globl	_xhits
_xhits:
	.space	(NCPU * 4), 0
#endif /* COUNT_XINVLTLB_HITS */

/* variables used by stop_cpus()/restart_cpus()/Xcpustop */
	.globl _stopped_cpus, _started_cpus
_stopped_cpus:
	.long	0
_started_cpus:
	.long	0

#ifdef BETTER_CLOCK
	.globl _checkstate_probed_cpus
_checkstate_probed_cpus:
	.long	0
#endif /* BETTER_CLOCK */
	.globl _checkstate_need_ast
_checkstate_need_ast:
	.long	0
_checkstate_pending_ast:
	.long	0
	.globl CNAME(forward_irq_misscnt)
	.globl CNAME(forward_irq_toodeepcnt)
	.globl CNAME(forward_irq_hitcnt)
	.globl CNAME(resched_cpus)
	.globl CNAME(want_resched_cnt)
	.globl CNAME(cpuast_cnt)
	.globl CNAME(cpustop_restartfunc)
CNAME(forward_irq_misscnt):
	.long 0
CNAME(forward_irq_hitcnt):
	.long 0
CNAME(forward_irq_toodeepcnt):
	.long 0
CNAME(resched_cpus):
	.long 0
CNAME(want_resched_cnt):
	.long 0
CNAME(cpuast_cnt):
	.long 0
CNAME(cpustop_restartfunc):
	.long 0


	.globl	_apic_pin_trigger
_apic_pin_trigger:
	.long	0

	.text
806