/*
 *	from: vector.s, 386BSD 0.1 unknown origin
 * $FreeBSD: head/sys/i386/i386/apic_vector.s 61339 2000-06-06 15:28:00Z dillon $
 */


#include <machine/apic.h>
#include <machine/smp.h>

#include "i386/isa/intr_machdep.h"

/* convert an absolute IRQ# into a bitmask */
#define IRQ_BIT(irq_num)	(1 << (irq_num))

/* make an index into the IO APIC from the IRQ# (redirection table entries
 * start at register 0x10 and occupy two 32-bit registers each) */
#define REDTBL_IDX(irq_num)	(0x10 + ((irq_num) * 2))

/*
 * Macros for interrupt entry, call to handler, and exit.
 *
 * FAST_INTR: "fast" interrupt vector.  Saves only the call-used
 * registers, calls the handler with interrupts still disabled, EOIs the
 * local APIC, then does the bookkeeping.  No frame, no doreti.
 */

#define	FAST_INTR(irq_num, vec_name)					\
	.text ;								\
	SUPERALIGN_TEXT ;						\
IDTVEC(vec_name) ;							\
	pushl	%eax ;		/* save only call-used registers */	\
	pushl	%ecx ;							\
	pushl	%edx ;							\
	pushl	%ds ;							\
	MAYBE_PUSHL_ES ;						\
	pushl	%fs ;							\
	movl	$KDSEL,%eax ;						\
	mov	%ax,%ds ;						\
	MAYBE_MOVW_AX_ES ;						\
	movl	$KPSEL,%eax ;						\
	mov	%ax,%fs ;						\
	FAKE_MCOUNT((5+ACTUALLY_PUSHED)*4(%esp)) ;			\
	pushl	_intr_unit + (irq_num) * 4 ;				\
	call	*_intr_handler + (irq_num) * 4 ; /* do the work ASAP */ \
	addl	$4, %esp ;						\
	movl	$0, lapic_eoi ;						\
	lock ; 								\
	incl	_cnt+V_INTR ;	/* book-keeping can wait */		\
	movl	_intr_countp + (irq_num) * 4, %eax ;			\
	lock ; 								\
	incl	(%eax) ;						\
	MEXITCOUNT ;							\
	popl	%fs ;							\
	MAYBE_POPL_ES ;							\
	popl	%ds ;							\
	popl	%edx ;							\
	popl	%ecx ;							\
	popl	%eax ;							\
	iret

/*
 * Build/tear down a full trap frame: dummy error code and trap type,
 * all general registers (pushal), then the data/extra/fs segment
 * registers.  POP_FRAME is the exact inverse.
 */
#define PUSH_FRAME							\
	pushl	$0 ;		/* dummy error code */			\
	pushl	$0 ;		/* dummy trap type */			\
	pushal ;							\
	pushl	%ds ;		/* save data and extra segments ... */	\
	pushl	%es ;							\
	pushl	%fs

#define POP_FRAME							\
	popl	%fs ;							\
	popl	%es ;							\
	popl	%ds ;							\
	popal ;								\
	addl	$4+4,%esp	/* discard dummy trap type + error code */

/* per-pin IO APIC register address / redirection index, from the
 * int_to_apicintpin[] table (16-byte entries, fields at +8 and +12) */
#define IOAPICADDR(irq_num) CNAME(int_to_apicintpin) + 16 * (irq_num) + 8
#define REDIRIDX(irq_num) CNAME(int_to_apicintpin) + 16 * (irq_num) + 12

/*
 * Mask the IRQ at the IO APIC unless it is already masked: set its bit
 * in apic_imen and the INTMASK bit in the pin's redirection entry.
 * Serialized by IMASK_LOCK.
 */
#define MASK_IRQ(irq_num)						\
	IMASK_LOCK ;				/* into critical reg */	\
	testl	$IRQ_BIT(irq_num), _apic_imen ;				\
	jne	7f ;			/* masked, don't mask */	\
	orl	$IRQ_BIT(irq_num), _apic_imen ;	/* set the mask bit */	\
	movl	IOAPICADDR(irq_num), %ecx ;	/* ioapic addr */	\
	movl	REDIRIDX(irq_num), %eax ;	/* get the index */	\
	movl	%eax, (%ecx) ;			/* write the index */	\
	movl	IOAPIC_WINDOW(%ecx), %eax ;	/* current value */	\
	orl	$IOART_INTMASK, %eax ;		/* set the mask */	\
	movl	%eax, IOAPIC_WINDOW(%ecx) ;	/* new value */		\
7: ;						/* already masked */	\
	IMASK_UNLOCK
/*
 * Test to see whether we are handling an edge or level triggered INT.
 *  Level-triggered INTs must still be masked as we don't clear the source,
 *  and the EOI cycle would cause redundant INTs to occur.
 */
#define MASK_LEVEL_IRQ(irq_num)						\
	testl	$IRQ_BIT(irq_num), _apic_pin_trigger ;			\
	jz	9f ;				/* edge, don't mask */	\
	MASK_IRQ(irq_num) ;						\
9:

#ifdef APIC_INTR_REORDER
/*
 * EOI the local APIC only if this IRQ's in-service bit is actually set;
 * the (pointer, mask) pair for the bit was precomputed into
 * apic_isrbit_location[] (8 bytes per IRQ).
 */
#define EOI_IRQ(irq_num)						\
	movl	_apic_isrbit_location + 8 * (irq_num), %eax ;		\
	movl	(%eax), %eax ;						\
	testl	_apic_isrbit_location + 4 + 8 * (irq_num), %eax ;	\
	jz	9f ;				/* not active */	\
	movl	$0, lapic_eoi ;						\
	APIC_ITRACE(apic_itrace_eoi, irq_num, APIC_ITRACE_EOI) ;	\
9:

#else
/* Non-reordered variant: test the bit directly in lapic ISR word 1. */
#define EOI_IRQ(irq_num)						\
	testl	$IRQ_BIT(irq_num), lapic_isr1;				\
	jz	9f	;			/* not active */	\
	movl	$0, lapic_eoi;						\
	APIC_ITRACE(apic_itrace_eoi, irq_num, APIC_ITRACE_EOI) ;	\
9:
#endif


/*
 * Test to see if the source is currently masked, clear if so:
 * clear the bit in apic_imen and the INTMASK bit in the pin's
 * redirection entry.  Serialized by IMASK_LOCK.
 */
#define UNMASK_IRQ(irq_num)					\
	IMASK_LOCK ;				/* into critical reg */	\
	testl	$IRQ_BIT(irq_num), _apic_imen ;				\
	je	7f ;			/* bit clear, not masked */	\
	andl	$~IRQ_BIT(irq_num), _apic_imen ;/* clear mask bit */	\
	movl	IOAPICADDR(irq_num),%ecx ;	/* ioapic addr */	\
	movl	REDIRIDX(irq_num), %eax ;	/* get the index */	\
	movl	%eax,(%ecx) ;			/* write the index */	\
	movl	IOAPIC_WINDOW(%ecx),%eax ;	/* current value */	\
	andl	$~IOART_INTMASK,%eax ;		/* clear the mask */	\
	movl	%eax,IOAPIC_WINDOW(%ecx) ;	/* new value */		\
7: ;									\
	IMASK_UNLOCK

#ifdef APIC_INTR_DIAGNOSTIC
#ifdef APIC_INTR_DIAGNOSTIC_IRQ
/*
 * Append a (cpuid << 8 | event id) record to the 32K-entry circular
 * trace buffer, serialized by apic_itrace_debuglock.  Event id is
 * passed on the stack; the interrupt-enable flag is preserved.
 */
log_intr_event:
	pushf
	cli
	pushl	$CNAME(apic_itrace_debuglock)
	call	CNAME(s_lock_np)
	addl	$4, %esp
	movl	CNAME(apic_itrace_debugbuffer_idx), %ecx
	andl	$32767, %ecx
	movl	_cpuid, %eax
	shll	$8,	%eax
	orl	8(%esp), %eax
	movw	%ax,	CNAME(apic_itrace_debugbuffer)(,%ecx,2)
	incl	%ecx
	andl	$32767, %ecx
	movl	%ecx,	CNAME(apic_itrace_debugbuffer_idx)
	pushl	$CNAME(apic_itrace_debuglock)
	call	CNAME(s_unlock_np)
	addl	$4, %esp
	popf
	ret


/* Count the event; additionally log it if this is the IRQ under study. */
#define APIC_ITRACE(name, irq_num, id)					\
	lock ;					/* MP-safe */		\
	incl	CNAME(name) + (irq_num) * 4 ;				\
	pushl	%eax ;							\
	pushl	%ecx ;							\
	pushl	%edx ;							\
	movl	$(irq_num), %eax ;					\
	cmpl	$APIC_INTR_DIAGNOSTIC_IRQ, %eax ;			\
	jne	7f ;							\
	pushl	$id ;							\
	call	log_intr_event ;					\
	addl	$4, %esp ;						\
7: ;									\
	popl	%edx ;							\
	popl	%ecx ;							\
	popl	%eax
#else
#define APIC_ITRACE(name, irq_num, id)					\
	lock ;					/* MP-safe */		\
	incl	CNAME(name) + (irq_num) * 4
#endif

/* event ids recorded by log_intr_event */
#define APIC_ITRACE_ENTER 1
#define APIC_ITRACE_EOI 2
#define APIC_ITRACE_TRYISRLOCK 3
#define APIC_ITRACE_GOTISRLOCK 4
#define APIC_ITRACE_ENTER2 5
#define APIC_ITRACE_LEAVE 6
#define APIC_ITRACE_UNMASK 7
#define APIC_ITRACE_ACTIVE 8
#define APIC_ITRACE_MASKED 9
#define APIC_ITRACE_NOISRLOCK 10
#define APIC_ITRACE_MASKED2 11
#define APIC_ITRACE_SPLZ 12
#define APIC_ITRACE_DORETI 13

#else
#define APIC_ITRACE(name, irq_num, id)
#endif

/*
 * General IO APIC hardware interrupt entry.  Implements lazy masking
 * via the iactive bitmap, cpl-based masking, and forwarding of the
 * interrupt to the CPU holding the giant ISR lock when we cannot take
 * it ourselves.
 */
#define	INTR(irq_num, vec_name, maybe_extra_ipending)			\
	.text ;								\
	SUPERALIGN_TEXT ;						\
/* _XintrNN: entry point used by IDT/HWIs & splz_unpend via _vec[]. */	\
IDTVEC(vec_name) ;							\
	PUSH_FRAME ;							\
	movl	$KDSEL, %eax ;	/* reload with kernel's data segment */	\
	mov	%ax, %ds ;						\
	mov	%ax, %es ;						\
	movl	$KPSEL, %eax ;						\
	mov	%ax, %fs ;						\
;									\
	maybe_extra_ipending ;						\
;									\
	APIC_ITRACE(apic_itrace_enter, irq_num, APIC_ITRACE_ENTER) ;	\
	lock ;					/* MP-safe */		\
	btsl	$(irq_num), iactive ;		/* lazy masking */	\
	jc	1f ;				/* already active */	\
;									\
	MASK_LEVEL_IRQ(irq_num) ;					\
	EOI_IRQ(irq_num) ;						\
0: ;									\
	APIC_ITRACE(apic_itrace_tryisrlock, irq_num, APIC_ITRACE_TRYISRLOCK) ;\
	MP_TRYLOCK ;		/* XXX this is going away... */		\
	testl	%eax, %eax ;			/* did we get it? */	\
	jz	3f ;				/* no */		\
;									\
	APIC_ITRACE(apic_itrace_gotisrlock, irq_num, APIC_ITRACE_GOTISRLOCK) ;\
	testl	$IRQ_BIT(irq_num), _cpl ;				\
	jne	2f ;				/* this INT masked */	\
;									\
	incb	_intr_nesting_level ;					\
;	 								\
  /* entry point used by doreti_unpend for HWIs. */			\
__CONCAT(Xresume,irq_num): ;						\
	FAKE_MCOUNT(13*4(%esp)) ;		/* XXX avoid dbl cnt */ \
	lock ;	incl	_cnt+V_INTR ;		/* tally interrupts */	\
	movl	_intr_countp + (irq_num) * 4, %eax ;			\
	lock ;	incl	(%eax) ;					\
;									\
	movl	_cpl, %eax ;						\
	pushl	%eax ;							\
	orl	_intr_mask + (irq_num) * 4, %eax ;			\
	movl	%eax, _cpl ;						\
	lock ;								\
	andl	$~IRQ_BIT(irq_num), _ipending ;				\
;									\
	pushl	_intr_unit + (irq_num) * 4 ;				\
	APIC_ITRACE(apic_itrace_enter2, irq_num, APIC_ITRACE_ENTER2) ;	\
	sti ;								\
	call	*_intr_handler + (irq_num) * 4 ;			\
	cli ;								\
	APIC_ITRACE(apic_itrace_leave, irq_num, APIC_ITRACE_LEAVE) ;	\
;									\
	lock ;	andl	$~IRQ_BIT(irq_num), iactive ;			\
	UNMASK_IRQ(irq_num) ;						\
	APIC_ITRACE(apic_itrace_unmask, irq_num, APIC_ITRACE_UNMASK) ;	\
	sti ;				/* doreti repeats cli/sti */	\
	MEXITCOUNT ;							\
	jmp	_doreti ;						\
;									\
	ALIGN_TEXT ;							\
1: ;						/* active  */		\
	APIC_ITRACE(apic_itrace_active, irq_num, APIC_ITRACE_ACTIVE) ;	\
	MASK_IRQ(irq_num) ;						\
	EOI_IRQ(irq_num) ;						\
	lock ;								\
	orl	$IRQ_BIT(irq_num), _ipending ;				\
	lock ;								\
	btsl	$(irq_num), iactive ;		/* still active */	\
	jnc	0b ;				/* retry */		\
	POP_FRAME ;							\
	iret ;		/* XXX:	 iactive bit might be 0 now */		\
	ALIGN_TEXT ;							\
2: ;				/* masked by cpl, leave iactive set */	\
	APIC_ITRACE(apic_itrace_masked, irq_num, APIC_ITRACE_MASKED) ;	\
	lock ;								\
	orl	$IRQ_BIT(irq_num), _ipending ;				\
	MP_RELLOCK ;							\
	POP_FRAME ;							\
	iret ;								\
	ALIGN_TEXT ;							\
3: ; 			/* other cpu has isr lock */			\
	APIC_ITRACE(apic_itrace_noisrlock, irq_num, APIC_ITRACE_NOISRLOCK) ;\
	lock ;								\
	orl	$IRQ_BIT(irq_num), _ipending ;				\
	testl	$IRQ_BIT(irq_num), _cpl ;				\
	jne	4f ;				/* this INT masked */	\
	call	forward_irq ;	 /* forward irq to lock holder */	\
	POP_FRAME ;	 			/* and return */	\
	iret ;								\
	ALIGN_TEXT ;							\
4: ;	 					/* blocked */		\
	APIC_ITRACE(apic_itrace_masked2, irq_num, APIC_ITRACE_MASKED2) ;\
	POP_FRAME ;	 			/* and return */	\
	iret

/*
 * Handle "spurious INTerrupts".
 * Notes:
 *  This is different than the "spurious INTerrupt" generated by an
 *   8259 PIC for missing INTs.  See the APIC documentation for details.
 *  This routine should NOT do an 'EOI' cycle.
 */
	.text
	SUPERALIGN_TEXT
	.globl _Xspuriousint
_Xspuriousint:

	/* No EOI cycle used here */

	iret


/*
 * Handle TLB shootdowns: reload %cr3 to flush the TLB, then EOI.
 */
	.text
	SUPERALIGN_TEXT
	.globl	_Xinvltlb
_Xinvltlb:
	pushl	%eax

#ifdef COUNT_XINVLTLB_HITS
	pushl	%fs
	movl	$KPSEL, %eax
	mov	%ax, %fs
	movl	_cpuid, %eax
	popl	%fs
	ss
	incl	_xhits(,%eax,4)
#endif /* COUNT_XINVLTLB_HITS */

	movl	%cr3, %eax		/* invalidate the TLB */
	movl	%eax, %cr3

	ss				/* stack segment, avoid %ds load */
	movl	$0, lapic_eoi		/* End Of Interrupt to APIC */

	popl	%eax
	iret


#ifdef BETTER_CLOCK

/*
 * Executed by a CPU when it receives an Xcpucheckstate IPI from another CPU,
 *
 *  - Stores current cpu state in checkstate_cpustate[cpuid]
 *      0 == user, 1 == sys, 2 == intr
 *  - Stores current process in checkstate_curproc[cpuid]
 *
 *  - Signals its receipt by setting bit cpuid in checkstate_probed_cpus.
 *
 * stack: 0->ds, 4->fs, 8->ebx, 12->eax, 16->eip, 20->cs, 24->eflags
 */

	.text
	SUPERALIGN_TEXT
	.globl _Xcpucheckstate
	.globl _checkstate_cpustate
	.globl _checkstate_curproc
	.globl _checkstate_pc
_Xcpucheckstate:
	pushl	%eax
	pushl	%ebx
	pushl	%ds			/* save current data segment */
	pushl	%fs

	movl	$KDSEL, %eax
	mov	%ax, %ds		/* use KERNEL data segment */
	movl	$KPSEL, %eax
	mov	%ax, %fs

	movl	$0, lapic_eoi		/* End Of Interrupt to APIC */

	movl	$0, %ebx
	movl	20(%esp), %eax		/* interrupted %cs */
	andl	$3, %eax		/* RPL: 3 == user mode */
	cmpl	$3, %eax
	je	1f
	testl	$PSL_VM, 24(%esp)	/* vm86 counts as user too */
	jne	1f
	incl	%ebx			/* system or interrupt */
1:
	movl	_cpuid, %eax
	movl	%ebx, _checkstate_cpustate(,%eax,4)
	movl	_curproc, %ebx
	movl	%ebx, _checkstate_curproc(,%eax,4)
	movl	16(%esp), %ebx		/* interrupted %eip */
	movl	%ebx, _checkstate_pc(,%eax,4)

	lock				/* checkstate_probed_cpus |= (1<<id) */
	btsl	%eax, _checkstate_probed_cpus

	popl	%fs
	popl	%ds			/* restore previous data segment */
	popl	%ebx
	popl	%eax
	iret

#endif /* BETTER_CLOCK */

/*
 * Executed by a CPU when it receives an Xcpuast IPI from another CPU,
 *
 *  - Signals its receipt by clearing bit cpuid in checkstate_need_ast.
 *
 *  - We need a better method of triggering asts on other cpus.
 */

	.text
	SUPERALIGN_TEXT
	.globl _Xcpuast
_Xcpuast:
	PUSH_FRAME
	movl	$KDSEL, %eax
	mov	%ax, %ds		/* use KERNEL data segment */
	mov	%ax, %es
	movl	$KPSEL, %eax
	mov	%ax, %fs

	movl	_cpuid, %eax
	lock				/* checkstate_need_ast &= ~(1<<id) */
	btrl	%eax, _checkstate_need_ast
	movl	$0, lapic_eoi		/* End Of Interrupt to APIC */

	lock
	btsl	%eax, _checkstate_pending_ast
	jc	1f			/* AST delivery already in progress */

	FAKE_MCOUNT(13*4(%esp))

	/*
	 * Giant locks do not come cheap.
	 * A lot of cycles are going to be wasted here.
	 */
	call	_get_mplock

	movl	_cpl, %eax
	pushl	%eax
	orl	$AST_PENDING, _astpending	/* XXX */
	incb	_intr_nesting_level
	sti

	pushl	$0

	movl	_cpuid, %eax
	lock
	btrl	%eax, _checkstate_pending_ast
	lock
	btrl	%eax, CNAME(resched_cpus)
	jnc	2f
	orl	$AST_PENDING+AST_RESCHED,_astpending
	lock
	incl	CNAME(want_resched_cnt)
2:
	lock
	incl	CNAME(cpuast_cnt)
	MEXITCOUNT
	jmp	_doreti
1:
	/* We are already in the process of delivering an ast for this CPU */
	POP_FRAME
	iret


/*
 *	 Executed by a CPU when it receives an XFORWARD_IRQ IPI.
 */

	.text
	SUPERALIGN_TEXT
	.globl _Xforward_irq
_Xforward_irq:
	PUSH_FRAME
	movl	$KDSEL, %eax
	mov	%ax, %ds		/* use KERNEL data segment */
	mov	%ax, %es
	movl	$KPSEL, %eax
	mov	%ax, %fs

	movl	$0, lapic_eoi		/* End Of Interrupt to APIC */

	FAKE_MCOUNT(13*4(%esp))

	MP_TRYLOCK
	testl	%eax,%eax		/* Did we get the lock ? */
	jz  1f				/* No */

	lock
	incl	CNAME(forward_irq_hitcnt)
	cmpb	$4, _intr_nesting_level
	jae	2f			/* interrupts too deeply nested */

	movl	_cpl, %eax
	pushl	%eax
	incb	_intr_nesting_level
	sti

	pushl	$0

	MEXITCOUNT
	jmp	_doreti			/* Handle forwarded interrupt */
1:
	lock
	incl	CNAME(forward_irq_misscnt)
	call	forward_irq	/* Oops, we've lost the isr lock */
	MEXITCOUNT
	POP_FRAME
	iret
2:
	lock
	incl	CNAME(forward_irq_toodeepcnt)
3:
	MP_RELLOCK
	MEXITCOUNT
	POP_FRAME
	iret

/*
 * Forward a hardware interrupt to the CPU holding the giant MP lock
 * (CPU #0 if the lock is free) by sending it an XFORWARD_IRQ IPI,
 * then spin until the local APIC reports the IPI was delivered.
 */
forward_irq:
	MCOUNT
	cmpl	$0,_invltlb_ok
	jz	4f

	cmpl	$0, CNAME(forward_irq_enabled)
	jz	4f

	movl	_mp_lock,%eax		/* top byte is holder's cpu id */
	cmpl	$FREE_LOCK,%eax
	jne	1f
	movl	$0, %eax		/* Pick CPU #0 if no one has lock */
1:
	shrl	$24,%eax
	movl	_cpu_num_to_apic_id(,%eax,4),%ecx
	shll	$24,%ecx
	movl	lapic_icr_hi, %eax
	andl	$~APIC_ID_MASK, %eax
	orl	%ecx, %eax
	movl	%eax, lapic_icr_hi

2:
	movl	lapic_icr_lo, %eax	/* wait for any prior IPI to go */
	andl	$APIC_DELSTAT_MASK,%eax
	jnz	2b
	movl	lapic_icr_lo, %eax
	andl	$APIC_RESV2_MASK, %eax
	orl	$(APIC_DEST_DESTFLD|APIC_DELMODE_FIXED|XFORWARD_IRQ_OFFSET), %eax
	movl	%eax, lapic_icr_lo
3:
	movl	lapic_icr_lo, %eax	/* wait for this IPI to be sent */
	andl	$APIC_DELSTAT_MASK,%eax
	jnz	3b
4:
	ret

/*
 * Executed by a CPU when it receives an Xcpustop IPI from another CPU,
 *
 *  - Signals its receipt.
 *  - Waits for permission to restart.
 *  - Signals its restart.
 */

	.text
	SUPERALIGN_TEXT
	.globl _Xcpustop
_Xcpustop:
	pushl	%ebp
	movl	%esp, %ebp
	pushl	%eax
	pushl	%ecx
	pushl	%edx
	pushl	%ds			/* save current data segment */
	pushl	%fs

	movl	$KDSEL, %eax
	mov	%ax, %ds		/* use KERNEL data segment */
	movl	$KPSEL, %eax
	mov	%ax, %fs

	movl	$0, lapic_eoi		/* End Of Interrupt to APIC */

	movl	_cpuid, %eax
	imull	$PCB_SIZE, %eax
	leal	CNAME(stoppcbs)(%eax), %eax
	pushl	%eax
	call	CNAME(savectx)		/* Save process context */
	addl	$4, %esp


	movl	_cpuid, %eax

	lock
	btsl	%eax, _stopped_cpus	/* stopped_cpus |= (1<<id) */
1:
	btl	%eax, _started_cpus	/* while (!(started_cpus & (1<<id))) */
	jnc	1b

	lock
	btrl	%eax, _started_cpus	/* started_cpus &= ~(1<<id) */
	lock
	btrl	%eax, _stopped_cpus	/* stopped_cpus &= ~(1<<id) */

	test	%eax, %eax		/* only the BSP runs restartfunc */
	jnz	2f

	movl	CNAME(cpustop_restartfunc), %eax
	test	%eax, %eax
	jz	2f
	movl	$0, CNAME(cpustop_restartfunc)	/* One-shot */

	call	*%eax
2:
	popl	%fs
	popl	%ds			/* restore previous data segment */
	popl	%edx
	popl	%ecx
	popl	%eax
	movl	%ebp, %esp
	popl	%ebp
	iret

630
631MCOUNT_LABEL(bintr)
632	FAST_INTR(0,fastintr0)
633	FAST_INTR(1,fastintr1)
634	FAST_INTR(2,fastintr2)
635	FAST_INTR(3,fastintr3)
636	FAST_INTR(4,fastintr4)
637	FAST_INTR(5,fastintr5)
638	FAST_INTR(6,fastintr6)
639	FAST_INTR(7,fastintr7)
640	FAST_INTR(8,fastintr8)
641	FAST_INTR(9,fastintr9)
642	FAST_INTR(10,fastintr10)
643	FAST_INTR(11,fastintr11)
644	FAST_INTR(12,fastintr12)
645	FAST_INTR(13,fastintr13)
646	FAST_INTR(14,fastintr14)
647	FAST_INTR(15,fastintr15)
648	FAST_INTR(16,fastintr16)
649	FAST_INTR(17,fastintr17)
650	FAST_INTR(18,fastintr18)
651	FAST_INTR(19,fastintr19)
652	FAST_INTR(20,fastintr20)
653	FAST_INTR(21,fastintr21)
654	FAST_INTR(22,fastintr22)
655	FAST_INTR(23,fastintr23)
656#define	CLKINTR_PENDING	movl $1,CNAME(clkintr_pending)
657	INTR(0,intr0, CLKINTR_PENDING)
658	INTR(1,intr1,)
659	INTR(2,intr2,)
660	INTR(3,intr3,)
661	INTR(4,intr4,)
662	INTR(5,intr5,)
663	INTR(6,intr6,)
664	INTR(7,intr7,)
665	INTR(8,intr8,)
666	INTR(9,intr9,)
667	INTR(10,intr10,)
668	INTR(11,intr11,)
669	INTR(12,intr12,)
670	INTR(13,intr13,)
671	INTR(14,intr14,)
672	INTR(15,intr15,)
673	INTR(16,intr16,)
674	INTR(17,intr17,)
675	INTR(18,intr18,)
676	INTR(19,intr19,)
677	INTR(20,intr20,)
678	INTR(21,intr21,)
679	INTR(22,intr22,)
680	INTR(23,intr23,)
681MCOUNT_LABEL(eintr)

/*
 * Executed by a CPU when it receives a RENDEZVOUS IPI from another CPU.
 *
 * - Calls the generic rendezvous action function.
 */
	.text
	SUPERALIGN_TEXT
	.globl	_Xrendezvous
_Xrendezvous:
	PUSH_FRAME
	movl	$KDSEL, %eax
	mov	%ax, %ds		/* use KERNEL data segment */
	mov	%ax, %es
	movl	$KPSEL, %eax
	mov	%ax, %fs

	call	_smp_rendezvous_action

	movl	$0, lapic_eoi		/* End Of Interrupt to APIC */
	POP_FRAME
	iret

705
706	.data
707/*
708 * Addresses of interrupt handlers.
709 *  XresumeNN: Resumption addresses for HWIs.
710 */
711	.globl _ihandlers
712_ihandlers:
713/*
714 * used by:
715 *  ipl.s:	doreti_unpend
716 */
717	.long	Xresume0,  Xresume1,  Xresume2,  Xresume3
718	.long	Xresume4,  Xresume5,  Xresume6,  Xresume7
719	.long	Xresume8,  Xresume9,  Xresume10, Xresume11
720	.long	Xresume12, Xresume13, Xresume14, Xresume15
721	.long	Xresume16, Xresume17, Xresume18, Xresume19
722	.long	Xresume20, Xresume21, Xresume22, Xresume23
723/*
724 * used by:
725 *  ipl.s:	doreti_unpend
726 *  apic_ipl.s:	splz_unpend
727 */
728	.long	_swi_null, swi_net, _swi_null, _swi_null
729	.long	_swi_vm, _swi_null, _softclock
730
731imasks:				/* masks for interrupt handlers */
732	.space	NHWI*4		/* padding; HWI masks are elsewhere */
733
734	.long	SWI_TTY_MASK, SWI_NET_MASK, SWI_CAMNET_MASK, SWI_CAMBIO_MASK
735	.long	SWI_VM_MASK, SWI_TQ_MASK, SWI_CLOCK_MASK
736
737/* active flag for lazy masking */
738iactive:
739	.long	0
740
741#ifdef COUNT_XINVLTLB_HITS
742	.globl	_xhits
743_xhits:
744	.space	(NCPU * 4), 0
745#endif /* COUNT_XINVLTLB_HITS */
746
747/* variables used by stop_cpus()/restart_cpus()/Xcpustop */
748	.globl _stopped_cpus, _started_cpus
749_stopped_cpus:
750	.long	0
751_started_cpus:
752	.long	0
753
754#ifdef BETTER_CLOCK
755	.globl _checkstate_probed_cpus
756_checkstate_probed_cpus:
757	.long	0
758#endif /* BETTER_CLOCK */
759	.globl _checkstate_need_ast
760_checkstate_need_ast:
761	.long	0
762_checkstate_pending_ast:
763	.long	0
764	.globl CNAME(forward_irq_misscnt)
765	.globl CNAME(forward_irq_toodeepcnt)
766	.globl CNAME(forward_irq_hitcnt)
767	.globl CNAME(resched_cpus)
768	.globl CNAME(want_resched_cnt)
769	.globl CNAME(cpuast_cnt)
770	.globl CNAME(cpustop_restartfunc)
771CNAME(forward_irq_misscnt):
772	.long 0
773CNAME(forward_irq_hitcnt):
774	.long 0
775CNAME(forward_irq_toodeepcnt):
776	.long 0
777CNAME(resched_cpus):
778	.long 0
779CNAME(want_resched_cnt):
780	.long 0
781CNAME(cpuast_cnt):
782	.long 0
783CNAME(cpustop_restartfunc):
784	.long 0
785
786
787
788	.globl	_apic_pin_trigger
789_apic_pin_trigger:
790	.long	0
791
792	.text
793