/*
 *	from: vector.s, 386BSD 0.1 unknown origin
 *	$Id: apic_vector.s,v 1.39 1999/06/01 18:20:11 jlemon Exp $
 */


#include <machine/apic.h>
#include <machine/smp.h>

#include "i386/isa/intr_machdep.h"

#ifdef FAST_SIMPLELOCK

#define GET_FAST_INTR_LOCK						\
	pushl	$_fast_intr_lock ;		/* address of lock */	\
	call	_s_lock ;			/* MP-safe */		\
	addl	$4,%esp

#define REL_FAST_INTR_LOCK						\
	pushl	$_fast_intr_lock ;		/* address of lock */	\
	call	_s_unlock ;			/* MP-safe */		\
	addl	$4,%esp

#else /* FAST_SIMPLELOCK */

#define GET_FAST_INTR_LOCK						\
	call	_get_isrlock

#define REL_FAST_INTR_LOCK						\
	pushl	$_mp_lock ;	/* GIANT_LOCK */			\
	call	_MPrellock ;						\
	addl	$4, %esp

#endif /* FAST_SIMPLELOCK */
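
/*
 * Locking for the fast-interrupt stubs: with FAST_SIMPLELOCK a private
 * spin lock (_fast_intr_lock) serializes the fast handlers; otherwise
 * the stubs take and release the giant MP lock (_mp_lock) through
 * _get_isrlock/_MPrellock.
 */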

/* convert an absolute IRQ# into a bitmask */
#define IRQ_BIT(irq_num)	(1 << (irq_num))

/* make an index into the IO APIC from the IRQ# */
#define REDTBL_IDX(irq_num)	(0x10 + ((irq_num) * 2))
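
/*
 * Each redirection table entry is a pair of 32-bit registers and the
 * table starts at I/O APIC register 0x10, so, for example, for IRQ 2:
 *	IRQ_BIT(2)    == 0x00000004
 *	REDTBL_IDX(2) == 0x10 + 2 * 2 == 0x14	(low dword of the entry)
 */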


/*
 * Macros for interrupt entry, call to handler, and exit.
 */

#ifdef FAST_WITHOUTCPL

/*
 * Fast interrupt entry: save only the call-used registers, call the
 * handler with the fast-interrupt lock held, EOI the local APIC, count
 * the interrupt and return.  No cpl/ipending processing in this variant.
 */
#define	FAST_INTR(irq_num, vec_name)					\
	.text ;								\
	SUPERALIGN_TEXT ;						\
IDTVEC(vec_name) ;							\
	pushl	%eax ;		/* save only call-used registers */	\
	pushl	%ecx ;							\
	pushl	%edx ;							\
	pushl	%ds ;							\
	MAYBE_PUSHL_ES ;						\
	pushl	%fs ;							\
	movl	$KDSEL,%eax ;						\
	movl	%ax,%ds ;						\
	MAYBE_MOVW_AX_ES ;						\
	movl	$KPSEL,%eax ;						\
	movl	%ax,%fs ;						\
	FAKE_MCOUNT((5+ACTUALLY_PUSHED)*4(%esp)) ;			\
	pushl	_intr_unit + (irq_num) * 4 ;				\
	GET_FAST_INTR_LOCK ;						\
	call	*_intr_handler + (irq_num) * 4 ; /* do the work ASAP */ \
	REL_FAST_INTR_LOCK ;						\
	addl	$4, %esp ;						\
	movl	$0, lapic_eoi ;						\
	lock ;								\
	incl	_cnt+V_INTR ;	/* book-keeping can wait */		\
	movl	_intr_countp + (irq_num) * 4, %eax ;			\
	lock ;								\
	incl	(%eax) ;						\
	MEXITCOUNT ;							\
	popl	%fs ;							\
	MAYBE_POPL_ES ;							\
	popl	%ds ;							\
	popl	%edx ;							\
	popl	%ecx ;							\
	popl	%eax ;							\
	iret

#else /* FAST_WITHOUTCPL */

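/*
 * This variant must also check, after the handler runs, whether the
 * current cpl is hiding pending hard or soft interrupts; if so it
 * converts its thin frame into a full trap frame and finishes through
 * _doreti so they can be delivered.
 */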
#define	FAST_INTR(irq_num, vec_name)					\
	.text ;								\
	SUPERALIGN_TEXT ;						\
IDTVEC(vec_name) ;							\
	pushl	%eax ;		/* save only call-used registers */	\
	pushl	%ecx ;							\
	pushl	%edx ;							\
	pushl	%ds ;							\
	MAYBE_PUSHL_ES ;						\
	pushl	%fs ;							\
	movl	$KDSEL, %eax ;						\
	movl	%ax, %ds ;						\
	MAYBE_MOVW_AX_ES ;						\
	movl	$KPSEL, %eax ;						\
	movl	%ax, %fs ;						\
	FAKE_MCOUNT((5+ACTUALLY_PUSHED)*4(%esp)) ;			\
	GET_FAST_INTR_LOCK ;						\
	pushl	_intr_unit + (irq_num) * 4 ;				\
	call	*_intr_handler + (irq_num) * 4 ; /* do the work ASAP */ \
	addl	$4, %esp ;						\
	movl	$0, lapic_eoi ;						\
	lock ;								\
	incl	_cnt+V_INTR ;	/* book-keeping can wait */		\
	movl	_intr_countp + (irq_num) * 4,%eax ;			\
	lock ;								\
	incl	(%eax) ;						\
	movl	_cpl, %eax ;	/* unmasking pending HWIs or SWIs? */	\
	notl	%eax ;							\
	andl	_ipending, %eax ;					\
	jne	2f ; 		/* yes, maybe handle them */		\
1: ;									\
	MEXITCOUNT ;							\
	REL_FAST_INTR_LOCK ;						\
	popl	%fs ;							\
	MAYBE_POPL_ES ;							\
	popl	%ds ;							\
	popl	%edx ;							\
	popl	%ecx ;							\
	popl	%eax ;							\
	iret ;								\
;									\
	ALIGN_TEXT ;							\
2: ;									\
	cmpb	$3, _intr_nesting_level ;	/* enough stack? */	\
	jae	1b ;		/* no, return */			\
	movl	_cpl, %eax ;						\
	/* XXX next line is probably unnecessary now. */		\
	movl	$HWI_MASK|SWI_MASK, _cpl ;	/* limit nesting ... */	\
	lock ;								\
	incb	_intr_nesting_level ;	/* ... really limit it ... */	\
	sti ;			/* to do this as early as possible */	\
	popl	%fs ;		/* discard most of thin frame ... */	\
	MAYBE_POPL_ES ;		/* discard most of thin frame ... */	\
	popl	%ecx ;		/* ... original %ds ... */		\
	popl	%edx ;							\
	xchgl	%eax, 4(%esp) ;	/* orig %eax; save cpl */		\
	pushal ;		/* build fat frame (grrr) ... */	\
	pushl	%ecx ;		/* ... actually %ds ... */		\
	pushl	%es ;							\
	pushl	%fs ;							\
	movl	$KDSEL, %eax ;						\
	movl	%ax, %es ;						\
	movl	$KPSEL, %eax ;						\
	movl	%ax, %fs ;						\
	movl	(3+8+0)*4(%esp), %ecx ;	/* %ecx from thin frame ... */	\
	movl	%ecx, (3+6)*4(%esp) ;	/* ... to fat frame ... */	\
	movl	(3+8+1)*4(%esp), %eax ;	/* ... cpl from thin frame */	\
	pushl	%eax ;							\
	subl	$4, %esp ;	/* junk for unit number */		\
	MEXITCOUNT ;							\
	jmp	_doreti

#endif /* FAST_WITHOUTCPL */


/*
 * Trap frame setup/teardown used by the slow interrupt stubs and the
 * IPI handlers below.
 */
#define PUSH_FRAME							\
	pushl	$0 ;		/* dummy error code */			\
	pushl	$0 ;		/* dummy trap type */			\
	pushal ;							\
	pushl	%ds ;		/* save data and extra segments ... */	\
	pushl	%es ;							\
	pushl	%fs

#define POP_FRAME							\
	popl	%fs ;							\
	popl	%es ;							\
	popl	%ds ;							\
	popal ;								\
	addl	$4+4,%esp
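
/*
 * After PUSH_FRAME the stack holds 13 longwords (%fs/%es/%ds, the eight
 * pushal registers and the two dummies) below the hardware iret frame,
 * which is why the stubs below use FAKE_MCOUNT(13*4(%esp)).
 */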

#define IOAPICADDR(irq_num) CNAME(int_to_apicintpin) + 16 * (irq_num) + 8
#define REDIRIDX(irq_num) CNAME(int_to_apicintpin) + 16 * (irq_num) + 12
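
/*
 * Each int_to_apicintpin[] entry is 16 bytes; offset 8 holds the
 * register base of the I/O APIC the pin is wired to and offset 12 the
 * redirection-table index for that pin, as used by MASK_IRQ/UNMASK_IRQ.
 */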

#define MASK_IRQ(irq_num)						\
	IMASK_LOCK ;				/* into critical reg */	\
	testl	$IRQ_BIT(irq_num), _apic_imen ;				\
	jne	7f ;			/* masked, don't mask */	\
	orl	$IRQ_BIT(irq_num), _apic_imen ;	/* set the mask bit */	\
	movl	IOAPICADDR(irq_num), %ecx ;	/* ioapic addr */	\
	movl	REDIRIDX(irq_num), %eax ;	/* get the index */	\
	movl	%eax, (%ecx) ;			/* write the index */	\
	movl	IOAPIC_WINDOW(%ecx), %eax ;	/* current value */	\
	orl	$IOART_INTMASK, %eax ;		/* set the mask */	\
	movl	%eax, IOAPIC_WINDOW(%ecx) ;	/* new value */		\
7: ;						/* already masked */	\
	IMASK_UNLOCK
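
/*
 * The I/O APIC is accessed indirectly: the register index is written to
 * the select register at the chip's base address, then the register
 * contents are read or written through the data window (IOAPIC_WINDOW).
 * Each mask/unmask is therefore a select, read-modify-write sequence,
 * done under IMASK_LOCK so another CPU cannot interleave an access.
 */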
/*
 * Test to see whether we are handling an edge or level triggered INT.
 *  Level-triggered INTs must still be masked as we don't clear the source,
 *  and the EOI cycle would cause redundant INTs to occur.
 */
#define MASK_LEVEL_IRQ(irq_num)						\
	testl	$IRQ_BIT(irq_num), _apic_pin_trigger ;			\
	jz	9f ;				/* edge, don't mask */	\
	MASK_IRQ(irq_num) ;						\
9:


#ifdef APIC_INTR_REORDER
#define EOI_IRQ(irq_num)						\
	movl	_apic_isrbit_location + 8 * (irq_num), %eax ;		\
	movl	(%eax), %eax ;						\
	testl	_apic_isrbit_location + 4 + 8 * (irq_num), %eax ;	\
	jz	9f ;				/* not active */	\
	movl	$0, lapic_eoi ;						\
	APIC_ITRACE(apic_itrace_eoi, irq_num, APIC_ITRACE_EOI) ;	\
9:

#else
#define EOI_IRQ(irq_num)						\
	testl	$IRQ_BIT(irq_num), lapic_isr1;				\
	jz	9f	;			/* not active */	\
	movl	$0, lapic_eoi;						\
	APIC_ITRACE(apic_itrace_eoi, irq_num, APIC_ITRACE_EOI) ;	\
9:
#endif
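
/*
 * With APIC_INTR_REORDER the kernel has precomputed, per IRQ, the
 * address of the local APIC in-service register word for that vector
 * and the bit mask to test within it (an 8-byte pair per IRQ in
 * _apic_isrbit_location); without it, EOI_IRQ assumes the vector lands
 * in lapic_isr1.  Either way the EOI is issued only if the vector is
 * actually shown in service.
 */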


/*
 * Test to see if the source is currently masked; clear the mask if so.
 */
#define UNMASK_IRQ(irq_num)						\
	IMASK_LOCK ;				/* into critical reg */	\
	testl	$IRQ_BIT(irq_num), _apic_imen ;				\
	je	7f ;			/* bit clear, not masked */	\
	andl	$~IRQ_BIT(irq_num), _apic_imen ;/* clear mask bit */	\
	movl	IOAPICADDR(irq_num),%ecx ;	/* ioapic addr */	\
	movl	REDIRIDX(irq_num), %eax ;	/* get the index */	\
	movl	%eax,(%ecx) ;			/* write the index */	\
	movl	IOAPIC_WINDOW(%ecx),%eax ;	/* current value */	\
	andl	$~IOART_INTMASK,%eax ;		/* clear the mask */	\
	movl	%eax,IOAPIC_WINDOW(%ecx) ;	/* new value */		\
7: ;									\
	IMASK_UNLOCK

#ifdef INTR_SIMPLELOCK
#define ENLOCK
#define DELOCK
#define LATELOCK call	_get_isrlock
#else
#define ENLOCK \
	ISR_TRYLOCK ;		/* XXX this is going away... */		\
	testl	%eax, %eax ;			/* did we get it? */	\
	jz	3f
#define DELOCK	ISR_RELLOCK
#define LATELOCK
#endif

#ifdef APIC_INTR_DIAGNOSTIC
#ifdef APIC_INTR_DIAGNOSTIC_IRQ
log_intr_event:
	pushf
	cli
	pushl	$CNAME(apic_itrace_debuglock)
	call	CNAME(s_lock_np)
	addl	$4, %esp
	movl	CNAME(apic_itrace_debugbuffer_idx), %ecx
	andl	$32767, %ecx
	movl	_cpuid, %eax
	shll	$8,	%eax
	orl	8(%esp), %eax
	movw	%ax,	CNAME(apic_itrace_debugbuffer)(,%ecx,2)
	incl	%ecx
	andl	$32767, %ecx
	movl	%ecx,	CNAME(apic_itrace_debugbuffer_idx)
	pushl	$CNAME(apic_itrace_debuglock)
	call	CNAME(s_unlock_np)
	addl	$4, %esp
	popf
	ret

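/*
 * log_intr_event appends a 16-bit record, (cpuid << 8) | event id, to a
 * 32768-entry circular trace buffer, taking a spin lock with interrupts
 * disabled around the index update so records from different CPUs
 * cannot collide.
 */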

#define APIC_ITRACE(name, irq_num, id)					\
	lock ;					/* MP-safe */		\
	incl	CNAME(name) + (irq_num) * 4 ;				\
	pushl	%eax ;							\
	pushl	%ecx ;							\
	pushl	%edx ;							\
	movl	$(irq_num), %eax ;					\
	cmpl	$APIC_INTR_DIAGNOSTIC_IRQ, %eax ;			\
	jne	7f ;							\
	pushl	$id ;							\
	call	log_intr_event ;					\
	addl	$4, %esp ;						\
7: ;									\
	popl	%edx ;							\
	popl	%ecx ;							\
	popl	%eax
#else
#define APIC_ITRACE(name, irq_num, id)					\
	lock ;					/* MP-safe */		\
	incl	CNAME(name) + (irq_num) * 4
#endif

#define APIC_ITRACE_ENTER 1
#define APIC_ITRACE_EOI 2
#define APIC_ITRACE_TRYISRLOCK 3
#define APIC_ITRACE_GOTISRLOCK 4
#define APIC_ITRACE_ENTER2 5
#define APIC_ITRACE_LEAVE 6
#define APIC_ITRACE_UNMASK 7
#define APIC_ITRACE_ACTIVE 8
#define APIC_ITRACE_MASKED 9
#define APIC_ITRACE_NOISRLOCK 10
#define APIC_ITRACE_MASKED2 11
#define APIC_ITRACE_SPLZ 12
#define APIC_ITRACE_DORETI 13

#else
#define APIC_ITRACE(name, irq_num, id)
#endif


#ifdef CPL_AND_CML

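/*
 * Slow interrupt entry.  Each stub sets its IRQ bit in iactive with a
 * locked btsl (lazy masking): level-triggered pins are masked at the
 * I/O APIC before the EOI, and an IRQ that is found already active,
 * blocked by cpl/cml, or unable to get the ISR lock is recorded in
 * _ipending to be replayed later via _doreti or forward_irq.
 */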
#define	INTR(irq_num, vec_name, maybe_extra_ipending)			\
	.text ;								\
	SUPERALIGN_TEXT ;						\
/* _XintrNN: entry point used by IDT/HWIs & splz_unpend via _vec[]. */	\
IDTVEC(vec_name) ;							\
	PUSH_FRAME ;							\
	movl	$KDSEL, %eax ;	/* reload with kernel's data segment */	\
	movl	%ax, %ds ;						\
	movl	%ax, %es ;						\
	movl	$KPSEL, %eax ;						\
	movl	%ax, %fs ;						\
;									\
	maybe_extra_ipending ;						\
;									\
	APIC_ITRACE(apic_itrace_enter, irq_num, APIC_ITRACE_ENTER) ;	\
	lock ;					/* MP-safe */		\
	btsl	$(irq_num), iactive ;		/* lazy masking */	\
	jc	1f ;				/* already active */	\
;									\
	MASK_LEVEL_IRQ(irq_num) ;					\
	EOI_IRQ(irq_num) ;						\
0: ;									\
	APIC_ITRACE(apic_itrace_tryisrlock, irq_num, APIC_ITRACE_TRYISRLOCK) ;\
	ENLOCK ;							\
;									\
	APIC_ITRACE(apic_itrace_gotisrlock, irq_num, APIC_ITRACE_GOTISRLOCK) ;\
	AVCPL_LOCK ;				/* MP-safe */		\
	testl	$IRQ_BIT(irq_num), _cpl ;				\
	jne	2f ;				/* this INT masked */	\
	testl	$IRQ_BIT(irq_num), _cml ;				\
	jne	2f ;				/* this INT masked */	\
	orl	$IRQ_BIT(irq_num), _cil ;				\
	AVCPL_UNLOCK ;							\
;									\
	incb	_intr_nesting_level ;					\
;									\
  /* entry point used by doreti_unpend for HWIs. */			\
__CONCAT(Xresume,irq_num): ;						\
	FAKE_MCOUNT(13*4(%esp)) ;		/* XXX avoid dbl cnt */ \
	lock ;	incl	_cnt+V_INTR ;		/* tally interrupts */	\
	movl	_intr_countp + (irq_num) * 4, %eax ;			\
	lock ;	incl	(%eax) ;					\
;									\
	AVCPL_LOCK ;				/* MP-safe */		\
	movl	_cml, %eax ;						\
	pushl	%eax ;							\
	orl	_intr_mask + (irq_num) * 4, %eax ;			\
	movl	%eax, _cml ;						\
	AVCPL_UNLOCK ;							\
;									\
	pushl	_intr_unit + (irq_num) * 4 ;				\
	incl	_inside_intr ;						\
	APIC_ITRACE(apic_itrace_enter2, irq_num, APIC_ITRACE_ENTER2) ;	\
	sti ;								\
	call	*_intr_handler + (irq_num) * 4 ;			\
	cli ;								\
	APIC_ITRACE(apic_itrace_leave, irq_num, APIC_ITRACE_LEAVE) ;	\
	decl	_inside_intr ;						\
;									\
	lock ;	andl $~IRQ_BIT(irq_num), iactive ;			\
	lock ;	andl $~IRQ_BIT(irq_num), _cil ;				\
	UNMASK_IRQ(irq_num) ;						\
	APIC_ITRACE(apic_itrace_unmask, irq_num, APIC_ITRACE_UNMASK) ;	\
	sti ;				/* doreti repeats cli/sti */	\
	MEXITCOUNT ;							\
	LATELOCK ;							\
	jmp	_doreti ;						\
;									\
	ALIGN_TEXT ;							\
1: ;						/* active */		\
	APIC_ITRACE(apic_itrace_active, irq_num, APIC_ITRACE_ACTIVE) ;	\
	MASK_IRQ(irq_num) ;						\
	EOI_IRQ(irq_num) ;						\
	AVCPL_LOCK ;				/* MP-safe */		\
	orl	$IRQ_BIT(irq_num), _ipending ;				\
	AVCPL_UNLOCK ;							\
	lock ;								\
	btsl	$(irq_num), iactive ;		/* still active */	\
	jnc	0b ;				/* retry */		\
	POP_FRAME ;							\
	iret ;								\
;									\
	ALIGN_TEXT ;							\
2: ;						/* masked by cpl|cml */	\
	APIC_ITRACE(apic_itrace_masked, irq_num, APIC_ITRACE_MASKED) ;	\
	orl	$IRQ_BIT(irq_num), _ipending ;				\
	AVCPL_UNLOCK ;							\
	DELOCK ;		/* XXX this is going away... */		\
	POP_FRAME ;							\
	iret ;								\
	ALIGN_TEXT ;							\
3: ; 			/* other cpu has isr lock */			\
	APIC_ITRACE(apic_itrace_noisrlock, irq_num, APIC_ITRACE_NOISRLOCK) ;\
	AVCPL_LOCK ;				/* MP-safe */		\
	orl	$IRQ_BIT(irq_num), _ipending ;				\
	testl	$IRQ_BIT(irq_num), _cpl ;				\
	jne	4f ;				/* this INT masked */	\
	testl	$IRQ_BIT(irq_num), _cml ;				\
	jne	4f ;				/* this INT masked */	\
	orl	$IRQ_BIT(irq_num), _cil ;				\
	AVCPL_UNLOCK ;							\
	call	forward_irq ;	/* forward irq to lock holder */	\
	POP_FRAME ;	 			/* and return */	\
	iret ;								\
	ALIGN_TEXT ;							\
4: ;						/* blocked */		\
	APIC_ITRACE(apic_itrace_masked2, irq_num, APIC_ITRACE_MASKED2) ;\
	AVCPL_UNLOCK ;							\
	POP_FRAME ;	 			/* and return */	\
	iret

#else /* CPL_AND_CML */


#define	INTR(irq_num, vec_name, maybe_extra_ipending)			\
	.text ;								\
	SUPERALIGN_TEXT ;						\
/* _XintrNN: entry point used by IDT/HWIs & splz_unpend via _vec[]. */	\
IDTVEC(vec_name) ;							\
	PUSH_FRAME ;							\
	movl	$KDSEL, %eax ;	/* reload with kernel's data segment */	\
	movl	%ax, %ds ;						\
	movl	%ax, %es ;						\
	movl	$KPSEL, %eax ;						\
	movl	%ax, %fs ;						\
;									\
	maybe_extra_ipending ;						\
;									\
	APIC_ITRACE(apic_itrace_enter, irq_num, APIC_ITRACE_ENTER) ;	\
	lock ;					/* MP-safe */		\
	btsl	$(irq_num), iactive ;		/* lazy masking */	\
	jc	1f ;				/* already active */	\
;									\
	MASK_LEVEL_IRQ(irq_num) ;					\
	EOI_IRQ(irq_num) ;						\
0: ;									\
	APIC_ITRACE(apic_itrace_tryisrlock, irq_num, APIC_ITRACE_TRYISRLOCK) ;\
	ISR_TRYLOCK ;		/* XXX this is going away... */		\
	testl	%eax, %eax ;			/* did we get it? */	\
	jz	3f ;				/* no */		\
;									\
	APIC_ITRACE(apic_itrace_gotisrlock, irq_num, APIC_ITRACE_GOTISRLOCK) ;\
	AVCPL_LOCK ;				/* MP-safe */		\
	testl	$IRQ_BIT(irq_num), _cpl ;				\
	jne	2f ;				/* this INT masked */	\
	AVCPL_UNLOCK ;							\
;									\
	incb	_intr_nesting_level ;					\
;									\
  /* entry point used by doreti_unpend for HWIs. */			\
__CONCAT(Xresume,irq_num): ;						\
	FAKE_MCOUNT(13*4(%esp)) ;		/* XXX avoid dbl cnt */ \
	lock ;	incl	_cnt+V_INTR ;		/* tally interrupts */	\
	movl	_intr_countp + (irq_num) * 4, %eax ;			\
	lock ;	incl	(%eax) ;					\
;									\
	AVCPL_LOCK ;				/* MP-safe */		\
	movl	_cpl, %eax ;						\
	pushl	%eax ;							\
	orl	_intr_mask + (irq_num) * 4, %eax ;			\
	movl	%eax, _cpl ;						\
	andl	$~IRQ_BIT(irq_num), _ipending ;				\
	AVCPL_UNLOCK ;							\
;									\
	pushl	_intr_unit + (irq_num) * 4 ;				\
	APIC_ITRACE(apic_itrace_enter2, irq_num, APIC_ITRACE_ENTER2) ;	\
	sti ;								\
	call	*_intr_handler + (irq_num) * 4 ;			\
	cli ;								\
	APIC_ITRACE(apic_itrace_leave, irq_num, APIC_ITRACE_LEAVE) ;	\
;									\
	lock ;	andl	$~IRQ_BIT(irq_num), iactive ;			\
	UNMASK_IRQ(irq_num) ;						\
	APIC_ITRACE(apic_itrace_unmask, irq_num, APIC_ITRACE_UNMASK) ;	\
	sti ;				/* doreti repeats cli/sti */	\
	MEXITCOUNT ;							\
	jmp	_doreti ;						\
;									\
	ALIGN_TEXT ;							\
1: ;						/* active  */		\
	APIC_ITRACE(apic_itrace_active, irq_num, APIC_ITRACE_ACTIVE) ;	\
	MASK_IRQ(irq_num) ;						\
	EOI_IRQ(irq_num) ;						\
	AVCPL_LOCK ;				/* MP-safe */		\
	orl	$IRQ_BIT(irq_num), _ipending ;				\
	AVCPL_UNLOCK ;							\
	lock ;								\
	btsl	$(irq_num), iactive ;		/* still active */	\
	jnc	0b ;				/* retry */		\
	POP_FRAME ;							\
	iret ;		/* XXX:	 iactive bit might be 0 now */		\
	ALIGN_TEXT ;							\
2: ;				/* masked by cpl, leave iactive set */	\
	APIC_ITRACE(apic_itrace_masked, irq_num, APIC_ITRACE_MASKED) ;	\
	orl	$IRQ_BIT(irq_num), _ipending ;				\
	AVCPL_UNLOCK ;							\
	ISR_RELLOCK ;		/* XXX this is going away... */		\
	POP_FRAME ;							\
	iret ;								\
	ALIGN_TEXT ;							\
3: ; 			/* other cpu has isr lock */			\
	APIC_ITRACE(apic_itrace_noisrlock, irq_num, APIC_ITRACE_NOISRLOCK) ;\
	AVCPL_LOCK ;				/* MP-safe */		\
	orl	$IRQ_BIT(irq_num), _ipending ;				\
	testl	$IRQ_BIT(irq_num), _cpl ;				\
	jne	4f ;				/* this INT masked */	\
	AVCPL_UNLOCK ;							\
	call	forward_irq ;	 /* forward irq to lock holder */	\
	POP_FRAME ;	 			/* and return */	\
	iret ;								\
	ALIGN_TEXT ;							\
4: ;						/* blocked */		\
	APIC_ITRACE(apic_itrace_masked2, irq_num, APIC_ITRACE_MASKED2) ;\
	AVCPL_UNLOCK ;							\
	POP_FRAME ;	 			/* and return */	\
	iret

#endif /* CPL_AND_CML */


/*
 * Handle "spurious INTerrupts".
 * Notes:
 *  This is different than the "spurious INTerrupt" generated by an
 *   8259 PIC for missing INTs.  See the APIC documentation for details.
 *  This routine should NOT do an 'EOI' cycle.
 */
	.text
	SUPERALIGN_TEXT
	.globl _Xspuriousint
_Xspuriousint:

	/* No EOI cycle used here */

	iret


/*
 * Handle TLB shootdowns.
 */
	.text
	SUPERALIGN_TEXT
	.globl	_Xinvltlb
_Xinvltlb:
	pushl	%eax

#ifdef COUNT_XINVLTLB_HITS
	pushl	%fs
	movl	$KPSEL, %eax
	movl	%ax, %fs
	movl	_cpuid, %eax
	popl	%fs
	ss
	incl	_xhits(,%eax,4)
#endif /* COUNT_XINVLTLB_HITS */

	movl	%cr3, %eax		/* invalidate the TLB */
	movl	%eax, %cr3

	ss				/* stack segment, avoid %ds load */
	movl	$0, lapic_eoi		/* End Of Interrupt to APIC */

	popl	%eax
	iret


#ifdef BETTER_CLOCK

/*
 * Executed by a CPU when it receives an Xcpucheckstate IPI from another CPU,
 *
 *  - Stores current cpu state in checkstate_cpustate[cpuid]
 *      0 == user, 1 == sys, 2 == intr
 *  - Stores current process in checkstate_curproc[cpuid]
 *
 *  - Signals its receipt by setting bit cpuid in checkstate_probed_cpus.
 *
 * stack: 0->ds, 4->fs, 8->ebx, 12->eax, 16->eip, 20->cs, 24->eflags
 */
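
/*
 * The state is classified from the saved frame: if the saved %cs has
 * RPL 3 or PSL_VM is set in the saved eflags the CPU was in user mode
 * (0); otherwise it was in the kernel (1), bumped to interrupt (2)
 * when _inside_intr is nonzero under CPL_AND_CML.
 */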

	.text
	SUPERALIGN_TEXT
	.globl _Xcpucheckstate
	.globl _checkstate_cpustate
	.globl _checkstate_curproc
	.globl _checkstate_pc
_Xcpucheckstate:
	pushl	%eax
	pushl	%ebx
	pushl	%ds			/* save current data segment */
	pushl	%fs

	movl	$KDSEL, %eax
	movl	%ax, %ds		/* use KERNEL data segment */
	movl	$KPSEL, %eax
	movl	%ax, %fs

	movl	$0, lapic_eoi		/* End Of Interrupt to APIC */

	movl	$0, %ebx
	movl	20(%esp), %eax
	andl	$3, %eax
	cmpl	$3, %eax
	je	1f
	testl	$PSL_VM, 24(%esp)
	jne	1f
	incl	%ebx			/* system or interrupt */
#ifdef CPL_AND_CML
	cmpl	$0, _inside_intr
	je	1f
	incl	%ebx			/* interrupt */
#endif
1:
	movl	_cpuid, %eax
	movl	%ebx, _checkstate_cpustate(,%eax,4)
	movl	_curproc, %ebx
	movl	%ebx, _checkstate_curproc(,%eax,4)
	movl	16(%esp), %ebx
	movl	%ebx, _checkstate_pc(,%eax,4)

	lock				/* checkstate_probed_cpus |= (1<<id) */
	btsl	%eax, _checkstate_probed_cpus

	popl	%fs
	popl	%ds			/* restore previous data segment */
	popl	%ebx
	popl	%eax
	iret

#endif /* BETTER_CLOCK */

/*
 * Executed by a CPU when it receives an Xcpuast IPI from another CPU,
 *
 *  - Signals its receipt by clearing bit cpuid in checkstate_need_ast.
 *
 *  - We need a better method of triggering asts on other cpus.
 */

	.text
	SUPERALIGN_TEXT
	.globl _Xcpuast
_Xcpuast:
	PUSH_FRAME
	movl	$KDSEL, %eax
	movl	%ax, %ds		/* use KERNEL data segment */
	movl	%ax, %es
	movl	$KPSEL, %eax
	movl	%ax, %fs

	movl	_cpuid, %eax
	lock				/* checkstate_need_ast &= ~(1<<id) */
	btrl	%eax, _checkstate_need_ast
	movl	$0, lapic_eoi		/* End Of Interrupt to APIC */

	lock
	btsl	%eax, _checkstate_pending_ast
	jc	1f

	FAKE_MCOUNT(13*4(%esp))

	/*
	 * Giant locks do not come cheap.
	 * A lot of cycles are going to be wasted here.
	 */
	call	_get_isrlock

	AVCPL_LOCK
#ifdef CPL_AND_CML
	movl	_cml, %eax
#else
	movl	_cpl, %eax
#endif
	pushl	%eax
	orl	$SWI_AST_PENDING, _ipending
	AVCPL_UNLOCK
	lock
	incb	_intr_nesting_level
	sti

	pushl	$0

	movl	_cpuid, %eax
	lock
	btrl	%eax, _checkstate_pending_ast
	lock
	btrl	%eax, CNAME(resched_cpus)
	jnc	2f
	movl	$1, CNAME(want_resched)
	lock
	incl	CNAME(want_resched_cnt)
2:
	lock
	incl	CNAME(cpuast_cnt)
	MEXITCOUNT
	jmp	_doreti
1:
	/* We are already in the process of delivering an ast for this CPU */
	POP_FRAME
	iret


/*
 * Executed by a CPU when it receives an XFORWARD_IRQ IPI.
 */

	.text
	SUPERALIGN_TEXT
	.globl _Xforward_irq
_Xforward_irq:
	PUSH_FRAME
	movl	$KDSEL, %eax
	movl	%ax, %ds		/* use KERNEL data segment */
	movl	%ax, %es
	movl	$KPSEL, %eax
	movl	%ax, %fs

	movl	$0, lapic_eoi		/* End Of Interrupt to APIC */

	FAKE_MCOUNT(13*4(%esp))

	ISR_TRYLOCK
	testl	%eax,%eax		/* Did we get the lock ? */
	jz	1f			/* No */

	lock
	incl	CNAME(forward_irq_hitcnt)
	cmpb	$4, _intr_nesting_level
	jae	2f

	AVCPL_LOCK
#ifdef CPL_AND_CML
	movl	_cml, %eax
#else
	movl	_cpl, %eax
#endif
	pushl	%eax
	AVCPL_UNLOCK
	lock
	incb	_intr_nesting_level
	sti

	pushl	$0

	MEXITCOUNT
	jmp	_doreti			/* Handle forwarded interrupt */
1:
	lock
	incl	CNAME(forward_irq_misscnt)
	call	forward_irq	/* Oops, we've lost the isr lock */
	MEXITCOUNT
	POP_FRAME
	iret
2:
	lock
	incl	CNAME(forward_irq_toodeepcnt)
3:
	ISR_RELLOCK
	MEXITCOUNT
	POP_FRAME
	iret

/*
 * forward_irq: send an XFORWARD_IRQ IPI to the CPU that currently holds
 * the MP lock (whose id is kept in the top byte of _mp_lock), or to
 * CPU #0 if the lock is free, so the pending interrupt recorded in
 * _ipending is serviced by the lock holder.
 */
forward_irq:
	MCOUNT
	cmpl	$0,_invltlb_ok
	jz	4f

	cmpl	$0, CNAME(forward_irq_enabled)
	jz	4f

	movl	_mp_lock,%eax
	cmpl	$FREE_LOCK,%eax
	jne	1f
	movl	$0, %eax		/* Pick CPU #0 if no one has lock */
1:
	shrl	$24,%eax
	movl	_cpu_num_to_apic_id(,%eax,4),%ecx
	shll	$24,%ecx
	movl	lapic_icr_hi, %eax
	andl	$~APIC_ID_MASK, %eax
	orl	%ecx, %eax
	movl	%eax, lapic_icr_hi	/* set IPI destination */

2:
	movl	lapic_icr_lo, %eax
	andl	$APIC_DELSTAT_MASK,%eax
	jnz	2b			/* wait for any previous IPI */
	movl	lapic_icr_lo, %eax
	andl	$APIC_RESV2_MASK, %eax
	orl	$(APIC_DEST_DESTFLD|APIC_DELMODE_FIXED|XFORWARD_IRQ_OFFSET), %eax
	movl	%eax, lapic_icr_lo
3:
	movl	lapic_icr_lo, %eax
	andl	$APIC_DELSTAT_MASK,%eax
	jnz	3b			/* wait for delivery */
4:
	ret

/*
 * Executed by a CPU when it receives an Xcpustop IPI from another CPU,
 *
 *  - Signals its receipt.
 *  - Waits for permission to restart.
 *  - Signals its restart.
 */

	.text
	SUPERALIGN_TEXT
	.globl _Xcpustop
_Xcpustop:
	pushl	%ebp
	movl	%esp, %ebp
	pushl	%eax
	pushl	%ecx
	pushl	%edx
	pushl	%ds			/* save current data segment */
	pushl	%fs

	movl	$KDSEL, %eax
	movl	%ax, %ds		/* use KERNEL data segment */
	movl	$KPSEL, %eax
	movl	%ax, %fs

	movl	$0, lapic_eoi		/* End Of Interrupt to APIC */

	movl	_cpuid, %eax
	imull	$PCB_SIZE, %eax
	leal	CNAME(stoppcbs)(%eax), %eax
	pushl	%eax
	call	CNAME(savectx)		/* Save process context */
	addl	$4, %esp

	movl	_cpuid, %eax

	lock
	btsl	%eax, _stopped_cpus	/* stopped_cpus |= (1<<id) */
1:
	btl	%eax, _started_cpus	/* while (!(started_cpus & (1<<id))) */
	jnc	1b

	lock
	btrl	%eax, _started_cpus	/* started_cpus &= ~(1<<id) */
	lock
	btrl	%eax, _stopped_cpus	/* stopped_cpus &= ~(1<<id) */

	test	%eax, %eax		/* only CPU #0 runs the restart func */
	jnz	2f

	movl	CNAME(cpustop_restartfunc), %eax
	test	%eax, %eax		/* anything to run? */
	jz	2f
	movl	$0, CNAME(cpustop_restartfunc)	/* One-shot */

	call	*%eax
2:
	popl	%fs
	popl	%ds			/* restore previous data segment */
	popl	%edx
	popl	%ecx
	popl	%eax
	movl	%ebp, %esp
	popl	%ebp
	iret


MCOUNT_LABEL(bintr)
	FAST_INTR(0,fastintr0)
	FAST_INTR(1,fastintr1)
	FAST_INTR(2,fastintr2)
	FAST_INTR(3,fastintr3)
	FAST_INTR(4,fastintr4)
	FAST_INTR(5,fastintr5)
	FAST_INTR(6,fastintr6)
	FAST_INTR(7,fastintr7)
	FAST_INTR(8,fastintr8)
	FAST_INTR(9,fastintr9)
	FAST_INTR(10,fastintr10)
	FAST_INTR(11,fastintr11)
	FAST_INTR(12,fastintr12)
	FAST_INTR(13,fastintr13)
	FAST_INTR(14,fastintr14)
	FAST_INTR(15,fastintr15)
	FAST_INTR(16,fastintr16)
	FAST_INTR(17,fastintr17)
	FAST_INTR(18,fastintr18)
	FAST_INTR(19,fastintr19)
	FAST_INTR(20,fastintr20)
	FAST_INTR(21,fastintr21)
	FAST_INTR(22,fastintr22)
	FAST_INTR(23,fastintr23)
#define	CLKINTR_PENDING	movl $1,CNAME(clkintr_pending)
	INTR(0,intr0, CLKINTR_PENDING)
	INTR(1,intr1,)
	INTR(2,intr2,)
	INTR(3,intr3,)
	INTR(4,intr4,)
	INTR(5,intr5,)
	INTR(6,intr6,)
	INTR(7,intr7,)
	INTR(8,intr8,)
	INTR(9,intr9,)
	INTR(10,intr10,)
	INTR(11,intr11,)
	INTR(12,intr12,)
	INTR(13,intr13,)
	INTR(14,intr14,)
	INTR(15,intr15,)
	INTR(16,intr16,)
	INTR(17,intr17,)
	INTR(18,intr18,)
	INTR(19,intr19,)
	INTR(20,intr20,)
	INTR(21,intr21,)
	INTR(22,intr22,)
	INTR(23,intr23,)
MCOUNT_LABEL(eintr)

	.data
/*
 * Addresses of interrupt handlers.
 *  XresumeNN: Resumption addresses for HWIs.
 */
	.globl _ihandlers
_ihandlers:
/*
 * used by:
 *  ipl.s:	doreti_unpend
 */
	.long	Xresume0,  Xresume1,  Xresume2,  Xresume3
	.long	Xresume4,  Xresume5,  Xresume6,  Xresume7
	.long	Xresume8,  Xresume9,  Xresume10, Xresume11
	.long	Xresume12, Xresume13, Xresume14, Xresume15
	.long	Xresume16, Xresume17, Xresume18, Xresume19
	.long	Xresume20, Xresume21, Xresume22, Xresume23
/*
 * used by:
 *  ipl.s:	doreti_unpend
 *  apic_ipl.s:	splz_unpend
 */
	.long	_swi_null, swi_net, _swi_null, _swi_null
	.long	_swi_vm, _swi_null, _softclock, swi_ast

imasks:				/* masks for interrupt handlers */
	.space	NHWI*4		/* padding; HWI masks are elsewhere */

	.long	SWI_TTY_MASK, SWI_NET_MASK, SWI_CAMNET_MASK, SWI_CAMBIO_MASK
	.long	SWI_VM_MASK, 0, SWI_CLOCK_MASK, SWI_AST_MASK

/* active flag for lazy masking */
iactive:
	.long	0

#ifdef COUNT_XINVLTLB_HITS
	.globl	_xhits
_xhits:
	.space	(NCPU * 4), 0
#endif /* COUNT_XINVLTLB_HITS */

/* variables used by stop_cpus()/restart_cpus()/Xcpustop */
	.globl _stopped_cpus, _started_cpus
_stopped_cpus:
	.long	0
_started_cpus:
	.long	0

#ifdef BETTER_CLOCK
	.globl _checkstate_probed_cpus
_checkstate_probed_cpus:
	.long	0
#endif /* BETTER_CLOCK */
	.globl _checkstate_need_ast
_checkstate_need_ast:
	.long	0
_checkstate_pending_ast:
	.long	0
	.globl CNAME(forward_irq_misscnt)
	.globl CNAME(forward_irq_toodeepcnt)
	.globl CNAME(forward_irq_hitcnt)
	.globl CNAME(resched_cpus)
	.globl CNAME(want_resched_cnt)
	.globl CNAME(cpuast_cnt)
	.globl CNAME(cpustop_restartfunc)
CNAME(forward_irq_misscnt):
	.long 0
CNAME(forward_irq_hitcnt):
	.long 0
CNAME(forward_irq_toodeepcnt):
	.long 0
CNAME(resched_cpus):
	.long 0
CNAME(want_resched_cnt):
	.long 0
CNAME(cpuast_cnt):
	.long 0
CNAME(cpustop_restartfunc):
	.long 0

	.globl	_apic_pin_trigger
_apic_pin_trigger:
	.long	0

	.text