apic_vector.s revision 47588
/*
 *	from: vector.s, 386BSD 0.1 unknown origin
 *	$Id: apic_vector.s,v 1.37 1999/04/28 01:04:12 luoqi Exp $
 */


#include <machine/apic.h>
#include <machine/smp.h>

#include "i386/isa/intr_machdep.h"


#ifdef FAST_SIMPLELOCK

#define GET_FAST_INTR_LOCK						\
	pushl	$_fast_intr_lock ;		/* address of lock */	\
	call	_s_lock ;			/* MP-safe */		\
	addl	$4,%esp

#define REL_FAST_INTR_LOCK						\
	pushl	$_fast_intr_lock ;		/* address of lock */	\
	call	_s_unlock ;			/* MP-safe */		\
	addl	$4,%esp

#else /* FAST_SIMPLELOCK */

#define GET_FAST_INTR_LOCK						\
	call	_get_isrlock

#define REL_FAST_INTR_LOCK						\
	pushl	$_mp_lock ;	/* GIANT_LOCK */			\
	call	_MPrellock ;						\
	add	$4, %esp

#endif /* FAST_SIMPLELOCK */

/* convert an absolute IRQ# into a bitmask */
#define IRQ_BIT(irq_num)	(1 << (irq_num))

/* make an index into the IO APIC from the IRQ# */
#define REDTBL_IDX(irq_num)	(0x10 + ((irq_num) * 2))
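
/*
 * Worked example (illustrative only): for IRQ 8,
 *	IRQ_BIT(8)    == 0x00000100
 *	REDTBL_IDX(8) == 0x20	(low dword of redirection table entry 8;
 *				 entries are register pairs starting at 0x10)
 */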


/*
 * Macros for interrupt entry, call to handler, and exit.
 */

#ifdef FAST_WITHOUTCPL

/*
 * Fast interrupt entry: save only the call-used registers, invoke the
 * handler as early as possible, EOI the local APIC, and return.
 */
#define	FAST_INTR(irq_num, vec_name)					\
	.text ;								\
	SUPERALIGN_TEXT ;						\
IDTVEC(vec_name) ;							\
	pushl	%eax ;		/* save only call-used registers */	\
	pushl	%ecx ;							\
	pushl	%edx ;							\
	pushl	%ds ;							\
	MAYBE_PUSHL_ES ;						\
	pushl	%fs ;							\
	movl	$KDSEL,%eax ;						\
	movl	%ax,%ds ;						\
	MAYBE_MOVW_AX_ES ;						\
	movl	$KPSEL,%eax ;						\
	movl	%ax,%fs ;						\
	FAKE_MCOUNT((5+ACTUALLY_PUSHED)*4(%esp)) ;			\
	pushl	_intr_unit + (irq_num) * 4 ;				\
	GET_FAST_INTR_LOCK ;						\
	call	*_intr_handler + (irq_num) * 4 ; /* do the work ASAP */ \
	REL_FAST_INTR_LOCK ;						\
	addl	$4, %esp ;						\
	movl	$0, lapic_eoi ;						\
	lock ;								\
	incl	_cnt+V_INTR ;	/* book-keeping can wait */		\
	movl	_intr_countp + (irq_num) * 4, %eax ;			\
	lock ;								\
	incl	(%eax) ;						\
	MEXITCOUNT ;							\
	popl	%fs ;							\
	MAYBE_POPL_ES ;							\
	popl	%ds ;							\
	popl	%edx ;							\
	popl	%ecx ;							\
	popl	%eax ;							\
	iret
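
/*
 * In rough C terms, the fast path above does the following (an
 * illustrative sketch only, using the _intr_unit[]/_intr_handler[]/
 * _intr_countp[] tables referenced by the macro):
 *
 *	arg = intr_unit[irq];
 *	GET_FAST_INTR_LOCK;
 *	(*intr_handler[irq])(arg);
 *	REL_FAST_INTR_LOCK;
 *	lapic_eoi = 0;			(any write signals EOI)
 *	cnt.v_intr++;			(statistics can wait)
 *	(*intr_countp[irq])++;
 */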

#else /* FAST_WITHOUTCPL */

#define	FAST_INTR(irq_num, vec_name)					\
	.text ;								\
	SUPERALIGN_TEXT ;						\
IDTVEC(vec_name) ;							\
	pushl	%eax ;		/* save only call-used registers */	\
	pushl	%ecx ;							\
	pushl	%edx ;							\
	pushl	%ds ;							\
	MAYBE_PUSHL_ES ;						\
	pushl	%fs ;							\
	movl	$KDSEL, %eax ;						\
	movl	%ax, %ds ;						\
	MAYBE_MOVW_AX_ES ;						\
	movl	$KPSEL, %eax ;						\
	movl	%ax, %fs ;						\
	FAKE_MCOUNT((5+ACTUALLY_PUSHED)*4(%esp)) ;			\
	GET_FAST_INTR_LOCK ;						\
	pushl	_intr_unit + (irq_num) * 4 ;				\
	call	*_intr_handler + (irq_num) * 4 ; /* do the work ASAP */ \
	addl	$4, %esp ;						\
	movl	$0, lapic_eoi ;						\
	lock ;								\
	incl	_cnt+V_INTR ;	/* book-keeping can wait */		\
	movl	_intr_countp + (irq_num) * 4,%eax ;			\
	lock ;								\
	incl	(%eax) ;						\
	movl	_cpl, %eax ;	/* unmasking pending HWIs or SWIs? */	\
	notl	%eax ;							\
	andl	_ipending, %eax ;					\
	jne	2f ; 		/* yes, maybe handle them */		\
1: ;									\
	MEXITCOUNT ;							\
	REL_FAST_INTR_LOCK ;						\
	popl	%fs ;							\
	MAYBE_POPL_ES ;							\
	popl	%ds ;							\
	popl	%edx ;							\
	popl	%ecx ;							\
	popl	%eax ;							\
	iret ;								\
;									\
	ALIGN_TEXT ;							\
2: ;									\
	cmpb	$3, _intr_nesting_level ;	/* enough stack? */	\
	jae	1b ;		/* no, return */			\
	movl	_cpl, %eax ;						\
	/* XXX next line is probably unnecessary now. */		\
	movl	$HWI_MASK|SWI_MASK, _cpl ;	/* limit nesting ... */	\
	lock ;								\
	incb	_intr_nesting_level ;	/* ... really limit it ... */	\
	sti ;			/* to do this as early as possible */	\
	popl	%fs ;		/* discard most of thin frame ... */	\
	MAYBE_POPL_ES ;		/* discard most of thin frame ... */	\
	popl	%ecx ;		/* ... original %ds ... */		\
	popl	%edx ;							\
	xchgl	%eax, 4(%esp) ;	/* orig %eax; save cpl */		\
	pushal ;		/* build fat frame (grrr) ... */	\
	pushl	%ecx ;		/* ... actually %ds ... */		\
	pushl	%es ;							\
	pushl	%fs ;							\
	movl	$KDSEL, %eax ;						\
	movl	%ax, %es ;						\
	movl	$KPSEL, %eax ;						\
	movl	%ax, %fs ;						\
	movl	(3+8+0)*4(%esp), %ecx ;	/* %ecx from thin frame ... */	\
	movl	%ecx, (3+6)*4(%esp) ;	/* ... to fat frame ... */	\
	movl	(3+8+1)*4(%esp), %eax ;	/* ... cpl from thin frame */	\
	pushl	%eax ;							\
	subl	$4, %esp ;	/* junk for unit number */		\
	MEXITCOUNT ;							\
	jmp	_doreti

#endif /* FAST_WITHOUTCPL */


/*
 * Slow interrupt entry and exit: push and pop a complete trap frame.
 */
#define PUSH_FRAME							\
	pushl	$0 ;		/* dummy error code */			\
	pushl	$0 ;		/* dummy trap type */			\
	pushal ;							\
	pushl	%ds ;		/* save data and extra segments ... */	\
	pushl	%es ;							\
	pushl	%fs

#define POP_FRAME							\
	popl	%fs ;							\
	popl	%es ;							\
	popl	%ds ;							\
	popal ;								\
	addl	$4+4,%esp
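
/*
 * Stack layout after PUSH_FRAME (byte offsets from %esp, illustrative):
 *	 0 fs,  4 es,  8 ds, 12 edi, 16 esi, 20 ebp, 24 esp, 28 ebx,
 *	32 edx, 36 ecx, 40 eax, 44 trapno, 48 err, 52 eip, 56 cs, 60 eflags
 * which is why the FAKE_MCOUNT(13*4(%esp)) uses below reach the saved %eip.
 */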

#define IOAPICADDR(irq_num) CNAME(int_to_apicintpin) + 16 * (irq_num) + 8
#define REDIRIDX(irq_num) CNAME(int_to_apicintpin) + 16 * (irq_num) + 12
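
/*
 * Each int_to_apicintpin[] entry is 16 bytes; offsets 8 and 12 hold the
 * pin's I/O APIC register address and its redirection-table index (as
 * used by MASK_IRQ/UNMASK_IRQ below).  E.g. (illustrative):
 *	IOAPICADDR(2) expands to CNAME(int_to_apicintpin) + 40
 */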

#define MASK_IRQ(irq_num)						\
	IMASK_LOCK ;				/* into critical reg */	\
	testl	$IRQ_BIT(irq_num), _apic_imen ;				\
	jne	7f ;			/* masked, don't mask */	\
	orl	$IRQ_BIT(irq_num), _apic_imen ;	/* set the mask bit */	\
	movl	IOAPICADDR(irq_num), %ecx ;	/* ioapic addr */	\
	movl	REDIRIDX(irq_num), %eax ;	/* get the index */	\
	movl	%eax, (%ecx) ;			/* write the index */	\
	movl	IOAPIC_WINDOW(%ecx), %eax ;	/* current value */	\
	orl	$IOART_INTMASK, %eax ;		/* set the mask */	\
	movl	%eax, IOAPIC_WINDOW(%ecx) ;	/* new value */		\
7: ;						/* already masked */	\
	IMASK_UNLOCK
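
/*
 * Roughly equivalent C for MASK_IRQ (an illustrative sketch; the
 * "index"/"window" field names are made up here, but the two stores are
 * the usual select-then-window I/O APIC register access):
 *
 *	if ((apic_imen & IRQ_BIT(n)) == 0) {
 *		apic_imen |= IRQ_BIT(n);
 *		ioapic->index  = rediridx;
 *		ioapic->window |= IOART_INTMASK;
 *	}
 */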
/*
 * Test to see whether we are handling an edge or level triggered INT.
 *  Level-triggered INTs must still be masked as we don't clear the source,
 *  and the EOI cycle would cause redundant INTs to occur.
 */
#define MASK_LEVEL_IRQ(irq_num)						\
	testl	$IRQ_BIT(irq_num), _apic_pin_trigger ;			\
	jz	9f ;				/* edge, don't mask */	\
	MASK_IRQ(irq_num) ;						\
9:


#ifdef APIC_INTR_REORDER
#define EOI_IRQ(irq_num)						\
	movl	_apic_isrbit_location + 8 * (irq_num), %eax ;		\
	movl	(%eax), %eax ;						\
	testl	_apic_isrbit_location + 4 + 8 * (irq_num), %eax ;	\
	jz	9f ;				/* not active */	\
	movl	$0, lapic_eoi ;						\
	APIC_ITRACE(apic_itrace_eoi, irq_num, APIC_ITRACE_EOI) ;	\
9:

#else
#define EOI_IRQ(irq_num)						\
	testl	$IRQ_BIT(irq_num), lapic_isr1 ;				\
	jz	9f ;				/* not active */	\
	movl	$0, lapic_eoi ;						\
	APIC_ITRACE(apic_itrace_eoi, irq_num, APIC_ITRACE_EOI) ;	\
9:
#endif
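
/*
 * Both EOI_IRQ variants only issue the EOI when the IRQ's bit is set in
 * the local APIC in-service register, i.e. roughly (illustrative sketch,
 * with "lapic_isr" standing in for whichever ISR word holds the bit):
 *
 *	if (lapic_isr & IRQ_BIT(n))
 *		lapic_eoi = 0;		(any write signals EOI)
 */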


/*
 * Test to see if the source is currently masked, clear if so.
 */
#define UNMASK_IRQ(irq_num)						\
	IMASK_LOCK ;				/* into critical reg */	\
	testl	$IRQ_BIT(irq_num), _apic_imen ;				\
	je	7f ;			/* bit clear, not masked */	\
	andl	$~IRQ_BIT(irq_num), _apic_imen ;/* clear mask bit */	\
	movl	IOAPICADDR(irq_num), %ecx ;	/* ioapic addr */	\
	movl	REDIRIDX(irq_num), %eax ;	/* get the index */	\
	movl	%eax, (%ecx) ;			/* write the index */	\
	movl	IOAPIC_WINDOW(%ecx), %eax ;	/* current value */	\
	andl	$~IOART_INTMASK, %eax ;		/* clear the mask */	\
	movl	%eax, IOAPIC_WINDOW(%ecx) ;	/* new value */		\
7: ;									\
	IMASK_UNLOCK

#ifdef INTR_SIMPLELOCK
#define ENLOCK
#define DELOCK
#define LATELOCK call	_get_isrlock
#else
#define ENLOCK \
	ISR_TRYLOCK ;		/* XXX this is going away... */		\
	testl	%eax, %eax ;			/* did we get it? */	\
	jz	3f
#define DELOCK	ISR_RELLOCK
#define LATELOCK
#endif

#ifdef APIC_INTR_DIAGNOSTIC
#ifdef APIC_INTR_DIAGNOSTIC_IRQ
log_intr_event:
	pushf
	cli
	pushl	$CNAME(apic_itrace_debuglock)
	call	_s_lock_np
	addl	$4, %esp
	movl	CNAME(apic_itrace_debugbuffer_idx), %ecx
	andl	$32767, %ecx
	movl	_cpuid, %eax
	shll	$8, %eax
	orl	8(%esp), %eax
	movw	%ax, CNAME(apic_itrace_debugbuffer)(,%ecx,2)
	incl	%ecx
	andl	$32767, %ecx
	movl	%ecx, CNAME(apic_itrace_debugbuffer_idx)
	pushl	$CNAME(apic_itrace_debuglock)
	call	_s_unlock_np
	addl	$4, %esp
	popf
	ret
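
/*
 * log_intr_event records a 16-bit (cpuid << 8 | event id) code in a
 * 32768-entry ring buffer, roughly (illustrative C sketch):
 *
 *	s_lock_np(&apic_itrace_debuglock);
 *	i = apic_itrace_debugbuffer_idx & 32767;
 *	apic_itrace_debugbuffer[i] = (cpuid << 8) | id;
 *	apic_itrace_debugbuffer_idx = (i + 1) & 32767;
 *	s_unlock_np(&apic_itrace_debuglock);
 */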


#define APIC_ITRACE(name, irq_num, id)					\
	lock ;					/* MP-safe */		\
	incl	CNAME(name) + (irq_num) * 4 ;				\
	pushl	%eax ;							\
	pushl	%ecx ;							\
	pushl	%edx ;							\
	movl	$(irq_num), %eax ;					\
	cmpl	$APIC_INTR_DIAGNOSTIC_IRQ, %eax ;			\
	jne	7f ;							\
	pushl	$id ;							\
	call	log_intr_event ;					\
	addl	$4, %esp ;						\
7: ;									\
	popl	%edx ;							\
	popl	%ecx ;							\
	popl	%eax
#else
#define APIC_ITRACE(name, irq_num, id)					\
	lock ;					/* MP-safe */		\
	incl	CNAME(name) + (irq_num) * 4
#endif

#define APIC_ITRACE_ENTER 1
#define APIC_ITRACE_EOI 2
#define APIC_ITRACE_TRYISRLOCK 3
#define APIC_ITRACE_GOTISRLOCK 4
#define APIC_ITRACE_ENTER2 5
#define APIC_ITRACE_LEAVE 6
#define APIC_ITRACE_UNMASK 7
#define APIC_ITRACE_ACTIVE 8
#define APIC_ITRACE_MASKED 9
#define APIC_ITRACE_NOISRLOCK 10
#define APIC_ITRACE_MASKED2 11
#define APIC_ITRACE_SPLZ 12
#define APIC_ITRACE_DORETI 13

#else
#define APIC_ITRACE(name, irq_num, id)
#endif

#ifdef CPL_AND_CML

#define	INTR(irq_num, vec_name, maybe_extra_ipending)			\
	.text ;								\
	SUPERALIGN_TEXT ;						\
/* _XintrNN: entry point used by IDT/HWIs & splz_unpend via _vec[]. */	\
IDTVEC(vec_name) ;							\
	PUSH_FRAME ;							\
	movl	$KDSEL, %eax ;	/* reload with kernel's data segment */	\
	movl	%ax, %ds ;						\
	movl	%ax, %es ;						\
	movl	$KPSEL, %eax ;						\
	movl	%ax, %fs ;						\
;									\
	maybe_extra_ipending ;						\
;									\
	APIC_ITRACE(apic_itrace_enter, irq_num, APIC_ITRACE_ENTER) ;	\
	lock ;					/* MP-safe */		\
	btsl	$(irq_num), iactive ;		/* lazy masking */	\
	jc	1f ;				/* already active */	\
;									\
	MASK_LEVEL_IRQ(irq_num) ;					\
	EOI_IRQ(irq_num) ;						\
0: ;									\
	APIC_ITRACE(apic_itrace_tryisrlock, irq_num, APIC_ITRACE_TRYISRLOCK) ;\
	ENLOCK ;							\
;									\
	APIC_ITRACE(apic_itrace_gotisrlock, irq_num, APIC_ITRACE_GOTISRLOCK) ;\
	AVCPL_LOCK ;				/* MP-safe */		\
	testl	$IRQ_BIT(irq_num), _cpl ;				\
	jne	2f ;				/* this INT masked */	\
	testl	$IRQ_BIT(irq_num), _cml ;				\
	jne	2f ;				/* this INT masked */	\
	orl	$IRQ_BIT(irq_num), _cil ;				\
	AVCPL_UNLOCK ;							\
;									\
	incb	_intr_nesting_level ;					\
;									\
  /* entry point used by doreti_unpend for HWIs. */			\
__CONCAT(Xresume,irq_num): ;						\
	FAKE_MCOUNT(13*4(%esp)) ;		/* XXX avoid dbl cnt */ \
	lock ;	incl	_cnt+V_INTR ;		/* tally interrupts */	\
	movl	_intr_countp + (irq_num) * 4, %eax ;			\
	lock ;	incl	(%eax) ;					\
;									\
	AVCPL_LOCK ;				/* MP-safe */		\
	movl	_cml, %eax ;						\
	pushl	%eax ;							\
	orl	_intr_mask + (irq_num) * 4, %eax ;			\
	movl	%eax, _cml ;						\
	AVCPL_UNLOCK ;							\
;									\
	pushl	_intr_unit + (irq_num) * 4 ;				\
	incl	_inside_intr ;						\
	APIC_ITRACE(apic_itrace_enter2, irq_num, APIC_ITRACE_ENTER2) ;	\
	sti ;								\
	call	*_intr_handler + (irq_num) * 4 ;			\
	cli ;								\
	APIC_ITRACE(apic_itrace_leave, irq_num, APIC_ITRACE_LEAVE) ;	\
	decl	_inside_intr ;						\
;									\
	lock ;	andl	$~IRQ_BIT(irq_num), iactive ;			\
	lock ;	andl	$~IRQ_BIT(irq_num), _cil ;			\
	UNMASK_IRQ(irq_num) ;						\
	APIC_ITRACE(apic_itrace_unmask, irq_num, APIC_ITRACE_UNMASK) ;	\
	sti ;				/* doreti repeats cli/sti */	\
	MEXITCOUNT ;							\
	LATELOCK ;							\
	jmp	_doreti ;						\
;									\
	ALIGN_TEXT ;							\
1: ;						/* active */		\
	APIC_ITRACE(apic_itrace_active, irq_num, APIC_ITRACE_ACTIVE) ;	\
	MASK_IRQ(irq_num) ;						\
	EOI_IRQ(irq_num) ;						\
	AVCPL_LOCK ;				/* MP-safe */		\
	orl	$IRQ_BIT(irq_num), _ipending ;				\
	AVCPL_UNLOCK ;							\
	lock ;								\
	btsl	$(irq_num), iactive ;		/* still active */	\
	jnc	0b ;				/* retry */		\
	POP_FRAME ;							\
	iret ;								\
;									\
	ALIGN_TEXT ;							\
2: ;						/* masked by cpl|cml */	\
	APIC_ITRACE(apic_itrace_masked, irq_num, APIC_ITRACE_MASKED) ;	\
	orl	$IRQ_BIT(irq_num), _ipending ;				\
	AVCPL_UNLOCK ;							\
	DELOCK ;		/* XXX this is going away... */		\
	POP_FRAME ;							\
	iret ;								\
	ALIGN_TEXT ;							\
3: ;			/* other cpu has isr lock */			\
	APIC_ITRACE(apic_itrace_noisrlock, irq_num, APIC_ITRACE_NOISRLOCK) ;\
	AVCPL_LOCK ;				/* MP-safe */		\
	orl	$IRQ_BIT(irq_num), _ipending ;				\
	testl	$IRQ_BIT(irq_num), _cpl ;				\
	jne	4f ;				/* this INT masked */	\
	testl	$IRQ_BIT(irq_num), _cml ;				\
	jne	4f ;				/* this INT masked */	\
	orl	$IRQ_BIT(irq_num), _cil ;				\
	AVCPL_UNLOCK ;							\
	call	forward_irq ;	/* forward irq to lock holder */	\
	POP_FRAME ;				/* and return */	\
	iret ;								\
	ALIGN_TEXT ;							\
4: ;						/* blocked */		\
	APIC_ITRACE(apic_itrace_masked2, irq_num, APIC_ITRACE_MASKED2) ;\
	AVCPL_UNLOCK ;							\
	POP_FRAME ;				/* and return */	\
	iret

#else /* CPL_AND_CML */


#define	INTR(irq_num, vec_name, maybe_extra_ipending)			\
	.text ;								\
	SUPERALIGN_TEXT ;						\
/* _XintrNN: entry point used by IDT/HWIs & splz_unpend via _vec[]. */	\
IDTVEC(vec_name) ;							\
	PUSH_FRAME ;							\
	movl	$KDSEL, %eax ;	/* reload with kernel's data segment */	\
	movl	%ax, %ds ;						\
	movl	%ax, %es ;						\
	movl	$KPSEL, %eax ;						\
	movl	%ax, %fs ;						\
;									\
	maybe_extra_ipending ;						\
;									\
	APIC_ITRACE(apic_itrace_enter, irq_num, APIC_ITRACE_ENTER) ;	\
	lock ;					/* MP-safe */		\
	btsl	$(irq_num), iactive ;		/* lazy masking */	\
	jc	1f ;				/* already active */	\
;									\
	MASK_LEVEL_IRQ(irq_num) ;					\
	EOI_IRQ(irq_num) ;						\
0: ;									\
	APIC_ITRACE(apic_itrace_tryisrlock, irq_num, APIC_ITRACE_TRYISRLOCK) ;\
	ISR_TRYLOCK ;		/* XXX this is going away... */		\
	testl	%eax, %eax ;			/* did we get it? */	\
	jz	3f ;				/* no */		\
;									\
	APIC_ITRACE(apic_itrace_gotisrlock, irq_num, APIC_ITRACE_GOTISRLOCK) ;\
	AVCPL_LOCK ;				/* MP-safe */		\
	testl	$IRQ_BIT(irq_num), _cpl ;				\
	jne	2f ;				/* this INT masked */	\
	AVCPL_UNLOCK ;							\
;									\
	incb	_intr_nesting_level ;					\
;									\
  /* entry point used by doreti_unpend for HWIs. */			\
__CONCAT(Xresume,irq_num): ;						\
	FAKE_MCOUNT(13*4(%esp)) ;		/* XXX avoid dbl cnt */ \
	lock ;	incl	_cnt+V_INTR ;		/* tally interrupts */	\
	movl	_intr_countp + (irq_num) * 4, %eax ;			\
	lock ;	incl	(%eax) ;					\
;									\
	AVCPL_LOCK ;				/* MP-safe */		\
	movl	_cpl, %eax ;						\
	pushl	%eax ;							\
	orl	_intr_mask + (irq_num) * 4, %eax ;			\
	movl	%eax, _cpl ;						\
	andl	$~IRQ_BIT(irq_num), _ipending ;				\
	AVCPL_UNLOCK ;							\
;									\
	pushl	_intr_unit + (irq_num) * 4 ;				\
	APIC_ITRACE(apic_itrace_enter2, irq_num, APIC_ITRACE_ENTER2) ;	\
	sti ;								\
	call	*_intr_handler + (irq_num) * 4 ;			\
	cli ;								\
	APIC_ITRACE(apic_itrace_leave, irq_num, APIC_ITRACE_LEAVE) ;	\
;									\
	lock ;	andl	$~IRQ_BIT(irq_num), iactive ;			\
	UNMASK_IRQ(irq_num) ;						\
	APIC_ITRACE(apic_itrace_unmask, irq_num, APIC_ITRACE_UNMASK) ;	\
	sti ;				/* doreti repeats cli/sti */	\
	MEXITCOUNT ;							\
	jmp	_doreti ;						\
;									\
	ALIGN_TEXT ;							\
1: ;						/* active */		\
	APIC_ITRACE(apic_itrace_active, irq_num, APIC_ITRACE_ACTIVE) ;	\
	MASK_IRQ(irq_num) ;						\
	EOI_IRQ(irq_num) ;						\
	AVCPL_LOCK ;				/* MP-safe */		\
	orl	$IRQ_BIT(irq_num), _ipending ;				\
	AVCPL_UNLOCK ;							\
	lock ;								\
	btsl	$(irq_num), iactive ;		/* still active */	\
	jnc	0b ;				/* retry */		\
	POP_FRAME ;							\
	iret ;		/* XXX: iactive bit might be 0 now */		\
	ALIGN_TEXT ;							\
2: ;				/* masked by cpl, leave iactive set */	\
	APIC_ITRACE(apic_itrace_masked, irq_num, APIC_ITRACE_MASKED) ;	\
	orl	$IRQ_BIT(irq_num), _ipending ;				\
	AVCPL_UNLOCK ;							\
	ISR_RELLOCK ;		/* XXX this is going away... */		\
	POP_FRAME ;							\
	iret ;								\
	ALIGN_TEXT ;							\
3: ;			/* other cpu has isr lock */			\
	APIC_ITRACE(apic_itrace_noisrlock, irq_num, APIC_ITRACE_NOISRLOCK) ;\
	AVCPL_LOCK ;				/* MP-safe */		\
	orl	$IRQ_BIT(irq_num), _ipending ;				\
	testl	$IRQ_BIT(irq_num), _cpl ;				\
	jne	4f ;				/* this INT masked */	\
	AVCPL_UNLOCK ;							\
	call	forward_irq ;	/* forward irq to lock holder */	\
	POP_FRAME ;				/* and return */	\
	iret ;								\
	ALIGN_TEXT ;							\
4: ;						/* blocked */		\
	APIC_ITRACE(apic_itrace_masked2, irq_num, APIC_ITRACE_MASKED2) ;\
	AVCPL_UNLOCK ;							\
	POP_FRAME ;				/* and return */	\
	iret

#endif /* CPL_AND_CML */


/*
 * Handle "spurious INTerrupts".
 * Notes:
 *  This is different from the "spurious INTerrupt" generated by an
 *   8259 PIC for missing INTs.  See the APIC documentation for details.
 *  This routine should NOT do an 'EOI' cycle.
 */
	.text
	SUPERALIGN_TEXT
	.globl _Xspuriousint
_Xspuriousint:

	/* No EOI cycle used here */

	iret


/*
 * Handle TLB shootdowns.
 */
	.text
	SUPERALIGN_TEXT
	.globl	_Xinvltlb
_Xinvltlb:
	pushl	%eax

#ifdef COUNT_XINVLTLB_HITS
	pushl	%fs
	movl	$KPSEL, %eax
	movl	%ax, %fs
	movl	_cpuid, %eax
	popl	%fs
	ss
	incl	_xhits(,%eax,4)
#endif /* COUNT_XINVLTLB_HITS */

	movl	%cr3, %eax		/* invalidate the TLB */
	movl	%eax, %cr3

	ss				/* stack segment, avoid %ds load */
	movl	$0, lapic_eoi		/* End Of Interrupt to APIC */

	popl	%eax
	iret
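
/*
 * Reloading %cr3 with its current value flushes the (non-global) TLB
 * entries; in C this is the familiar idiom (illustrative sketch):
 *
 *	load_cr3(rcr3());
 */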


#ifdef BETTER_CLOCK

/*
 * Executed by a CPU when it receives an Xcpucheckstate IPI from another CPU,
 *
 *  - Stores current cpu state in checkstate_cpustate[cpuid]
 *      0 == user, 1 == sys, 2 == intr
 *  - Stores current process in checkstate_curproc[cpuid]
 *
 *  - Signals its receipt by setting bit cpuid in checkstate_probed_cpus.
 *
 * stack: 0->ds, 4->fs, 8->ebx, 12->eax, 16->eip, 20->cs, 24->eflags
 */
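
/*
 * The state classification below is, in rough C (illustrative sketch;
 * inside_intr exists only under CPL_AND_CML):
 *
 *	if ((cs & 3) == 3 || (eflags & PSL_VM))
 *		state = 0;			(user, incl. vm86)
 *	else if (inside_intr)
 *		state = 2;			(interrupt)
 *	else
 *		state = 1;			(system)
 */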

	.text
	SUPERALIGN_TEXT
	.globl _Xcpucheckstate
	.globl _checkstate_cpustate
	.globl _checkstate_curproc
	.globl _checkstate_pc
_Xcpucheckstate:
	pushl	%eax
	pushl	%ebx
	pushl	%ds			/* save current data segment */
	pushl	%fs

	movl	$KDSEL, %eax
	movl	%ax, %ds		/* use KERNEL data segment */
	movl	$KPSEL, %eax
	movl	%ax, %fs

	movl	$0, lapic_eoi		/* End Of Interrupt to APIC */

	movl	$0, %ebx
	movl	20(%esp), %eax
	andl	$3, %eax
	cmpl	$3, %eax
	je	1f
#ifdef VM86
	testl	$PSL_VM, 24(%esp)
	jne	1f
#endif
	incl	%ebx			/* system or interrupt */
#ifdef CPL_AND_CML
	cmpl	$0, _inside_intr
	je	1f
	incl	%ebx			/* interrupt */
#endif
1:
	movl	_cpuid, %eax
	movl	%ebx, _checkstate_cpustate(,%eax,4)
	movl	_curproc, %ebx
	movl	%ebx, _checkstate_curproc(,%eax,4)
	movl	16(%esp), %ebx
	movl	%ebx, _checkstate_pc(,%eax,4)

	lock				/* checkstate_probed_cpus |= (1<<id) */
	btsl	%eax, _checkstate_probed_cpus

	popl	%fs
	popl	%ds			/* restore previous data segment */
	popl	%ebx
	popl	%eax
	iret

#endif /* BETTER_CLOCK */

/*
 * Executed by a CPU when it receives an Xcpuast IPI from another CPU,
 *
 *  - Signals its receipt by clearing bit cpuid in checkstate_need_ast.
 *
 *  - We need a better method of triggering asts on other cpus.
 */

	.text
	SUPERALIGN_TEXT
	.globl _Xcpuast
_Xcpuast:
	PUSH_FRAME
	movl	$KDSEL, %eax
	movl	%ax, %ds		/* use KERNEL data segment */
	movl	%ax, %es
	movl	$KPSEL, %eax
	movl	%ax, %fs

	movl	_cpuid, %eax
	lock				/* checkstate_need_ast &= ~(1<<id) */
	btrl	%eax, _checkstate_need_ast
	movl	$0, lapic_eoi		/* End Of Interrupt to APIC */

	lock
	btsl	%eax, _checkstate_pending_ast
	jc	1f

	FAKE_MCOUNT(13*4(%esp))

	/*
	 * Giant locks do not come cheap.
	 * A lot of cycles are going to be wasted here.
	 */
	call	_get_isrlock

	AVCPL_LOCK
#ifdef CPL_AND_CML
	movl	_cml, %eax
#else
	movl	_cpl, %eax
#endif
	pushl	%eax
	orl	$SWI_AST_PENDING, _ipending
	AVCPL_UNLOCK
	lock
	incb	_intr_nesting_level
	sti

	pushl	$0

	movl	_cpuid, %eax
	lock
	btrl	%eax, _checkstate_pending_ast
	lock
	btrl	%eax, CNAME(resched_cpus)
	jnc	2f
	movl	$1, CNAME(want_resched)
	lock
	incl	CNAME(want_resched_cnt)
2:
	lock
	incl	CNAME(cpuast_cnt)
	MEXITCOUNT
	jmp	_doreti
1:
	/* We are already in the process of delivering an ast for this CPU */
	POP_FRAME
	iret


/*
 * Executed by a CPU when it receives an XFORWARD_IRQ IPI.
 */

	.text
	SUPERALIGN_TEXT
	.globl _Xforward_irq
_Xforward_irq:
	PUSH_FRAME
	movl	$KDSEL, %eax
	movl	%ax, %ds		/* use KERNEL data segment */
	movl	%ax, %es
	movl	$KPSEL, %eax
	movl	%ax, %fs

	movl	$0, lapic_eoi		/* End Of Interrupt to APIC */

	FAKE_MCOUNT(13*4(%esp))

	ISR_TRYLOCK
	testl	%eax, %eax		/* Did we get the lock? */
	jz	1f			/* No */

	lock
	incl	CNAME(forward_irq_hitcnt)
	cmpb	$4, _intr_nesting_level
	jae	2f

	AVCPL_LOCK
#ifdef CPL_AND_CML
	movl	_cml, %eax
#else
	movl	_cpl, %eax
#endif
	pushl	%eax
	AVCPL_UNLOCK
	lock
	incb	_intr_nesting_level
	sti

	pushl	$0

	MEXITCOUNT
	jmp	_doreti			/* Handle forwarded interrupt */
1:
	lock
	incl	CNAME(forward_irq_misscnt)
	call	forward_irq	/* Oops, we've lost the isr lock */
	MEXITCOUNT
	POP_FRAME
	iret
2:
	lock
	incl	CNAME(forward_irq_toodeepcnt)
3:
	ISR_RELLOCK
	MEXITCOUNT
	POP_FRAME
	iret

/*
 * Forward an IRQ to the CPU holding the ISR (MP) lock.
 */
forward_irq:
	MCOUNT
	cmpl	$0, _invltlb_ok
	jz	4f

	cmpl	$0, CNAME(forward_irq_enabled)
	jz	4f

	movl	_mp_lock, %eax
	cmpl	$FREE_LOCK, %eax
	jne	1f
	movl	$0, %eax		/* Pick CPU #0 if no one has lock */
1:
	shrl	$24, %eax
	movl	_cpu_num_to_apic_id(,%eax,4), %ecx
	shll	$24, %ecx
	movl	lapic_icr_hi, %eax
	andl	$~APIC_ID_MASK, %eax
	orl	%ecx, %eax
	movl	%eax, lapic_icr_hi

2:
	movl	lapic_icr_lo, %eax
	andl	$APIC_DELSTAT_MASK, %eax
	jnz	2b
	movl	lapic_icr_lo, %eax
	andl	$APIC_RESV2_MASK, %eax
	orl	$(APIC_DEST_DESTFLD|APIC_DELMODE_FIXED|XFORWARD_IRQ_OFFSET), %eax
	movl	%eax, lapic_icr_lo
3:
	movl	lapic_icr_lo, %eax
	andl	$APIC_DELSTAT_MASK, %eax
	jnz	3b
4:
	ret
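
/*
 * forward_irq programs the local APIC ICR by hand; the sequence is the
 * standard destination-then-command one (illustrative C-ish sketch):
 *
 *	lapic_icr_hi = (lapic_icr_hi & ~APIC_ID_MASK) | (apic_id << 24);
 *	while (lapic_icr_lo & APIC_DELSTAT_MASK)
 *		;				(wait until ICR idle)
 *	lapic_icr_lo = (lapic_icr_lo & APIC_RESV2_MASK) |
 *	    APIC_DEST_DESTFLD | APIC_DELMODE_FIXED | XFORWARD_IRQ_OFFSET;
 *	while (lapic_icr_lo & APIC_DELSTAT_MASK)
 *		;				(wait for delivery)
 */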

/*
 * Executed by a CPU when it receives an Xcpustop IPI from another CPU,
 *
 *  - Signals its receipt.
 *  - Waits for permission to restart.
 *  - Signals its restart.
 */

	.text
	SUPERALIGN_TEXT
	.globl _Xcpustop
_Xcpustop:
	pushl	%ebp
	movl	%esp, %ebp
	pushl	%eax
	pushl	%ecx
	pushl	%edx
	pushl	%ds			/* save current data segment */
	pushl	%fs

	movl	$KDSEL, %eax
	movl	%ax, %ds		/* use KERNEL data segment */
	movl	$KPSEL, %eax
	movl	%ax, %fs

	movl	$0, lapic_eoi		/* End Of Interrupt to APIC */

	movl	_cpuid, %eax
	imull	$PCB_SIZE, %eax
	leal	CNAME(stoppcbs)(%eax), %eax
	pushl	%eax
	call	CNAME(savectx)		/* Save process context */
	addl	$4, %esp

	movl	_cpuid, %eax

	lock
	btsl	%eax, _stopped_cpus	/* stopped_cpus |= (1<<id) */
1:
	btl	%eax, _started_cpus	/* while (!(started_cpus & (1<<id))) */
	jnc	1b

	lock
	btrl	%eax, _started_cpus	/* started_cpus &= ~(1<<id) */
	lock
	btrl	%eax, _stopped_cpus	/* stopped_cpus &= ~(1<<id) */

	test	%eax, %eax
	jnz	2f

	movl	CNAME(cpustop_restartfunc), %eax
	test	%eax, %eax
	jz	2f
	movl	$0, CNAME(cpustop_restartfunc)	/* One-shot */

	call	*%eax
2:
	popl	%fs
	popl	%ds			/* restore previous data segment */
	popl	%edx
	popl	%ecx
	popl	%eax
	movl	%ebp, %esp
	popl	%ebp
	iret
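
/*
 * The stop/restart handshake above is, in rough C (illustrative sketch;
 * the bit operations are the locked btsl/btrl instructions):
 *
 *	savectx(&stoppcbs[cpuid]);
 *	stopped_cpus |= (1 << cpuid);		(atomic)
 *	while (!(started_cpus & (1 << cpuid)))
 *		;				(spin until restarted)
 *	started_cpus &= ~(1 << cpuid);		(atomic)
 *	stopped_cpus &= ~(1 << cpuid);		(atomic)
 *	if (cpuid == 0 && cpustop_restartfunc != NULL) {
 *		f = cpustop_restartfunc;
 *		cpustop_restartfunc = NULL;	(one-shot)
 *		(*f)();
 *	}
 */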


MCOUNT_LABEL(bintr)
	FAST_INTR(0,fastintr0)
	FAST_INTR(1,fastintr1)
	FAST_INTR(2,fastintr2)
	FAST_INTR(3,fastintr3)
	FAST_INTR(4,fastintr4)
	FAST_INTR(5,fastintr5)
	FAST_INTR(6,fastintr6)
	FAST_INTR(7,fastintr7)
	FAST_INTR(8,fastintr8)
	FAST_INTR(9,fastintr9)
	FAST_INTR(10,fastintr10)
	FAST_INTR(11,fastintr11)
	FAST_INTR(12,fastintr12)
	FAST_INTR(13,fastintr13)
	FAST_INTR(14,fastintr14)
	FAST_INTR(15,fastintr15)
	FAST_INTR(16,fastintr16)
	FAST_INTR(17,fastintr17)
	FAST_INTR(18,fastintr18)
	FAST_INTR(19,fastintr19)
	FAST_INTR(20,fastintr20)
	FAST_INTR(21,fastintr21)
	FAST_INTR(22,fastintr22)
	FAST_INTR(23,fastintr23)
#define	CLKINTR_PENDING	movl $1,CNAME(clkintr_pending)
	INTR(0,intr0, CLKINTR_PENDING)
	INTR(1,intr1,)
	INTR(2,intr2,)
	INTR(3,intr3,)
	INTR(4,intr4,)
	INTR(5,intr5,)
	INTR(6,intr6,)
	INTR(7,intr7,)
	INTR(8,intr8,)
	INTR(9,intr9,)
	INTR(10,intr10,)
	INTR(11,intr11,)
	INTR(12,intr12,)
	INTR(13,intr13,)
	INTR(14,intr14,)
	INTR(15,intr15,)
	INTR(16,intr16,)
	INTR(17,intr17,)
	INTR(18,intr18,)
	INTR(19,intr19,)
	INTR(20,intr20,)
	INTR(21,intr21,)
	INTR(22,intr22,)
	INTR(23,intr23,)
MCOUNT_LABEL(eintr)

	.data
/*
 * Addresses of interrupt handlers.
 *  XresumeNN: Resumption addresses for HWIs.
 */
	.globl _ihandlers
_ihandlers:
/*
 * used by:
 *  ipl.s:	doreti_unpend
 */
	.long	Xresume0,  Xresume1,  Xresume2,  Xresume3
	.long	Xresume4,  Xresume5,  Xresume6,  Xresume7
	.long	Xresume8,  Xresume9,  Xresume10, Xresume11
	.long	Xresume12, Xresume13, Xresume14, Xresume15
	.long	Xresume16, Xresume17, Xresume18, Xresume19
	.long	Xresume20, Xresume21, Xresume22, Xresume23
/*
 * used by:
 *  ipl.s:	doreti_unpend
 *  apic_ipl.s:	splz_unpend
 */
	.long	_swi_null, swi_net, _swi_null, _swi_null
	.long	_swi_vm, _swi_null, _softclock, swi_ast

imasks:				/* masks for interrupt handlers */
	.space	NHWI*4		/* padding; HWI masks are elsewhere */

	.long	SWI_TTY_MASK, SWI_NET_MASK, SWI_CAMNET_MASK, SWI_CAMBIO_MASK
	.long	SWI_VM_MASK, 0, SWI_CLOCK_MASK, SWI_AST_MASK

/* active flag for lazy masking */
iactive:
	.long	0

#ifdef COUNT_XINVLTLB_HITS
	.globl	_xhits
_xhits:
	.space	(NCPU * 4), 0
#endif /* COUNT_XINVLTLB_HITS */

/* variables used by stop_cpus()/restart_cpus()/Xcpustop */
	.globl _stopped_cpus, _started_cpus
_stopped_cpus:
	.long	0
_started_cpus:
	.long	0

#ifdef BETTER_CLOCK
	.globl _checkstate_probed_cpus
_checkstate_probed_cpus:
	.long	0
#endif /* BETTER_CLOCK */
	.globl _checkstate_need_ast
_checkstate_need_ast:
	.long	0
_checkstate_pending_ast:
	.long	0
	.globl CNAME(forward_irq_misscnt)
	.globl CNAME(forward_irq_toodeepcnt)
	.globl CNAME(forward_irq_hitcnt)
	.globl CNAME(resched_cpus)
	.globl CNAME(want_resched_cnt)
	.globl CNAME(cpuast_cnt)
	.globl CNAME(cpustop_restartfunc)
CNAME(forward_irq_misscnt):
	.long 0
CNAME(forward_irq_hitcnt):
	.long 0
CNAME(forward_irq_toodeepcnt):
	.long 0
CNAME(resched_cpus):
	.long 0
CNAME(want_resched_cnt):
	.long 0
CNAME(cpuast_cnt):
	.long 0
CNAME(cpustop_restartfunc):
	.long 0


	.globl	_apic_pin_trigger
_apic_pin_trigger:
	.long	0

	.text