apic_vector.s revision 46129
/*
 *	from: vector.s, 386BSD 0.1 unknown origin
 *	$Id: apic_vector.s,v 1.36 1999/04/14 14:26:35 bde Exp $
 */


#include <machine/apic.h>
#include <machine/smp.h>

#include "i386/isa/intr_machdep.h"


#ifdef FAST_SIMPLELOCK

#define GET_FAST_INTR_LOCK						\
	pushl	$_fast_intr_lock ;		/* address of lock */	\
	call	_s_lock ;			/* MP-safe */		\
	addl	$4,%esp

#define REL_FAST_INTR_LOCK						\
	pushl	$_fast_intr_lock ;		/* address of lock */	\
	call	_s_unlock ;			/* MP-safe */		\
	addl	$4,%esp

#else /* FAST_SIMPLELOCK */

#define GET_FAST_INTR_LOCK						\
	call	_get_isrlock

#define REL_FAST_INTR_LOCK						\
	pushl	$_mp_lock ;	/* GIANT_LOCK */			\
	call	_MPrellock ;						\
	addl	$4, %esp

#endif /* FAST_SIMPLELOCK */

/* convert an absolute IRQ# into a bitmask */
#define IRQ_BIT(irq_num)	(1 << (irq_num))

/* make an index into the IO APIC from the IRQ# */
#define REDTBL_IDX(irq_num)	(0x10 + ((irq_num) * 2))
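
/*
 * Worked example (illustrative): for IRQ 3, IRQ_BIT(3) == 0x08 and
 *  REDTBL_IDX(3) == 0x16, i.e. the low 32 bits of I/O APIC redirection
 *  table entry 3 (entries start at register 0x10, two registers each).
 */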


/*
 * Macros for interrupt entry, call to handler, and exit.
 */

#ifdef FAST_WITHOUTCPL

/*
 * Fast interrupt entry: save only the call-used registers, call the
 *  handler as soon as possible, and return without any cpl processing.
 */
#define	FAST_INTR(irq_num, vec_name)					\
	.text ;								\
	SUPERALIGN_TEXT ;						\
IDTVEC(vec_name) ;							\
	pushl	%eax ;		/* save only call-used registers */	\
	pushl	%ecx ;							\
	pushl	%edx ;							\
	pushl	%ds ;							\
	MAYBE_PUSHL_ES ;						\
	pushl	%fs ;							\
	movl	$KDSEL,%eax ;						\
	movl	%ax,%ds ;						\
	MAYBE_MOVW_AX_ES ;						\
	movl	$KPSEL,%eax ;						\
	movl	%ax,%fs ;						\
	FAKE_MCOUNT((5+ACTUALLY_PUSHED)*4(%esp)) ;			\
	pushl	_intr_unit + (irq_num) * 4 ;				\
	GET_FAST_INTR_LOCK ;						\
	call	*_intr_handler + (irq_num) * 4 ; /* do the work ASAP */ \
	REL_FAST_INTR_LOCK ;						\
	addl	$4, %esp ;						\
	movl	$0, lapic_eoi ;						\
	lock ; 								\
	incl	_cnt+V_INTR ;	/* book-keeping can wait */		\
	movl	_intr_countp + (irq_num) * 4, %eax ;			\
	lock ; 								\
	incl	(%eax) ;						\
	MEXITCOUNT ;							\
	popl	%fs ;							\
	MAYBE_POPL_ES ;							\
	popl	%ds ;							\
	popl	%edx ;							\
	popl	%ecx ;							\
	popl	%eax ;							\
	iret

#else /* FAST_WITHOUTCPL */

#define	FAST_INTR(irq_num, vec_name)					\
	.text ;								\
	SUPERALIGN_TEXT ;						\
IDTVEC(vec_name) ;							\
	pushl	%eax ;		/* save only call-used registers */	\
	pushl	%ecx ;							\
	pushl	%edx ;							\
	pushl	%ds ;							\
	MAYBE_PUSHL_ES ;						\
	pushl	%fs ;							\
	movl	$KDSEL, %eax ;						\
	movl	%ax, %ds ;						\
	MAYBE_MOVW_AX_ES ;						\
	movl	$KPSEL, %eax ;						\
	movl	%ax, %fs ;						\
	FAKE_MCOUNT((5+ACTUALLY_PUSHED)*4(%esp)) ;			\
	GET_FAST_INTR_LOCK ;						\
	pushl	_intr_unit + (irq_num) * 4 ;				\
	call	*_intr_handler + (irq_num) * 4 ; /* do the work ASAP */ \
	addl	$4, %esp ;						\
	movl	$0, lapic_eoi ;						\
	lock ; 								\
	incl	_cnt+V_INTR ;	/* book-keeping can wait */		\
	movl	_intr_countp + (irq_num) * 4,%eax ;			\
	lock ; 								\
	incl	(%eax) ;						\
	movl	_cpl, %eax ;	/* unmasking pending HWIs or SWIs? */	\
	notl	%eax ;							\
	andl	_ipending, %eax ;					\
	jne	2f ; 		/* yes, maybe handle them */		\
1: ;									\
	MEXITCOUNT ;							\
	REL_FAST_INTR_LOCK ;						\
	popl	%fs ;							\
	MAYBE_POPL_ES ;							\
	popl	%ds ;							\
	popl	%edx ;							\
	popl	%ecx ;							\
	popl	%eax ;							\
	iret ;								\
;									\
	ALIGN_TEXT ;							\
2: ;									\
	cmpb	$3, _intr_nesting_level ;	/* enough stack? */	\
	jae	1b ;		/* no, return */			\
	movl	_cpl, %eax ;						\
	/* XXX next line is probably unnecessary now. */		\
	movl	$HWI_MASK|SWI_MASK, _cpl ;	/* limit nesting ... */	\
	lock ; 								\
	incb	_intr_nesting_level ;	/* ... really limit it ... */	\
	sti ;			/* to do this as early as possible */	\
	popl	%fs ;		/* discard most of thin frame ... */	\
	MAYBE_POPL_ES ;		/* discard most of thin frame ... */	\
	popl	%ecx ;		/* ... original %ds ... */		\
	popl	%edx ;							\
	xchgl	%eax, 4(%esp) ;	/* orig %eax; save cpl */		\
	pushal ;		/* build fat frame (grrr) ... */	\
	pushl	%ecx ;		/* ... actually %ds ... */		\
	pushl	%es ;							\
	pushl	%fs ;							\
	movl	$KDSEL, %eax ;						\
	movl	%ax, %es ;						\
	movl	$KPSEL, %eax ;						\
	movl	%ax, %fs ;						\
	movl	(3+8+0)*4(%esp), %ecx ;	/* %ecx from thin frame ... */	\
	movl	%ecx, (3+6)*4(%esp) ;	/* ... to fat frame ... */	\
	movl	(3+8+1)*4(%esp), %eax ;	/* ... cpl from thin frame */	\
	pushl	%eax ;							\
	subl	$4, %esp ;	/* junk for unit number */		\
	MEXITCOUNT ;							\
	jmp	_doreti

#endif /* FAST_WITHOUTCPL */


/*
 * Frame push/pop for the slow interrupt vectors and IPI handlers.
 */
#define PUSH_FRAME							\
	pushl	$0 ;		/* dummy error code */			\
	pushl	$0 ;		/* dummy trap type */			\
	pushal ;							\
	pushl	%ds ;		/* save data and extra segments ... */	\
	pushl	%es ;							\
	pushl	%fs
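
/*
 * Resulting layout (a sketch, from the pushes above): 0->fs, 4->es,
 *  8->ds, 12..40->edi/esi/ebp/esp/ebx/edx/ecx/eax, 44->trap type,
 *  48->error code, 52->eip; hence the FAKE_MCOUNT(13*4(%esp)) uses
 *  below pick up the interrupted %eip.
 */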

#define POP_FRAME							\
	popl	%fs ;							\
	popl	%es ;							\
	popl	%ds ;							\
	popal ;								\
	addl	$4+4,%esp	/* dummy trap type and error code */

#define IOAPICADDR(irq_num) CNAME(int_to_apicintpin) + 16 * (irq_num) + 8
#define REDIRIDX(irq_num) CNAME(int_to_apicintpin) + 16 * (irq_num) + 12

#define MASK_IRQ(irq_num)						\
	IMASK_LOCK ;				/* into critical reg */	\
	testl	$IRQ_BIT(irq_num), _apic_imen ;				\
	jne	7f ;			/* already masked, skip */	\
	orl	$IRQ_BIT(irq_num), _apic_imen ;	/* set the mask bit */	\
	movl	IOAPICADDR(irq_num), %ecx ;	/* ioapic addr */	\
	movl	REDIRIDX(irq_num), %eax ;	/* get the index */	\
	movl	%eax, (%ecx) ;			/* write the index */	\
	movl	IOAPIC_WINDOW(%ecx), %eax ;	/* current value */	\
	orl	$IOART_INTMASK, %eax ;		/* set the mask */	\
	movl	%eax, IOAPIC_WINDOW(%ecx) ;	/* new value */		\
7: ;						/* already masked */	\
	IMASK_UNLOCK
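
/*
 * MASK_IRQ, expressed as C-like pseudocode (a sketch; the IMASK lock
 *  is held throughout):
 *
 *	if ((apic_imen & IRQ_BIT(irq)) == 0) {
 *		apic_imen |= IRQ_BIT(irq);
 *		ioapic[REGSEL] = REDIRIDX(irq);
 *		ioapic[WINDOW] |= IOART_INTMASK;
 *	}
 *
 *  ioapic[REGSEL]/ioapic[WINDOW] are illustrative names for the
 *  select/window register pair at IOAPICADDR(irq).
 */
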
/*
 * Test to see whether we are handling an edge or level triggered INT.
 *  Level-triggered INTs must still be masked as we don't clear the source,
 *  and the EOI cycle would cause redundant INTs to occur.
 */
#define MASK_LEVEL_IRQ(irq_num)						\
	testl	$IRQ_BIT(irq_num), _apic_pin_trigger ;			\
	jz	9f ;				/* edge, don't mask */	\
	MASK_IRQ(irq_num) ;						\
9:


#ifdef APIC_INTR_REORDER
#define EOI_IRQ(irq_num)						\
	movl	_apic_isrbit_location + 8 * (irq_num), %eax ;		\
	movl	(%eax), %eax ;						\
	testl	_apic_isrbit_location + 4 + 8 * (irq_num), %eax ;	\
	jz	9f ;				/* not active */	\
	movl	$0, lapic_eoi ;						\
	APIC_ITRACE(apic_itrace_eoi, irq_num, APIC_ITRACE_EOI) ;	\
9:

#else
#define EOI_IRQ(irq_num)						\
	testl	$IRQ_BIT(irq_num), lapic_isr1 ;				\
	jz	9f ;				/* not active */	\
	movl	$0, lapic_eoi ;						\
	APIC_ITRACE(apic_itrace_eoi, irq_num, APIC_ITRACE_EOI) ;	\
9:
#endif
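
/*
 * EOI_IRQ only writes lapic_eoi when the IRQ's bit is set in the local
 *  APIC in-service register, so a vector that was never accepted here
 *  is not acked by mistake.  With APIC_INTR_REORDER the word and bit
 *  to test are precomputed per IRQ in apic_isrbit_location.
 */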


/*
 * Test to see if the source is currently masked, clear if so.
 */
#define UNMASK_IRQ(irq_num)					\
	IMASK_LOCK ;				/* into critical reg */	\
	testl	$IRQ_BIT(irq_num), _apic_imen ;				\
	je	7f ;			/* bit clear, not masked */	\
	andl	$~IRQ_BIT(irq_num), _apic_imen ;/* clear mask bit */	\
	movl	IOAPICADDR(irq_num),%ecx ;	/* ioapic addr */	\
	movl	REDIRIDX(irq_num), %eax ;	/* get the index */	\
	movl	%eax,(%ecx) ;			/* write the index */	\
	movl	IOAPIC_WINDOW(%ecx),%eax ;	/* current value */	\
	andl	$~IOART_INTMASK,%eax ;		/* clear the mask */	\
	movl	%eax,IOAPIC_WINDOW(%ecx) ;	/* new value */		\
7: ;									\
	IMASK_UNLOCK

#ifdef INTR_SIMPLELOCK
#define ENLOCK
#define DELOCK
#define LATELOCK call	_get_isrlock
#else
#define ENLOCK \
	ISR_TRYLOCK ;		/* XXX this is going away... */		\
	testl	%eax, %eax ;			/* did we get it? */	\
	jz	3f
#define DELOCK	ISR_RELLOCK
#define LATELOCK
#endif

#ifdef APIC_INTR_DIAGNOSTIC
#ifdef APIC_INTR_DIAGNOSTIC_IRQ
log_intr_event:
	pushf
	cli
	pushl	$CNAME(apic_itrace_debuglock)
	call	_s_lock_np
	addl	$4, %esp
	movl	CNAME(apic_itrace_debugbuffer_idx), %ecx
	andl	$32767, %ecx
	movl	_cpuid, %eax
	shll	$8,	%eax
	orl	8(%esp), %eax
	movw	%ax,	CNAME(apic_itrace_debugbuffer)(,%ecx,2)
	incl	%ecx
	andl	$32767, %ecx
	movl	%ecx,	CNAME(apic_itrace_debugbuffer_idx)
	pushl	$CNAME(apic_itrace_debuglock)
	call	_s_unlock_np
	addl	$4, %esp
	popf
	ret
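
/*
 * Each trace record is one 16-bit word, (cpuid << 8) | event id,
 *  appended to the 32768-entry circular buffer
 *  apic_itrace_debugbuffer under apic_itrace_debuglock.
 */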


#define APIC_ITRACE(name, irq_num, id)					\
	lock ;					/* MP-safe */		\
	incl	CNAME(name) + (irq_num) * 4 ;				\
	pushl	%eax ;							\
	pushl	%ecx ;							\
	pushl	%edx ;							\
	movl	$(irq_num), %eax ;					\
	cmpl	$APIC_INTR_DIAGNOSTIC_IRQ, %eax ;			\
	jne	7f ;							\
	pushl	$id ;							\
	call	log_intr_event ;					\
	addl	$4, %esp ;						\
7: ;									\
	popl	%edx ;							\
	popl	%ecx ;							\
	popl	%eax
#else
#define APIC_ITRACE(name, irq_num, id)					\
	lock ;					/* MP-safe */		\
	incl	CNAME(name) + (irq_num) * 4
#endif

#define APIC_ITRACE_ENTER 1
#define APIC_ITRACE_EOI 2
#define APIC_ITRACE_TRYISRLOCK 3
#define APIC_ITRACE_GOTISRLOCK 4
#define APIC_ITRACE_ENTER2 5
#define APIC_ITRACE_LEAVE 6
#define APIC_ITRACE_UNMASK 7
#define APIC_ITRACE_ACTIVE 8
#define APIC_ITRACE_MASKED 9
#define APIC_ITRACE_NOISRLOCK 10
#define APIC_ITRACE_MASKED2 11
#define APIC_ITRACE_SPLZ 12
#define APIC_ITRACE_DORETI 13

#else
#define APIC_ITRACE(name, irq_num, id)
#endif


#ifdef CPL_AND_CML

#define	INTR(irq_num, vec_name)						\
	.text ;								\
	SUPERALIGN_TEXT ;						\
/* _XintrNN: entry point used by IDT/HWIs & splz_unpend via _vec[]. */	\
IDTVEC(vec_name) ;							\
	PUSH_FRAME ;							\
	movl	$KDSEL, %eax ;	/* reload with kernel's data segment */	\
	movl	%ax, %ds ;						\
	movl	%ax, %es ;						\
	movl	$KPSEL, %eax ;						\
	movl	%ax, %fs ;						\
;									\
	APIC_ITRACE(apic_itrace_enter, irq_num, APIC_ITRACE_ENTER) ;	\
	lock ;					/* MP-safe */		\
	btsl	$(irq_num), iactive ;		/* lazy masking */	\
	jc	1f ;				/* already active */	\
;									\
	MASK_LEVEL_IRQ(irq_num) ;					\
	EOI_IRQ(irq_num) ;						\
0: ;									\
	APIC_ITRACE(apic_itrace_tryisrlock, irq_num, APIC_ITRACE_TRYISRLOCK) ;\
	ENLOCK ;							\
;									\
	APIC_ITRACE(apic_itrace_gotisrlock, irq_num, APIC_ITRACE_GOTISRLOCK) ;\
	AVCPL_LOCK ;				/* MP-safe */		\
	testl	$IRQ_BIT(irq_num), _cpl ;				\
	jne	2f ;				/* this INT masked */	\
	testl	$IRQ_BIT(irq_num), _cml ;				\
	jne	2f ;				/* this INT masked */	\
	orl	$IRQ_BIT(irq_num), _cil ;				\
	AVCPL_UNLOCK ;							\
;									\
	incb	_intr_nesting_level ;					\
;	 								\
  /* entry point used by doreti_unpend for HWIs. */			\
__CONCAT(Xresume,irq_num): ;						\
	FAKE_MCOUNT(13*4(%esp)) ;		/* XXX avoid dbl cnt */ \
	lock ;	incl	_cnt+V_INTR ;		/* tally interrupts */	\
	movl	_intr_countp + (irq_num) * 4, %eax ;			\
	lock ;	incl	(%eax) ;					\
;									\
	AVCPL_LOCK ;				/* MP-safe */		\
	movl	_cml, %eax ;						\
	pushl	%eax ;							\
	orl	_intr_mask + (irq_num) * 4, %eax ;			\
	movl	%eax, _cml ;						\
	AVCPL_UNLOCK ;							\
;									\
	pushl	_intr_unit + (irq_num) * 4 ;				\
	incl	_inside_intr ;						\
	APIC_ITRACE(apic_itrace_enter2, irq_num, APIC_ITRACE_ENTER2) ;	\
	sti ;								\
	call	*_intr_handler + (irq_num) * 4 ;			\
	cli ;								\
	APIC_ITRACE(apic_itrace_leave, irq_num, APIC_ITRACE_LEAVE) ;	\
	decl	_inside_intr ;						\
;									\
	lock ;	andl $~IRQ_BIT(irq_num), iactive ;			\
	lock ;	andl $~IRQ_BIT(irq_num), _cil ;				\
	UNMASK_IRQ(irq_num) ;						\
	APIC_ITRACE(apic_itrace_unmask, irq_num, APIC_ITRACE_UNMASK) ;	\
	sti ;				/* doreti repeats cli/sti */	\
	MEXITCOUNT ;							\
	LATELOCK ;							\
	jmp	_doreti ;						\
;									\
	ALIGN_TEXT ;							\
1: ;						/* active */		\
	APIC_ITRACE(apic_itrace_active, irq_num, APIC_ITRACE_ACTIVE) ;	\
	MASK_IRQ(irq_num) ;						\
	EOI_IRQ(irq_num) ;						\
	AVCPL_LOCK ;				/* MP-safe */		\
	orl	$IRQ_BIT(irq_num), _ipending ;				\
	AVCPL_UNLOCK ;							\
	lock ;								\
	btsl	$(irq_num), iactive ;		/* still active */	\
	jnc	0b ;				/* retry */		\
	POP_FRAME ;							\
	iret ;								\
;									\
	ALIGN_TEXT ;							\
2: ;						/* masked by cpl|cml */	\
	APIC_ITRACE(apic_itrace_masked, irq_num, APIC_ITRACE_MASKED) ;	\
	orl	$IRQ_BIT(irq_num), _ipending ;				\
	AVCPL_UNLOCK ;							\
	DELOCK ;		/* XXX this is going away... */		\
	POP_FRAME ;							\
	iret ;								\
	ALIGN_TEXT ;							\
3: ; 			/* other cpu has isr lock */			\
	APIC_ITRACE(apic_itrace_noisrlock, irq_num, APIC_ITRACE_NOISRLOCK) ;\
	AVCPL_LOCK ;				/* MP-safe */		\
	orl	$IRQ_BIT(irq_num), _ipending ;				\
	testl	$IRQ_BIT(irq_num), _cpl ;				\
	jne	4f ;				/* this INT masked */	\
	testl	$IRQ_BIT(irq_num), _cml ;				\
	jne	4f ;				/* this INT masked */	\
	orl	$IRQ_BIT(irq_num), _cil ;				\
	AVCPL_UNLOCK ;							\
	call	forward_irq ;	/* forward irq to lock holder */	\
	POP_FRAME ;	 			/* and return */	\
	iret ;								\
	ALIGN_TEXT ;							\
4: ;	 					/* blocked */		\
	APIC_ITRACE(apic_itrace_masked2, irq_num, APIC_ITRACE_MASKED2) ;\
	AVCPL_UNLOCK ;							\
	POP_FRAME ;	 			/* and return */	\
	iret
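
/*
 * Control flow summary (a sketch of the macro above): the vector sets
 *  its iactive bit (lazy masking), masks level-triggered pins and
 *  EOIs, then tries the ISR lock.  With the lock held and the IRQ not
 *  masked by cpl/cml it runs the handler via Xresume<irq>, unmasks,
 *  and exits through doreti; a masked or already-active IRQ is posted
 *  to _ipending instead, and a lost lock race forwards the IRQ to the
 *  lock holder.
 */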

#else /* CPL_AND_CML */


#define	INTR(irq_num, vec_name)						\
	.text ;								\
	SUPERALIGN_TEXT ;						\
/* _XintrNN: entry point used by IDT/HWIs & splz_unpend via _vec[]. */	\
IDTVEC(vec_name) ;							\
	PUSH_FRAME ;							\
	movl	$KDSEL, %eax ;	/* reload with kernel's data segment */	\
	movl	%ax, %ds ;						\
	movl	%ax, %es ;						\
	movl	$KPSEL, %eax ;						\
	movl	%ax, %fs ;						\
;									\
	APIC_ITRACE(apic_itrace_enter, irq_num, APIC_ITRACE_ENTER) ;	\
	lock ;					/* MP-safe */		\
	btsl	$(irq_num), iactive ;		/* lazy masking */	\
	jc	1f ;				/* already active */	\
;									\
	MASK_LEVEL_IRQ(irq_num) ;					\
	EOI_IRQ(irq_num) ;						\
0: ;									\
	APIC_ITRACE(apic_itrace_tryisrlock, irq_num, APIC_ITRACE_TRYISRLOCK) ;\
	ISR_TRYLOCK ;		/* XXX this is going away... */		\
	testl	%eax, %eax ;			/* did we get it? */	\
	jz	3f ;				/* no */		\
;									\
	APIC_ITRACE(apic_itrace_gotisrlock, irq_num, APIC_ITRACE_GOTISRLOCK) ;\
	AVCPL_LOCK ;				/* MP-safe */		\
	testl	$IRQ_BIT(irq_num), _cpl ;				\
	jne	2f ;				/* this INT masked */	\
	AVCPL_UNLOCK ;							\
;									\
	incb	_intr_nesting_level ;					\
;	 								\
  /* entry point used by doreti_unpend for HWIs. */			\
__CONCAT(Xresume,irq_num): ;						\
	FAKE_MCOUNT(13*4(%esp)) ;		/* XXX avoid dbl cnt */ \
	lock ;	incl	_cnt+V_INTR ;		/* tally interrupts */	\
	movl	_intr_countp + (irq_num) * 4, %eax ;			\
	lock ;	incl	(%eax) ;					\
;									\
	AVCPL_LOCK ;				/* MP-safe */		\
	movl	_cpl, %eax ;						\
	pushl	%eax ;							\
	orl	_intr_mask + (irq_num) * 4, %eax ;			\
	movl	%eax, _cpl ;						\
	andl	$~IRQ_BIT(irq_num), _ipending ;				\
	AVCPL_UNLOCK ;							\
;									\
	pushl	_intr_unit + (irq_num) * 4 ;				\
	APIC_ITRACE(apic_itrace_enter2, irq_num, APIC_ITRACE_ENTER2) ;	\
	sti ;								\
	call	*_intr_handler + (irq_num) * 4 ;			\
	cli ;								\
	APIC_ITRACE(apic_itrace_leave, irq_num, APIC_ITRACE_LEAVE) ;	\
;									\
	lock ;	andl	$~IRQ_BIT(irq_num), iactive ;			\
	UNMASK_IRQ(irq_num) ;						\
	APIC_ITRACE(apic_itrace_unmask, irq_num, APIC_ITRACE_UNMASK) ;	\
	sti ;				/* doreti repeats cli/sti */	\
	MEXITCOUNT ;							\
	jmp	_doreti ;						\
;									\
	ALIGN_TEXT ;							\
1: ;						/* active  */		\
	APIC_ITRACE(apic_itrace_active, irq_num, APIC_ITRACE_ACTIVE) ;	\
	MASK_IRQ(irq_num) ;						\
	EOI_IRQ(irq_num) ;						\
	AVCPL_LOCK ;				/* MP-safe */		\
	orl	$IRQ_BIT(irq_num), _ipending ;				\
	AVCPL_UNLOCK ;							\
	lock ;								\
	btsl	$(irq_num), iactive ;		/* still active */	\
	jnc	0b ;				/* retry */		\
	POP_FRAME ;							\
	iret ;		/* XXX:	 iactive bit might be 0 now */		\
	ALIGN_TEXT ;							\
2: ;				/* masked by cpl, leave iactive set */	\
	APIC_ITRACE(apic_itrace_masked, irq_num, APIC_ITRACE_MASKED) ;	\
	orl	$IRQ_BIT(irq_num), _ipending ;				\
	AVCPL_UNLOCK ;							\
	ISR_RELLOCK ;		/* XXX this is going away... */		\
	POP_FRAME ;							\
	iret ;								\
	ALIGN_TEXT ;							\
3: ; 			/* other cpu has isr lock */			\
	APIC_ITRACE(apic_itrace_noisrlock, irq_num, APIC_ITRACE_NOISRLOCK) ;\
	AVCPL_LOCK ;				/* MP-safe */		\
	orl	$IRQ_BIT(irq_num), _ipending ;				\
	testl	$IRQ_BIT(irq_num), _cpl ;				\
	jne	4f ;				/* this INT masked */	\
	AVCPL_UNLOCK ;							\
	call	forward_irq ;	 /* forward irq to lock holder */	\
	POP_FRAME ;	 			/* and return */	\
	iret ;								\
	ALIGN_TEXT ;							\
4: ;	 					/* blocked */		\
	APIC_ITRACE(apic_itrace_masked2, irq_num, APIC_ITRACE_MASKED2) ;\
	AVCPL_UNLOCK ;							\
	POP_FRAME ;	 			/* and return */	\
	iret

#endif /* CPL_AND_CML */


/*
 * Handle "spurious INTerrupts".
 * Notes:
 *  This is different from the "spurious INTerrupt" generated by an
 *   8259 PIC for missing INTs.  See the APIC documentation for details.
 *  This routine should NOT do an 'EOI' cycle.
 */
	.text
	SUPERALIGN_TEXT
	.globl _Xspuriousint
_Xspuriousint:

	/* No EOI cycle used here */

	iret


/*
 * Handle TLB shootdowns.
 */
	.text
	SUPERALIGN_TEXT
	.globl	_Xinvltlb
_Xinvltlb:
	pushl	%eax

#ifdef COUNT_XINVLTLB_HITS
	pushl	%fs
	movl	$KPSEL, %eax
	movl	%ax, %fs
	movl	_cpuid, %eax
	popl	%fs
	ss
	incl	_xhits(,%eax,4)
#endif /* COUNT_XINVLTLB_HITS */

	movl	%cr3, %eax		/* invalidate the TLB */
	movl	%eax, %cr3

	ss				/* stack segment, avoid %ds load */
	movl	$0, lapic_eoi		/* End Of Interrupt to APIC */

	popl	%eax
	iret
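
/*
 * Note: reloading %cr3 flushes every TLB entry not marked global
 *  (PG_G), which is why no explicit invlpg loop is needed here.
 */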


#ifdef BETTER_CLOCK

/*
 * Executed by a CPU when it receives an Xcpucheckstate IPI from another CPU.
 *
 *  - Stores current cpu state in checkstate_cpustate[cpuid]
 *      0 == user, 1 == sys, 2 == intr
 *  - Stores current process in checkstate_curproc[cpuid]
 *
 *  - Signals its receipt by setting bit cpuid in checkstate_probed_cpus.
 *
 * stack: 0->fs, 4->ds, 8->ebx, 12->eax, 16->eip, 20->cs, 24->eflags
 */

	.text
	SUPERALIGN_TEXT
	.globl _Xcpucheckstate
	.globl _checkstate_cpustate
	.globl _checkstate_curproc
	.globl _checkstate_pc
_Xcpucheckstate:
	pushl	%eax
	pushl	%ebx
	pushl	%ds			/* save current data segment */
	pushl	%fs

	movl	$KDSEL, %eax
	movl	%ax, %ds		/* use KERNEL data segment */
	movl	$KPSEL, %eax
	movl	%ax, %fs

	movl	$0, lapic_eoi		/* End Of Interrupt to APIC */

	movl	$0, %ebx
	movl	20(%esp), %eax
	andl	$3, %eax
	cmpl	$3, %eax
	je	1f
#ifdef VM86
	testl	$PSL_VM, 24(%esp)
	jne	1f
#endif
	incl	%ebx			/* system or interrupt */
#ifdef CPL_AND_CML
	cmpl	$0, _inside_intr
	je	1f
	incl	%ebx			/* interrupt */
#endif
1:
	movl	_cpuid, %eax
	movl	%ebx, _checkstate_cpustate(,%eax,4)
	movl	_curproc, %ebx
	movl	%ebx, _checkstate_curproc(,%eax,4)
	movl	16(%esp), %ebx
	movl	%ebx, _checkstate_pc(,%eax,4)

	lock				/* checkstate_probed_cpus |= (1<<id) */
	btsl	%eax, _checkstate_probed_cpus

	popl	%fs
	popl	%ds			/* restore previous data segment */
	popl	%ebx
	popl	%eax
	iret

#endif /* BETTER_CLOCK */

/*
 * Executed by a CPU when it receives an Xcpuast IPI from another CPU.
 *
 *  - Signals its receipt by clearing bit cpuid in checkstate_need_ast.
 *
 *  - We need a better method of triggering asts on other cpus.
 */

	.text
	SUPERALIGN_TEXT
	.globl _Xcpuast
_Xcpuast:
	PUSH_FRAME
	movl	$KDSEL, %eax
	movl	%ax, %ds		/* use KERNEL data segment */
	movl	%ax, %es
	movl	$KPSEL, %eax
	movl	%ax, %fs

	movl	_cpuid, %eax
	lock				/* checkstate_need_ast &= ~(1<<id) */
	btrl	%eax, _checkstate_need_ast
	movl	$0, lapic_eoi		/* End Of Interrupt to APIC */

	lock
	btsl	%eax, _checkstate_pending_ast
	jc	1f

	FAKE_MCOUNT(13*4(%esp))

	/*
	 * Giant locks do not come cheap.
	 * A lot of cycles are going to be wasted here.
	 */
	call	_get_isrlock

	AVCPL_LOCK
#ifdef CPL_AND_CML
	movl	_cml, %eax
#else
	movl	_cpl, %eax
#endif
	pushl	%eax
	orl	$SWI_AST_PENDING, _ipending
	AVCPL_UNLOCK
	lock
	incb	_intr_nesting_level
	sti

	pushl	$0

	movl	_cpuid, %eax
	lock
	btrl	%eax, _checkstate_pending_ast
	lock
	btrl	%eax, CNAME(resched_cpus)
	jnc	2f
	movl	$1, CNAME(want_resched)
	lock
	incl	CNAME(want_resched_cnt)
2:
	lock
	incl	CNAME(cpuast_cnt)
	MEXITCOUNT
	jmp	_doreti
1:
	/* We are already in the process of delivering an ast for this CPU */
	POP_FRAME
	iret

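/*
 * Delivery sketch (from the code above): the receiving CPU clears its
 *  bit in checkstate_need_ast, guards against re-entry with
 *  checkstate_pending_ast, takes the giant lock, posts
 *  SWI_AST_PENDING in _ipending, and lets _doreti deliver the AST,
 *  setting want_resched first if this CPU was in resched_cpus.
 */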

/*
 * Executed by a CPU when it receives an XFORWARD_IRQ IPI.
 */

	.text
	SUPERALIGN_TEXT
	.globl _Xforward_irq
_Xforward_irq:
	PUSH_FRAME
	movl	$KDSEL, %eax
	movl	%ax, %ds		/* use KERNEL data segment */
	movl	%ax, %es
	movl	$KPSEL, %eax
	movl	%ax, %fs

	movl	$0, lapic_eoi		/* End Of Interrupt to APIC */

	FAKE_MCOUNT(13*4(%esp))

	ISR_TRYLOCK
	testl	%eax,%eax		/* Did we get the lock ? */
	jz	1f			/* No */

	lock
	incl	CNAME(forward_irq_hitcnt)
	cmpb	$4, _intr_nesting_level
	jae	2f

	AVCPL_LOCK
#ifdef CPL_AND_CML
	movl	_cml, %eax
#else
	movl	_cpl, %eax
#endif
	pushl	%eax
	AVCPL_UNLOCK
	lock
	incb	_intr_nesting_level
	sti

	pushl	$0

	MEXITCOUNT
	jmp	_doreti			/* Handle forwarded interrupt */
1:
	lock
	incl	CNAME(forward_irq_misscnt)
	call	forward_irq	/* Oops, we've lost the isr lock */
	MEXITCOUNT
	POP_FRAME
	iret
2:
	lock
	incl	CNAME(forward_irq_toodeepcnt)
3:
	ISR_RELLOCK
	MEXITCOUNT
	POP_FRAME
	iret

/*
 * forward_irq: send an XFORWARD_IRQ IPI to the CPU holding the
 *  giant lock, so that it handles the interrupt for us.
 */
forward_irq:
	MCOUNT
	cmpl	$0,_invltlb_ok
	jz	4f

	cmpl	$0, CNAME(forward_irq_enabled)
	jz	4f

	movl	_mp_lock,%eax
	cmpl	$FREE_LOCK,%eax
	jne	1f
	movl	$0, %eax		/* Pick CPU #0 if no one has lock */
1:
	shrl	$24,%eax
	movl	_cpu_num_to_apic_id(,%eax,4),%ecx
	shll	$24,%ecx
	movl	lapic_icr_hi, %eax
	andl	$~APIC_ID_MASK, %eax
	orl	%ecx, %eax
	movl	%eax, lapic_icr_hi

2:
	movl	lapic_icr_lo, %eax
	andl	$APIC_DELSTAT_MASK,%eax
	jnz	2b
	movl	lapic_icr_lo, %eax
	andl	$APIC_RESV2_MASK, %eax
	orl	$(APIC_DEST_DESTFLD|APIC_DELMODE_FIXED|XFORWARD_IRQ_OFFSET), %eax
	movl	%eax, lapic_icr_lo
3:
	movl	lapic_icr_lo, %eax
	andl	$APIC_DELSTAT_MASK,%eax
	jnz	3b
4:
	ret
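
/*
 * ICR programming sketch (from the code above): put the target's APIC
 *  ID in the destination field of lapic_icr_hi, spin until the
 *  delivery-status bit in lapic_icr_lo clears, write a fixed-mode IPI
 *  with vector XFORWARD_IRQ_OFFSET, then spin until delivery completes.
 */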

/*
 * Executed by a CPU when it receives an Xcpustop IPI from another CPU.
 *
 *  - Signals its receipt.
 *  - Waits for permission to restart.
 *  - Signals its restart.
 */

	.text
	SUPERALIGN_TEXT
	.globl _Xcpustop
_Xcpustop:
	pushl	%ebp
	movl	%esp, %ebp
	pushl	%eax
	pushl	%ecx
	pushl	%edx
	pushl	%ds			/* save current data segment */
	pushl	%fs

	movl	$KDSEL, %eax
	movl	%ax, %ds		/* use KERNEL data segment */
	movl	$KPSEL, %eax
	movl	%ax, %fs

	movl	$0, lapic_eoi		/* End Of Interrupt to APIC */

	movl	_cpuid, %eax
	imull	$PCB_SIZE, %eax
	leal	CNAME(stoppcbs)(%eax), %eax
	pushl	%eax
	call	CNAME(savectx)		/* Save process context */
	addl	$4, %esp


	movl	_cpuid, %eax

	lock
	btsl	%eax, _stopped_cpus	/* stopped_cpus |= (1<<id) */
1:
	btl	%eax, _started_cpus	/* while (!(started_cpus & (1<<id))) */
	jnc	1b

	lock
	btrl	%eax, _started_cpus	/* started_cpus &= ~(1<<id) */
	lock
	btrl	%eax, _stopped_cpus	/* stopped_cpus &= ~(1<<id) */

	test	%eax, %eax
	jnz	2f

	movl	CNAME(cpustop_restartfunc), %eax
	test	%eax, %eax
	jz	2f
	movl	$0, CNAME(cpustop_restartfunc)	/* One-shot */

	call	*%eax
2:
	popl	%fs
	popl	%ds			/* restore previous data segment */
	popl	%edx
	popl	%ecx
	popl	%eax
	movl	%ebp, %esp
	popl	%ebp
	iret

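/*
 * Stop/restart sketch: each stopped CPU saves its context into
 *  stoppcbs[cpuid] via savectx(), sets its bit in stopped_cpus, and
 *  spins until restart_cpus() sets the matching bit in started_cpus;
 *  CPU #0 additionally runs the one-shot cpustop_restartfunc hook
 *  if one was installed.
 */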

MCOUNT_LABEL(bintr)
	FAST_INTR(0,fastintr0)
	FAST_INTR(1,fastintr1)
	FAST_INTR(2,fastintr2)
	FAST_INTR(3,fastintr3)
	FAST_INTR(4,fastintr4)
	FAST_INTR(5,fastintr5)
	FAST_INTR(6,fastintr6)
	FAST_INTR(7,fastintr7)
	FAST_INTR(8,fastintr8)
	FAST_INTR(9,fastintr9)
	FAST_INTR(10,fastintr10)
	FAST_INTR(11,fastintr11)
	FAST_INTR(12,fastintr12)
	FAST_INTR(13,fastintr13)
	FAST_INTR(14,fastintr14)
	FAST_INTR(15,fastintr15)
	FAST_INTR(16,fastintr16)
	FAST_INTR(17,fastintr17)
	FAST_INTR(18,fastintr18)
	FAST_INTR(19,fastintr19)
	FAST_INTR(20,fastintr20)
	FAST_INTR(21,fastintr21)
	FAST_INTR(22,fastintr22)
	FAST_INTR(23,fastintr23)
	INTR(0,intr0)
	INTR(1,intr1)
	INTR(2,intr2)
	INTR(3,intr3)
	INTR(4,intr4)
	INTR(5,intr5)
	INTR(6,intr6)
	INTR(7,intr7)
	INTR(8,intr8)
	INTR(9,intr9)
	INTR(10,intr10)
	INTR(11,intr11)
	INTR(12,intr12)
	INTR(13,intr13)
	INTR(14,intr14)
	INTR(15,intr15)
	INTR(16,intr16)
	INTR(17,intr17)
	INTR(18,intr18)
	INTR(19,intr19)
	INTR(20,intr20)
	INTR(21,intr21)
	INTR(22,intr22)
	INTR(23,intr23)
MCOUNT_LABEL(eintr)

	.data
/*
 * Addresses of interrupt handlers.
 *  XresumeNN: Resumption addresses for HWIs.
 */
	.globl _ihandlers
_ihandlers:
/*
 * used by:
 *  ipl.s:	doreti_unpend
 */
	.long	Xresume0,  Xresume1,  Xresume2,  Xresume3
	.long	Xresume4,  Xresume5,  Xresume6,  Xresume7
	.long	Xresume8,  Xresume9,  Xresume10, Xresume11
	.long	Xresume12, Xresume13, Xresume14, Xresume15
	.long	Xresume16, Xresume17, Xresume18, Xresume19
	.long	Xresume20, Xresume21, Xresume22, Xresume23
/*
 * used by:
 *  ipl.s:	doreti_unpend
 *  apic_ipl.s:	splz_unpend
 */
	.long	_swi_null, swi_net, _swi_null, _swi_null
	.long	_swi_vm, _swi_null, _softclock, swi_ast

imasks:				/* masks for interrupt handlers */
	.space	NHWI*4		/* padding; HWI masks are elsewhere */

	.long	SWI_TTY_MASK, SWI_NET_MASK, SWI_CAMNET_MASK, SWI_CAMBIO_MASK
	.long	SWI_VM_MASK, 0, SWI_CLOCK_MASK, SWI_AST_MASK

/* active flag for lazy masking */
iactive:
	.long	0

#ifdef COUNT_XINVLTLB_HITS
	.globl	_xhits
_xhits:
	.space	(NCPU * 4), 0
#endif /* COUNT_XINVLTLB_HITS */

/* variables used by stop_cpus()/restart_cpus()/Xcpustop */
	.globl _stopped_cpus, _started_cpus
_stopped_cpus:
	.long	0
_started_cpus:
	.long	0

#ifdef BETTER_CLOCK
	.globl _checkstate_probed_cpus
_checkstate_probed_cpus:
	.long	0
#endif /* BETTER_CLOCK */
	.globl _checkstate_need_ast
_checkstate_need_ast:
	.long	0
_checkstate_pending_ast:
	.long	0
	.globl CNAME(forward_irq_misscnt)
	.globl CNAME(forward_irq_toodeepcnt)
	.globl CNAME(forward_irq_hitcnt)
	.globl CNAME(resched_cpus)
	.globl CNAME(want_resched_cnt)
	.globl CNAME(cpuast_cnt)
	.globl CNAME(cpustop_restartfunc)
CNAME(forward_irq_misscnt):
	.long 0
CNAME(forward_irq_hitcnt):
	.long 0
CNAME(forward_irq_toodeepcnt):
	.long 0
CNAME(resched_cpus):
	.long 0
CNAME(want_resched_cnt):
	.long 0
CNAME(cpuast_cnt):
	.long 0
CNAME(cpustop_restartfunc):
	.long 0


	.globl	_apic_pin_trigger
_apic_pin_trigger:
	.long	0

	.text