apic_vector.s revision 48505
/*
 *	from: vector.s, 386BSD 0.1 unknown origin
 *	$Id: apic_vector.s,v 1.40 1999/06/16 03:53:52 tegge Exp $
 */


#include <machine/apic.h>
#include <machine/smp.h>

#include "i386/isa/intr_machdep.h"


#ifdef FAST_SIMPLELOCK

#define GET_FAST_INTR_LOCK						\
	pushl	$_fast_intr_lock ;		/* address of lock */	\
	call	_s_lock ;			/* MP-safe */		\
	addl	$4,%esp

#define REL_FAST_INTR_LOCK						\
	pushl	$_fast_intr_lock ;		/* address of lock */	\
	call	_s_unlock ;			/* MP-safe */		\
	addl	$4,%esp

#else /* FAST_SIMPLELOCK */

#define GET_FAST_INTR_LOCK						\
	call	_get_isrlock

#define REL_FAST_INTR_LOCK						\
	pushl	$_mp_lock ;	/* GIANT_LOCK */			\
	call	_MPrellock ;						\
	add	$4, %esp

#endif /* FAST_SIMPLELOCK */

/* convert an absolute IRQ# into a bitmask */
#define IRQ_BIT(irq_num)	(1 << (irq_num))

/* make an index into the IO APIC from the IRQ# */
#define REDTBL_IDX(irq_num)	(0x10 + ((irq_num) * 2))
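
/*
 * For example (straight arithmetic on the two macros above):
 *
 *	IRQ_BIT(2)    == 0x00000004
 *	REDTBL_IDX(2) == 0x14		(0x10 + 2 * 2)
 *
 * i.e. each IRQ owns one bit in a 32-bit mask and one two-register
 * entry in the I/O APIC redirection table.
 */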


/*
 * Macros for interrupt entry, call to handler, and exit.
 */

#ifdef FAST_WITHOUTCPL

/*
 * Fast interrupt entry: save only the call-used registers, call the
 * handler directly, EOI, and return without going through _doreti.
 */
#define	FAST_INTR(irq_num, vec_name)					\
	.text ;								\
	SUPERALIGN_TEXT ;						\
IDTVEC(vec_name) ;							\
	pushl	%eax ;		/* save only call-used registers */	\
	pushl	%ecx ;							\
	pushl	%edx ;							\
	pushl	%ds ;							\
	MAYBE_PUSHL_ES ;						\
	pushl	%fs ;							\
	movl	$KDSEL,%eax ;						\
	movl	%ax,%ds ;						\
	MAYBE_MOVW_AX_ES ;						\
	movl	$KPSEL,%eax ;						\
	movl	%ax,%fs ;						\
	FAKE_MCOUNT((5+ACTUALLY_PUSHED)*4(%esp)) ;			\
	pushl	_intr_unit + (irq_num) * 4 ;				\
	GET_FAST_INTR_LOCK ;						\
	call	*_intr_handler + (irq_num) * 4 ; /* do the work ASAP */ \
	REL_FAST_INTR_LOCK ;						\
	addl	$4, %esp ;						\
	movl	$0, lapic_eoi ;						\
	lock ; 								\
	incl	_cnt+V_INTR ;	/* book-keeping can wait */		\
	movl	_intr_countp + (irq_num) * 4, %eax ;			\
	lock ; 								\
	incl	(%eax) ;						\
	MEXITCOUNT ;							\
	popl	%fs ;							\
	MAYBE_POPL_ES ;							\
	popl	%ds ;							\
	popl	%edx ;							\
	popl	%ecx ;							\
	popl	%eax ;							\
	iret

#else /* FAST_WITHOUTCPL */

#define	FAST_INTR(irq_num, vec_name)					\
	.text ;								\
	SUPERALIGN_TEXT ;						\
IDTVEC(vec_name) ;							\
	pushl	%eax ;		/* save only call-used registers */	\
	pushl	%ecx ;							\
	pushl	%edx ;							\
	pushl	%ds ;							\
	MAYBE_PUSHL_ES ;						\
	pushl	%fs ;							\
	movl	$KDSEL, %eax ;						\
	movl	%ax, %ds ;						\
	MAYBE_MOVW_AX_ES ;						\
	movl	$KPSEL, %eax ;						\
	movl	%ax, %fs ;						\
	FAKE_MCOUNT((5+ACTUALLY_PUSHED)*4(%esp)) ;			\
	GET_FAST_INTR_LOCK ;						\
	pushl	_intr_unit + (irq_num) * 4 ;				\
	call	*_intr_handler + (irq_num) * 4 ; /* do the work ASAP */ \
	addl	$4, %esp ;						\
	movl	$0, lapic_eoi ;						\
	lock ; 								\
	incl	_cnt+V_INTR ;	/* book-keeping can wait */		\
	movl	_intr_countp + (irq_num) * 4,%eax ;			\
	lock ; 								\
	incl	(%eax) ;						\
	movl	_cpl, %eax ;	/* unmasking pending HWIs or SWIs? */	\
	notl	%eax ;							\
	andl	_ipending, %eax ;					\
	jne	2f ; 		/* yes, maybe handle them */		\
1: ;									\
	MEXITCOUNT ;							\
	REL_FAST_INTR_LOCK ;						\
	popl	%fs ;							\
	MAYBE_POPL_ES ;							\
	popl	%ds ;							\
	popl	%edx ;							\
	popl	%ecx ;							\
	popl	%eax ;							\
	iret ;								\
;									\
	ALIGN_TEXT ;							\
2: ;									\
	cmpb	$3, _intr_nesting_level ;	/* enough stack? */	\
	jae	1b ;		/* no, return */			\
	movl	_cpl, %eax ;						\
	/* XXX next line is probably unnecessary now. */		\
	movl	$HWI_MASK|SWI_MASK, _cpl ;	/* limit nesting ... */	\
	lock ; 								\
	incb	_intr_nesting_level ;	/* ... really limit it ... */	\
	sti ;			/* to do this as early as possible */	\
	popl	%fs ;		/* discard most of thin frame ... */	\
	MAYBE_POPL_ES ;		/* discard most of thin frame ... */	\
	popl	%ecx ;		/* ... original %ds ... */		\
	popl	%edx ;							\
	xchgl	%eax, 4(%esp) ;	/* orig %eax; save cpl */		\
	pushal ;		/* build fat frame (grrr) ... */	\
	pushl	%ecx ;		/* ... actually %ds ... */		\
	pushl	%es ;							\
	pushl	%fs ;							\
	movl	$KDSEL, %eax ;						\
	movl	%ax, %es ;						\
	movl	$KPSEL, %eax ;						\
	movl	%ax, %fs ;						\
	movl	(3+8+0)*4(%esp), %ecx ;	/* %ecx from thin frame ... */	\
	movl	%ecx, (3+6)*4(%esp) ;	/* ... to fat frame ... */	\
	movl	(3+8+1)*4(%esp), %eax ;	/* ... cpl from thin frame */	\
	pushl	%eax ;							\
	subl	$4, %esp ;	/* junk for unit number */		\
	MEXITCOUNT ;							\
	jmp	_doreti

#endif /* FAST_WITHOUTCPL */

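/*
 * Either way, FAST_INTR(n, fastintrN) below expands to the IDT entry
 * point _XfastintrN for IRQ n.  The FAST_WITHOUTCPL variant never
 * touches cpl; the other variant falls back to building a full trap
 * frame and running _doreti when it would unmask pending interrupts.
 */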

/*
 * Push/pop a complete trap frame: dummy error code and trap type,
 * all general registers (pushal), and the %ds/%es/%fs selectors.
 */
#define PUSH_FRAME							\
	pushl	$0 ;		/* dummy error code */			\
	pushl	$0 ;		/* dummy trap type */			\
	pushal ;							\
	pushl	%ds ;		/* save data and extra segments ... */	\
	pushl	%es ;							\
	pushl	%fs

#define POP_FRAME							\
	popl	%fs ;							\
	popl	%es ;							\
	popl	%ds ;							\
	popal ;								\
	addl	$4+4,%esp
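
/*
 * After PUSH_FRAME the saved %eip sits at 13*4(%esp): %fs/%es/%ds
 * (3 words) + pushal (8 words) + the dummy trap type and error code
 * (2 words).  This is where the FAKE_MCOUNT(13*4(%esp)) offsets used
 * below come from.
 */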

#define IOAPICADDR(irq_num) CNAME(int_to_apicintpin) + 16 * (irq_num) + 8
#define REDIRIDX(irq_num) CNAME(int_to_apicintpin) + 16 * (irq_num) + 12
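
/*
 * The two macros above assume each int_to_apicintpin[] entry is 16
 * bytes, with the I/O APIC base address at offset 8 and the
 * redirection-table index at offset 12; e.g. IOAPICADDR(3) is byte
 * offset 3 * 16 + 8 into the array.
 */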

#define MASK_IRQ(irq_num)						\
	IMASK_LOCK ;				/* into critical reg */	\
	testl	$IRQ_BIT(irq_num), _apic_imen ;				\
	jne	7f ;			/* masked, don't mask */	\
	orl	$IRQ_BIT(irq_num), _apic_imen ;	/* set the mask bit */	\
	movl	IOAPICADDR(irq_num), %ecx ;	/* ioapic addr */	\
	movl	REDIRIDX(irq_num), %eax ;	/* get the index */	\
	movl	%eax, (%ecx) ;			/* write the index */	\
	movl	IOAPIC_WINDOW(%ecx), %eax ;	/* current value */	\
	orl	$IOART_INTMASK, %eax ;		/* set the mask */	\
	movl	%eax, IOAPIC_WINDOW(%ecx) ;	/* new value */		\
7: ;						/* already masked */	\
	IMASK_UNLOCK
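
/*
 * A rough C sketch of MASK_IRQ(irq), with names as in the assembly
 * above (illustrative only):
 *
 *	if ((apic_imen & IRQ_BIT(irq)) == 0) {
 *		apic_imen |= IRQ_BIT(irq);
 *		ioapic->index = rediridx;	 // select the register
 *		ioapic->window |= IOART_INTMASK; // mask the pin
 *	}
 *
 * all performed under the IMASK spin lock.
 */
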
/*
 * Test to see whether we are handling an edge or level triggered INT.
 *  Level-triggered INTs must still be masked as we don't clear the source,
 *  and the EOI cycle would cause redundant INTs to occur.
 */
#define MASK_LEVEL_IRQ(irq_num)						\
	testl	$IRQ_BIT(irq_num), _apic_pin_trigger ;			\
	jz	9f ;				/* edge, don't mask */	\
	MASK_IRQ(irq_num) ;						\
9:


#ifdef APIC_INTR_REORDER
#define EOI_IRQ(irq_num)						\
	movl	_apic_isrbit_location + 8 * (irq_num), %eax ;		\
	movl	(%eax), %eax ;						\
	testl	_apic_isrbit_location + 4 + 8 * (irq_num), %eax ;	\
	jz	9f ;				/* not active */	\
	movl	$0, lapic_eoi ;						\
	APIC_ITRACE(apic_itrace_eoi, irq_num, APIC_ITRACE_EOI) ;	\
9:

#else
#define EOI_IRQ(irq_num)						\
	testl	$IRQ_BIT(irq_num), lapic_isr1;				\
	jz	9f	;			/* not active */	\
	movl	$0, lapic_eoi;						\
	APIC_ITRACE(apic_itrace_eoi, irq_num, APIC_ITRACE_EOI) ;	\
9:
#endif
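
/*
 * Note on EOI_IRQ: only EOI if the interrupt is actually in service.
 * The non-reordered variant assumes IRQ n is mapped to vector 32 + n,
 * so its in-service bit is bit n of local APIC ISR register 1 (which
 * covers vectors 32..63).  The EOI itself is just a write of 0 to
 * lapic_eoi.
 */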


/*
 * Test to see if the source is currently masked; clear the mask if so.
 */
#define UNMASK_IRQ(irq_num)						\
	IMASK_LOCK ;				/* into critical reg */	\
	testl	$IRQ_BIT(irq_num), _apic_imen ;				\
	je	7f ;			/* bit clear, not masked */	\
	andl	$~IRQ_BIT(irq_num), _apic_imen ;/* clear mask bit */	\
	movl	IOAPICADDR(irq_num),%ecx ;	/* ioapic addr */	\
	movl	REDIRIDX(irq_num), %eax ;	/* get the index */	\
	movl	%eax,(%ecx) ;			/* write the index */	\
	movl	IOAPIC_WINDOW(%ecx),%eax ;	/* current value */	\
	andl	$~IOART_INTMASK,%eax ;		/* clear the mask */	\
	movl	%eax,IOAPIC_WINDOW(%ecx) ;	/* new value */		\
7: ;									\
	IMASK_UNLOCK

#ifdef INTR_SIMPLELOCK
#define ENLOCK
#define DELOCK
#define LATELOCK call	_get_isrlock
#else
#define ENLOCK \
	ISR_TRYLOCK ;		/* XXX this is going away... */		\
	testl	%eax, %eax ;			/* did we get it? */	\
	jz	3f
#define DELOCK	ISR_RELLOCK
#define LATELOCK
#endif
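
/*
 * In other words: with INTR_SIMPLELOCK the ISR lock is taken late
 * (LATELOCK, just before _doreti); otherwise ENLOCK try-acquires it
 * on entry (branching to the pend-and-forward path at 3: on failure)
 * and DELOCK releases it on the masked-by-cpl exit path.
 */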

#ifdef APIC_INTR_DIAGNOSTIC
#ifdef APIC_INTR_DIAGNOSTIC_IRQ
log_intr_event:
	pushf
	cli
	pushl	$CNAME(apic_itrace_debuglock)
	call	CNAME(s_lock_np)
	addl	$4, %esp
	movl	CNAME(apic_itrace_debugbuffer_idx), %ecx
	andl	$32767, %ecx
	movl	_cpuid, %eax
	shll	$8,	%eax
	orl	8(%esp), %eax
	movw	%ax,	CNAME(apic_itrace_debugbuffer)(,%ecx,2)
	incl	%ecx
	andl	$32767, %ecx
	movl	%ecx,	CNAME(apic_itrace_debugbuffer_idx)
	pushl	$CNAME(apic_itrace_debuglock)
	call	CNAME(s_unlock_np)
	addl	$4, %esp
	popf
	ret


#define APIC_ITRACE(name, irq_num, id)					\
	lock ;					/* MP-safe */		\
	incl	CNAME(name) + (irq_num) * 4 ;				\
	pushl	%eax ;							\
	pushl	%ecx ;							\
	pushl	%edx ;							\
	movl	$(irq_num), %eax ;					\
	cmpl	$APIC_INTR_DIAGNOSTIC_IRQ, %eax ;			\
	jne	7f ;							\
	pushl	$id ;							\
	call	log_intr_event ;					\
	addl	$4, %esp ;						\
7: ;									\
	popl	%edx ;							\
	popl	%ecx ;							\
	popl	%eax
#else
#define APIC_ITRACE(name, irq_num, id)					\
	lock ;					/* MP-safe */		\
	incl	CNAME(name) + (irq_num) * 4
#endif

#define APIC_ITRACE_ENTER 1
#define APIC_ITRACE_EOI 2
#define APIC_ITRACE_TRYISRLOCK 3
#define APIC_ITRACE_GOTISRLOCK 4
#define APIC_ITRACE_ENTER2 5
#define APIC_ITRACE_LEAVE 6
#define APIC_ITRACE_UNMASK 7
#define APIC_ITRACE_ACTIVE 8
#define APIC_ITRACE_MASKED 9
#define APIC_ITRACE_NOISRLOCK 10
#define APIC_ITRACE_MASKED2 11
#define APIC_ITRACE_SPLZ 12
#define APIC_ITRACE_DORETI 13

#else
#define APIC_ITRACE(name, irq_num, id)
#endif
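
/*
 * Each record that log_intr_event stores is a 16-bit value of the
 * form (cpuid << 8) | event_id in the 32768-entry circular buffer
 * apic_itrace_debugbuffer; e.g. 0x0105 means APIC_ITRACE_ENTER2 (5)
 * on CPU 1.
 */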

#ifdef CPL_AND_CML

#define	INTR(irq_num, vec_name, maybe_extra_ipending)			\
	.text ;								\
	SUPERALIGN_TEXT ;						\
/* _XintrNN: entry point used by IDT/HWIs & splz_unpend via _vec[]. */	\
IDTVEC(vec_name) ;							\
	PUSH_FRAME ;							\
	movl	$KDSEL, %eax ;	/* reload with kernel's data segment */	\
	movl	%ax, %ds ;						\
	movl	%ax, %es ;						\
	movl	$KPSEL, %eax ;						\
	movl	%ax, %fs ;						\
;									\
	maybe_extra_ipending ;						\
;									\
	APIC_ITRACE(apic_itrace_enter, irq_num, APIC_ITRACE_ENTER) ;	\
	lock ;					/* MP-safe */		\
	btsl	$(irq_num), iactive ;		/* lazy masking */	\
	jc	1f ;				/* already active */	\
;									\
	MASK_LEVEL_IRQ(irq_num) ;					\
	EOI_IRQ(irq_num) ;						\
0: ;									\
	APIC_ITRACE(apic_itrace_tryisrlock, irq_num, APIC_ITRACE_TRYISRLOCK) ;\
	ENLOCK ;							\
;									\
	APIC_ITRACE(apic_itrace_gotisrlock, irq_num, APIC_ITRACE_GOTISRLOCK) ;\
	AVCPL_LOCK ;				/* MP-safe */		\
	testl	$IRQ_BIT(irq_num), _cpl ;				\
	jne	2f ;				/* this INT masked */	\
	testl	$IRQ_BIT(irq_num), _cml ;				\
	jne	2f ;				/* this INT masked */	\
	orl	$IRQ_BIT(irq_num), _cil ;				\
	AVCPL_UNLOCK ;							\
;									\
	incb	_intr_nesting_level ;					\
;	 								\
  /* entry point used by doreti_unpend for HWIs. */			\
__CONCAT(Xresume,irq_num): ;						\
	FAKE_MCOUNT(13*4(%esp)) ;		/* XXX avoid dbl cnt */ \
	lock ;	incl	_cnt+V_INTR ;		/* tally interrupts */	\
	movl	_intr_countp + (irq_num) * 4, %eax ;			\
	lock ;	incl	(%eax) ;					\
;									\
	AVCPL_LOCK ;				/* MP-safe */		\
	movl	_cml, %eax ;						\
	pushl	%eax ;							\
	orl	_intr_mask + (irq_num) * 4, %eax ;			\
	movl	%eax, _cml ;						\
	AVCPL_UNLOCK ;							\
;									\
	pushl	_intr_unit + (irq_num) * 4 ;				\
	incl	_inside_intr ;						\
	APIC_ITRACE(apic_itrace_enter2, irq_num, APIC_ITRACE_ENTER2) ;	\
	sti ;								\
	call	*_intr_handler + (irq_num) * 4 ;			\
	cli ;								\
	APIC_ITRACE(apic_itrace_leave, irq_num, APIC_ITRACE_LEAVE) ;	\
	decl	_inside_intr ;						\
;									\
	lock ;	andl $~IRQ_BIT(irq_num), iactive ;			\
	lock ;	andl $~IRQ_BIT(irq_num), _cil ;				\
	UNMASK_IRQ(irq_num) ;						\
	APIC_ITRACE(apic_itrace_unmask, irq_num, APIC_ITRACE_UNMASK) ;	\
	sti ;				/* doreti repeats cli/sti */	\
	MEXITCOUNT ;							\
	LATELOCK ;							\
	jmp	_doreti ;						\
;									\
	ALIGN_TEXT ;							\
1: ;						/* active */		\
	APIC_ITRACE(apic_itrace_active, irq_num, APIC_ITRACE_ACTIVE) ;	\
	MASK_IRQ(irq_num) ;						\
	EOI_IRQ(irq_num) ;						\
	AVCPL_LOCK ;				/* MP-safe */		\
	lock ;								\
	orl	$IRQ_BIT(irq_num), _ipending ;				\
	AVCPL_UNLOCK ;							\
	lock ;								\
	btsl	$(irq_num), iactive ;		/* still active */	\
	jnc	0b ;				/* retry */		\
	POP_FRAME ;							\
	iret ;								\
;									\
	ALIGN_TEXT ;							\
2: ;						/* masked by cpl|cml */	\
	APIC_ITRACE(apic_itrace_masked, irq_num, APIC_ITRACE_MASKED) ;	\
	lock ;								\
	orl	$IRQ_BIT(irq_num), _ipending ;				\
	AVCPL_UNLOCK ;							\
	DELOCK ;		/* XXX this is going away... */		\
	POP_FRAME ;							\
	iret ;								\
	ALIGN_TEXT ;							\
3: ; 			/* other cpu has isr lock */			\
	APIC_ITRACE(apic_itrace_noisrlock, irq_num, APIC_ITRACE_NOISRLOCK) ;\
	AVCPL_LOCK ;				/* MP-safe */		\
	lock ;								\
	orl	$IRQ_BIT(irq_num), _ipending ;				\
	testl	$IRQ_BIT(irq_num), _cpl ;				\
	jne	4f ;				/* this INT masked */	\
	testl	$IRQ_BIT(irq_num), _cml ;				\
	jne	4f ;				/* this INT masked */	\
	orl	$IRQ_BIT(irq_num), _cil ;				\
	AVCPL_UNLOCK ;							\
	call	forward_irq ;	/* forward irq to lock holder */	\
	POP_FRAME ;	 			/* and return */	\
	iret ;								\
	ALIGN_TEXT ;							\
4: ;	 					/* blocked */		\
	APIC_ITRACE(apic_itrace_masked2, irq_num, APIC_ITRACE_MASKED2) ;\
	AVCPL_UNLOCK ;							\
	POP_FRAME ;	 			/* and return */	\
	iret

#else /* CPL_AND_CML */


#define	INTR(irq_num, vec_name, maybe_extra_ipending)			\
	.text ;								\
	SUPERALIGN_TEXT ;						\
/* _XintrNN: entry point used by IDT/HWIs & splz_unpend via _vec[]. */	\
IDTVEC(vec_name) ;							\
	PUSH_FRAME ;							\
	movl	$KDSEL, %eax ;	/* reload with kernel's data segment */	\
	movl	%ax, %ds ;						\
	movl	%ax, %es ;						\
	movl	$KPSEL, %eax ;						\
	movl	%ax, %fs ;						\
;									\
	maybe_extra_ipending ;						\
;									\
	APIC_ITRACE(apic_itrace_enter, irq_num, APIC_ITRACE_ENTER) ;	\
	lock ;					/* MP-safe */		\
	btsl	$(irq_num), iactive ;		/* lazy masking */	\
	jc	1f ;				/* already active */	\
;									\
	MASK_LEVEL_IRQ(irq_num) ;					\
	EOI_IRQ(irq_num) ;						\
0: ;									\
	APIC_ITRACE(apic_itrace_tryisrlock, irq_num, APIC_ITRACE_TRYISRLOCK) ;\
	ISR_TRYLOCK ;		/* XXX this is going away... */		\
	testl	%eax, %eax ;			/* did we get it? */	\
	jz	3f ;				/* no */		\
;									\
	APIC_ITRACE(apic_itrace_gotisrlock, irq_num, APIC_ITRACE_GOTISRLOCK) ;\
	AVCPL_LOCK ;				/* MP-safe */		\
	testl	$IRQ_BIT(irq_num), _cpl ;				\
	jne	2f ;				/* this INT masked */	\
	AVCPL_UNLOCK ;							\
;									\
	incb	_intr_nesting_level ;					\
;	 								\
  /* entry point used by doreti_unpend for HWIs. */			\
__CONCAT(Xresume,irq_num): ;						\
	FAKE_MCOUNT(13*4(%esp)) ;		/* XXX avoid dbl cnt */ \
	lock ;	incl	_cnt+V_INTR ;		/* tally interrupts */	\
	movl	_intr_countp + (irq_num) * 4, %eax ;			\
	lock ;	incl	(%eax) ;					\
;									\
	AVCPL_LOCK ;				/* MP-safe */		\
	movl	_cpl, %eax ;						\
	pushl	%eax ;							\
	orl	_intr_mask + (irq_num) * 4, %eax ;			\
	movl	%eax, _cpl ;						\
	lock ;								\
	andl	$~IRQ_BIT(irq_num), _ipending ;				\
	AVCPL_UNLOCK ;							\
;									\
	pushl	_intr_unit + (irq_num) * 4 ;				\
	APIC_ITRACE(apic_itrace_enter2, irq_num, APIC_ITRACE_ENTER2) ;	\
	sti ;								\
	call	*_intr_handler + (irq_num) * 4 ;			\
	cli ;								\
	APIC_ITRACE(apic_itrace_leave, irq_num, APIC_ITRACE_LEAVE) ;	\
;									\
	lock ;	andl	$~IRQ_BIT(irq_num), iactive ;			\
	UNMASK_IRQ(irq_num) ;						\
	APIC_ITRACE(apic_itrace_unmask, irq_num, APIC_ITRACE_UNMASK) ;	\
	sti ;				/* doreti repeats cli/sti */	\
	MEXITCOUNT ;							\
	jmp	_doreti ;						\
;									\
	ALIGN_TEXT ;							\
1: ;						/* active  */		\
	APIC_ITRACE(apic_itrace_active, irq_num, APIC_ITRACE_ACTIVE) ;	\
	MASK_IRQ(irq_num) ;						\
	EOI_IRQ(irq_num) ;						\
	AVCPL_LOCK ;				/* MP-safe */		\
	lock ;								\
	orl	$IRQ_BIT(irq_num), _ipending ;				\
	AVCPL_UNLOCK ;							\
	lock ;								\
	btsl	$(irq_num), iactive ;		/* still active */	\
	jnc	0b ;				/* retry */		\
	POP_FRAME ;							\
	iret ;		/* XXX:	 iactive bit might be 0 now */		\
	ALIGN_TEXT ;							\
2: ;				/* masked by cpl, leave iactive set */	\
	APIC_ITRACE(apic_itrace_masked, irq_num, APIC_ITRACE_MASKED) ;	\
	lock ;								\
	orl	$IRQ_BIT(irq_num), _ipending ;				\
	AVCPL_UNLOCK ;							\
	ISR_RELLOCK ;		/* XXX this is going away... */		\
	POP_FRAME ;							\
	iret ;								\
	ALIGN_TEXT ;							\
3: ; 			/* other cpu has isr lock */			\
	APIC_ITRACE(apic_itrace_noisrlock, irq_num, APIC_ITRACE_NOISRLOCK) ;\
	AVCPL_LOCK ;				/* MP-safe */		\
	lock ;								\
	orl	$IRQ_BIT(irq_num), _ipending ;				\
	testl	$IRQ_BIT(irq_num), _cpl ;				\
	jne	4f ;				/* this INT masked */	\
	AVCPL_UNLOCK ;							\
	call	forward_irq ;	 /* forward irq to lock holder */	\
	POP_FRAME ;	 			/* and return */	\
	iret ;								\
	ALIGN_TEXT ;							\
4: ;	 					/* blocked */		\
	APIC_ITRACE(apic_itrace_masked2, irq_num, APIC_ITRACE_MASKED2) ;\
	AVCPL_UNLOCK ;							\
	POP_FRAME ;	 			/* and return */	\
	iret

#endif /* CPL_AND_CML */
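
/*
 * Control flow of INTR() in both variants above, in outline:
 *
 *	entry:	 set this IRQ's iactive bit (lazy masking);
 *		 if already active: mask the pin, EOI, pend and iret;
 *		 mask level-triggered pins and EOI if in service;
 *		 try the ISR lock, else pend and forward_irq;
 *		 if masked by cpl: pend, release the lock, iret;
 *	Xresume: raise cpl, run the handler with interrupts enabled,
 *		 clear iactive, unmask the pin, jmp _doreti.
 */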


/*
 * Handle "spurious INTerrupts".
 * Notes:
 *  This is different from the "spurious INTerrupt" generated by an
 *   8259 PIC for missing INTs.  See the APIC documentation for details.
 *  This routine should NOT do an 'EOI' cycle.
 */
	.text
	SUPERALIGN_TEXT
	.globl _Xspuriousint
_Xspuriousint:

	/* No EOI cycle used here */

	iret


/*
 * Handle TLB shootdowns.
 */
	.text
	SUPERALIGN_TEXT
	.globl	_Xinvltlb
_Xinvltlb:
	pushl	%eax

#ifdef COUNT_XINVLTLB_HITS
	pushl	%fs
	movl	$KPSEL, %eax
	movl	%ax, %fs
	movl	_cpuid, %eax
	popl	%fs
	ss
	incl	_xhits(,%eax,4)
#endif /* COUNT_XINVLTLB_HITS */

	movl	%cr3, %eax		/* invalidate the TLB */
	movl	%eax, %cr3

	ss				/* stack segment, avoid %ds load */
	movl	$0, lapic_eoi		/* End Of Interrupt to APIC */

	popl	%eax
	iret
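
/*
 * Reloading %cr3 with its own value flushes the (non-global) TLB
 * entries; it is effectively what the invltlb() inline does from C.
 */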


#ifdef BETTER_CLOCK

/*
 * Executed by a CPU when it receives an Xcpucheckstate IPI from another CPU:
 *
 *  - Stores current cpu state in checkstate_cpustate[cpuid]
 *      0 == user, 1 == sys, 2 == intr
 *  - Stores current process in checkstate_curproc[cpuid]
 *
 *  - Signals its receipt by setting bit cpuid in checkstate_probed_cpus.
 *
 * stack: 0->fs, 4->ds, 8->ebx, 12->eax, 16->eip, 20->cs, 24->eflags
 */

	.text
	SUPERALIGN_TEXT
	.globl _Xcpucheckstate
	.globl _checkstate_cpustate
	.globl _checkstate_curproc
	.globl _checkstate_pc
_Xcpucheckstate:
	pushl	%eax
	pushl	%ebx
	pushl	%ds			/* save current data segment */
	pushl	%fs

	movl	$KDSEL, %eax
	movl	%ax, %ds		/* use KERNEL data segment */
	movl	$KPSEL, %eax
	movl	%ax, %fs

	movl	$0, lapic_eoi		/* End Of Interrupt to APIC */

	movl	$0, %ebx
	movl	20(%esp), %eax
	andl	$3, %eax
	cmpl	$3, %eax
	je	1f
	testl	$PSL_VM, 24(%esp)
	jne	1f
	incl	%ebx			/* system or interrupt */
#ifdef CPL_AND_CML
	cmpl	$0, _inside_intr
	je	1f
	incl	%ebx			/* interrupt */
#endif
1:
	movl	_cpuid, %eax
	movl	%ebx, _checkstate_cpustate(,%eax,4)
	movl	_curproc, %ebx
	movl	%ebx, _checkstate_curproc(,%eax,4)
	movl	16(%esp), %ebx
	movl	%ebx, _checkstate_pc(,%eax,4)

	lock				/* checkstate_probed_cpus |= (1<<id) */
	btsl	%eax, _checkstate_probed_cpus

	popl	%fs
	popl	%ds			/* restore previous data segment */
	popl	%ebx
	popl	%eax
	iret

#endif /* BETTER_CLOCK */

/*
 * Executed by a CPU when it receives an Xcpuast IPI from another CPU:
 *
 *  - Signals its receipt by clearing bit cpuid in checkstate_need_ast.
 *
 *  - We need a better method of triggering asts on other cpus.
 */

	.text
	SUPERALIGN_TEXT
	.globl _Xcpuast
_Xcpuast:
	PUSH_FRAME
	movl	$KDSEL, %eax
	movl	%ax, %ds		/* use KERNEL data segment */
	movl	%ax, %es
	movl	$KPSEL, %eax
	movl	%ax, %fs

	movl	_cpuid, %eax
	lock				/* checkstate_need_ast &= ~(1<<id) */
	btrl	%eax, _checkstate_need_ast
	movl	$0, lapic_eoi		/* End Of Interrupt to APIC */

	lock
	btsl	%eax, _checkstate_pending_ast
	jc	1f

	FAKE_MCOUNT(13*4(%esp))

	/*
	 * Giant locks do not come cheap.
	 * A lot of cycles are going to be wasted here.
	 */
	call	_get_isrlock

	AVCPL_LOCK
#ifdef CPL_AND_CML
	movl	_cml, %eax
#else
	movl	_cpl, %eax
#endif
	pushl	%eax
	lock
	orl	$SWI_AST_PENDING, _ipending
	AVCPL_UNLOCK
	lock
	incb	_intr_nesting_level
	sti

	pushl	$0

	movl	_cpuid, %eax
	lock
	btrl	%eax, _checkstate_pending_ast
	lock
	btrl	%eax, CNAME(resched_cpus)
	jnc	2f
	movl	$1, CNAME(want_resched)
	lock
	incl	CNAME(want_resched_cnt)
2:
	lock
	incl	CNAME(cpuast_cnt)
	MEXITCOUNT
	jmp	_doreti
1:
	/* We are already in the process of delivering an ast for this CPU */
	POP_FRAME
	iret


/*
 * Executed by a CPU when it receives an XFORWARD_IRQ IPI.
 */

	.text
	SUPERALIGN_TEXT
	.globl _Xforward_irq
_Xforward_irq:
	PUSH_FRAME
	movl	$KDSEL, %eax
	movl	%ax, %ds		/* use KERNEL data segment */
	movl	%ax, %es
	movl	$KPSEL, %eax
	movl	%ax, %fs

	movl	$0, lapic_eoi		/* End Of Interrupt to APIC */

	FAKE_MCOUNT(13*4(%esp))

	ISR_TRYLOCK
	testl	%eax,%eax		/* Did we get the lock? */
	jz	1f			/* No */

	lock
	incl	CNAME(forward_irq_hitcnt)
	cmpb	$4, _intr_nesting_level
	jae	2f

	AVCPL_LOCK
#ifdef CPL_AND_CML
	movl	_cml, %eax
#else
	movl	_cpl, %eax
#endif
	pushl	%eax
	AVCPL_UNLOCK
	lock
	incb	_intr_nesting_level
	sti

	pushl	$0

	MEXITCOUNT
	jmp	_doreti			/* Handle forwarded interrupt */
1:
	lock
	incl	CNAME(forward_irq_misscnt)
	call	forward_irq	/* Oops, we've lost the isr lock */
	MEXITCOUNT
	POP_FRAME
	iret
2:
	lock
	incl	CNAME(forward_irq_toodeepcnt)
3:
	ISR_RELLOCK
	MEXITCOUNT
	POP_FRAME
	iret

/*
 * Forward the current interrupt to the CPU holding the MP lock (CPU 0
 * if the lock is free) by sending it an XFORWARD_IRQ IPI.
 */
forward_irq:
	MCOUNT
	cmpl	$0,_invltlb_ok
	jz	4f

	cmpl	$0, CNAME(forward_irq_enabled)
	jz	4f

	movl	_mp_lock,%eax
	cmpl	$FREE_LOCK,%eax
	jne	1f
	movl	$0, %eax		/* Pick CPU #0 if no one has lock */
1:
	shrl	$24,%eax
	movl	_cpu_num_to_apic_id(,%eax,4),%ecx
	shll	$24,%ecx
	movl	lapic_icr_hi, %eax
	andl	$~APIC_ID_MASK, %eax
	orl	%ecx, %eax
	movl	%eax, lapic_icr_hi

2:
	movl	lapic_icr_lo, %eax
	andl	$APIC_DELSTAT_MASK,%eax
	jnz	2b
	movl	lapic_icr_lo, %eax
	andl	$APIC_RESV2_MASK, %eax
	orl	$(APIC_DEST_DESTFLD|APIC_DELMODE_FIXED|XFORWARD_IRQ_OFFSET), %eax
	movl	%eax, lapic_icr_lo
3:
	movl	lapic_icr_lo, %eax
	andl	$APIC_DELSTAT_MASK,%eax
	jnz	3b
4:
	ret
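
/*
 * The IPI send above follows the usual local APIC ICR dance: put the
 * target APIC ID in the high ICR word, spin while the delivery-status
 * bit is set, write the vector (XFORWARD_IRQ_OFFSET) with fixed
 * delivery mode into the low word, then spin again until delivery
 * status clears.
 */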

/*
 * Executed by a CPU when it receives an Xcpustop IPI from another CPU:
 *
 *  - Signals its receipt.
 *  - Waits for permission to restart.
 *  - Signals its restart.
 */

	.text
	SUPERALIGN_TEXT
	.globl _Xcpustop
_Xcpustop:
	pushl	%ebp
	movl	%esp, %ebp
	pushl	%eax
	pushl	%ecx
	pushl	%edx
	pushl	%ds			/* save current data segment */
	pushl	%fs

	movl	$KDSEL, %eax
	movl	%ax, %ds		/* use KERNEL data segment */
	movl	$KPSEL, %eax
	movl	%ax, %fs

	movl	$0, lapic_eoi		/* End Of Interrupt to APIC */

	movl	_cpuid, %eax
	imull	$PCB_SIZE, %eax
	leal	CNAME(stoppcbs)(%eax), %eax
	pushl	%eax
	call	CNAME(savectx)		/* Save process context */
	addl	$4, %esp

	movl	_cpuid, %eax

	lock
	btsl	%eax, _stopped_cpus	/* stopped_cpus |= (1<<id) */
1:
	btl	%eax, _started_cpus	/* while (!(started_cpus & (1<<id))) */
	jnc	1b

	lock
	btrl	%eax, _started_cpus	/* started_cpus &= ~(1<<id) */
	lock
	btrl	%eax, _stopped_cpus	/* stopped_cpus &= ~(1<<id) */

	test	%eax, %eax
	jnz	2f

	movl	CNAME(cpustop_restartfunc), %eax
	test	%eax, %eax
	jz	2f
	movl	$0, CNAME(cpustop_restartfunc)	/* One-shot */

	call	*%eax
2:
	popl	%fs
	popl	%ds			/* restore previous data segment */
	popl	%edx
	popl	%ecx
	popl	%eax
	movl	%ebp, %esp
	popl	%ebp
	iret
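
/*
 * Note: stoppcbs is indexed as a flat array of PCBs (cpuid *
 * PCB_SIZE), so the savectx() call above records each stopped CPU's
 * register context before it spins waiting on started_cpus.
 */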


MCOUNT_LABEL(bintr)
	FAST_INTR(0,fastintr0)
	FAST_INTR(1,fastintr1)
	FAST_INTR(2,fastintr2)
	FAST_INTR(3,fastintr3)
	FAST_INTR(4,fastintr4)
	FAST_INTR(5,fastintr5)
	FAST_INTR(6,fastintr6)
	FAST_INTR(7,fastintr7)
	FAST_INTR(8,fastintr8)
	FAST_INTR(9,fastintr9)
	FAST_INTR(10,fastintr10)
	FAST_INTR(11,fastintr11)
	FAST_INTR(12,fastintr12)
	FAST_INTR(13,fastintr13)
	FAST_INTR(14,fastintr14)
	FAST_INTR(15,fastintr15)
	FAST_INTR(16,fastintr16)
	FAST_INTR(17,fastintr17)
	FAST_INTR(18,fastintr18)
	FAST_INTR(19,fastintr19)
	FAST_INTR(20,fastintr20)
	FAST_INTR(21,fastintr21)
	FAST_INTR(22,fastintr22)
	FAST_INTR(23,fastintr23)
#define	CLKINTR_PENDING	movl $1,CNAME(clkintr_pending)
	INTR(0,intr0, CLKINTR_PENDING)
	INTR(1,intr1,)
	INTR(2,intr2,)
	INTR(3,intr3,)
	INTR(4,intr4,)
	INTR(5,intr5,)
	INTR(6,intr6,)
	INTR(7,intr7,)
	INTR(8,intr8,)
	INTR(9,intr9,)
	INTR(10,intr10,)
	INTR(11,intr11,)
	INTR(12,intr12,)
	INTR(13,intr13,)
	INTR(14,intr14,)
	INTR(15,intr15,)
	INTR(16,intr16,)
	INTR(17,intr17,)
	INTR(18,intr18,)
	INTR(19,intr19,)
	INTR(20,intr20,)
	INTR(21,intr21,)
	INTR(22,intr22,)
	INTR(23,intr23,)
MCOUNT_LABEL(eintr)

	.data
/*
 * Addresses of interrupt handlers.
 *  XresumeNN: Resumption addresses for HWIs.
 */
	.globl _ihandlers
_ihandlers:
/*
 * used by:
 *  ipl.s:	doreti_unpend
 */
	.long	Xresume0,  Xresume1,  Xresume2,  Xresume3
	.long	Xresume4,  Xresume5,  Xresume6,  Xresume7
	.long	Xresume8,  Xresume9,  Xresume10, Xresume11
	.long	Xresume12, Xresume13, Xresume14, Xresume15
	.long	Xresume16, Xresume17, Xresume18, Xresume19
	.long	Xresume20, Xresume21, Xresume22, Xresume23
/*
 * used by:
 *  ipl.s:	doreti_unpend
 *  apic_ipl.s:	splz_unpend
 */
	.long	_swi_null, swi_net, _swi_null, _swi_null
	.long	_swi_vm, _swi_null, _softclock, swi_ast

imasks:				/* masks for interrupt handlers */
	.space	NHWI*4		/* padding; HWI masks are elsewhere */

	.long	SWI_TTY_MASK, SWI_NET_MASK, SWI_CAMNET_MASK, SWI_CAMBIO_MASK
	.long	SWI_VM_MASK, 0, SWI_CLOCK_MASK, SWI_AST_MASK
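
/*
 * Layout of imasks[]: the first NHWI entries are padding (the HWI
 * masks live elsewhere, in _intr_mask), followed by eight SWI masks
 * in the same order as the eight SWI handler slots above.
 */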

/* active flag for lazy masking */
iactive:
	.long	0

#ifdef COUNT_XINVLTLB_HITS
	.globl	_xhits
_xhits:
	.space	(NCPU * 4), 0
#endif /* COUNT_XINVLTLB_HITS */

/* variables used by stop_cpus()/restart_cpus()/Xcpustop */
	.globl _stopped_cpus, _started_cpus
_stopped_cpus:
	.long	0
_started_cpus:
	.long	0

#ifdef BETTER_CLOCK
	.globl _checkstate_probed_cpus
_checkstate_probed_cpus:
	.long	0
#endif /* BETTER_CLOCK */
	.globl _checkstate_need_ast
_checkstate_need_ast:
	.long	0
_checkstate_pending_ast:
	.long	0
	.globl CNAME(forward_irq_misscnt)
	.globl CNAME(forward_irq_toodeepcnt)
	.globl CNAME(forward_irq_hitcnt)
	.globl CNAME(resched_cpus)
	.globl CNAME(want_resched_cnt)
	.globl CNAME(cpuast_cnt)
	.globl CNAME(cpustop_restartfunc)
CNAME(forward_irq_misscnt):
	.long 0
CNAME(forward_irq_hitcnt):
	.long 0
CNAME(forward_irq_toodeepcnt):
	.long 0
CNAME(resched_cpus):
	.long 0
CNAME(want_resched_cnt):
	.long 0
CNAME(cpuast_cnt):
	.long 0
CNAME(cpustop_restartfunc):
	.long 0


	.globl	_apic_pin_trigger
_apic_pin_trigger:
	.long	0

	.text