/*
 *	from: vector.s, 386BSD 0.1 unknown origin
 *	$Id: apic_vector.s,v 1.41 1999/07/03 06:33:47 alc Exp $
 */


#include <machine/apic.h>
#include <machine/smp.h>

#include "i386/isa/intr_machdep.h"


#ifdef FAST_SIMPLELOCK

#define GET_FAST_INTR_LOCK						\
	pushl	$_fast_intr_lock ;		/* address of lock */	\
	call	_s_lock ;			/* MP-safe */		\
	addl	$4,%esp

#define REL_FAST_INTR_LOCK						\
	pushl	$_fast_intr_lock ;		/* address of lock */	\
	call	_s_unlock ;			/* MP-safe */		\
	addl	$4,%esp

#else /* FAST_SIMPLELOCK */

#define GET_FAST_INTR_LOCK						\
	call	_get_isrlock

#define REL_FAST_INTR_LOCK						\
	pushl	$_mp_lock ;	/* GIANT_LOCK */			\
	call	_MPrellock ;						\
	addl	$4, %esp

#endif /* FAST_SIMPLELOCK */
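
/*
 * GET_FAST_INTR_LOCK/REL_FAST_INTR_LOCK: with FAST_SIMPLELOCK the fast
 * path takes only the private fast_intr_lock spin lock; in the default
 * case it acquires and releases the giant ISR lock (_mp_lock) around
 * the handler call.
 */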

/* convert an absolute IRQ# into a bitmask */
#define IRQ_BIT(irq_num)	(1 << (irq_num))

/* make an index into the IO APIC from the IRQ# */
#define REDTBL_IDX(irq_num)	(0x10 + ((irq_num) * 2))
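
/*
 * Example: the I/O APIC redirection table starts at register 0x10 with
 * two 32-bit registers per entry, so IRQ 9 maps to IRQ_BIT 0x200 and
 * REDTBL_IDX(9) = 0x10 + 9 * 2 = 0x22 (low half of redirection entry 9).
 */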


/*
 * Macros for interrupt entry, call to handler, and exit.
 */

#ifdef FAST_WITHOUTCPL

/*
 * Fast interrupt entry: save only the call-used registers, call the
 * handler directly, EOI, and return; the cpl is never checked or changed.
 */
#define	FAST_INTR(irq_num, vec_name)					\
	.text ;								\
	SUPERALIGN_TEXT ;						\
IDTVEC(vec_name) ;							\
	pushl	%eax ;		/* save only call-used registers */	\
	pushl	%ecx ;							\
	pushl	%edx ;							\
	pushl	%ds ;							\
	MAYBE_PUSHL_ES ;						\
	pushl	%fs ;							\
	movl	$KDSEL,%eax ;						\
	movl	%ax,%ds ;						\
	MAYBE_MOVW_AX_ES ;						\
	movl	$KPSEL,%eax ;						\
	movl	%ax,%fs ;						\
	FAKE_MCOUNT((5+ACTUALLY_PUSHED)*4(%esp)) ;			\
	pushl	_intr_unit + (irq_num) * 4 ;				\
	GET_FAST_INTR_LOCK ;						\
	call	*_intr_handler + (irq_num) * 4 ; /* do the work ASAP */ \
	REL_FAST_INTR_LOCK ;						\
	addl	$4, %esp ;						\
	movl	$0, lapic_eoi ;						\
	lock ; 								\
	incl	_cnt+V_INTR ;	/* book-keeping can wait */		\
	movl	_intr_countp + (irq_num) * 4, %eax ;			\
	lock ; 								\
	incl	(%eax) ;						\
	MEXITCOUNT ;							\
	popl	%fs ;							\
	MAYBE_POPL_ES ;							\
	popl	%ds ;							\
	popl	%edx ;							\
	popl	%ecx ;							\
	popl	%eax ;							\
	iret

#else /* FAST_WITHOUTCPL */

#define	FAST_INTR(irq_num, vec_name)					\
	.text ;								\
	SUPERALIGN_TEXT ;						\
IDTVEC(vec_name) ;							\
	pushl	%eax ;		/* save only call-used registers */	\
	pushl	%ecx ;							\
	pushl	%edx ;							\
	pushl	%ds ;							\
	MAYBE_PUSHL_ES ;						\
	pushl	%fs ;							\
	movl	$KDSEL, %eax ;						\
	movl	%ax, %ds ;						\
	MAYBE_MOVW_AX_ES ;						\
	movl	$KPSEL, %eax ;						\
	movl	%ax, %fs ;						\
	FAKE_MCOUNT((5+ACTUALLY_PUSHED)*4(%esp)) ;			\
	GET_FAST_INTR_LOCK ;						\
	pushl	_intr_unit + (irq_num) * 4 ;				\
	call	*_intr_handler + (irq_num) * 4 ; /* do the work ASAP */ \
	addl	$4, %esp ;						\
	movl	$0, lapic_eoi ;						\
	lock ; 								\
	incl	_cnt+V_INTR ;	/* book-keeping can wait */		\
	movl	_intr_countp + (irq_num) * 4,%eax ;			\
	lock ; 								\
	incl	(%eax) ;						\
	movl	_cpl, %eax ;	/* unmasking pending HWIs or SWIs? */	\
	notl	%eax ;							\
	andl	_ipending, %eax ;					\
	jne	2f ; 		/* yes, maybe handle them */		\
1: ;									\
	MEXITCOUNT ;							\
	REL_FAST_INTR_LOCK ;						\
	popl	%fs ;							\
	MAYBE_POPL_ES ;							\
	popl	%ds ;							\
	popl	%edx ;							\
	popl	%ecx ;							\
	popl	%eax ;							\
	iret ;								\
;									\
	ALIGN_TEXT ;							\
2: ;									\
	cmpb	$3, _intr_nesting_level ;	/* enough stack? */	\
	jae	1b ;		/* no, return */			\
	movl	_cpl, %eax ;						\
	/* XXX next line is probably unnecessary now. */		\
	movl	$HWI_MASK|SWI_MASK, _cpl ;	/* limit nesting ... */	\
	lock ; 								\
	incb	_intr_nesting_level ;	/* ... really limit it ... */	\
	sti ;			/* to do this as early as possible */	\
	popl	%fs ;		/* discard most of thin frame ... */	\
	MAYBE_POPL_ES ;		/* discard most of thin frame ... */	\
	popl	%ecx ;		/* ... original %ds ... */		\
	popl	%edx ;							\
	xchgl	%eax, 4(%esp) ;	/* orig %eax; save cpl */		\
	pushal ;		/* build fat frame (grrr) ... */	\
	pushl	%ecx ;		/* ... actually %ds ... */		\
	pushl	%es ;							\
	pushl	%fs ;							\
	movl	$KDSEL, %eax ;						\
	movl	%ax, %es ;						\
	movl	$KPSEL, %eax ;						\
	movl	%ax, %fs ;						\
	movl	(3+8+0)*4(%esp), %ecx ;	/* %ecx from thin frame ... */	\
	movl	%ecx, (3+6)*4(%esp) ;	/* ... to fat frame ... */	\
	movl	(3+8+1)*4(%esp), %eax ;	/* ... cpl from thin frame */	\
	pushl	%eax ;							\
	subl	$4, %esp ;	/* junk for unit number */		\
	MEXITCOUNT ;							\
	jmp	_doreti

#endif /* FAST_WITHOUTCPL */


/*
 * PUSH_FRAME/POP_FRAME build and tear down a full trap-style frame for
 * the slow (masking) interrupt vectors and the IPI handlers below.
 */
#define PUSH_FRAME							\
	pushl	$0 ;		/* dummy error code */			\
	pushl	$0 ;		/* dummy trap type */			\
	pushal ;							\
	pushl	%ds ;		/* save data and extra segments ... */	\
	pushl	%es ;							\
	pushl	%fs

#define POP_FRAME							\
	popl	%fs ;							\
	popl	%es ;							\
	popl	%ds ;							\
	popal ;								\
	addl	$4+4,%esp

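/*
 * Resulting frame (from %esp): 0 fs, 4 es, 8 ds, 12-40 the eight pushal
 * registers (edi lowest, eax highest), 44 dummy trap type, 48 dummy error
 * code, 52 eip; FAKE_MCOUNT(13*4(%esp)) below therefore points at the
 * saved eip, and POP_FRAME's "addl $4+4" discards the two dummy words.
 */
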
#define IOAPICADDR(irq_num) CNAME(int_to_apicintpin) + 16 * (irq_num) + 8
#define REDIRIDX(irq_num) CNAME(int_to_apicintpin) + 16 * (irq_num) + 12

#define MASK_IRQ(irq_num)						\
	IMASK_LOCK ;				/* into critical reg */	\
	testl	$IRQ_BIT(irq_num), _apic_imen ;				\
	jne	7f ;			/* masked, don't mask */	\
	orl	$IRQ_BIT(irq_num), _apic_imen ;	/* set the mask bit */	\
	movl	IOAPICADDR(irq_num), %ecx ;	/* ioapic addr */	\
	movl	REDIRIDX(irq_num), %eax ;	/* get the index */	\
	movl	%eax, (%ecx) ;			/* write the index */	\
	movl	IOAPIC_WINDOW(%ecx), %eax ;	/* current value */	\
	orl	$IOART_INTMASK, %eax ;		/* set the mask */	\
	movl	%eax, IOAPIC_WINDOW(%ecx) ;	/* new value */		\
7: ;						/* already masked */	\
	IMASK_UNLOCK
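
/*
 * The I/O APIC is programmed through an index/window register pair, so
 * the read-modify-write of a redirection entry (here and in UNMASK_IRQ)
 * must stay inside the IMASK_LOCK critical section, which also guards
 * the _apic_imen mask image.
 */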
/*
 * Test to see whether we are handling an edge or level triggered INT.
 *  Level-triggered INTs must still be masked as we don't clear the source,
 *  and the EOI cycle would cause redundant INTs to occur.
 */
#define MASK_LEVEL_IRQ(irq_num)						\
	testl	$IRQ_BIT(irq_num), _apic_pin_trigger ;			\
	jz	9f ;				/* edge, don't mask */	\
	MASK_IRQ(irq_num) ;						\
9:


#ifdef APIC_INTR_REORDER
#define EOI_IRQ(irq_num)						\
	movl	_apic_isrbit_location + 8 * (irq_num), %eax ;		\
	movl	(%eax), %eax ;						\
	testl	_apic_isrbit_location + 4 + 8 * (irq_num), %eax ;	\
	jz	9f ;				/* not active */	\
	movl	$0, lapic_eoi ;						\
	APIC_ITRACE(apic_itrace_eoi, irq_num, APIC_ITRACE_EOI) ;	\
9:

#else
#define EOI_IRQ(irq_num)						\
	testl	$IRQ_BIT(irq_num), lapic_isr1;				\
	jz	9f	;			/* not active */	\
	movl	$0, lapic_eoi;						\
	APIC_ITRACE(apic_itrace_eoi, irq_num, APIC_ITRACE_EOI) ;	\
9:
#endif
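
/*
 * EOI_IRQ issues the EOI only when the local APIC ISR shows the vector
 * still in service.  With APIC_INTR_REORDER, apic_isrbit_location[] holds
 * a precomputed (ISR register address, bit mask) pair for each IRQ; the
 * default case assumes all IRQ vectors fall in ISR register 1 (vectors
 * 32-63).
 */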


/*
 * Test to see if the source is currently masked; clear the mask if so.
 */
#define UNMASK_IRQ(irq_num)					\
	IMASK_LOCK ;				/* into critical reg */	\
	testl	$IRQ_BIT(irq_num), _apic_imen ;				\
	je	7f ;			/* bit clear, not masked */	\
	andl	$~IRQ_BIT(irq_num), _apic_imen ;/* clear mask bit */	\
	movl	IOAPICADDR(irq_num),%ecx ;	/* ioapic addr */	\
	movl	REDIRIDX(irq_num), %eax ;	/* get the index */	\
	movl	%eax,(%ecx) ;			/* write the index */	\
	movl	IOAPIC_WINDOW(%ecx),%eax ;	/* current value */	\
	andl	$~IOART_INTMASK,%eax ;		/* clear the mask */	\
	movl	%eax,IOAPIC_WINDOW(%ecx) ;	/* new value */		\
7: ;									\
	IMASK_UNLOCK

#ifdef INTR_SIMPLELOCK
#define ENLOCK
#define DELOCK
#define LATELOCK call	_get_isrlock
#else
#define ENLOCK \
	ISR_TRYLOCK ;		/* XXX this is going away... */		\
	testl	%eax, %eax ;			/* did we get it? */	\
	jz	3f
#define DELOCK	ISR_RELLOCK
#define LATELOCK
#endif
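
/*
 * With INTR_SIMPLELOCK the ISR lock is taken late, just before _doreti,
 * via LATELOCK; otherwise ENLOCK try-locks it on entry and branches to
 * label 3 (lock busy, forward the IRQ) when another CPU holds it.
 */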

#ifdef APIC_INTR_DIAGNOSTIC
#ifdef APIC_INTR_DIAGNOSTIC_IRQ
log_intr_event:
	pushf
	cli
	pushl	$CNAME(apic_itrace_debuglock)
	call	CNAME(s_lock_np)
	addl	$4, %esp
	movl	CNAME(apic_itrace_debugbuffer_idx), %ecx
	andl	$32767, %ecx
	movl	_cpuid, %eax
	shll	$8,	%eax
	orl	8(%esp), %eax
	movw	%ax,	CNAME(apic_itrace_debugbuffer)(,%ecx,2)
	incl	%ecx
	andl	$32767, %ecx
	movl	%ecx,	CNAME(apic_itrace_debugbuffer_idx)
	pushl	$CNAME(apic_itrace_debuglock)
	call	CNAME(s_unlock_np)
	addl	$4, %esp
	popf
	ret


#define APIC_ITRACE(name, irq_num, id)					\
	lock ;					/* MP-safe */		\
	incl	CNAME(name) + (irq_num) * 4 ;				\
	pushl	%eax ;							\
	pushl	%ecx ;							\
	pushl	%edx ;							\
	movl	$(irq_num), %eax ;					\
	cmpl	$APIC_INTR_DIAGNOSTIC_IRQ, %eax ;			\
	jne	7f ;							\
	pushl	$id ;							\
	call	log_intr_event ;					\
	addl	$4, %esp ;						\
7: ;									\
	popl	%edx ;							\
	popl	%ecx ;							\
	popl	%eax
#else
#define APIC_ITRACE(name, irq_num, id)					\
	lock ;					/* MP-safe */		\
	incl	CNAME(name) + (irq_num) * 4
#endif

#define APIC_ITRACE_ENTER 1
#define APIC_ITRACE_EOI 2
#define APIC_ITRACE_TRYISRLOCK 3
#define APIC_ITRACE_GOTISRLOCK 4
#define APIC_ITRACE_ENTER2 5
#define APIC_ITRACE_LEAVE 6
#define APIC_ITRACE_UNMASK 7
#define APIC_ITRACE_ACTIVE 8
#define APIC_ITRACE_MASKED 9
#define APIC_ITRACE_NOISRLOCK 10
#define APIC_ITRACE_MASKED2 11
#define APIC_ITRACE_SPLZ 12
#define APIC_ITRACE_DORETI 13

#else
#define APIC_ITRACE(name, irq_num, id)
#endif
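
/*
 * log_intr_event records a 16-bit (cpuid << 8 | event id) entry in a
 * 32768-entry circular trace buffer, under apic_itrace_debuglock with
 * interrupts disabled; only events for APIC_INTR_DIAGNOSTIC_IRQ are
 * logged.
 */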

#ifdef CPL_AND_CML

#define	INTR(irq_num, vec_name, maybe_extra_ipending)			\
	.text ;								\
	SUPERALIGN_TEXT ;						\
/* _XintrNN: entry point used by IDT/HWIs & splz_unpend via _vec[]. */	\
IDTVEC(vec_name) ;							\
	PUSH_FRAME ;							\
	movl	$KDSEL, %eax ;	/* reload with kernel's data segment */	\
	movl	%ax, %ds ;						\
	movl	%ax, %es ;						\
	movl	$KPSEL, %eax ;						\
	movl	%ax, %fs ;						\
;									\
	maybe_extra_ipending ;						\
;									\
	APIC_ITRACE(apic_itrace_enter, irq_num, APIC_ITRACE_ENTER) ;	\
	lock ;					/* MP-safe */		\
	btsl	$(irq_num), iactive ;		/* lazy masking */	\
	jc	1f ;				/* already active */	\
;									\
	MASK_LEVEL_IRQ(irq_num) ;					\
	EOI_IRQ(irq_num) ;						\
0: ;									\
	APIC_ITRACE(apic_itrace_tryisrlock, irq_num, APIC_ITRACE_TRYISRLOCK) ;\
	ENLOCK ;							\
;									\
	APIC_ITRACE(apic_itrace_gotisrlock, irq_num, APIC_ITRACE_GOTISRLOCK) ;\
	AVCPL_LOCK ;				/* MP-safe */		\
	testl	$IRQ_BIT(irq_num), _cpl ;				\
	jne	2f ;				/* this INT masked */	\
	testl	$IRQ_BIT(irq_num), _cml ;				\
	jne	2f ;				/* this INT masked */	\
	orl	$IRQ_BIT(irq_num), _cil ;				\
	AVCPL_UNLOCK ;							\
;									\
	incb	_intr_nesting_level ;					\
;	 								\
  /* entry point used by doreti_unpend for HWIs. */			\
__CONCAT(Xresume,irq_num): ;						\
	FAKE_MCOUNT(13*4(%esp)) ;		/* XXX avoid dbl cnt */ \
	lock ;	incl	_cnt+V_INTR ;		/* tally interrupts */	\
	movl	_intr_countp + (irq_num) * 4, %eax ;			\
	lock ;	incl	(%eax) ;					\
;									\
	AVCPL_LOCK ;				/* MP-safe */		\
	movl	_cml, %eax ;						\
	pushl	%eax ;							\
	orl	_intr_mask + (irq_num) * 4, %eax ;			\
	movl	%eax, _cml ;						\
	AVCPL_UNLOCK ;							\
;									\
	pushl	_intr_unit + (irq_num) * 4 ;				\
	incl	_inside_intr ;						\
	APIC_ITRACE(apic_itrace_enter2, irq_num, APIC_ITRACE_ENTER2) ;	\
	sti ;								\
	call	*_intr_handler + (irq_num) * 4 ;			\
	cli ;								\
	APIC_ITRACE(apic_itrace_leave, irq_num, APIC_ITRACE_LEAVE) ;	\
	decl	_inside_intr ;						\
;									\
	lock ;	andl $~IRQ_BIT(irq_num), iactive ;			\
	lock ;	andl $~IRQ_BIT(irq_num), _cil ;				\
	UNMASK_IRQ(irq_num) ;						\
	APIC_ITRACE(apic_itrace_unmask, irq_num, APIC_ITRACE_UNMASK) ;	\
	sti ;				/* doreti repeats cli/sti */	\
	MEXITCOUNT ;							\
	LATELOCK ;							\
	jmp	_doreti ;						\
;									\
	ALIGN_TEXT ;							\
1: ;						/* active */		\
	APIC_ITRACE(apic_itrace_active, irq_num, APIC_ITRACE_ACTIVE) ;	\
	MASK_IRQ(irq_num) ;						\
	EOI_IRQ(irq_num) ;						\
	AVCPL_LOCK ;				/* MP-safe */		\
	lock ;								\
	orl	$IRQ_BIT(irq_num), _ipending ;				\
	AVCPL_UNLOCK ;							\
	lock ;								\
	btsl	$(irq_num), iactive ;		/* still active */	\
	jnc	0b ;				/* retry */		\
	POP_FRAME ;							\
	iret ;								\
;									\
	ALIGN_TEXT ;							\
2: ;						/* masked by cpl|cml */	\
	APIC_ITRACE(apic_itrace_masked, irq_num, APIC_ITRACE_MASKED) ;	\
	lock ;								\
	orl	$IRQ_BIT(irq_num), _ipending ;				\
	AVCPL_UNLOCK ;							\
	DELOCK ;		/* XXX this is going away... */		\
	POP_FRAME ;							\
	iret ;								\
	ALIGN_TEXT ;							\
3: ; 			/* other cpu has isr lock */			\
	APIC_ITRACE(apic_itrace_noisrlock, irq_num, APIC_ITRACE_NOISRLOCK) ;\
	AVCPL_LOCK ;				/* MP-safe */		\
	lock ;								\
	orl	$IRQ_BIT(irq_num), _ipending ;				\
	testl	$IRQ_BIT(irq_num), _cpl ;				\
	jne	4f ;				/* this INT masked */	\
	testl	$IRQ_BIT(irq_num), _cml ;				\
	jne	4f ;				/* this INT masked */	\
	orl	$IRQ_BIT(irq_num), _cil ;				\
	AVCPL_UNLOCK ;							\
	call	forward_irq ;	/* forward irq to lock holder */	\
	POP_FRAME ;	 			/* and return */	\
	iret ;								\
	ALIGN_TEXT ;							\
4: ;	 					/* blocked */		\
	APIC_ITRACE(apic_itrace_masked2, irq_num, APIC_ITRACE_MASKED2) ;\
	AVCPL_UNLOCK ;							\
	POP_FRAME ;	 			/* and return */	\
	iret
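
/*
 * Flow of the INTR vectors above: lazily mask the source via its iactive
 * bit, EOI, try-lock the ISR lock, check cpl/cml, then fall into XresumeNN
 * to tally and call the handler before unmasking and leaving via _doreti.
 * Label 1 handles an already-active source, 2 an interrupt masked by
 * cpl/cml, 3 a busy ISR lock (forward the IRQ to the holder), 4 blocked.
 * The variant below follows the same structure without the cml/cil
 * bookkeeping.
 */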

#else /* CPL_AND_CML */


#define	INTR(irq_num, vec_name, maybe_extra_ipending)			\
	.text ;								\
	SUPERALIGN_TEXT ;						\
/* _XintrNN: entry point used by IDT/HWIs & splz_unpend via _vec[]. */	\
IDTVEC(vec_name) ;							\
	PUSH_FRAME ;							\
	movl	$KDSEL, %eax ;	/* reload with kernel's data segment */	\
	movl	%ax, %ds ;						\
	movl	%ax, %es ;						\
	movl	$KPSEL, %eax ;						\
	movl	%ax, %fs ;						\
;									\
	maybe_extra_ipending ;						\
;									\
	APIC_ITRACE(apic_itrace_enter, irq_num, APIC_ITRACE_ENTER) ;	\
	lock ;					/* MP-safe */		\
	btsl	$(irq_num), iactive ;		/* lazy masking */	\
	jc	1f ;				/* already active */	\
;									\
	MASK_LEVEL_IRQ(irq_num) ;					\
	EOI_IRQ(irq_num) ;						\
0: ;									\
	APIC_ITRACE(apic_itrace_tryisrlock, irq_num, APIC_ITRACE_TRYISRLOCK) ;\
	ISR_TRYLOCK ;		/* XXX this is going away... */		\
	testl	%eax, %eax ;			/* did we get it? */	\
	jz	3f ;				/* no */		\
;									\
	APIC_ITRACE(apic_itrace_gotisrlock, irq_num, APIC_ITRACE_GOTISRLOCK) ;\
	AVCPL_LOCK ;				/* MP-safe */		\
	testl	$IRQ_BIT(irq_num), _cpl ;				\
	jne	2f ;				/* this INT masked */	\
	AVCPL_UNLOCK ;							\
;									\
	incb	_intr_nesting_level ;					\
;	 								\
  /* entry point used by doreti_unpend for HWIs. */			\
__CONCAT(Xresume,irq_num): ;						\
	FAKE_MCOUNT(13*4(%esp)) ;		/* XXX avoid dbl cnt */ \
	lock ;	incl	_cnt+V_INTR ;		/* tally interrupts */	\
	movl	_intr_countp + (irq_num) * 4, %eax ;			\
	lock ;	incl	(%eax) ;					\
;									\
	AVCPL_LOCK ;				/* MP-safe */		\
	movl	_cpl, %eax ;						\
	pushl	%eax ;							\
	orl	_intr_mask + (irq_num) * 4, %eax ;			\
	movl	%eax, _cpl ;						\
	lock ;								\
	andl	$~IRQ_BIT(irq_num), _ipending ;				\
	AVCPL_UNLOCK ;							\
;									\
	pushl	_intr_unit + (irq_num) * 4 ;				\
	APIC_ITRACE(apic_itrace_enter2, irq_num, APIC_ITRACE_ENTER2) ;	\
	sti ;								\
	call	*_intr_handler + (irq_num) * 4 ;			\
	cli ;								\
	APIC_ITRACE(apic_itrace_leave, irq_num, APIC_ITRACE_LEAVE) ;	\
;									\
	lock ;	andl	$~IRQ_BIT(irq_num), iactive ;			\
	UNMASK_IRQ(irq_num) ;						\
	APIC_ITRACE(apic_itrace_unmask, irq_num, APIC_ITRACE_UNMASK) ;	\
	sti ;				/* doreti repeats cli/sti */	\
	MEXITCOUNT ;							\
	jmp	_doreti ;						\
;									\
	ALIGN_TEXT ;							\
1: ;						/* active  */		\
	APIC_ITRACE(apic_itrace_active, irq_num, APIC_ITRACE_ACTIVE) ;	\
	MASK_IRQ(irq_num) ;						\
	EOI_IRQ(irq_num) ;						\
	AVCPL_LOCK ;				/* MP-safe */		\
	lock ;								\
	orl	$IRQ_BIT(irq_num), _ipending ;				\
	AVCPL_UNLOCK ;							\
	lock ;								\
	btsl	$(irq_num), iactive ;		/* still active */	\
	jnc	0b ;				/* retry */		\
	POP_FRAME ;							\
	iret ;		/* XXX:	 iactive bit might be 0 now */		\
	ALIGN_TEXT ;							\
2: ;				/* masked by cpl, leave iactive set */	\
	APIC_ITRACE(apic_itrace_masked, irq_num, APIC_ITRACE_MASKED) ;	\
	lock ;								\
	orl	$IRQ_BIT(irq_num), _ipending ;				\
	AVCPL_UNLOCK ;							\
	ISR_RELLOCK ;		/* XXX this is going away... */		\
	POP_FRAME ;							\
	iret ;								\
	ALIGN_TEXT ;							\
3: ; 			/* other cpu has isr lock */			\
	APIC_ITRACE(apic_itrace_noisrlock, irq_num, APIC_ITRACE_NOISRLOCK) ;\
	AVCPL_LOCK ;				/* MP-safe */		\
	lock ;								\
	orl	$IRQ_BIT(irq_num), _ipending ;				\
	testl	$IRQ_BIT(irq_num), _cpl ;				\
	jne	4f ;				/* this INT masked */	\
	AVCPL_UNLOCK ;							\
	call	forward_irq ;	 /* forward irq to lock holder */	\
	POP_FRAME ;	 			/* and return */	\
	iret ;								\
	ALIGN_TEXT ;							\
4: ;	 					/* blocked */		\
	APIC_ITRACE(apic_itrace_masked2, irq_num, APIC_ITRACE_MASKED2) ;\
	AVCPL_UNLOCK ;							\
	POP_FRAME ;	 			/* and return */	\
	iret

#endif /* CPL_AND_CML */


/*
 * Handle "spurious INTerrupts".
 * Notes:
 *  This is different from the "spurious INTerrupt" generated by an
 *   8259 PIC for missing INTs.  See the APIC documentation for details.
 *  This routine should NOT do an 'EOI' cycle.
 */
	.text
	SUPERALIGN_TEXT
	.globl _Xspuriousint
_Xspuriousint:

	/* No EOI cycle used here */

	iret


/*
 * Handle TLB shootdowns.
 */
	.text
	SUPERALIGN_TEXT
	.globl	_Xinvltlb
_Xinvltlb:
	pushl	%eax

#ifdef COUNT_XINVLTLB_HITS
	pushl	%fs
	movl	$KPSEL, %eax
	movl	%ax, %fs
	movl	_cpuid, %eax
	popl	%fs
	ss
	incl	_xhits(,%eax,4)
#endif /* COUNT_XINVLTLB_HITS */

	movl	%cr3, %eax		/* invalidate the TLB */
	movl	%eax, %cr3

	ss				/* stack segment, avoid %ds load */
	movl	$0, lapic_eoi		/* End Of Interrupt to APIC */

	popl	%eax
	iret


#ifdef BETTER_CLOCK

/*
 * Executed by a CPU when it receives an Xcpucheckstate IPI from another CPU:
 *
 *  - Stores current cpu state in checkstate_cpustate[cpuid]
 *      0 == user, 1 == sys, 2 == intr
 *  - Stores current process in checkstate_curproc[cpuid]
 *
 *  - Signals its receipt by setting bit cpuid in checkstate_probed_cpus.
 *
 * stack: 0->fs, 4->ds, 8->ebx, 12->eax, 16->eip, 20->cs, 24->eflags
 */

	.text
	SUPERALIGN_TEXT
	.globl _Xcpucheckstate
	.globl _checkstate_cpustate
	.globl _checkstate_curproc
	.globl _checkstate_pc
_Xcpucheckstate:
	pushl	%eax
	pushl	%ebx
	pushl	%ds			/* save current data segment */
	pushl	%fs

	movl	$KDSEL, %eax
	movl	%ax, %ds		/* use KERNEL data segment */
	movl	$KPSEL, %eax
	movl	%ax, %fs

	movl	$0, lapic_eoi		/* End Of Interrupt to APIC */

	movl	$0, %ebx
	movl	20(%esp), %eax
	andl	$3, %eax
	cmpl	$3, %eax
	je	1f
	testl	$PSL_VM, 24(%esp)
	jne	1f
	incl	%ebx			/* system or interrupt */
#ifdef CPL_AND_CML
	cmpl	$0, _inside_intr
	je	1f
	incl	%ebx			/* interrupt */
#endif
1:
	movl	_cpuid, %eax
	movl	%ebx, _checkstate_cpustate(,%eax,4)
	movl	_curproc, %ebx
	movl	%ebx, _checkstate_curproc(,%eax,4)
	movl	16(%esp), %ebx
	movl	%ebx, _checkstate_pc(,%eax,4)

	lock				/* checkstate_probed_cpus |= (1<<id) */
	btsl	%eax, _checkstate_probed_cpus

	popl	%fs
	popl	%ds			/* restore previous data segment */
	popl	%ebx
	popl	%eax
	iret

#endif /* BETTER_CLOCK */

/*
 * Executed by a CPU when it receives an Xcpuast IPI from another CPU:
 *
 *  - Signals its receipt by clearing bit cpuid in checkstate_need_ast.
 *
 *  - We need a better method of triggering asts on other cpus.
 */

	.text
	SUPERALIGN_TEXT
	.globl _Xcpuast
_Xcpuast:
	PUSH_FRAME
	movl	$KDSEL, %eax
	movl	%ax, %ds		/* use KERNEL data segment */
	movl	%ax, %es
	movl	$KPSEL, %eax
	movl	%ax, %fs

	movl	_cpuid, %eax
	lock				/* checkstate_need_ast &= ~(1<<id) */
	btrl	%eax, _checkstate_need_ast
	movl	$0, lapic_eoi		/* End Of Interrupt to APIC */

	lock
	btsl	%eax, _checkstate_pending_ast
	jc	1f

	FAKE_MCOUNT(13*4(%esp))

	/*
	 * Giant locks do not come cheap.
	 * A lot of cycles are going to be wasted here.
	 */
	call	_get_isrlock

	AVCPL_LOCK
#ifdef CPL_AND_CML
	movl	_cml, %eax
#else
	movl	_cpl, %eax
#endif
	pushl	%eax
	movl	$1, _astpending		/* XXX */
	AVCPL_UNLOCK
	lock
	incb	_intr_nesting_level
	sti

	pushl	$0

	movl	_cpuid, %eax
	lock
	btrl	%eax, _checkstate_pending_ast
	lock
	btrl	%eax, CNAME(resched_cpus)
	jnc	2f
	movl	$1, CNAME(want_resched)
	lock
	incl	CNAME(want_resched_cnt)
2:
	lock
	incl	CNAME(cpuast_cnt)
	MEXITCOUNT
	jmp	_doreti
1:
	/* We are already in the process of delivering an ast for this CPU */
	POP_FRAME
	iret


/*
 * Executed by a CPU when it receives an XFORWARD_IRQ IPI.
 */

	.text
	SUPERALIGN_TEXT
	.globl _Xforward_irq
_Xforward_irq:
	PUSH_FRAME
	movl	$KDSEL, %eax
	movl	%ax, %ds		/* use KERNEL data segment */
	movl	%ax, %es
	movl	$KPSEL, %eax
	movl	%ax, %fs

	movl	$0, lapic_eoi		/* End Of Interrupt to APIC */

	FAKE_MCOUNT(13*4(%esp))

	ISR_TRYLOCK
	testl	%eax,%eax		/* Did we get the lock? */
	jz	1f			/* No */

	lock
	incl	CNAME(forward_irq_hitcnt)
	cmpb	$4, _intr_nesting_level
	jae	2f

	AVCPL_LOCK
#ifdef CPL_AND_CML
	movl	_cml, %eax
#else
	movl	_cpl, %eax
#endif
	pushl	%eax
	AVCPL_UNLOCK
	lock
	incb	_intr_nesting_level
	sti

	pushl	$0

	MEXITCOUNT
	jmp	_doreti			/* Handle forwarded interrupt */
1:
	lock
	incl	CNAME(forward_irq_misscnt)
	call	forward_irq	/* Oops, we've lost the isr lock */
	MEXITCOUNT
	POP_FRAME
	iret
2:
	lock
	incl	CNAME(forward_irq_toodeepcnt)
3:
	ISR_RELLOCK
	MEXITCOUNT
	POP_FRAME
	iret

/*
 * forward_irq: hand a pending interrupt to the CPU holding the giant ISR
 * lock (CPU 0 if the lock is free) by sending it an XFORWARD_IRQ IPI.
 */
forward_irq:
	MCOUNT
	cmpl	$0,_invltlb_ok
	jz	4f

	cmpl	$0, CNAME(forward_irq_enabled)
	jz	4f

	movl	_mp_lock,%eax
	cmpl	$FREE_LOCK,%eax
	jne	1f
	movl	$0, %eax		/* Pick CPU #0 if no one has the lock */
1:
	shrl	$24,%eax
	movl	_cpu_num_to_apic_id(,%eax,4),%ecx
	shll	$24,%ecx
	movl	lapic_icr_hi, %eax
	andl	$~APIC_ID_MASK, %eax
	orl	%ecx, %eax
	movl	%eax, lapic_icr_hi

2:
	movl	lapic_icr_lo, %eax
	andl	$APIC_DELSTAT_MASK,%eax
	jnz	2b
	movl	lapic_icr_lo, %eax
	andl	$APIC_RESV2_MASK, %eax
	orl	$(APIC_DEST_DESTFLD|APIC_DELMODE_FIXED|XFORWARD_IRQ_OFFSET), %eax
	movl	%eax, lapic_icr_lo
3:
	movl	lapic_icr_lo, %eax
	andl	$APIC_DELSTAT_MASK,%eax
	jnz	3b
4:
	ret
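
/*
 * The ICR writes above follow the usual local APIC sequence: the target
 * APIC ID goes into the high ICR dword, then we spin until the delivery
 * status bit in the low dword is idle, write the low dword with fixed
 * delivery of the XFORWARD_IRQ vector, and spin again until delivery
 * completes.
 */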

/*
 * Executed by a CPU when it receives an Xcpustop IPI from another CPU:
 *
 *  - Signals its receipt.
 *  - Waits for permission to restart.
 *  - Signals its restart.
 */

	.text
	SUPERALIGN_TEXT
	.globl _Xcpustop
_Xcpustop:
	pushl	%ebp
	movl	%esp, %ebp
	pushl	%eax
	pushl	%ecx
	pushl	%edx
	pushl	%ds			/* save current data segment */
	pushl	%fs

	movl	$KDSEL, %eax
	movl	%ax, %ds		/* use KERNEL data segment */
	movl	$KPSEL, %eax
	movl	%ax, %fs

	movl	$0, lapic_eoi		/* End Of Interrupt to APIC */

	movl	_cpuid, %eax
	imull	$PCB_SIZE, %eax
	leal	CNAME(stoppcbs)(%eax), %eax
	pushl	%eax
	call	CNAME(savectx)		/* Save process context */
	addl	$4, %esp


	movl	_cpuid, %eax

	lock
	btsl	%eax, _stopped_cpus	/* stopped_cpus |= (1<<id) */
1:
	btl	%eax, _started_cpus	/* while (!(started_cpus & (1<<id))) */
	jnc	1b

	lock
	btrl	%eax, _started_cpus	/* started_cpus &= ~(1<<id) */
	lock
	btrl	%eax, _stopped_cpus	/* stopped_cpus &= ~(1<<id) */

	test	%eax, %eax
	jnz	2f

	movl	CNAME(cpustop_restartfunc), %eax
	test	%eax, %eax
	jz	2f
	movl	$0, CNAME(cpustop_restartfunc)	/* One-shot */

	call	*%eax
2:
	popl	%fs
	popl	%ds			/* restore previous data segment */
	popl	%edx
	popl	%ecx
	popl	%eax
	movl	%ebp, %esp
	popl	%ebp
	iret
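
/*
 * stop_cpus()/restart_cpus() handshake: each stopped CPU saves its context
 * in stoppcbs[cpuid], sets its bit in _stopped_cpus, and spins until the
 * matching bit appears in _started_cpus; CPU 0 additionally runs the
 * one-shot cpustop_restartfunc hook, if set, before resuming.
 */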


MCOUNT_LABEL(bintr)
	FAST_INTR(0,fastintr0)
	FAST_INTR(1,fastintr1)
	FAST_INTR(2,fastintr2)
	FAST_INTR(3,fastintr3)
	FAST_INTR(4,fastintr4)
	FAST_INTR(5,fastintr5)
	FAST_INTR(6,fastintr6)
	FAST_INTR(7,fastintr7)
	FAST_INTR(8,fastintr8)
	FAST_INTR(9,fastintr9)
	FAST_INTR(10,fastintr10)
	FAST_INTR(11,fastintr11)
	FAST_INTR(12,fastintr12)
	FAST_INTR(13,fastintr13)
	FAST_INTR(14,fastintr14)
	FAST_INTR(15,fastintr15)
	FAST_INTR(16,fastintr16)
	FAST_INTR(17,fastintr17)
	FAST_INTR(18,fastintr18)
	FAST_INTR(19,fastintr19)
	FAST_INTR(20,fastintr20)
	FAST_INTR(21,fastintr21)
	FAST_INTR(22,fastintr22)
	FAST_INTR(23,fastintr23)
#define	CLKINTR_PENDING	movl $1,CNAME(clkintr_pending)
	INTR(0,intr0, CLKINTR_PENDING)
	INTR(1,intr1,)
	INTR(2,intr2,)
	INTR(3,intr3,)
	INTR(4,intr4,)
	INTR(5,intr5,)
	INTR(6,intr6,)
	INTR(7,intr7,)
	INTR(8,intr8,)
	INTR(9,intr9,)
	INTR(10,intr10,)
	INTR(11,intr11,)
	INTR(12,intr12,)
	INTR(13,intr13,)
	INTR(14,intr14,)
	INTR(15,intr15,)
	INTR(16,intr16,)
	INTR(17,intr17,)
	INTR(18,intr18,)
	INTR(19,intr19,)
	INTR(20,intr20,)
	INTR(21,intr21,)
	INTR(22,intr22,)
	INTR(23,intr23,)
MCOUNT_LABEL(eintr)

	.data
/*
 * Addresses of interrupt handlers.
 *  XresumeNN: Resumption addresses for HWIs.
 */
	.globl _ihandlers
_ihandlers:
/*
 * used by:
 *  ipl.s:	doreti_unpend
 */
	.long	Xresume0,  Xresume1,  Xresume2,  Xresume3
	.long	Xresume4,  Xresume5,  Xresume6,  Xresume7
	.long	Xresume8,  Xresume9,  Xresume10, Xresume11
	.long	Xresume12, Xresume13, Xresume14, Xresume15
	.long	Xresume16, Xresume17, Xresume18, Xresume19
	.long	Xresume20, Xresume21, Xresume22, Xresume23
/*
 * used by:
 *  ipl.s:	doreti_unpend
 *  apic_ipl.s:	splz_unpend
 */
	.long	_swi_null, swi_net, _swi_null, _swi_null
	.long	_swi_vm, _swi_null, _softclock, _swi_null

imasks:				/* masks for interrupt handlers */
	.space	NHWI*4		/* padding; HWI masks are elsewhere */

	.long	SWI_TTY_MASK, SWI_NET_MASK, SWI_CAMNET_MASK, SWI_CAMBIO_MASK
	.long	SWI_VM_MASK, 0, SWI_CLOCK_MASK, 0

/* active flag for lazy masking */
iactive:
	.long	0

#ifdef COUNT_XINVLTLB_HITS
	.globl	_xhits
_xhits:
	.space	(NCPU * 4), 0
#endif /* COUNT_XINVLTLB_HITS */

/* variables used by stop_cpus()/restart_cpus()/Xcpustop */
	.globl _stopped_cpus, _started_cpus
_stopped_cpus:
	.long	0
_started_cpus:
	.long	0

#ifdef BETTER_CLOCK
	.globl _checkstate_probed_cpus
_checkstate_probed_cpus:
	.long	0
#endif /* BETTER_CLOCK */
	.globl _checkstate_need_ast
_checkstate_need_ast:
	.long	0
_checkstate_pending_ast:
	.long	0
	.globl CNAME(forward_irq_misscnt)
	.globl CNAME(forward_irq_toodeepcnt)
	.globl CNAME(forward_irq_hitcnt)
	.globl CNAME(resched_cpus)
	.globl CNAME(want_resched_cnt)
	.globl CNAME(cpuast_cnt)
	.globl CNAME(cpustop_restartfunc)
CNAME(forward_irq_misscnt):
	.long 0
CNAME(forward_irq_hitcnt):
	.long 0
CNAME(forward_irq_toodeepcnt):
	.long 0
CNAME(resched_cpus):
	.long 0
CNAME(want_resched_cnt):
	.long 0
CNAME(cpuast_cnt):
	.long 0
CNAME(cpustop_restartfunc):
	.long 0


	.globl	_apic_pin_trigger
_apic_pin_trigger:
	.long	0

	.text