apic_vector.s revision 38888
/*
 *	from: vector.s, 386BSD 0.1 unknown origin
 *	$Id: apic_vector.s,v 1.33 1998/09/04 23:03:04 luoqi Exp $
 */


#include <machine/apic.h>
#include <machine/smp.h>

#include "i386/isa/intr_machdep.h"

#ifdef FAST_SIMPLELOCK

#define GET_FAST_INTR_LOCK						\
	pushl	$_fast_intr_lock ;		/* address of lock */	\
	call	_s_lock ;			/* MP-safe */		\
	addl	$4,%esp

#define REL_FAST_INTR_LOCK						\
	pushl	$_fast_intr_lock ;		/* address of lock */	\
	call	_s_unlock ;			/* MP-safe */		\
	addl	$4,%esp

#else /* FAST_SIMPLELOCK */

#define GET_FAST_INTR_LOCK						\
	call	_get_isrlock

#define REL_FAST_INTR_LOCK						\
	pushl	$_mp_lock ;	/* GIANT_LOCK */			\
	call	_MPrellock ;						\
	add	$4, %esp

#endif /* FAST_SIMPLELOCK */

/* convert an absolute IRQ# into a bitmask */
#define IRQ_BIT(irq_num)	(1 << (irq_num))
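/* e.g. IRQ_BIT(9) == 0x00000200, the bit tested in _apic_imen, _cpl, _ipending */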

/* make an index into the IO APIC from the IRQ# */
#define REDTBL_IDX(irq_num)	(0x10 + ((irq_num) * 2))
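/*
 * Redirection table entries are 64 bits wide (two 32-bit registers) and
 * the table starts at I/O APIC register 0x10, so e.g. REDTBL_IDX(2) == 0x14
 * selects the low half of pin 2's entry.
 */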


/*
 * Macros for interrupt entry, call to handler, and exit.
 */

#ifdef FAST_WITHOUTCPL

/*
 * Fast interrupts: the handler is called directly from the entry point,
 * with interrupts disabled and without any cpl manipulation.
 */
#define	FAST_INTR(irq_num, vec_name)					\
	.text ;								\
	SUPERALIGN_TEXT ;						\
IDTVEC(vec_name) ;							\
	pushl	%eax ;		/* save only call-used registers */	\
	pushl	%ecx ;							\
	pushl	%edx ;							\
	pushl	%ds ;							\
	MAYBE_PUSHL_ES ;						\
	movl	$KDSEL,%eax ;						\
	movl	%ax,%ds ;						\
	MAYBE_MOVW_AX_ES ;						\
	FAKE_MCOUNT((4+ACTUALLY_PUSHED)*4(%esp)) ;			\
	pushl	_intr_unit + (irq_num) * 4 ;				\
	GET_FAST_INTR_LOCK ;						\
	call	*_intr_handler + (irq_num) * 4 ; /* do the work ASAP */ \
	REL_FAST_INTR_LOCK ;						\
	addl	$4, %esp ;						\
	movl	$0, lapic_eoi ;						\
	lock ;								\
	incl	_cnt+V_INTR ;	/* book-keeping can wait */		\
	movl	_intr_countp + (irq_num) * 4, %eax ;			\
	lock ;								\
	incl	(%eax) ;						\
	MEXITCOUNT ;							\
	MAYBE_POPL_ES ;							\
	popl	%ds ;							\
	popl	%edx ;							\
	popl	%ecx ;							\
	popl	%eax ;							\
	iret

#else /* FAST_WITHOUTCPL */

#define	FAST_INTR(irq_num, vec_name)					\
	.text ;								\
	SUPERALIGN_TEXT ;						\
IDTVEC(vec_name) ;							\
	pushl	%eax ;		/* save only call-used registers */	\
	pushl	%ecx ;							\
	pushl	%edx ;							\
	pushl	%ds ;							\
	MAYBE_PUSHL_ES ;						\
	movl	$KDSEL, %eax ;						\
	movl	%ax, %ds ;						\
	MAYBE_MOVW_AX_ES ;						\
	FAKE_MCOUNT((4+ACTUALLY_PUSHED)*4(%esp)) ;			\
	GET_FAST_INTR_LOCK ;						\
	pushl	_intr_unit + (irq_num) * 4 ;				\
	call	*_intr_handler + (irq_num) * 4 ; /* do the work ASAP */ \
	addl	$4, %esp ;						\
	movl	$0, lapic_eoi ;						\
	lock ;								\
	incl	_cnt+V_INTR ;	/* book-keeping can wait */		\
	movl	_intr_countp + (irq_num) * 4,%eax ;			\
	lock ;								\
	incl	(%eax) ;						\
	movl	_cpl, %eax ;	/* unmasking pending HWIs or SWIs? */	\
	notl	%eax ;							\
	andl	_ipending, %eax ;					\
	jne	2f ; 		/* yes, maybe handle them */		\
1: ;									\
	MEXITCOUNT ;							\
	REL_FAST_INTR_LOCK ;						\
	MAYBE_POPL_ES ;							\
	popl	%ds ;							\
	popl	%edx ;							\
	popl	%ecx ;							\
	popl	%eax ;							\
	iret ;								\
;									\
	ALIGN_TEXT ;							\
2: ;									\
	cmpb	$3, _intr_nesting_level ;	/* enough stack? */	\
	jae	1b ;		/* no, return */			\
	movl	_cpl, %eax ;						\
	/* XXX next line is probably unnecessary now. */		\
	movl	$HWI_MASK|SWI_MASK, _cpl ;	/* limit nesting ... */	\
	lock ;								\
	incb	_intr_nesting_level ;	/* ... really limit it ... */	\
	sti ;			/* to do this as early as possible */	\
	MAYBE_POPL_ES ;		/* discard most of thin frame ... */	\
	popl	%ecx ;		/* ... original %ds ... */		\
	popl	%edx ;							\
	xchgl	%eax, 4(%esp) ;	/* orig %eax; save cpl */		\
	pushal ;		/* build fat frame (grrr) ... */	\
	pushl	%ecx ;		/* ... actually %ds ... */		\
	pushl	%es ;							\
	movl	$KDSEL, %eax ;						\
	movl	%ax, %es ;						\
	movl	(2+8+0)*4(%esp), %ecx ;	/* %ecx from thin frame ... */	\
	movl	%ecx, (2+6)*4(%esp) ;	/* ... to fat frame ... */	\
	movl	(2+8+1)*4(%esp), %eax ;	/* ... cpl from thin frame */	\
	pushl	%eax ;							\
	subl	$4, %esp ;	/* junk for unit number */		\
	MEXITCOUNT ;							\
	jmp	_doreti

#endif /* FAST_WITHOUTCPL */
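
/*
 * FAST_INTR is instantiated once per APIC pin near the end of this file;
 * e.g. FAST_INTR(0, fastintr0) emits (via IDTVEC) the _Xfastintr0 entry
 * point that the IDT vectors to when IRQ 0 uses a fast handler.
 */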


/*
 * Slow-path interrupt entry/exit: build and tear down a full trap frame.
 */
#define PUSH_FRAME							\
	pushl	$0 ;		/* dummy error code */			\
	pushl	$0 ;		/* dummy trap type */			\
	pushal ;							\
	pushl	%ds ;		/* save data and extra segments ... */	\
	pushl	%es

#define POP_FRAME							\
	popl	%es ;							\
	popl	%ds ;							\
	popal ;								\
	addl	$4+4,%esp
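
/*
 * After PUSH_FRAME the stack holds, from the top down:
 *	%es, %ds, %edi, %esi, %ebp, %esp, %ebx, %edx, %ecx, %eax,
 *	trap type (0), error code (0), %eip, %cs, %eflags
 * i.e. the same layout a trap gate entry would have built.
 */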

#define IOAPICADDR(irq_num) CNAME(int_to_apicintpin) + 16 * (irq_num) + 8
#define REDIRIDX(irq_num) CNAME(int_to_apicintpin) + 16 * (irq_num) + 12

#define MASK_IRQ(irq_num)						\
	IMASK_LOCK ;				/* into critical reg */	\
	testl	$IRQ_BIT(irq_num), _apic_imen ;				\
	jne	7f ;			/* masked, don't mask */	\
	orl	$IRQ_BIT(irq_num), _apic_imen ;	/* set the mask bit */	\
	movl	IOAPICADDR(irq_num), %ecx ;	/* ioapic addr */	\
	movl	REDIRIDX(irq_num), %eax ;	/* get the index */	\
	movl	%eax, (%ecx) ;			/* write the index */	\
	movl	IOAPIC_WINDOW(%ecx), %eax ;	/* current value */	\
	orl	$IOART_INTMASK, %eax ;		/* set the mask */	\
	movl	%eax, IOAPIC_WINDOW(%ecx) ;	/* new value */		\
7: ;						/* already masked */	\
	IMASK_UNLOCK
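
/*
 * The I/O APIC is addressed indirectly: the register index is written
 * to the chip's base address and the data is transferred through the
 * IOAPIC_WINDOW register that follows it, hence the index/window pairs
 * in MASK_IRQ and UNMASK_IRQ.
 */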
/*
 * Test to see whether we are handling an edge or level triggered INT.
 *  Level-triggered INTs must still be masked as we don't clear the source,
 *  and the EOI cycle would cause redundant INTs to occur.
 */
#define MASK_LEVEL_IRQ(irq_num)						\
	testl	$IRQ_BIT(irq_num), _apic_pin_trigger ;			\
	jz	9f ;				/* edge, don't mask */	\
	MASK_IRQ(irq_num) ;						\
9:
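
/*
 * For an edge-triggered pin the macro above is a no-op; only pins marked
 * level-triggered in _apic_pin_trigger actually get masked.
 */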


#ifdef APIC_INTR_REORDER
#define EOI_IRQ(irq_num)						\
	movl	_apic_isrbit_location + 8 * (irq_num), %eax ;		\
	movl	(%eax), %eax ;						\
	testl	_apic_isrbit_location + 4 + 8 * (irq_num), %eax ;	\
	jz	9f ;				/* not active */	\
	movl	$0, lapic_eoi ;						\
	APIC_ITRACE(apic_itrace_eoi, irq_num, APIC_ITRACE_EOI) ;	\
9:

#else
#define EOI_IRQ(irq_num)						\
	testl	$IRQ_BIT(irq_num), lapic_isr1 ;				\
	jz	9f ;				/* not active */	\
	movl	$0, lapic_eoi ;						\
	APIC_ITRACE(apic_itrace_eoi, irq_num, APIC_ITRACE_EOI) ;	\
9:
#endif


/*
 * Test to see if the source is currently masked, clear if so.
 */
#define UNMASK_IRQ(irq_num)						\
	IMASK_LOCK ;				/* into critical reg */	\
	testl	$IRQ_BIT(irq_num), _apic_imen ;				\
	je	7f ;			/* bit clear, not masked */	\
	andl	$~IRQ_BIT(irq_num), _apic_imen ;/* clear mask bit */	\
	movl	IOAPICADDR(irq_num),%ecx ;	/* ioapic addr */	\
	movl	REDIRIDX(irq_num), %eax ;	/* get the index */	\
	movl	%eax,(%ecx) ;			/* write the index */	\
	movl	IOAPIC_WINDOW(%ecx),%eax ;	/* current value */	\
	andl	$~IOART_INTMASK,%eax ;		/* clear the mask */	\
	movl	%eax,IOAPIC_WINDOW(%ecx) ;	/* new value */		\
7: ;									\
	IMASK_UNLOCK

#ifdef INTR_SIMPLELOCK
#define ENLOCK
#define DELOCK
#define LATELOCK call	_get_isrlock
#else
#define ENLOCK \
	ISR_TRYLOCK ;		/* XXX this is going away... */		\
	testl	%eax, %eax ;			/* did we get it? */	\
	jz	3f
#define DELOCK	ISR_RELLOCK
#define LATELOCK
#endif

#ifdef APIC_INTR_DIAGNOSTIC
#ifdef APIC_INTR_DIAGNOSTIC_IRQ
log_intr_event:
	pushf
	cli
	pushl	$CNAME(apic_itrace_debuglock)
	call	_s_lock_np
	addl	$4, %esp
	movl	CNAME(apic_itrace_debugbuffer_idx), %ecx
	andl	$32767, %ecx
	movl	_cpuid, %eax
	shll	$8,	%eax
	orl	8(%esp), %eax
	movw	%ax,	CNAME(apic_itrace_debugbuffer)(,%ecx,2)
	incl	%ecx
	andl	$32767, %ecx
	movl	%ecx,	CNAME(apic_itrace_debugbuffer_idx)
	pushl	$CNAME(apic_itrace_debuglock)
	call	_s_unlock_np
	addl	$4, %esp
	popf
	ret


#define APIC_ITRACE(name, irq_num, id)					\
	lock ;					/* MP-safe */		\
	incl	CNAME(name) + (irq_num) * 4 ;				\
	pushl	%eax ;							\
	pushl	%ecx ;							\
	pushl	%edx ;							\
	movl	$(irq_num), %eax ;					\
	cmpl	$APIC_INTR_DIAGNOSTIC_IRQ, %eax ;			\
	jne	7f ;							\
	pushl	$id ;							\
	call	log_intr_event ;					\
	addl	$4, %esp ;						\
7: ;									\
	popl	%edx ;							\
	popl	%ecx ;							\
	popl	%eax
#else
#define APIC_ITRACE(name, irq_num, id)					\
	lock ;					/* MP-safe */		\
	incl	CNAME(name) + (irq_num) * 4
#endif

#define APIC_ITRACE_ENTER 1
#define APIC_ITRACE_EOI 2
#define APIC_ITRACE_TRYISRLOCK 3
#define APIC_ITRACE_GOTISRLOCK 4
#define APIC_ITRACE_ENTER2 5
#define APIC_ITRACE_LEAVE 6
#define APIC_ITRACE_UNMASK 7
#define APIC_ITRACE_ACTIVE 8
#define APIC_ITRACE_MASKED 9
#define APIC_ITRACE_NOISRLOCK 10
#define APIC_ITRACE_MASKED2 11
#define APIC_ITRACE_SPLZ 12
#define APIC_ITRACE_DORETI 13

#else
#define APIC_ITRACE(name, irq_num, id)
#endif
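
/*
 * With APIC_INTR_DIAGNOSTIC undefined, APIC_ITRACE() expands to nothing,
 * so the trace points in the handlers below cost nothing in normal builds.
 */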

#ifdef CPL_AND_CML

#define	INTR(irq_num, vec_name)						\
	.text ;								\
	SUPERALIGN_TEXT ;						\
/* _XintrNN: entry point used by IDT/HWIs & splz_unpend via _vec[]. */	\
IDTVEC(vec_name) ;							\
	PUSH_FRAME ;							\
	movl	$KDSEL, %eax ;	/* reload with kernel's data segment */	\
	movl	%ax, %ds ;						\
	movl	%ax, %es ;						\
;									\
	APIC_ITRACE(apic_itrace_enter, irq_num, APIC_ITRACE_ENTER) ;	\
	lock ;					/* MP-safe */		\
	btsl	$(irq_num), iactive ;		/* lazy masking */	\
	jc	1f ;				/* already active */	\
;									\
	MASK_LEVEL_IRQ(irq_num) ;					\
	EOI_IRQ(irq_num) ;						\
0: ;									\
	APIC_ITRACE(apic_itrace_tryisrlock, irq_num, APIC_ITRACE_TRYISRLOCK) ;\
	ENLOCK ;							\
;									\
	APIC_ITRACE(apic_itrace_gotisrlock, irq_num, APIC_ITRACE_GOTISRLOCK) ;\
	AVCPL_LOCK ;				/* MP-safe */		\
	testl	$IRQ_BIT(irq_num), _cpl ;				\
	jne	2f ;				/* this INT masked */	\
	testl	$IRQ_BIT(irq_num), _cml ;				\
	jne	2f ;				/* this INT masked */	\
	orl	$IRQ_BIT(irq_num), _cil ;				\
	AVCPL_UNLOCK ;							\
;									\
	incb	_intr_nesting_level ;					\
;									\
  /* entry point used by doreti_unpend for HWIs. */			\
__CONCAT(Xresume,irq_num): ;						\
	FAKE_MCOUNT(12*4(%esp)) ;		/* XXX avoid dbl cnt */ \
	lock ;	incl	_cnt+V_INTR ;		/* tally interrupts */	\
	movl	_intr_countp + (irq_num) * 4, %eax ;			\
	lock ;	incl	(%eax) ;					\
;									\
	AVCPL_LOCK ;				/* MP-safe */		\
	movl	_cml, %eax ;						\
	pushl	%eax ;							\
	orl	_intr_mask + (irq_num) * 4, %eax ;			\
	movl	%eax, _cml ;						\
	AVCPL_UNLOCK ;							\
;									\
	pushl	_intr_unit + (irq_num) * 4 ;				\
	incl	_inside_intr ;						\
	APIC_ITRACE(apic_itrace_enter2, irq_num, APIC_ITRACE_ENTER2) ;	\
	sti ;								\
	call	*_intr_handler + (irq_num) * 4 ;			\
	cli ;								\
	APIC_ITRACE(apic_itrace_leave, irq_num, APIC_ITRACE_LEAVE) ;	\
	decl	_inside_intr ;						\
;									\
	lock ;	andl	$~IRQ_BIT(irq_num), iactive ;			\
	lock ;	andl	$~IRQ_BIT(irq_num), _cil ;			\
	UNMASK_IRQ(irq_num) ;						\
	APIC_ITRACE(apic_itrace_unmask, irq_num, APIC_ITRACE_UNMASK) ;	\
	sti ;				/* doreti repeats cli/sti */	\
	MEXITCOUNT ;							\
	LATELOCK ;							\
	jmp	_doreti ;						\
;									\
	ALIGN_TEXT ;							\
1: ;						/* active */		\
	APIC_ITRACE(apic_itrace_active, irq_num, APIC_ITRACE_ACTIVE) ;	\
	MASK_IRQ(irq_num) ;						\
	EOI_IRQ(irq_num) ;						\
	AVCPL_LOCK ;				/* MP-safe */		\
	orl	$IRQ_BIT(irq_num), _ipending ;				\
	AVCPL_UNLOCK ;							\
	lock ;								\
	btsl	$(irq_num), iactive ;		/* still active */	\
	jnc	0b ;				/* retry */		\
	POP_FRAME ;							\
	iret ;								\
;									\
	ALIGN_TEXT ;							\
2: ;						/* masked by cpl|cml */	\
	APIC_ITRACE(apic_itrace_masked, irq_num, APIC_ITRACE_MASKED) ;	\
	orl	$IRQ_BIT(irq_num), _ipending ;				\
	AVCPL_UNLOCK ;							\
	DELOCK ;		/* XXX this is going away... */		\
	POP_FRAME ;							\
	iret ;								\
	ALIGN_TEXT ;							\
3: ;			/* other cpu has isr lock */			\
	APIC_ITRACE(apic_itrace_noisrlock, irq_num, APIC_ITRACE_NOISRLOCK) ;\
	AVCPL_LOCK ;				/* MP-safe */		\
	orl	$IRQ_BIT(irq_num), _ipending ;				\
	testl	$IRQ_BIT(irq_num), _cpl ;				\
	jne	4f ;				/* this INT masked */	\
	testl	$IRQ_BIT(irq_num), _cml ;				\
	jne	4f ;				/* this INT masked */	\
	orl	$IRQ_BIT(irq_num), _cil ;				\
	AVCPL_UNLOCK ;							\
	call	forward_irq ;	/* forward irq to lock holder */	\
	POP_FRAME ;				/* and return */	\
	iret ;								\
	ALIGN_TEXT ;							\
4: ;						/* blocked */		\
	APIC_ITRACE(apic_itrace_masked2, irq_num, APIC_ITRACE_MASKED2) ;\
	AVCPL_UNLOCK ;							\
	POP_FRAME ;				/* and return */	\
	iret

#else /* CPL_AND_CML */


#define	INTR(irq_num, vec_name)						\
	.text ;								\
	SUPERALIGN_TEXT ;						\
/* _XintrNN: entry point used by IDT/HWIs & splz_unpend via _vec[]. */	\
IDTVEC(vec_name) ;							\
	PUSH_FRAME ;							\
	movl	$KDSEL, %eax ;	/* reload with kernel's data segment */	\
	movl	%ax, %ds ;						\
	movl	%ax, %es ;						\
;									\
	APIC_ITRACE(apic_itrace_enter, irq_num, APIC_ITRACE_ENTER) ;	\
	lock ;					/* MP-safe */		\
	btsl	$(irq_num), iactive ;		/* lazy masking */	\
	jc	1f ;				/* already active */	\
;									\
	MASK_LEVEL_IRQ(irq_num) ;					\
	EOI_IRQ(irq_num) ;						\
0: ;									\
	APIC_ITRACE(apic_itrace_tryisrlock, irq_num, APIC_ITRACE_TRYISRLOCK) ;\
	ISR_TRYLOCK ;		/* XXX this is going away... */		\
	testl	%eax, %eax ;			/* did we get it? */	\
	jz	3f ;				/* no */		\
;									\
	APIC_ITRACE(apic_itrace_gotisrlock, irq_num, APIC_ITRACE_GOTISRLOCK) ;\
	AVCPL_LOCK ;				/* MP-safe */		\
	testl	$IRQ_BIT(irq_num), _cpl ;				\
	jne	2f ;				/* this INT masked */	\
	AVCPL_UNLOCK ;							\
;									\
	incb	_intr_nesting_level ;					\
;									\
  /* entry point used by doreti_unpend for HWIs. */			\
__CONCAT(Xresume,irq_num): ;						\
	FAKE_MCOUNT(12*4(%esp)) ;		/* XXX avoid dbl cnt */ \
	lock ;	incl	_cnt+V_INTR ;		/* tally interrupts */	\
	movl	_intr_countp + (irq_num) * 4, %eax ;			\
	lock ;	incl	(%eax) ;					\
;									\
	AVCPL_LOCK ;				/* MP-safe */		\
	movl	_cpl, %eax ;						\
	pushl	%eax ;							\
	orl	_intr_mask + (irq_num) * 4, %eax ;			\
	movl	%eax, _cpl ;						\
	andl	$~IRQ_BIT(irq_num), _ipending ;				\
	AVCPL_UNLOCK ;							\
;									\
	pushl	_intr_unit + (irq_num) * 4 ;				\
	APIC_ITRACE(apic_itrace_enter2, irq_num, APIC_ITRACE_ENTER2) ;	\
	sti ;								\
	call	*_intr_handler + (irq_num) * 4 ;			\
	cli ;								\
	APIC_ITRACE(apic_itrace_leave, irq_num, APIC_ITRACE_LEAVE) ;	\
;									\
	lock ;	andl	$~IRQ_BIT(irq_num), iactive ;			\
	UNMASK_IRQ(irq_num) ;						\
	APIC_ITRACE(apic_itrace_unmask, irq_num, APIC_ITRACE_UNMASK) ;	\
	sti ;				/* doreti repeats cli/sti */	\
	MEXITCOUNT ;							\
	jmp	_doreti ;						\
;									\
	ALIGN_TEXT ;							\
1: ;						/* active */		\
	APIC_ITRACE(apic_itrace_active, irq_num, APIC_ITRACE_ACTIVE) ;	\
	MASK_IRQ(irq_num) ;						\
	EOI_IRQ(irq_num) ;						\
	AVCPL_LOCK ;				/* MP-safe */		\
	orl	$IRQ_BIT(irq_num), _ipending ;				\
	AVCPL_UNLOCK ;							\
	lock ;								\
	btsl	$(irq_num), iactive ;		/* still active */	\
	jnc	0b ;				/* retry */		\
	POP_FRAME ;							\
	iret ;		/* XXX:	 iactive bit might be 0 now */		\
	ALIGN_TEXT ;							\
2: ;				/* masked by cpl, leave iactive set */	\
	APIC_ITRACE(apic_itrace_masked, irq_num, APIC_ITRACE_MASKED) ;	\
	orl	$IRQ_BIT(irq_num), _ipending ;				\
	AVCPL_UNLOCK ;							\
	ISR_RELLOCK ;		/* XXX this is going away... */		\
	POP_FRAME ;							\
	iret ;								\
	ALIGN_TEXT ;							\
3: ;			/* other cpu has isr lock */			\
	APIC_ITRACE(apic_itrace_noisrlock, irq_num, APIC_ITRACE_NOISRLOCK) ;\
	AVCPL_LOCK ;				/* MP-safe */		\
	orl	$IRQ_BIT(irq_num), _ipending ;				\
	testl	$IRQ_BIT(irq_num), _cpl ;				\
	jne	4f ;				/* this INT masked */	\
	AVCPL_UNLOCK ;							\
	call	forward_irq ;	/* forward irq to lock holder */	\
	POP_FRAME ;				/* and return */	\
	iret ;								\
	ALIGN_TEXT ;							\
4: ;						/* blocked */		\
	APIC_ITRACE(apic_itrace_masked2, irq_num, APIC_ITRACE_MASKED2) ;\
	AVCPL_UNLOCK ;							\
	POP_FRAME ;				/* and return */	\
	iret

#endif /* CPL_AND_CML */
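
/*
 * Control flow of INTR() in brief: entry sets the IRQ's bit in iactive
 * (label 1: already active, so mask the pin and mark it pending), then
 * tries for the ISR lock (label 3: another cpu holds it, forward the IRQ),
 * then checks the cpl mask (label 2: masked, record it in _ipending), and
 * only then raises cpl, runs the handler with interrupts enabled, and
 * exits through _doreti.
 */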


/*
 * Handle "spurious INTerrupts".
 * Notes:
 *  This is different from the "spurious INTerrupt" generated by an
 *   8259 PIC for missing INTs.  See the APIC documentation for details.
 *  This routine should NOT do an 'EOI' cycle.
 */
	.text
	SUPERALIGN_TEXT
	.globl _Xspuriousint
_Xspuriousint:

	/* No EOI cycle used here */

	iret


/*
 * Handle TLB shootdowns.
 */
	.text
	SUPERALIGN_TEXT
	.globl	_Xinvltlb
_Xinvltlb:
	pushl	%eax

#ifdef COUNT_XINVLTLB_HITS
	ss
	movl	_cpuid, %eax
	ss
	incl	_xhits(,%eax,4)
#endif /* COUNT_XINVLTLB_HITS */

	movl	%cr3, %eax		/* invalidate the TLB */
	movl	%eax, %cr3

	ss				/* stack segment, avoid %ds load */
	movl	$0, lapic_eoi		/* End Of Interrupt to APIC */

	popl	%eax
	iret
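
/*
 * Reloading %cr3 with its own value flushes every non-global entry from
 * the TLB, which is what invalidates the stale mappings here.
 */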


#ifdef BETTER_CLOCK

/*
 * Executed by a CPU when it receives an Xcpucheckstate IPI from another CPU:
 *
 *  - Stores current cpu state in checkstate_cpustate[cpuid]
 *      0 == user, 1 == sys, 2 == intr
 *  - Stores current process in checkstate_curproc[cpuid]
 *
 *  - Signals its receipt by setting bit cpuid in checkstate_probed_cpus.
 *
 * stack: 0 -> ds, 4 -> ebx, 8 -> eax, 12 -> eip, 16 -> cs, 20 -> eflags
 */

	.text
	SUPERALIGN_TEXT
	.globl _Xcpucheckstate
	.globl _checkstate_cpustate
	.globl _checkstate_curproc
	.globl _checkstate_pc
_Xcpucheckstate:
	pushl	%eax
	pushl	%ebx
	pushl	%ds			/* save current data segment */

	movl	$KDSEL, %eax
	movl	%ax, %ds		/* use KERNEL data segment */

	movl	$0, lapic_eoi		/* End Of Interrupt to APIC */

	movl	$0, %ebx
	movl	16(%esp), %eax
	andl	$3, %eax
	cmpl	$3, %eax
	je	1f
#ifdef VM86
	testl	$PSL_VM, 20(%esp)
	jne	1f
#endif
	incl	%ebx			/* system or interrupt */
#ifdef CPL_AND_CML
	cmpl	$0, _inside_intr
	je	1f
	incl	%ebx			/* interrupt */
#endif
1:
	movl	_cpuid, %eax
	movl	%ebx, _checkstate_cpustate(,%eax,4)
	movl	_curproc, %ebx
	movl	%ebx, _checkstate_curproc(,%eax,4)
	movl	12(%esp), %ebx
	movl	%ebx, _checkstate_pc(,%eax,4)

	lock				/* checkstate_probed_cpus |= (1<<id) */
	btsl	%eax, _checkstate_probed_cpus

	popl	%ds			/* restore previous data segment */
	popl	%ebx
	popl	%eax
	iret

#endif /* BETTER_CLOCK */

/*
 * Executed by a CPU when it receives an Xcpuast IPI from another CPU:
 *
 *  - Signals its receipt by clearing bit cpuid in checkstate_need_ast.
 *
 *  - We need a better method of triggering asts on other cpus.
 */

	.text
	SUPERALIGN_TEXT
	.globl _Xcpuast
_Xcpuast:
	PUSH_FRAME
	movl	$KDSEL, %eax
	movl	%ax, %ds		/* use KERNEL data segment */
	movl	%ax, %es

	movl	_cpuid, %eax
	lock				/* checkstate_need_ast &= ~(1<<id) */
	btrl	%eax, _checkstate_need_ast
	movl	$0, lapic_eoi		/* End Of Interrupt to APIC */

	lock
	btsl	%eax, _checkstate_pending_ast
	jc	1f

	FAKE_MCOUNT(12*4(%esp))

	/*
	 * Giant locks do not come cheap.
	 * A lot of cycles are going to be wasted here.
	 */
	call	_get_isrlock

	AVCPL_LOCK
#ifdef CPL_AND_CML
	movl	_cml, %eax
#else
	movl	_cpl, %eax
#endif
	pushl	%eax
	orl	$SWI_AST_PENDING, _ipending
	AVCPL_UNLOCK
	lock
	incb	_intr_nesting_level
	sti

	pushl	$0

	movl	_cpuid, %eax
	lock
	btrl	%eax, _checkstate_pending_ast
	lock
	btrl	%eax, CNAME(resched_cpus)
	jz	2f
	movl	$1, CNAME(want_resched)
	lock
	incl	CNAME(want_resched_cnt)
2:
	lock
	incl	CNAME(cpuast_cnt)
	MEXITCOUNT
	jmp	_doreti
1:
	/* We are already in the process of delivering an ast for this CPU */
	POP_FRAME
	iret


/*
 * Executed by a CPU when it receives an XFORWARD_IRQ IPI.
 */

	.text
	SUPERALIGN_TEXT
	.globl _Xforward_irq
_Xforward_irq:
	PUSH_FRAME
	movl	$KDSEL, %eax
	movl	%ax, %ds		/* use KERNEL data segment */
	movl	%ax, %es

	movl	$0, lapic_eoi		/* End Of Interrupt to APIC */

	FAKE_MCOUNT(12*4(%esp))

	ISR_TRYLOCK
	testl	%eax,%eax		/* Did we get the lock ? */
	jz	1f			/* No */

	lock
	incl	CNAME(forward_irq_hitcnt)
	cmpb	$4, _intr_nesting_level
	jae	2f

	AVCPL_LOCK
#ifdef CPL_AND_CML
	movl	_cml, %eax
#else
	movl	_cpl, %eax
#endif
	pushl	%eax
	AVCPL_UNLOCK
	lock
	incb	_intr_nesting_level
	sti

	pushl	$0

	MEXITCOUNT
	jmp	_doreti			/* Handle forwarded interrupt */
1:
	lock
	incl	CNAME(forward_irq_misscnt)
	call	forward_irq		/* Oops, we've lost the isr lock */
	MEXITCOUNT
	POP_FRAME
	iret
2:
	lock
	incl	CNAME(forward_irq_toodeepcnt)
3:
	ISR_RELLOCK
	MEXITCOUNT
	POP_FRAME
	iret

/*
 * Send an XFORWARD_IRQ IPI to the CPU that currently holds the MP lock,
 * so the pending interrupt can be serviced there.
 */
forward_irq:
	MCOUNT
	cmpl	$0,_invltlb_ok
	jz	4f

	cmpl	$0, CNAME(forward_irq_enabled)
	jz	4f

	movl	_mp_lock,%eax
	cmpl	$FREE_LOCK,%eax
	jne	1f
	movl	$0, %eax		/* Pick CPU #0 if no one has lock */
1:
	shrl	$24,%eax
	movl	_cpu_num_to_apic_id(,%eax,4),%ecx
	shll	$24,%ecx
	movl	lapic_icr_hi, %eax
	andl	$~APIC_ID_MASK, %eax
	orl	%ecx, %eax
	movl	%eax, lapic_icr_hi

2:
	movl	lapic_icr_lo, %eax
	andl	$APIC_DELSTAT_MASK,%eax
	jnz	2b
	movl	lapic_icr_lo, %eax
	andl	$APIC_RESV2_MASK, %eax
	orl	$(APIC_DEST_DESTFLD|APIC_DELMODE_FIXED|XFORWARD_IRQ_OFFSET), %eax
	movl	%eax, lapic_icr_lo
3:
	movl	lapic_icr_lo, %eax
	andl	$APIC_DELSTAT_MASK,%eax
	jnz	3b
4:
	ret
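
/*
 * ICR protocol used above: the target's APIC ID is written into the
 * destination field of lapic_icr_hi, then writing the fixed-delivery
 * vector (XFORWARD_IRQ_OFFSET) to lapic_icr_lo sends the IPI; the spins
 * on APIC_DELSTAT_MASK wait for the ICR to be idle before and after.
 */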

/*
 * Executed by a CPU when it receives an Xcpustop IPI from another CPU:
 *
 *  - Signals its receipt.
 *  - Waits for permission to restart.
 *  - Signals its restart.
 */

	.text
	SUPERALIGN_TEXT
	.globl _Xcpustop
_Xcpustop:
	pushl	%ebp
	movl	%esp, %ebp
	pushl	%eax
	pushl	%ecx
	pushl	%edx
	pushl	%ds			/* save current data segment */
	pushl	%es

	movl	$KDSEL, %eax
	movl	%ax, %ds		/* use KERNEL data segment */

	movl	$0, lapic_eoi		/* End Of Interrupt to APIC */

	movl	_cpuid, %eax
	imull	$PCB_SIZE, %eax
	leal	CNAME(stoppcbs)(%eax), %eax
	pushl	%eax
	call	CNAME(savectx)		/* Save process context */
	addl	$4, %esp


	movl	_cpuid, %eax

	lock
	btsl	%eax, _stopped_cpus	/* stopped_cpus |= (1<<id) */
1:
	btl	%eax, _started_cpus	/* while (!(started_cpus & (1<<id))) */
	jnc	1b

	lock
	btrl	%eax, _started_cpus	/* started_cpus &= ~(1<<id) */
	lock
	btrl	%eax, _stopped_cpus	/* stopped_cpus &= ~(1<<id) */

	test	%eax, %eax
	jnz	2f

	movl	CNAME(cpustop_restartfunc), %eax
	test	%eax, %eax
	jz	2f
	movl	$0, CNAME(cpustop_restartfunc)	/* One-shot */

	call	*%eax
2:
	popl	%es
	popl	%ds			/* restore previous data segment */
	popl	%edx
	popl	%ecx
	popl	%eax
	movl	%ebp, %esp
	popl	%ebp
	iret


MCOUNT_LABEL(bintr)
	FAST_INTR(0,fastintr0)
	FAST_INTR(1,fastintr1)
	FAST_INTR(2,fastintr2)
	FAST_INTR(3,fastintr3)
	FAST_INTR(4,fastintr4)
	FAST_INTR(5,fastintr5)
	FAST_INTR(6,fastintr6)
	FAST_INTR(7,fastintr7)
	FAST_INTR(8,fastintr8)
	FAST_INTR(9,fastintr9)
	FAST_INTR(10,fastintr10)
	FAST_INTR(11,fastintr11)
	FAST_INTR(12,fastintr12)
	FAST_INTR(13,fastintr13)
	FAST_INTR(14,fastintr14)
	FAST_INTR(15,fastintr15)
	FAST_INTR(16,fastintr16)
	FAST_INTR(17,fastintr17)
	FAST_INTR(18,fastintr18)
	FAST_INTR(19,fastintr19)
	FAST_INTR(20,fastintr20)
	FAST_INTR(21,fastintr21)
	FAST_INTR(22,fastintr22)
	FAST_INTR(23,fastintr23)
	INTR(0,intr0)
	INTR(1,intr1)
	INTR(2,intr2)
	INTR(3,intr3)
	INTR(4,intr4)
	INTR(5,intr5)
	INTR(6,intr6)
	INTR(7,intr7)
	INTR(8,intr8)
	INTR(9,intr9)
	INTR(10,intr10)
	INTR(11,intr11)
	INTR(12,intr12)
	INTR(13,intr13)
	INTR(14,intr14)
	INTR(15,intr15)
	INTR(16,intr16)
	INTR(17,intr17)
	INTR(18,intr18)
	INTR(19,intr19)
	INTR(20,intr20)
	INTR(21,intr21)
	INTR(22,intr22)
	INTR(23,intr23)
MCOUNT_LABEL(eintr)
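
/*
 * Each FAST_INTR(n, fastintrN) above emits an _XfastintrN entry point;
 * each INTR(n, intrN) emits _XintrN plus the XresumeN label recorded in
 * the _ihandlers table below for doreti_unpend.
 */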

	.data
/*
 * Addresses of interrupt handlers.
 *  XresumeNN: Resumption addresses for HWIs.
 */
	.globl _ihandlers
_ihandlers:
/*
 * used by:
 *  ipl.s:	doreti_unpend
 */
	.long	Xresume0,  Xresume1,  Xresume2,  Xresume3
	.long	Xresume4,  Xresume5,  Xresume6,  Xresume7
	.long	Xresume8,  Xresume9,  Xresume10, Xresume11
	.long	Xresume12, Xresume13, Xresume14, Xresume15
	.long	Xresume16, Xresume17, Xresume18, Xresume19
	.long	Xresume20, Xresume21, Xresume22, Xresume23
/*
 * used by:
 *  ipl.s:	doreti_unpend
 *  apic_ipl.s:	splz_unpend
 */
	.long	_swi_null, swi_net, _swi_null, _swi_null
	.long	_swi_vm, _swi_null, _softclock, swi_ast

imasks:				/* masks for interrupt handlers */
	.space	NHWI*4		/* padding; HWI masks are elsewhere */

	.long	SWI_TTY_MASK, SWI_NET_MASK, SWI_CAMNET_MASK, SWI_CAMBIO_MASK
	.long	SWI_VM_MASK, 0, SWI_CLOCK_MASK, SWI_AST_MASK

/* active flag for lazy masking */
iactive:
	.long	0

#ifdef COUNT_XINVLTLB_HITS
	.globl	_xhits
_xhits:
	.space	(NCPU * 4), 0
#endif /* COUNT_XINVLTLB_HITS */

/* variables used by stop_cpus()/restart_cpus()/Xcpustop */
	.globl _stopped_cpus, _started_cpus
_stopped_cpus:
	.long	0
_started_cpus:
	.long	0

#ifdef BETTER_CLOCK
	.globl _checkstate_probed_cpus
_checkstate_probed_cpus:
	.long	0
#endif /* BETTER_CLOCK */
	.globl _checkstate_need_ast
_checkstate_need_ast:
	.long	0
_checkstate_pending_ast:
	.long	0
	.globl CNAME(forward_irq_misscnt)
	.globl CNAME(forward_irq_toodeepcnt)
	.globl CNAME(forward_irq_hitcnt)
	.globl CNAME(resched_cpus)
	.globl CNAME(want_resched_cnt)
	.globl CNAME(cpuast_cnt)
	.globl CNAME(cpustop_restartfunc)
CNAME(forward_irq_misscnt):
	.long 0
CNAME(forward_irq_hitcnt):
	.long 0
CNAME(forward_irq_toodeepcnt):
	.long 0
CNAME(resched_cpus):
	.long 0
CNAME(want_resched_cnt):
	.long 0
CNAME(cpuast_cnt):
	.long 0
CNAME(cpustop_restartfunc):
	.long 0



	.globl	_apic_pin_trigger
_apic_pin_trigger:
	.long	0


/*
 * Interrupt counters and names.  The format of these and the label names
 * must agree with what vmstat expects.  The tables are indexed by device
 * ids so that we don't have to move the names around as devices are
 * attached.
 */
#include "vector.h"
	.globl	_intrcnt, _eintrcnt
_intrcnt:
	.space	(NR_DEVICES + ICU_LEN) * 4
_eintrcnt:

	.globl	_intrnames, _eintrnames
_intrnames:
	.ascii	DEVICE_NAMES
	.asciz	"stray irq0"
	.asciz	"stray irq1"
	.asciz	"stray irq2"
	.asciz	"stray irq3"
	.asciz	"stray irq4"
	.asciz	"stray irq5"
	.asciz	"stray irq6"
	.asciz	"stray irq7"
	.asciz	"stray irq8"
	.asciz	"stray irq9"
	.asciz	"stray irq10"
	.asciz	"stray irq11"
	.asciz	"stray irq12"
	.asciz	"stray irq13"
	.asciz	"stray irq14"
	.asciz	"stray irq15"
	.asciz	"stray irq16"
	.asciz	"stray irq17"
	.asciz	"stray irq18"
	.asciz	"stray irq19"
	.asciz	"stray irq20"
	.asciz	"stray irq21"
	.asciz	"stray irq22"
	.asciz	"stray irq23"
_eintrnames:

	.text