/*
 *	from: vector.s, 386BSD 0.1 unknown origin
 *	$Id: apic_vector.s,v 1.17 1997/07/28 03:51:01 smp Exp smp $
 */


#include <machine/smp.h>
#include <machine/smptests.h>		/** PEND_INTS, various counters */
#include "i386/isa/intr_machdep.h"

/* convert an absolute IRQ# into a bitmask */
#define IRQ_BIT(irq_num)	(1 << (irq_num))

/* make an index into the IO APIC from the IRQ# */
#define REDTBL_IDX(irq_num)	(0x10 + ((irq_num) * 2))
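
/*
 * Example (illustrative only): the IO APIC's redirection table
 * starts at register index 0x10 and uses two 32-bit registers per
 * entry, so for IRQ 9:
 *
 *	IRQ_BIT(9)    == 0x00000200
 *	REDTBL_IDX(9) == 0x22		// low dword of redirection entry 9
 */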

/*
 * 'lazy masking' code suggested by Bruce Evans <bde@zeta.org.au>
 */
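
/*
 * The idea, as a rough C sketch (atomic_test_and_set() and
 * mask_ioapic_pin() are illustrative helpers standing in for the
 * lock/btsl and IMASK_LOCK'd IOAPIC_WINDOW sequences spelled out
 * in the macros below):
 *
 *	if (atomic_test_and_set(&iactive, irq)) {
 *		mask_ioapic_pin(irq);		// already being serviced
 *		ipending |= IRQ_BIT(irq);	// replay it from _doreti
 *		lapic_eoi();
 *		return;				// don't recurse
 *	}
 *	// first instance: take the MP lock and run the handler
 */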

#ifdef PEND_INTS

#ifdef FIRST_TRY

#define MAYBE_MASK_IRQ(irq_num)						\
	lock ;					/* MP-safe */		\
	btsl	$(irq_num),iactive ;		/* lazy masking */	\
	jnc	8f ;				/* NOT active */	\
7: ;									\
	IMASK_LOCK ;				/* enter critical reg */\
	orl	$IRQ_BIT(irq_num),_apic_imen ;	/* set the mask bit */	\
	movl	_ioapic,%ecx ;			/* ioapic[0] addr */	\
	movl	$REDTBL_IDX(irq_num),(%ecx) ;	/* write the index */	\
	movl	IOAPIC_WINDOW(%ecx),%eax ;	/* current value */	\
	orl	$IOART_INTMASK,%eax ;		/* set the mask */	\
	movl	%eax,IOAPIC_WINDOW(%ecx) ;	/* new value */		\
	orl	$IRQ_BIT(irq_num), _ipending ;	/* set _ipending bit */	\
	IMASK_UNLOCK ;				/* exit critical reg */	\
	movl	$0, lapic_eoi ;			/* do the EOI */	\
	popl	%es ;							\
	popl	%ds ;							\
	popal ;								\
	addl	$4+4,%esp ;						\
	iret ;								\
;									\
	ALIGN_TEXT ;							\
8: ;									\
	call	_try_mplock ;						\
	testl	%eax, %eax ;						\
	jz	7b				/* can't enter kernel */

#else /** FIRST_TRY */

/*
 * The 1st version fails because masked edge-triggered INTs are lost
 * by the IO APIC.  This version tests whether we are handling an
 * edge- or level-triggered INT.  Level-triggered INTs must still be
 * masked, as we don't clear the source and the EOI cycle would allow
 * recursive INTs to occur.
 */
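
/*
 * In outline (illustrative C; the helpers are again stand-ins for
 * the locked instruction sequences below):
 *
 *	if (atomic_test_and_set(&iactive, irq) || !try_mplock()) {
 *		if (apic_pin_trigger & IRQ_BIT(irq))
 *			mask_ioapic_pin(irq);	// level: must mask
 *		ipending |= IRQ_BIT(irq);	// edge: just pend it
 *		lapic_eoi();
 *		return;
 *	}
 *	// inactive and lock acquired: enter the kernel proper
 */
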
#define MAYBE_MASK_IRQ(irq_num)						\
	lock ;					/* MP-safe */		\
	btsl	$(irq_num),iactive ;		/* lazy masking */	\
	jc	6f ;				/* already active */	\
	call	_try_mplock ;			/* try to get lock */	\
	testl	%eax, %eax ;			/* did we get it? */	\
	jnz	8f ;				/* yes, enter kernel */	\
6: ;						/* active or locked */	\
	IMASK_LOCK ;				/* into critical reg */	\
	testl	$IRQ_BIT(irq_num),_apic_pin_trigger ;			\
	jz	7f ;				/* edge, don't mask */	\
	orl	$IRQ_BIT(irq_num),_apic_imen ;	/* set the mask bit */	\
	movl	_ioapic,%ecx ;			/* ioapic[0] addr */	\
	movl	$REDTBL_IDX(irq_num),(%ecx) ;	/* write the index */	\
	movl	IOAPIC_WINDOW(%ecx),%eax ;	/* current value */	\
	orl	$IOART_INTMASK,%eax ;		/* set the mask */	\
	movl	%eax,IOAPIC_WINDOW(%ecx) ;	/* new value */		\
7: ;									\
	orl	$IRQ_BIT(irq_num), _ipending ;	/* set _ipending bit */	\
	IMASK_UNLOCK ;				/* exit critical reg */	\
	movl	$0, lapic_eoi ;			/* do the EOI */	\
	popl	%es ;							\
	popl	%ds ;							\
	popal ;								\
	addl	$4+4,%esp ;						\
	iret ;								\
;									\
	ALIGN_TEXT ;							\
8:

#endif /** FIRST_TRY */

#else /* PEND_INTS */

#define MAYBE_MASK_IRQ(irq_num)						\
	lock ;					/* MP-safe */		\
	btsl	$(irq_num),iactive ;		/* lazy masking */	\
	jnc	1f ;				/* NOT active */	\
	IMASK_LOCK ;				/* enter critical reg */\
	orl	$IRQ_BIT(irq_num),_apic_imen ;	/* set the mask bit */	\
	movl	_ioapic,%ecx ;			/* ioapic[0] addr */	\
	movl	$REDTBL_IDX(irq_num),(%ecx) ;	/* write the index */	\
	movl	IOAPIC_WINDOW(%ecx),%eax ;	/* current value */	\
	orl	$IOART_INTMASK,%eax ;		/* set the mask */	\
	movl	%eax,IOAPIC_WINDOW(%ecx) ;	/* new value */		\
	orl	$IRQ_BIT(irq_num), _ipending ;	/* set _ipending bit */	\
	movl	$0, lapic_eoi ;			/* do the EOI */	\
	IMASK_UNLOCK ;				/* exit critical reg */	\
	popl	%es ;							\
	popl	%ds ;							\
	popal ;								\
	addl	$4+4,%esp ;						\
	iret ;								\
;									\
	ALIGN_TEXT ;							\
1: ;									\
	GET_MPLOCK				/* SMP Spin lock */

#endif /* PEND_INTS */


#define MAYBE_UNMASK_IRQ(irq_num)					\
	cli ;	/* must unmask _apic_imen and IO APIC atomically */	\
	lock ;					/* MP-safe */		\
	andl	$~IRQ_BIT(irq_num),iactive ;				\
	IMASK_LOCK ;				/* enter critical reg */\
	testl	$IRQ_BIT(irq_num),_apic_imen ;				\
	je	9f ;							\
	andl	$~IRQ_BIT(irq_num),_apic_imen ;	/* clear mask bit */	\
	movl	_ioapic,%ecx ;			/* ioapic[0] addr */	\
	movl	$REDTBL_IDX(irq_num),(%ecx) ;	/* write the index */	\
	movl	IOAPIC_WINDOW(%ecx),%eax ;	/* current value */	\
	andl	$~IOART_INTMASK,%eax ;		/* clear the mask */	\
	movl	%eax,IOAPIC_WINDOW(%ecx) ;	/* new value */		\
9: ;									\
	IMASK_UNLOCK ;				/* exit critical reg */	\
	sti	/* XXX _doreti repeats the cli/sti */


/*
 * Macros for interrupt entry, call to handler, and exit.
 */

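/*
 * Two flavors are generated below: FAST_INTR saves only the
 * call-used registers and calls the handler at once, while INTR
 * builds a full trap frame, honors the _cpl priority mask, and
 * unwinds through _doreti.  In outline (illustrative pseudo code,
 * not the macros themselves):
 *
 *	fast:	save %eax/%ecx/%edx; handler(unit); EOI;
 *		restore; iret;
 *	slow:	trapframe; MAYBE_MASK_IRQ; EOI;
 *		cpl |= intr_mask[irq]; handler(unit);
 *		MAYBE_UNMASK_IRQ; _doreti();
 */
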
#define	FAST_INTR(irq_num, vec_name)					\
	.text ;								\
	SUPERALIGN_TEXT ;						\
IDTVEC(vec_name) ;							\
	pushl	%eax ;		/* save only call-used registers */	\
	pushl	%ecx ;							\
	pushl	%edx ;							\
	pushl	%ds ;							\
	MAYBE_PUSHL_ES ;						\
	movl	$KDSEL,%eax ;						\
	movl	%ax,%ds ;						\
	MAYBE_MOVW_AX_ES ;						\
	FAKE_MCOUNT((4+ACTUALLY_PUSHED)*4(%esp)) ;			\
	GET_MPLOCK ;							\
	pushl	_intr_unit + (irq_num) * 4 ;				\
	call	*_intr_handler + (irq_num) * 4 ; /* do the work ASAP */ \
	movl	$0, lapic_eoi ;						\
	addl	$4,%esp ;						\
	incl	_cnt+V_INTR ;	/* book-keeping can wait */		\
	movl	_intr_countp + (irq_num) * 4,%eax ;			\
	incl	(%eax) ;						\
	movl	_cpl,%eax ;	/* unmasking pending HWIs or SWIs? */	\
	notl	%eax ;							\
	andl	_ipending,%eax ;					\
	jne	2f ; 		/* yes, maybe handle them */		\
1: ;									\
	MEXITCOUNT ;							\
	REL_MPLOCK ;		/* SMP release global lock */		\
	MAYBE_POPL_ES ;							\
	popl	%ds ;							\
	popl	%edx ;							\
	popl	%ecx ;							\
	popl	%eax ;							\
	iret ;								\
;									\
	ALIGN_TEXT ;							\
2: ;									\
	cmpb	$3,_intr_nesting_level ;	/* enough stack? */	\
	jae	1b ;		/* no, return */			\
	movl	_cpl,%eax ;						\
	/* XXX next line is probably unnecessary now. */		\
	movl	$HWI_MASK|SWI_MASK,_cpl ;	/* limit nesting ... */	\
	incb	_intr_nesting_level ;	/* ... really limit it ... */	\
	sti ;			/* to do this as early as possible */	\
	MAYBE_POPL_ES ;		/* discard most of thin frame ... */	\
	popl	%ecx ;		/* ... original %ds ... */		\
	popl	%edx ;							\
	xchgl	%eax,4(%esp) ;	/* orig %eax; save cpl */		\
	pushal ;		/* build fat frame (grrr) ... */	\
	pushl	%ecx ;		/* ... actually %ds ... */		\
	pushl	%es ;							\
	movl	$KDSEL,%eax ;						\
	movl	%ax,%es ;						\
	movl	(2+8+0)*4(%esp),%ecx ;	/* %ecx from thin frame ... */	\
	movl	%ecx,(2+6)*4(%esp) ;	/* ... to fat frame ... */	\
	movl	(2+8+1)*4(%esp),%eax ;	/* ... cpl from thin frame */	\
	pushl	%eax ;							\
	subl	$4,%esp ;	/* junk for unit number */		\
	MEXITCOUNT ;							\
	jmp	_doreti

#define	INTR(irq_num, vec_name)						\
	.text ;								\
	SUPERALIGN_TEXT ;						\
IDTVEC(vec_name) ;							\
	pushl	$0 ;		/* dummy error code */			\
	pushl	$0 ;		/* dummy trap type */			\
	pushal ;							\
	pushl	%ds ;		/* save data and extra segments ... */	\
	pushl	%es ;							\
	movl	$KDSEL,%eax ;	/* ... and reload with kernel's ... */	\
	movl	%ax,%ds ;	/* ... early for obsolete reasons */	\
	movl	%ax,%es ;						\
	MAYBE_MASK_IRQ(irq_num) ;					\
	movl	$0, lapic_eoi ;						\
	movl	_cpl,%eax ;						\
	testl	$IRQ_BIT(irq_num), %eax ;				\
	jne	3f ;							\
	incb	_intr_nesting_level ;					\
__CONCAT(Xresume,irq_num): ;						\
	FAKE_MCOUNT(12*4(%esp)) ;	/* XXX late to avoid dbl cnt */ \
	incl	_cnt+V_INTR ;	/* tally interrupts */			\
	movl	_intr_countp + (irq_num) * 4,%eax ;			\
	incl	(%eax) ;						\
	movl	_cpl,%eax ;						\
	pushl	%eax ;							\
	pushl	_intr_unit + (irq_num) * 4 ;				\
	orl	_intr_mask + (irq_num) * 4,%eax ;			\
	movl	%eax,_cpl ;						\
	sti ;								\
	call	*_intr_handler + (irq_num) * 4 ;			\
	MAYBE_UNMASK_IRQ(irq_num) ;					\
	MEXITCOUNT ;							\
	jmp	_doreti ;						\
;									\
	ALIGN_TEXT ;							\
3: ;									\
	/* XXX skip mcounting here to avoid double count */		\
	orl	$IRQ_BIT(irq_num), _ipending ;				\
	REL_MPLOCK ;		/* SMP release global lock */		\
	popl	%es ;							\
	popl	%ds ;							\
	popal ;								\
	addl	$4+4,%esp ;						\
	iret


/*
 * Handle "spurious INTerrupts".
 * Notes:
 *  This is different from the "spurious INTerrupt" generated by an
 *   8259 PIC for missing INTs.  See the APIC documentation for details.
 *  This routine should NOT do an 'EOI' cycle.
 */
	.text
	SUPERALIGN_TEXT
	.globl _Xspuriousint
_Xspuriousint:
#ifdef COUNT_SPURIOUS_INTS
	ss
	incl	_sihits
#endif

	/* No EOI cycle used here */

	iret


/*
 * Handle TLB shootdowns.
 */
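
/*
 * The flush itself is just a reload of %cr3 with its current value,
 * roughly load_cr3(rcr3()) in C terms; this invalidates all
 * non-global TLB entries.
 */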
	.text
	SUPERALIGN_TEXT
	.globl	_Xinvltlb
_Xinvltlb:
	pushl	%eax

#ifdef COUNT_XINVLTLB_HITS
	ss
	movl	_cpuid, %eax
	ss
	incl	_xhits(,%eax,4)
#endif /* COUNT_XINVLTLB_HITS */

	movl	%cr3, %eax		/* invalidate the TLB */
	movl	%eax, %cr3

	ss				/* stack segment, avoid %ds load */
	movl	$0, lapic_eoi		/* End Of Interrupt to APIC */

	popl	%eax
	iret


/*
 * Executed by a CPU when it receives an Xcpustop IPI from another CPU:
 *
 *  - Signals its receipt.
 *  - Waits for permission to restart.
 *  - Signals its restart.
 */

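/*
 * Rough C sketch of this handler's side of the handshake (the
 * initiating side, stop_cpus()/restart_cpus(), is assumed to set
 * started_cpus and to wait on stopped_cpus in the mirror-image way):
 *
 *	atomic_set_bit(&stopped_cpus, cpuid);	// signal receipt
 *	while (!(started_cpus & (1 << cpuid)))	// wait for permission
 *		;
 *	atomic_clear_bit(&started_cpus, cpuid);	// signal restart
 */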
	.text
	SUPERALIGN_TEXT
	.globl _Xcpustop
_Xcpustop:
	pushl	%eax
	pushl	%ds			/* save current data segment */

	movl	$KDSEL, %eax
	movl	%ax, %ds		/* use KERNEL data segment */

	movl	_cpuid, %eax

#ifdef COUNT_CSHITS
	incl	_cshits(,%eax,4)
#endif /* COUNT_CSHITS */

	ASMPOSTCODE_HI(0x1)

	lock
	btsl	%eax, _stopped_cpus	/* stopped_cpus |= (1<<id) */

	ASMPOSTCODE_HI(0x2)
1:
	btl	%eax, _started_cpus	/* while (!(started_cpus & (1<<id))) */
	jnc	1b

	ASMPOSTCODE_HI(0x3)

	lock
	btrl	%eax, _started_cpus	/* started_cpus &= ~(1<<id) */

	ASMPOSTCODE_HI(0x4)

	movl	$0, lapic_eoi		/* End Of Interrupt to APIC */

	popl	%ds			/* restore previous data segment */
	popl	%eax
	iret


MCOUNT_LABEL(bintr)
	FAST_INTR(0,fastintr0)
	FAST_INTR(1,fastintr1)
	FAST_INTR(2,fastintr2)
	FAST_INTR(3,fastintr3)
	FAST_INTR(4,fastintr4)
	FAST_INTR(5,fastintr5)
	FAST_INTR(6,fastintr6)
	FAST_INTR(7,fastintr7)
	FAST_INTR(8,fastintr8)
	FAST_INTR(9,fastintr9)
	FAST_INTR(10,fastintr10)
	FAST_INTR(11,fastintr11)
	FAST_INTR(12,fastintr12)
	FAST_INTR(13,fastintr13)
	FAST_INTR(14,fastintr14)
	FAST_INTR(15,fastintr15)
	FAST_INTR(16,fastintr16)
	FAST_INTR(17,fastintr17)
	FAST_INTR(18,fastintr18)
	FAST_INTR(19,fastintr19)
	FAST_INTR(20,fastintr20)
	FAST_INTR(21,fastintr21)
	FAST_INTR(22,fastintr22)
	FAST_INTR(23,fastintr23)
	INTR(0,intr0)
	INTR(1,intr1)
	INTR(2,intr2)
	INTR(3,intr3)
	INTR(4,intr4)
	INTR(5,intr5)
	INTR(6,intr6)
	INTR(7,intr7)
	INTR(8,intr8)
	INTR(9,intr9)
	INTR(10,intr10)
	INTR(11,intr11)
	INTR(12,intr12)
	INTR(13,intr13)
	INTR(14,intr14)
	INTR(15,intr15)
	INTR(16,intr16)
	INTR(17,intr17)
	INTR(18,intr18)
	INTR(19,intr19)
	INTR(20,intr20)
	INTR(21,intr21)
	INTR(22,intr22)
	INTR(23,intr23)
MCOUNT_LABEL(eintr)

	.data
ihandlers:			/* addresses of interrupt handlers */
				/* actually resumption addresses for HWI's */
	.long	Xresume0,  Xresume1,  Xresume2,  Xresume3
	.long	Xresume4,  Xresume5,  Xresume6,  Xresume7
	.long	Xresume8,  Xresume9,  Xresume10, Xresume11
	.long	Xresume12, Xresume13, Xresume14, Xresume15
	.long	Xresume16, Xresume17, Xresume18, Xresume19
	.long	Xresume20, Xresume21, Xresume22, Xresume23
	.long	swi_tty,   swi_net
	.long	0, 0, 0, 0
	.long	_softclock, swi_ast

imasks:				/* masks for interrupt handlers */
	.space	NHWI*4		/* padding; HWI masks are elsewhere */

	.long	SWI_TTY_MASK, SWI_NET_MASK
	.long	0, 0, 0, 0
	.long	SWI_CLOCK_MASK, SWI_AST_MASK

	.globl _ivectors
_ivectors:
	.long	_Xintr0,  _Xintr1,  _Xintr2,  _Xintr3
	.long	_Xintr4,  _Xintr5,  _Xintr6,  _Xintr7
	.long	_Xintr8,  _Xintr9,  _Xintr10, _Xintr11
	.long	_Xintr12, _Xintr13, _Xintr14, _Xintr15
	.long	_Xintr16, _Xintr17, _Xintr18, _Xintr19
	.long	_Xintr20, _Xintr21, _Xintr22, _Xintr23

/* active flag for lazy masking */
iactive:
	.long	0

#ifdef COUNT_SPURIOUS_INTS
	.globl	_sihits
_sihits:
	.long	0
#endif /* COUNT_SPURIOUS_INTS */

#ifdef COUNT_XINVLTLB_HITS
	.globl	_xhits
_xhits:
	.space	(NCPU * 4), 0
#endif /* COUNT_XINVLTLB_HITS */

/* variables used by stop_cpus()/restart_cpus()/Xcpustop */
	.globl _stopped_cpus, _started_cpus
_stopped_cpus:
	.long	0
_started_cpus:
	.long	0

#ifdef COUNT_CSHITS
	.globl	_cshits
_cshits:
	.space	(NCPU * 4), 0
#endif /* COUNT_CSHITS */

#ifdef PEND_INTS
	.globl	_apic_pin_trigger
_apic_pin_trigger:
	.space	(NAPIC * 4), 0
#endif /* PEND_INTS */


/*
 * Interrupt counters and names.  The format of these and the label names
 * must agree with what vmstat expects.  The tables are indexed by device
 * ids so that we don't have to move the names around as devices are
 * attached.
 */
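
/*
 * Illustrative pairing (a sketch of how a consumer such as vmstat(8)
 * is assumed to walk these tables; the loop below is not the actual
 * vmstat code):
 *
 *	u_int *cnt = (u_int *)_intrcnt;
 *	char *name = _intrnames;
 *	int i, n = ((char *)_eintrcnt - (char *)_intrcnt) / 4;
 *
 *	for (i = 0; i < n; i++, cnt++) {
 *		printf("%s %u\n", name, *cnt);
 *		name += strlen(name) + 1;	// next .asciz string
 *	}
 */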
#include "vector.h"
	.globl	_intrcnt, _eintrcnt
_intrcnt:
	.space	(NR_DEVICES + ICU_LEN) * 4
_eintrcnt:

	.globl	_intrnames, _eintrnames
_intrnames:
	.ascii	DEVICE_NAMES
	.asciz	"stray irq0"
	.asciz	"stray irq1"
	.asciz	"stray irq2"
	.asciz	"stray irq3"
	.asciz	"stray irq4"
	.asciz	"stray irq5"
	.asciz	"stray irq6"
	.asciz	"stray irq7"
	.asciz	"stray irq8"
	.asciz	"stray irq9"
	.asciz	"stray irq10"
	.asciz	"stray irq11"
	.asciz	"stray irq12"
	.asciz	"stray irq13"
	.asciz	"stray irq14"
	.asciz	"stray irq15"
	.asciz	"stray irq16"
	.asciz	"stray irq17"
	.asciz	"stray irq18"
	.asciz	"stray irq19"
	.asciz	"stray irq20"
	.asciz	"stray irq21"
	.asciz	"stray irq22"
	.asciz	"stray irq23"
_eintrnames:

	.text
504