/*
 *	from: vector.s, 386BSD 0.1 unknown origin
 *	$Id: apic_vector.s,v 1.13 1997/07/31 05:42:05 fsmp Exp $
 */


#include <machine/smp.h>
#include <machine/smptests.h>		/** PEND_INTS, various counters */
#include "i386/isa/intr_machdep.h"

/* convert an absolute IRQ# into a bitmask */
#define IRQ_BIT(irq_num)	(1 << (irq_num))

/* make an index into the IO APIC from the IRQ# */
#define REDTBL_IDX(irq_num)	(0x10 + ((irq_num) * 2))
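
/*
 * For concreteness, a worked example of the two macros above:
 * for irq_num == 3,
 *	IRQ_BIT(3)    == 0x00000008
 *	REDTBL_IDX(3) == 0x16	(the low 32 bits of redirection table
 *				 entry 3; the high half is at index 0x17)
 */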

/*
 * 'lazy masking' code suggested by Bruce Evans <bde@zeta.org.au>
 */

#ifdef PEND_INTS

/*
 * The first version fails because masked edge-triggered INTs are lost
 * by the IO APIC.  This version tests whether we are handling an edge-
 * or level-triggered INT.  Level-triggered INTs must still be masked
 * since we don't clear the source, and the EOI cycle would allow
 * recursive INTs to occur.
 */
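
/*
 * The flow below, as a C-like sketch (illustrative only; the names
 * level_triggered/mask_ioapic_pin are shorthand for the inline
 * sequences in the macro itself):
 *
 *	if (test_and_set(&iactive, irq) || !try_isrlock(irq)) {
 *		if (level_triggered(irq))	// edge INTs are lost if
 *			mask_ioapic_pin(irq);	//  masked, so skip them
 *		ipending |= IRQ_BIT(irq);	// replay via _doreti later
 *		lapic_eoi();
 *		return_from_interrupt();
 *	}
 *	// got the lock and not recursing: fall through into the kernel
 */
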
#define MAYBE_MASK_IRQ(irq_num)						\
	lock ;					/* MP-safe */		\
	btsl	$(irq_num),iactive ;		/* lazy masking */	\
	jc	6f ;				/* already active */	\
	TRY_ISRLOCK(irq_num) ;			/* try to get lock */	\
	testl	%eax, %eax ;			/* did we get it? */	\
	jnz	8f ;				/* yes, enter kernel */	\
6: ;						/* active or locked */	\
	IMASK_LOCK ;				/* into critical reg */	\
	testl	$IRQ_BIT(irq_num),_apic_pin_trigger ;			\
	jz	7f ;				/* edge, don't mask */	\
	orl	$IRQ_BIT(irq_num),_apic_imen ;	/* set the mask bit */	\
	movl	_ioapic,%ecx ;			/* ioapic[0] addr */	\
	movl	$REDTBL_IDX(irq_num),(%ecx) ;	/* write the index */	\
	movl	IOAPIC_WINDOW(%ecx),%eax ;	/* current value */	\
	orl	$IOART_INTMASK,%eax ;		/* set the mask */	\
	movl	%eax,IOAPIC_WINDOW(%ecx) ;	/* new value */		\
7: ;									\
	lock ;					/* MP-safe */		\
	orl	$IRQ_BIT(irq_num), _ipending ;	/* set _ipending bit */	\
	IMASK_UNLOCK ;				/* exit critical reg */	\
	movl	$0, lapic_eoi ;			/* do the EOI */	\
	popl	%es ;							\
	popl	%ds ;							\
	popal ;								\
	addl	$4+4,%esp ;						\
	iret ;								\
;									\
	ALIGN_TEXT ;							\
8:

#else /* PEND_INTS */

#define MAYBE_MASK_IRQ(irq_num)						\
	lock ;					/* MP-safe */		\
	btsl	$(irq_num),iactive ;		/* lazy masking */	\
	jnc	1f ;				/* NOT active */	\
	IMASK_LOCK ;				/* enter critical reg */\
	orl	$IRQ_BIT(irq_num),_apic_imen ;	/* set the mask bit */	\
	movl	_ioapic,%ecx ;			/* ioapic[0] addr */	\
	movl	$REDTBL_IDX(irq_num),(%ecx) ;	/* write the index */	\
	movl	IOAPIC_WINDOW(%ecx),%eax ;	/* current value */	\
	orl	$IOART_INTMASK,%eax ;		/* set the mask */	\
	movl	%eax,IOAPIC_WINDOW(%ecx) ;	/* new value */		\
	lock ;					/* MP-safe */		\
	orl	$IRQ_BIT(irq_num), _ipending ;	/* set _ipending bit */	\
	movl	$0, lapic_eoi ;			/* do the EOI */	\
	IMASK_UNLOCK ;				/* exit critical reg */	\
	popl	%es ;							\
	popl	%ds ;							\
	popal ;								\
	addl	$4+4,%esp ;						\
	iret ;								\
;									\
	ALIGN_TEXT ;							\
1: ;									\
	GET_MPLOCK				/* SMP Spin lock */

#endif /* PEND_INTS */


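/*
 * In outline: MAYBE_UNMASK_IRQ clears this IRQ's bit in iactive and,
 * if MAYBE_MASK_IRQ had masked the pin (its bit is set in _apic_imen),
 * clears both the _apic_imen bit and IOART_INTMASK in the redirection
 * entry.  The cli keeps _apic_imen and the IO APIC hardware consistent
 * with each other.
 */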
#define MAYBE_UNMASK_IRQ(irq_num)					\
	cli ;	/* must unmask _apic_imen and IO APIC atomically */	\
	lock ;					/* MP-safe */		\
	andl	$~IRQ_BIT(irq_num),iactive ;				\
	IMASK_LOCK ;				/* enter critical reg */\
	testl	$IRQ_BIT(irq_num),_apic_imen ;				\
	je	9f ;							\
	andl	$~IRQ_BIT(irq_num),_apic_imen ;	/* clear mask bit */	\
	movl	_ioapic,%ecx ;			/* ioapic[0] addr */	\
	movl	$REDTBL_IDX(irq_num),(%ecx) ;	/* write the index */	\
	movl	IOAPIC_WINDOW(%ecx),%eax ;	/* current value */	\
	andl	$~IOART_INTMASK,%eax ;		/* clear the mask */	\
	movl	%eax,IOAPIC_WINDOW(%ecx) ;	/* new value */		\
9: ;									\
	IMASK_UNLOCK ;				/* exit critical reg */	\
	sti	/* XXX _doreti repeats the cli/sti */


/*
 * Macros for interrupt entry, call to handler, and exit.
 */

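/*
 * Two flavors follow.  FAST_INTR saves only the call-used registers,
 * runs its handler immediately, and EOIs right after it returns; it
 * drops into the full _doreti path only if unmasked SWIs/HWIs are
 * pending (and there is stack left for them).  INTR builds a complete
 * trap frame, raises the cpl by the handler's interrupt mask, and
 * exits through _doreti; if the IRQ is already masked in the current
 * cpl, it only records the INT in _ipending for later replay.
 */
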
#define	FAST_INTR(irq_num, vec_name)					\
	.text ;								\
	SUPERALIGN_TEXT ;						\
IDTVEC(vec_name) ;							\
	pushl	%eax ;		/* save only call-used registers */	\
	pushl	%ecx ;							\
	pushl	%edx ;							\
	pushl	%ds ;							\
	MAYBE_PUSHL_ES ;						\
	movl	$KDSEL,%eax ;						\
	movl	%ax,%ds ;						\
	MAYBE_MOVW_AX_ES ;						\
	FAKE_MCOUNT((4+ACTUALLY_PUSHED)*4(%esp)) ;			\
	GET_ISRLOCK(irq_num) ;						\
	pushl	_intr_unit + (irq_num) * 4 ;				\
	call	*_intr_handler + (irq_num) * 4 ; /* do the work ASAP */ \
	movl	$0, lapic_eoi ;						\
	addl	$4,%esp ;						\
	incl	_cnt+V_INTR ;	/* book-keeping can wait */		\
	movl	_intr_countp + (irq_num) * 4,%eax ;			\
	incl	(%eax) ;						\
	movl	_cpl,%eax ;	/* unmasking pending HWIs or SWIs? */	\
	notl	%eax ;							\
	andl	_ipending,%eax ;					\
	jne	2f ; 		/* yes, maybe handle them */		\
1: ;									\
	MEXITCOUNT ;							\
	REL_ISRLOCK(irq_num) ;						\
	MAYBE_POPL_ES ;							\
	popl	%ds ;							\
	popl	%edx ;							\
	popl	%ecx ;							\
	popl	%eax ;							\
	iret ;								\
;									\
	ALIGN_TEXT ;							\
2: ;									\
	cmpb	$3,_intr_nesting_level ;	/* enough stack? */	\
	jae	1b ;		/* no, return */			\
	movl	_cpl,%eax ;						\
	/* XXX next line is probably unnecessary now. */		\
	movl	$HWI_MASK|SWI_MASK,_cpl ;	/* limit nesting ... */	\
	incb	_intr_nesting_level ;	/* ... really limit it ... */	\
	sti ;			/* to do this as early as possible */	\
	MAYBE_POPL_ES ;		/* discard most of thin frame ... */	\
	popl	%ecx ;		/* ... original %ds ... */		\
	popl	%edx ;							\
	xchgl	%eax,4(%esp) ;	/* orig %eax; save cpl */		\
	pushal ;		/* build fat frame (grrr) ... */	\
	pushl	%ecx ;		/* ... actually %ds ... */		\
	pushl	%es ;							\
	movl	$KDSEL,%eax ;						\
	movl	%ax,%es ;						\
	movl	(2+8+0)*4(%esp),%ecx ;	/* %ecx from thin frame ... */	\
	movl	%ecx,(2+6)*4(%esp) ;	/* ... to fat frame ... */	\
	movl	(2+8+1)*4(%esp),%eax ;	/* ... cpl from thin frame */	\
	pushl	%eax ;							\
	subl	$4,%esp ;	/* junk for unit number */		\
	MEXITCOUNT ;							\
	jmp	_doreti

#define	INTR(irq_num, vec_name)						\
	.text ;								\
	SUPERALIGN_TEXT ;						\
IDTVEC(vec_name) ;							\
	pushl	$0 ;		/* dummy error code */			\
	pushl	$0 ;		/* dummy trap type */			\
	pushal ;							\
	pushl	%ds ;		/* save data and extra segments ... */	\
	pushl	%es ;							\
	movl	$KDSEL,%eax ;	/* ... and reload with kernel's ... */	\
	movl	%ax,%ds ;	/* ... early for obsolete reasons */	\
	movl	%ax,%es ;						\
	MAYBE_MASK_IRQ(irq_num) ;					\
	movl	$0, lapic_eoi ;						\
	movl	_cpl,%eax ;						\
	testl	$IRQ_BIT(irq_num), %eax ;				\
	jne	3f ;							\
	incb	_intr_nesting_level ;					\
__CONCAT(Xresume,irq_num): ;						\
	FAKE_MCOUNT(12*4(%esp)) ;	/* XXX late to avoid dbl cnt */ \
	incl	_cnt+V_INTR ;	/* tally interrupts */			\
	movl	_intr_countp + (irq_num) * 4,%eax ;			\
	incl	(%eax) ;						\
	movl	_cpl,%eax ;						\
	pushl	%eax ;							\
	pushl	_intr_unit + (irq_num) * 4 ;				\
	orl	_intr_mask + (irq_num) * 4,%eax ;			\
	movl	%eax,_cpl ;						\
	sti ;								\
	call	*_intr_handler + (irq_num) * 4 ;			\
	MAYBE_UNMASK_IRQ(irq_num) ;					\
	MEXITCOUNT ;							\
	jmp	_doreti ;						\
;									\
	ALIGN_TEXT ;							\
3: ;									\
	/* XXX skip mcounting here to avoid double count */		\
	lock ;					/* MP-safe */		\
	orl	$IRQ_BIT(irq_num), _ipending ;				\
	REL_ISRLOCK(irq_num) ;						\
	popl	%es ;							\
	popl	%ds ;							\
	popal ;								\
	addl	$4+4,%esp ;						\
	iret
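
/*
 * Note: when the 3: path above defers a masked INT, splx()/_doreti
 * later replays it by jumping to the corresponding XresumeN entry
 * point (see the ihandlers table below).
 */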


/*
 * Handle "spurious INTerrupts".
 * Notes:
 *  This is different from the "spurious INTerrupt" generated by an
 *   8259 PIC for missing INTs.  See the APIC documentation for details.
 *  This routine should NOT do an 'EOI' cycle.
 */
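/*
 * (The local APIC delivers its spurious vector without setting the
 * corresponding ISR bit, which is why an EOI here would be wrong.)
 */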
	.text
	SUPERALIGN_TEXT
	.globl _Xspuriousint
_Xspuriousint:
#ifdef COUNT_SPURIOUS_INTS
	ss
	incl	_sihits
#endif

	/* No EOI cycle used here */

	iret


/*
 * Handle TLB shootdowns.
 */
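/*
 * Reloading %cr3 flushes the (non-global) TLB entries on this CPU;
 * the IPI lets one CPU force the others to discard stale mappings.
 */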
	.text
	SUPERALIGN_TEXT
	.globl	_Xinvltlb
_Xinvltlb:
	pushl	%eax

#ifdef COUNT_XINVLTLB_HITS
	ss
	movl	_cpuid, %eax
	ss
	incl	_xhits(,%eax,4)
#endif /* COUNT_XINVLTLB_HITS */

	movl	%cr3, %eax		/* invalidate the TLB */
	movl	%eax, %cr3

	ss				/* stack segment, avoid %ds load */
	movl	$0, lapic_eoi		/* End Of Interrupt to APIC */

	popl	%eax
	iret


/*
 * Executed by a CPU when it receives an Xcpustop IPI from another CPU:
 *
 *  - Signals its receipt.
 *  - Waits for permission to restart.
 *  - Signals its restart.
 */
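/*
 * The handshake, as a C-like sketch (illustrative shorthand for the
 * btsl/btl/btrl sequence below):
 *
 *	stopped_cpus |= 1 << cpuid;		// signal receipt
 *	while ((started_cpus & (1 << cpuid)) == 0)
 *		;				// spin until released
 *	started_cpus &= ~(1 << cpuid);		// signal restart
 */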

	.text
	SUPERALIGN_TEXT
	.globl _Xcpustop
_Xcpustop:
	pushl	%eax
	pushl	%ds			/* save current data segment */

	movl	$KDSEL, %eax
	movl	%ax, %ds		/* use KERNEL data segment */

	movl	_cpuid, %eax

#ifdef COUNT_CSHITS
	incl	_cshits(,%eax,4)
#endif /* COUNT_CSHITS */

	ASMPOSTCODE_HI(0x1)

	lock
	btsl	%eax, _stopped_cpus	/* stopped_cpus |= (1<<id) */

	ASMPOSTCODE_HI(0x2)
1:
	btl	%eax, _started_cpus	/* while (!(started_cpus & (1<<id))) */
	jnc	1b

	ASMPOSTCODE_HI(0x3)

	lock
	btrl	%eax, _started_cpus	/* started_cpus &= ~(1<<id) */

	ASMPOSTCODE_HI(0x4)

	movl	$0, lapic_eoi		/* End Of Interrupt to APIC */

	popl	%ds			/* restore previous data segment */
	popl	%eax
	iret

315
316MCOUNT_LABEL(bintr)
317	FAST_INTR(0,fastintr0)
318	FAST_INTR(1,fastintr1)
319	FAST_INTR(2,fastintr2)
320	FAST_INTR(3,fastintr3)
321	FAST_INTR(4,fastintr4)
322	FAST_INTR(5,fastintr5)
323	FAST_INTR(6,fastintr6)
324	FAST_INTR(7,fastintr7)
325	FAST_INTR(8,fastintr8)
326	FAST_INTR(9,fastintr9)
327	FAST_INTR(10,fastintr10)
328	FAST_INTR(11,fastintr11)
329	FAST_INTR(12,fastintr12)
330	FAST_INTR(13,fastintr13)
331	FAST_INTR(14,fastintr14)
332	FAST_INTR(15,fastintr15)
333	FAST_INTR(16,fastintr16)
334	FAST_INTR(17,fastintr17)
335	FAST_INTR(18,fastintr18)
336	FAST_INTR(19,fastintr19)
337	FAST_INTR(20,fastintr20)
338	FAST_INTR(21,fastintr21)
339	FAST_INTR(22,fastintr22)
340	FAST_INTR(23,fastintr23)
341	INTR(0,intr0)
342	INTR(1,intr1)
343	INTR(2,intr2)
344	INTR(3,intr3)
345	INTR(4,intr4)
346	INTR(5,intr5)
347	INTR(6,intr6)
348	INTR(7,intr7)
349	INTR(8,intr8)
350	INTR(9,intr9)
351	INTR(10,intr10)
352	INTR(11,intr11)
353	INTR(12,intr12)
354	INTR(13,intr13)
355	INTR(14,intr14)
356	INTR(15,intr15)
357	INTR(16,intr16)
358	INTR(17,intr17)
359	INTR(18,intr18)
360	INTR(19,intr19)
361	INTR(20,intr20)
362	INTR(21,intr21)
363	INTR(22,intr22)
364	INTR(23,intr23)
365MCOUNT_LABEL(eintr)

	.data
ihandlers:			/* addresses of interrupt handlers */
				/* actually resumption addresses for HWI's */
	.long	Xresume0,  Xresume1,  Xresume2,  Xresume3
	.long	Xresume4,  Xresume5,  Xresume6,  Xresume7
	.long	Xresume8,  Xresume9,  Xresume10, Xresume11
	.long	Xresume12, Xresume13, Xresume14, Xresume15
	.long	Xresume16, Xresume17, Xresume18, Xresume19
	.long	Xresume20, Xresume21, Xresume22, Xresume23
	.long	swi_tty,   swi_net
	.long	0, 0, 0, 0
	.long	_softclock, swi_ast

imasks:				/* masks for interrupt handlers */
	.space	NHWI*4		/* padding; HWI masks are elsewhere */

	.long	SWI_TTY_MASK, SWI_NET_MASK
	.long	0, 0, 0, 0
	.long	SWI_CLOCK_MASK, SWI_AST_MASK

	.globl _ivectors
_ivectors:
	.long	_Xintr0,  _Xintr1,  _Xintr2,  _Xintr3
	.long	_Xintr4,  _Xintr5,  _Xintr6,  _Xintr7
	.long	_Xintr8,  _Xintr9,  _Xintr10, _Xintr11
	.long	_Xintr12, _Xintr13, _Xintr14, _Xintr15
	.long	_Xintr16, _Xintr17, _Xintr18, _Xintr19
	.long	_Xintr20, _Xintr21, _Xintr22, _Xintr23

/* active flag for lazy masking */
iactive:
	.long	0

#ifdef COUNT_SPURIOUS_INTS
	.globl	_sihits
_sihits:
	.long	0
#endif /* COUNT_SPURIOUS_INTS */

#ifdef COUNT_XINVLTLB_HITS
	.globl	_xhits
_xhits:
	.space	(NCPU * 4), 0
#endif /* COUNT_XINVLTLB_HITS */

/* variables used by stop_cpus()/restart_cpus()/Xcpustop */
	.globl _stopped_cpus, _started_cpus
_stopped_cpus:
	.long	0
_started_cpus:
	.long	0

#ifdef COUNT_CSHITS
	.globl	_cshits
_cshits:
	.space	(NCPU * 4), 0
#endif /* COUNT_CSHITS */

#ifdef PEND_INTS
	.globl	_apic_pin_trigger
_apic_pin_trigger:
	.space	(NAPIC * 4), 0
#endif /* PEND_INTS */


/*
 * Interrupt counters and names.  The format of these and the label names
 * must agree with what vmstat expects.  The tables are indexed by device
 * ids so that we don't have to move the names around as devices are
 * attached.
 */
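/*
 * (vmstat is expected to size the table from _eintrcnt - _intrcnt and
 * to walk _intrnames as consecutive NUL-terminated strings.)
 */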
#include "vector.h"
	.globl	_intrcnt, _eintrcnt
_intrcnt:
	.space	(NR_DEVICES + ICU_LEN) * 4
_eintrcnt:

	.globl	_intrnames, _eintrnames
_intrnames:
	.ascii	DEVICE_NAMES
	.asciz	"stray irq0"
	.asciz	"stray irq1"
	.asciz	"stray irq2"
	.asciz	"stray irq3"
	.asciz	"stray irq4"
	.asciz	"stray irq5"
	.asciz	"stray irq6"
	.asciz	"stray irq7"
	.asciz	"stray irq8"
	.asciz	"stray irq9"
	.asciz	"stray irq10"
	.asciz	"stray irq11"
	.asciz	"stray irq12"
	.asciz	"stray irq13"
	.asciz	"stray irq14"
	.asciz	"stray irq15"
	.asciz	"stray irq16"
	.asciz	"stray irq17"
	.asciz	"stray irq18"
	.asciz	"stray irq19"
	.asciz	"stray irq20"
	.asciz	"stray irq21"
	.asciz	"stray irq22"
	.asciz	"stray irq23"
_eintrnames:

	.text
