apic_vector.s revision 27634
/*
 *	from: vector.s, 386BSD 0.1 unknown origin
 *	$Id: apic_vector.s,v 1.14 1997/07/23 20:18:14 smp Exp smp $
 */


#include <machine/smptests.h>		/** various counters */
#include "i386/isa/intr_machdep.h"

/* convert an absolute IRQ# into a bitmask */
#define IRQ_BIT(irq_num)	(1 << (irq_num))

/* make an index into the IO APIC from the IRQ# */
#define REDTBL_IDX(irq_num)	(0x10 + ((irq_num) * 2))
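
/*
 * Each redirection table entry is 64 bits wide and is accessed as two
 * 32-bit registers, with the table starting at register index 0x10;
 * the low dword of entry N therefore lives at 0x10 + 2*N (e.g. IRQ 3
 * selects index 0x10 + 6 = 0x16).  That low dword holds the
 * IOART_INTMASK bit manipulated below.
 */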

/*
 * 'Lazy masking' code suggested by Bruce Evans <bde@zeta.org.au>:
 * an IRQ is masked in the IO APIC only when it fires while a previous
 * instance is still marked active in iactive.
 */
#if 1
#define MAYBE_MASK_IRQ(irq_num)						\
	lock ;					/* MP-safe */		\
	btsl	$(irq_num),iactive ;		/* lazy masking */	\
	jnc	1f ;				/* NOT active */	\
	IMASK_LOCK ;				/* enter critical region */ \
	orl	$IRQ_BIT(irq_num),_apic_imen ;	/* set the mask bit */	\
	movl	_ioapic,%ecx ;			/* ioapic[0] addr */	\
	movl	$REDTBL_IDX(irq_num),(%ecx) ;	/* write the index */	\
	movl	IOAPIC_WINDOW(%ecx),%eax ;	/* current value */	\
	orl	$IOART_INTMASK,%eax ;		/* set the mask */	\
	movl	%eax,IOAPIC_WINDOW(%ecx) ;	/* new value */		\
	orl	$IRQ_BIT(irq_num), _ipending ;	/* set _ipending bit */	\
	movl	$0, lapic_eoi ;			/* do the EOI */	\
	IMASK_UNLOCK ;				/* exit critical region */ \
	popl	%es ;							\
	popl	%ds ;							\
	popal ;								\
	addl	$4+4,%esp ;						\
	iret ;								\
;									\
	ALIGN_TEXT ;							\
1:	GET_MPLOCK				/* SMP Spin lock */
#else
#define MAYBE_MASK_IRQ(irq_num)						\
	GET_MPLOCK ;				/* SMP Spin lock */	\
	lock ;					/* MP-safe */		\
	btsl	$(irq_num),iactive ;		/* lazy masking */	\
	jnc	1f ;				/* NOT active */	\
	/* XXX atomic access */						\
	orl	$IRQ_BIT(irq_num),_apic_imen ;	/* set the mask bit */	\
	movl	_ioapic,%ecx ;			/* ioapic[0] addr */	\
	movl	$REDTBL_IDX(irq_num),(%ecx) ;	/* write the index */	\
	movl	IOAPIC_WINDOW(%ecx),%eax ;	/* current value */	\
	orl	$IOART_INTMASK,%eax ;		/* set the mask */	\
	movl	%eax,IOAPIC_WINDOW(%ecx) ;	/* new value */		\
	movl	$0, lapic_eoi ;			/* do the EOI */	\
	orl	$IRQ_BIT(irq_num), _ipending ;	/* set _ipending bit */	\
	REL_MPLOCK ;				/* release SMP global lock */ \
	popl	%es ;							\
	popl	%ds ;							\
	popal ;								\
	addl	$4+4,%esp ;						\
	iret ;								\
;									\
	ALIGN_TEXT ;							\
1:
#endif
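
/*
 * The masked path above, roughly in C (a sketch only; test_and_set()
 * and redtbl_lo() are shorthand here for the btsl and the IO APIC
 * index/window sequence written out longhand in the macro):
 *
 *	if (test_and_set(&iactive, irq)) {	// already being serviced
 *		apic_imen |= IRQ_BIT(irq);	// record it masked
 *		redtbl_lo(irq) |= IOART_INTMASK; // mask in the IO APIC
 *		ipending |= IRQ_BIT(irq);	// replay via _doreti later
 *		lapic_eoi = 0;			// EOI, drop this instance
 *		return;
 *	}
 *	// first instance: fall through into the handler
 */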

#if 1
#define MAYBE_UNMASK_IRQ(irq_num)					\
	cli ;	/* must unmask _apic_imen and IO APIC atomically */	\
	lock ;					/* MP-safe */		\
	andl	$~IRQ_BIT(irq_num),iactive ;				\
	IMASK_LOCK ;				/* enter critical region */ \
	testl	$IRQ_BIT(irq_num),_apic_imen ;				\
	je	2f ;							\
	andl	$~IRQ_BIT(irq_num),_apic_imen ;	/* clear mask bit */	\
	movl	_ioapic,%ecx ;			/* ioapic[0] addr */	\
	movl	$REDTBL_IDX(irq_num),(%ecx) ;	/* write the index */	\
	movl	IOAPIC_WINDOW(%ecx),%eax ;	/* current value */	\
	andl	$~IOART_INTMASK,%eax ;		/* clear the mask */	\
	movl	%eax,IOAPIC_WINDOW(%ecx) ;	/* new value */		\
2: ;									\
	IMASK_UNLOCK ;				/* exit critical region */ \
	sti	/* XXX _doreti repeats the cli/sti */
#else
#define MAYBE_UNMASK_IRQ(irq_num)					\
	cli ;	/* must unmask _apic_imen and IO APIC atomically */	\
	lock ;					/* MP-safe */		\
	andl	$~IRQ_BIT(irq_num),iactive ;				\
	/* XXX atomic access */						\
	testl	$IRQ_BIT(irq_num),_apic_imen ;				\
	je	2f ;							\
	andl	$~IRQ_BIT(irq_num),_apic_imen ;	/* clear mask bit */	\
	movl	_ioapic,%ecx ;			/* ioapic[0] addr */	\
	movl	$REDTBL_IDX(irq_num),(%ecx) ;	/* write the index */	\
	movl	IOAPIC_WINDOW(%ecx),%eax ;	/* current value */	\
	andl	$~IOART_INTMASK,%eax ;		/* clear the mask */	\
	movl	%eax,IOAPIC_WINDOW(%ecx) ;	/* new value */		\
2: ;									\
	sti ;	/* XXX _doreti repeats the cli/sti */
#endif
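
/*
 * MAYBE_UNMASK_IRQ pairs with MAYBE_MASK_IRQ: after the handler runs,
 * the iactive bit is cleared, and if another instance arrived in the
 * meantime (leaving its bit set in _apic_imen) the redirection entry
 * is unmasked so the deferred interrupt can be delivered again.  The
 * cli/sti bracket keeps _apic_imen and the IO APIC entry from being
 * observed out of sync by a local interrupt.
 */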


/*
 * Macros for interrupt entry, call to handler, and exit.
 */

#define	FAST_INTR(irq_num, vec_name)					\
	.text ;								\
	SUPERALIGN_TEXT ;						\
IDTVEC(vec_name) ;							\
	pushl	%eax ;		/* save only call-used registers */	\
	pushl	%ecx ;							\
	pushl	%edx ;							\
	pushl	%ds ;							\
	MAYBE_PUSHL_ES ;						\
	movl	$KDSEL,%eax ;						\
	movl	%ax,%ds ;						\
	MAYBE_MOVW_AX_ES ;						\
	FAKE_MCOUNT((4+ACTUALLY_PUSHED)*4(%esp)) ;			\
	GET_MPLOCK ;		/* SMP Spin lock */			\
	pushl	_intr_unit + (irq_num) * 4 ;				\
	call	*_intr_handler + (irq_num) * 4 ; /* do the work ASAP */ \
	movl	$0, lapic_eoi ;						\
	addl	$4,%esp ;						\
	incl	_cnt+V_INTR ;	/* book-keeping can wait */		\
	movl	_intr_countp + (irq_num) * 4,%eax ;			\
	incl	(%eax) ;						\
	movl	_cpl,%eax ;	/* unmasking pending HWIs or SWIs? */	\
	notl	%eax ;							\
	andl	_ipending,%eax ;					\
	jne	2f ; 		/* yes, maybe handle them */		\
1: ;									\
	MEXITCOUNT ;							\
	REL_MPLOCK ;		/* SMP release global lock */		\
	MAYBE_POPL_ES ;							\
	popl	%ds ;							\
	popl	%edx ;							\
	popl	%ecx ;							\
	popl	%eax ;							\
	iret ;								\
;									\
	ALIGN_TEXT ;							\
2: ;									\
	cmpb	$3,_intr_nesting_level ;	/* enough stack? */	\
	jae	1b ;		/* no, return */			\
	movl	_cpl,%eax ;						\
	/* XXX next line is probably unnecessary now. */		\
	movl	$HWI_MASK|SWI_MASK,_cpl ;	/* limit nesting ... */	\
	incb	_intr_nesting_level ;	/* ... really limit it ... */	\
	sti ;			/* to do this as early as possible */	\
	MAYBE_POPL_ES ;		/* discard most of thin frame ... */	\
	popl	%ecx ;		/* ... original %ds ... */		\
	popl	%edx ;							\
	xchgl	%eax,4(%esp) ;	/* orig %eax; save cpl */		\
	pushal ;		/* build fat frame (grrr) ... */	\
	pushl	%ecx ;		/* ... actually %ds ... */		\
	pushl	%es ;							\
	movl	$KDSEL,%eax ;						\
	movl	%ax,%es ;						\
	movl	(2+8+0)*4(%esp),%ecx ;	/* %ecx from thin frame ... */	\
	movl	%ecx,(2+6)*4(%esp) ;	/* ... to fat frame ... */	\
	movl	(2+8+1)*4(%esp),%eax ;	/* ... cpl from thin frame */	\
	pushl	%eax ;							\
	subl	$4,%esp ;	/* junk for unit number */		\
	MEXITCOUNT ;							\
	jmp	_doreti
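
/*
 * In outline: a fast interrupt saves only the call-used registers,
 * runs its handler immediately (interrupts stay disabled), EOIs the
 * local APIC, and normally returns straight from the thin frame.
 * Only when the handler has unmasked pending HWIs/SWIs, and the
 * nesting level allows it, does the code rebuild a full trap frame
 * and leave through _doreti so those can be serviced.
 */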

#define	INTR(irq_num, vec_name)						\
	.text ;								\
	SUPERALIGN_TEXT ;						\
IDTVEC(vec_name) ;							\
	pushl	$0 ;		/* dummy error code */			\
	pushl	$0 ;		/* dummy trap type */			\
	pushal ;							\
	pushl	%ds ;		/* save data and extra segments ... */	\
	pushl	%es ;							\
	movl	$KDSEL,%eax ;	/* ... and reload with kernel's ... */	\
	movl	%ax,%ds ;	/* ... early for obsolete reasons */	\
	movl	%ax,%es ;						\
	MAYBE_MASK_IRQ(irq_num) ;					\
	movl	$0, lapic_eoi ;						\
	movl	_cpl,%eax ;						\
	testl	$IRQ_BIT(irq_num), %eax ;				\
	jne	3f ;							\
	incb	_intr_nesting_level ;					\
__CONCAT(Xresume,irq_num): ;						\
	FAKE_MCOUNT(12*4(%esp)) ;	/* XXX late to avoid dbl cnt */ \
	incl	_cnt+V_INTR ;	/* tally interrupts */			\
	movl	_intr_countp + (irq_num) * 4,%eax ;			\
	incl	(%eax) ;						\
	movl	_cpl,%eax ;						\
	pushl	%eax ;							\
	pushl	_intr_unit + (irq_num) * 4 ;				\
	orl	_intr_mask + (irq_num) * 4,%eax ;			\
	movl	%eax,_cpl ;						\
	sti ;								\
	call	*_intr_handler + (irq_num) * 4 ;			\
	MAYBE_UNMASK_IRQ(irq_num) ;					\
	MEXITCOUNT ;							\
	jmp	_doreti ;						\
;									\
	ALIGN_TEXT ;							\
3: ;									\
	/* XXX skip mcounting here to avoid double count */		\
	orl	$IRQ_BIT(irq_num), _ipending ;				\
	REL_MPLOCK ;		/* SMP release global lock */		\
	popl	%es ;							\
	popl	%ds ;							\
	popal ;								\
	addl	$4+4,%esp ;						\
	iret
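
/*
 * In outline: a normal interrupt builds a full trap frame, applies
 * MAYBE_MASK_IRQ's lazy masking, EOIs, and then either runs the
 * handler at a raised _cpl (re-entering at Xresume<n> when a deferred
 * instance is replayed) or, if the IRQ is blocked by the current _cpl,
 * merely records it in _ipending for _doreti to replay later.
 */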


/*
 * Handle "spurious INTerrupts".
 * Notes:
 *  This is different from the "spurious INTerrupt" generated by an
 *   8259 PIC for missing INTs.  See the APIC documentation for details.
 *  This routine should NOT do an 'EOI' cycle.
 */
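/*
 * (A delivery through the spurious vector sets no in-service bit in
 *  the local APIC, so there is nothing to acknowledge.)
 */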
	.text
	SUPERALIGN_TEXT
	.globl _Xspuriousint
_Xspuriousint:
#ifdef COUNT_SPURIOUS_INTS
	ss
	incl	_sihits
#endif

	/* No EOI cycle used here */

	iret


/*
 * Handle TLB shootdowns.
 */
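/*
 * The handler runs without loading a kernel %ds: the memory references
 * below use an ss override instead, and the whole TLB is flushed
 * simply by writing %cr3 back to itself.
 */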
	.text
	SUPERALIGN_TEXT
	.globl	_Xinvltlb
_Xinvltlb:
	pushl	%eax

#ifdef COUNT_XINVLTLB_HITS
	ss
	movl	_cpuid, %eax
	ss
	incl	_xhits(,%eax,4)
#endif /* COUNT_XINVLTLB_HITS */

	movl	%cr3, %eax		/* invalidate the TLB */
	movl	%eax, %cr3

	ss				/* stack segment, avoid %ds load */
	movl	$0, lapic_eoi		/* End Of Interrupt to APIC */

	popl	%eax
	iret


/*
 * Executed by a CPU when it receives an Xcpustop IPI from another CPU:
 *
 *  - Signals its receipt.
 *  - Waits for permission to restart.
 *  - Signals its restart.
 */
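/*
 * The handshake below, roughly in C (a sketch only; the controlling
 * stop_cpus()/restart_cpus() counterparts live on the C side):
 *
 *	stopped_cpus |= 1 << cpuid;		// signal receipt
 *	while ((started_cpus & (1 << cpuid)) == 0)
 *		;				// wait for permission
 *	started_cpus &= ~(1 << cpuid);		// signal restart
 */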

	.text
	SUPERALIGN_TEXT
	.globl _Xcpustop
_Xcpustop:
	pushl	%eax
	pushl	%ds			/* save current data segment */

	movl	$KDSEL, %eax
	movl	%ax, %ds		/* use KERNEL data segment */

	movl	_cpuid, %eax

#ifdef COUNT_CSHITS
	incl	_cshits(,%eax,4)
#endif /* COUNT_CSHITS */

	ASMPOSTCODE_HI(0x1)

	lock
	btsl	%eax, _stopped_cpus	/* stopped_cpus |= (1<<id) */

	ASMPOSTCODE_HI(0x2)
1:
	btl	%eax, _started_cpus	/* while (!(started_cpus & (1<<id))) */
	jnc	1b

	ASMPOSTCODE_HI(0x3)

	lock
	btrl	%eax, _started_cpus	/* started_cpus &= ~(1<<id) */

	ASMPOSTCODE_HI(0x4)

	movl	$0, lapic_eoi		/* End Of Interrupt to APIC */

	popl	%ds			/* restore previous data segment */
	popl	%eax
	iret


MCOUNT_LABEL(bintr)
	FAST_INTR(0,fastintr0)
	FAST_INTR(1,fastintr1)
	FAST_INTR(2,fastintr2)
	FAST_INTR(3,fastintr3)
	FAST_INTR(4,fastintr4)
	FAST_INTR(5,fastintr5)
	FAST_INTR(6,fastintr6)
	FAST_INTR(7,fastintr7)
	FAST_INTR(8,fastintr8)
	FAST_INTR(9,fastintr9)
	FAST_INTR(10,fastintr10)
	FAST_INTR(11,fastintr11)
	FAST_INTR(12,fastintr12)
	FAST_INTR(13,fastintr13)
	FAST_INTR(14,fastintr14)
	FAST_INTR(15,fastintr15)
	FAST_INTR(16,fastintr16)
	FAST_INTR(17,fastintr17)
	FAST_INTR(18,fastintr18)
	FAST_INTR(19,fastintr19)
	FAST_INTR(20,fastintr20)
	FAST_INTR(21,fastintr21)
	FAST_INTR(22,fastintr22)
	FAST_INTR(23,fastintr23)
	INTR(0,intr0)
	INTR(1,intr1)
	INTR(2,intr2)
	INTR(3,intr3)
	INTR(4,intr4)
	INTR(5,intr5)
	INTR(6,intr6)
	INTR(7,intr7)
	INTR(8,intr8)
	INTR(9,intr9)
	INTR(10,intr10)
	INTR(11,intr11)
	INTR(12,intr12)
	INTR(13,intr13)
	INTR(14,intr14)
	INTR(15,intr15)
	INTR(16,intr16)
	INTR(17,intr17)
	INTR(18,intr18)
	INTR(19,intr19)
	INTR(20,intr20)
	INTR(21,intr21)
	INTR(22,intr22)
	INTR(23,intr23)
MCOUNT_LABEL(eintr)

	.data
ihandlers:			/* addresses of interrupt handlers */
				/* actually resumption addresses for HWI's */
	.long	Xresume0,  Xresume1,  Xresume2,  Xresume3
	.long	Xresume4,  Xresume5,  Xresume6,  Xresume7
	.long	Xresume8,  Xresume9,  Xresume10, Xresume11
	.long	Xresume12, Xresume13, Xresume14, Xresume15
	.long	Xresume16, Xresume17, Xresume18, Xresume19
	.long	Xresume20, Xresume21, Xresume22, Xresume23
	.long	swi_tty,   swi_net
	.long	0, 0, 0, 0
	.long	_softclock, swi_ast

imasks:				/* masks for interrupt handlers */
	.space	NHWI*4		/* padding; HWI masks are elsewhere */

	.long	SWI_TTY_MASK, SWI_NET_MASK
	.long	0, 0, 0, 0
	.long	SWI_CLOCK_MASK, SWI_AST_MASK
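
/*
 * ihandlers and imasks are parallel tables: 24 HWI slots (the
 * Xresume<n> entry points above) followed by 8 SWI slots, indexed by
 * interrupt number when _doreti drains the bits recorded in _ipending.
 */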

	.globl _ivectors
_ivectors:
	.long	_Xintr0,  _Xintr1,  _Xintr2,  _Xintr3
	.long	_Xintr4,  _Xintr5,  _Xintr6,  _Xintr7
	.long	_Xintr8,  _Xintr9,  _Xintr10, _Xintr11
	.long	_Xintr12, _Xintr13, _Xintr14, _Xintr15
	.long	_Xintr16, _Xintr17, _Xintr18, _Xintr19
	.long	_Xintr20, _Xintr21, _Xintr22, _Xintr23

/* active flag for lazy masking */
iactive:
	.long	0

#ifdef COUNT_SPURIOUS_INTS
	.globl	_sihits
_sihits:
	.long	0
#endif /* COUNT_SPURIOUS_INTS */

#ifdef COUNT_XINVLTLB_HITS
	.globl	_xhits
_xhits:
	.space	(NCPU * 4), 0
#endif /* COUNT_XINVLTLB_HITS */

/* variables used by stop_cpus()/restart_cpus()/Xcpustop */
	.globl _stopped_cpus, _started_cpus
_stopped_cpus:
	.long	0
_started_cpus:
	.long	0

#ifdef COUNT_CSHITS
	.globl	_cshits
_cshits:
	.space	(NCPU * 4), 0
#endif /* COUNT_CSHITS */


/*
 * Interrupt counters and names.  The format of these and the label names
 * must agree with what vmstat expects.  The tables are indexed by device
 * ids so that we don't have to move the names around as devices are
 * attached.
 */
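/*
 * (vmstat reads _intrnames/_intrcnt out of kernel memory and pairs the
 *  i'th name with the i'th counter, which is why the two tables must
 *  stay in lock step.)
 */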
#include "vector.h"
	.globl	_intrcnt, _eintrcnt
_intrcnt:
	.space	(NR_DEVICES + ICU_LEN) * 4
_eintrcnt:

	.globl	_intrnames, _eintrnames
_intrnames:
	.ascii	DEVICE_NAMES
	.asciz	"stray irq0"
	.asciz	"stray irq1"
	.asciz	"stray irq2"
	.asciz	"stray irq3"
	.asciz	"stray irq4"
	.asciz	"stray irq5"
	.asciz	"stray irq6"
	.asciz	"stray irq7"
	.asciz	"stray irq8"
	.asciz	"stray irq9"
	.asciz	"stray irq10"
	.asciz	"stray irq11"
	.asciz	"stray irq12"
	.asciz	"stray irq13"
	.asciz	"stray irq14"
	.asciz	"stray irq15"
	.asciz	"stray irq16"
	.asciz	"stray irq17"
	.asciz	"stray irq18"
	.asciz	"stray irq19"
	.asciz	"stray irq20"
	.asciz	"stray irq21"
	.asciz	"stray irq22"
	.asciz	"stray irq23"
_eintrnames:

	.text