/* apic_vector.s, revision 26168 */
/*
 *	from: vector.s, 386BSD 0.1 unknown origin
 *	$Id: apic_vector.s,v 1.2 1997/05/24 17:05:26 smp Exp smp $
 */


/* convert an absolute IRQ# into a bitmask */
#define IRQ_BIT(irq_num)	(1 << (irq_num))

/*
 * make an index into the IO APIC from the IRQ#:
 * redirection table entries start at select-register index 0x10 and
 * each entry is a pair of 32-bit registers, hence the "* 2".
 */
#define REDTBL_IDX(irq_num)	(0x10 + ((irq_num) * 2))

/*
 * 'lazy masking' code submitted by: Bruce Evans <bde@zeta.org.au>
 *
 * MAYBE_MASK_IRQ(irq_num):
 *	If this IRQ is already being serviced (its bit is set in iactive),
 *	mask it in the IO APIC redirection entry, EOI the local APIC,
 *	record it in _ipending and return immediately via a hand-rolled
 *	iret path (frame layout must match INTR below).  Otherwise just
 *	mark the IRQ active and fall through to the handler dispatch.
 */
#define MAYBE_MASK_IRQ(irq_num)						\
	testl	$IRQ_BIT(irq_num),iactive ;	/* lazy masking */	\
	je	1f ;			/* NOT currently active */	\
	orl	$IRQ_BIT(irq_num),_imen ;	/* set the mask bit */	\
	movl	_io_apic_base,%ecx ;		/* io apic addr */	\
	movl	$REDTBL_IDX(irq_num),(%ecx) ;	/* write the index */	\
	movl	IOAPIC_WINDOW(%ecx),%eax ;	/* current value */	\
	orl	$IOART_INTMASK,%eax ;		/* set the mask */	\
	movl	%eax,IOAPIC_WINDOW(%ecx) ;	/* new value */		\
	movl	_apic_base, %eax ;					\
	movl	$0, APIC_EOI(%eax) ;	/* EOI to local APIC */		\
	orl	$IRQ_BIT(irq_num), _ipending ;	/* remember for later */ \
	REL_MPLOCK ;			/* SMP release global lock */	\
	popl	%es ;							\
	popl	%ds ;							\
	popal ;								\
	addl	$4+4,%esp ;	/* drop dummy trap type + error code */	\
	iret ;								\
;									\
	ALIGN_TEXT ;							\
1: ;									\
	orl	$IRQ_BIT(irq_num),iactive


/*
 * MAYBE_UNMASK_IRQ(irq_num):
 *	Clear the IRQ's active bit; if the IRQ was masked by
 *	MAYBE_MASK_IRQ while we were servicing it, unmask it again in
 *	the IO APIC redirection entry.  Runs with interrupts disabled
 *	so _imen and the IO APIC stay consistent.
 */
#define MAYBE_UNMASK_IRQ(irq_num)					\
	cli ;	/* must unmask _imen and icu atomically */		\
	andl	$~IRQ_BIT(irq_num),iactive ;				\
	testl	$IRQ_BIT(irq_num),_imen ;				\
	je	2f ;			/* not masked -> nothing to do */ \
	andl	$~IRQ_BIT(irq_num),_imen ;	/* clear mask bit */	\
	movl	_io_apic_base,%ecx ;		/* io apic addr */	\
	movl	$REDTBL_IDX(irq_num),(%ecx) ;	/* write the index */	\
	movl	IOAPIC_WINDOW(%ecx),%eax ;	/* current value */	\
	andl	$~IOART_INTMASK,%eax ;		/* clear the mask */	\
	movl	%eax,IOAPIC_WINDOW(%ecx) ;	/* new value */		\
2: ;									\
	sti ;	/* XXX _doreti repeats the cli/sti */


/*
 * Macros for interrupt entry, call to handler, and exit.
 */

/*
 * FAST_INTR(irq_num, vec_name):
 *	Entry for a "fast" interrupt.  Saves only the call-used registers,
 *	calls the single handler immediately (interrupts still blocked),
 *	then EOIs the local APIC and does the book-keeping.  If no
 *	unmasked HWIs/SWIs became pending it returns through a
 *	light-weight iret path; otherwise it rebuilds a full ("fat")
 *	trap frame and exits through _doreti.
 */
#define	FAST_INTR(irq_num, vec_name)					\
	.text ;								\
	SUPERALIGN_TEXT ;						\
IDTVEC(vec_name) ;							\
	pushl	%eax ;		/* save only call-used registers */	\
	pushl	%ecx ;							\
	pushl	%edx ;							\
	pushl	%ds ;							\
	MAYBE_PUSHL_ES ;						\
	movl	$KDSEL,%eax ;						\
	movl	%ax,%ds ;						\
	MAYBE_MOVW_AX_ES ;						\
	FAKE_MCOUNT((4+ACTUALLY_PUSHED)*4(%esp)) ;			\
	GET_MPLOCK ;		/* SMP Spin lock */			\
	pushl	_intr_unit + (irq_num) * 4 ;				\
	call	*_intr_handler + (irq_num) * 4 ; /* do the work ASAP */ \
	movl	_apic_base, %eax ;	/* EOI to local APIC */		\
	movl	$0, APIC_EOI(%eax) ;					\
	addl	$4,%esp ;	/* discard the unit argument */		\
	incl	_cnt+V_INTR ;	/* book-keeping can wait */		\
	movl	_intr_countp + (irq_num) * 4,%eax ;			\
	incl	(%eax) ;						\
	movl	_cpl,%eax ;	/* unmasking pending HWIs or SWIs? */	\
	notl	%eax ;							\
	andl	_ipending,%eax ;					\
	jne	2f ; 		/* yes, maybe handle them */		\
1: ;									\
	MEXITCOUNT ;							\
	REL_MPLOCK ;		/* SMP release global lock */		\
	MAYBE_POPL_ES ;							\
	popl	%ds ;							\
	popl	%edx ;							\
	popl	%ecx ;							\
	popl	%eax ;							\
	iret ;								\
;									\
	ALIGN_TEXT ;							\
2: ;									\
	cmpb	$3,_intr_nesting_level ;	/* enough stack? */	\
	jae	1b ;		/* no, return */			\
	movl	_cpl,%eax ;						\
	/* XXX next line is probably unnecessary now. */		\
	movl	$HWI_MASK|SWI_MASK,_cpl ;	/* limit nesting ... */	\
	incb	_intr_nesting_level ;	/* ... really limit it ... */	\
	sti ;			/* to do this as early as possible */	\
	MAYBE_POPL_ES ;		/* discard most of thin frame ... */	\
	popl	%ecx ;		/* ... original %ds ... */		\
	popl	%edx ;							\
	xchgl	%eax,4(%esp) ;	/* orig %eax; save cpl */		\
	pushal ;		/* build fat frame (grrr) ... */	\
	pushl	%ecx ;		/* ... actually %ds ... */		\
	pushl	%es ;							\
	movl	$KDSEL,%eax ;						\
	movl	%ax,%es ;						\
	movl	(2+8+0)*4(%esp),%ecx ;	/* %ecx from thin frame ... */	\
	movl	%ecx,(2+6)*4(%esp) ;	/* ... to fat frame ... */	\
	movl	(2+8+1)*4(%esp),%eax ;	/* ... cpl from thin frame */	\
	pushl	%eax ;							\
	subl	$4,%esp ;	/* junk for unit number */		\
	MEXITCOUNT ;							\
	jmp	_doreti

/*
 * INTR(irq_num, vec_name):
 *	Entry for a normal (masking) interrupt.  Builds a full trap
 *	frame, applies lazy masking via MAYBE_MASK_IRQ, EOIs the local
 *	APIC, then either dispatches the handler under the interrupt's
 *	spl mask (resumable at Xresume<irq_num>) and exits via _doreti,
 *	or — if the IRQ is blocked by the current cpl — just records it
 *	in _ipending and irets (frame teardown must mirror the entry
 *	pushes above).
 */
#define	INTR(irq_num, vec_name)						\
	.text ;								\
	SUPERALIGN_TEXT ;						\
IDTVEC(vec_name) ;							\
	pushl	$0 ;		/* dummy error code */			\
	pushl	$0 ;		/* dummy trap type */			\
	pushal ;							\
	pushl	%ds ;		/* save data and extra segments ... */	\
	pushl	%es ;							\
	movl	$KDSEL,%eax ;	/* ... and reload with kernel's ... */	\
	movl	%ax,%ds ;	/* ... early for obsolete reasons */	\
	movl	%ax,%es ;						\
	GET_MPLOCK ;		/* SMP Spin lock */			\
	MAYBE_MASK_IRQ(irq_num) ;					\
	movl	_apic_base, %eax ;	/* EOI to local APIC */		\
	movl	$0, APIC_EOI(%eax) ;					\
	movl	_cpl,%eax ;						\
	testl	$IRQ_BIT(irq_num), %eax ;	/* blocked by cpl? */	\
	jne	3f ;							\
	incb	_intr_nesting_level ;					\
__CONCAT(Xresume,irq_num): ;						\
	FAKE_MCOUNT(12*4(%esp)) ;	/* XXX late to avoid dbl cnt */ \
	incl	_cnt+V_INTR ;	/* tally interrupts */			\
	movl	_intr_countp + (irq_num) * 4,%eax ;			\
	incl	(%eax) ;						\
	movl	_cpl,%eax ;						\
	pushl	%eax ;		/* cpl restored by _doreti */		\
	pushl	_intr_unit + (irq_num) * 4 ;				\
	orl	_intr_mask + (irq_num) * 4,%eax ;			\
	movl	%eax,_cpl ;	/* raise cpl for the handler */		\
	sti ;								\
	call	*_intr_handler + (irq_num) * 4 ;			\
	MAYBE_UNMASK_IRQ(irq_num) ;					\
	MEXITCOUNT ;							\
	jmp	_doreti ;						\
;									\
	ALIGN_TEXT ;							\
3: ;									\
	/* XXX skip mcounting here to avoid double count */		\
	orl	$IRQ_BIT(irq_num), _ipending ;				\
	REL_MPLOCK ;		/* SMP release global lock */		\
	popl	%es ;							\
	popl	%ds ;							\
	popal ;								\
	addl	$4+4,%esp ;	/* drop dummy trap type + error code */	\
	iret

168	.text
169	SUPERALIGN_TEXT
170	.globl	_Xinvltlb
171_Xinvltlb:
172	pushl	%eax
173	movl	%cr3, %eax		/* invalidate the TLB */
174	movl	%eax, %cr3
175	ss				/* stack segment, avoid %ds load */
176	movl	_apic_base, %eax
177	ss
178	movl	$0, APIC_EOI(%eax)	/* End Of Interrupt to APIC */
179	popl	%eax
180	iret
181
/*
 * Instantiate the fast and normal entry points for all 24 IO APIC
 * interrupt inputs.  MCOUNT_LABEL brackets let the profiler attribute
 * time spent in these generated vectors.
 */
MCOUNT_LABEL(bintr)
	FAST_INTR(0,fastintr0)
	FAST_INTR(1,fastintr1)
	FAST_INTR(2,fastintr2)
	FAST_INTR(3,fastintr3)
	FAST_INTR(4,fastintr4)
	FAST_INTR(5,fastintr5)
	FAST_INTR(6,fastintr6)
	FAST_INTR(7,fastintr7)
	FAST_INTR(8,fastintr8)
	FAST_INTR(9,fastintr9)
	FAST_INTR(10,fastintr10)
	FAST_INTR(11,fastintr11)
	FAST_INTR(12,fastintr12)
	FAST_INTR(13,fastintr13)
	FAST_INTR(14,fastintr14)
	FAST_INTR(15,fastintr15)
	FAST_INTR(16,fastintr16)
	FAST_INTR(17,fastintr17)
	FAST_INTR(18,fastintr18)
	FAST_INTR(19,fastintr19)
	FAST_INTR(20,fastintr20)
	FAST_INTR(21,fastintr21)
	FAST_INTR(22,fastintr22)
	FAST_INTR(23,fastintr23)
	INTR(0,intr0)
	INTR(1,intr1)
	INTR(2,intr2)
	INTR(3,intr3)
	INTR(4,intr4)
	INTR(5,intr5)
	INTR(6,intr6)
	INTR(7,intr7)
	INTR(8,intr8)
	INTR(9,intr9)
	INTR(10,intr10)
	INTR(11,intr11)
	INTR(12,intr12)
	INTR(13,intr13)
	INTR(14,intr14)
	INTR(15,intr15)
	INTR(16,intr16)
	INTR(17,intr17)
	INTR(18,intr18)
	INTR(19,intr19)
	INTR(20,intr20)
	INTR(21,intr21)
	INTR(22,intr22)
	INTR(23,intr23)
MCOUNT_LABEL(eintr)

233	.data
234ihandlers:			/* addresses of interrupt handlers */
235				/* actually resumption addresses for HWI's */
236	.long	Xresume0,  Xresume1,  Xresume2,  Xresume3
237	.long	Xresume4,  Xresume5,  Xresume6,  Xresume7
238	.long	Xresume8,  Xresume9,  Xresume10, Xresume11
239	.long	Xresume12, Xresume13, Xresume14, Xresume15
240	.long	Xresume16, Xresume17, Xresume18, Xresume19
241	.long	Xresume20, Xresume21, Xresume22, Xresume23
242	.long	0,         0,         0,         0
243	.long	swi_tty,   swi_net,   _softclock, swi_ast
244
245imasks:				/* masks for interrupt handlers */
246	.space	NHWI*4		/* padding; HWI masks are elsewhere */
247
248	.long	0, 0, 0, 0
249	.long	SWI_TTY_MASK, SWI_NET_MASK, SWI_CLOCK_MASK, SWI_AST_MASK
250
251	.globl _ivectors
252_ivectors:
253	.long	_Xintr0,  _Xintr1,  _Xintr2,  _Xintr3
254	.long	_Xintr4,  _Xintr5,  _Xintr6,  _Xintr7
255	.long	_Xintr8,  _Xintr9,  _Xintr10, _Xintr11
256	.long	_Xintr12, _Xintr13, _Xintr14, _Xintr15
257	.long	_Xintr16, _Xintr17, _Xintr18, _Xintr19
258	.long	_Xintr20, _Xintr21, _Xintr22, _Xintr23
259
260/* active flag for lazy masking */
261iactive:
262	.long	0
263
264
/*
 * Interrupt counters and names.  The format of these and the label names
 * must agree with what vmstat expects.  The tables are indexed by device
 * ids so that we don't have to move the names around as devices are
 * attached.
 */
#include "vector.h"
	.globl	_intrcnt, _eintrcnt
_intrcnt:
	.space	(NR_DEVICES + ICU_LEN) * 4
_eintrcnt:

	.globl	_intrnames, _eintrnames
_intrnames:
	.ascii	DEVICE_NAMES
	.asciz	"stray irq0"
	.asciz	"stray irq1"
	.asciz	"stray irq2"
	.asciz	"stray irq3"
	.asciz	"stray irq4"
	.asciz	"stray irq5"
	.asciz	"stray irq6"
	.asciz	"stray irq7"
	.asciz	"stray irq8"
	.asciz	"stray irq9"
	.asciz	"stray irq10"
	.asciz	"stray irq11"
	.asciz	"stray irq12"
	.asciz	"stray irq13"
	.asciz	"stray irq14"
	.asciz	"stray irq15"
	.asciz	"stray irq16"
	.asciz	"stray irq17"
	.asciz	"stray irq18"
	.asciz	"stray irq19"
	.asciz	"stray irq20"
	.asciz	"stray irq21"
	.asciz	"stray irq22"
	.asciz	"stray irq23"
_eintrnames:

	.text