/*
 *	from: vector.s, 386BSD 0.1 unknown origin
 *	$Id: apic_vector.s,v 1.12 1997/07/18 19:47:13 smp Exp smp $
 */


#include <machine/smptests.h>		/** various counters */
#include "i386/isa/intr_machdep.h"

/* convert an absolute IRQ# into a bitmask */
#define IRQ_BIT(irq_num)	(1 << (irq_num))

/* make an index into the IO APIC from the IRQ# */
#define REDTBL_IDX(irq_num)	(0x10 + ((irq_num) * 2))
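
/*
 * Each 64-bit redirection table entry occupies two 32-bit IO APIC
 * registers, and the table starts at register 0x10, so e.g.
 * REDTBL_IDX(3) == 0x16, the low dword of redirection entry 3.
 */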

/*
 * 'lazy masking' code suggested by Bruce Evans <bde@zeta.org.au>
 */
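/*
 * If this IRQ is already being serviced (its iactive bit is set),
 * mask it in the IO APIC, EOI the local APIC, mark the IRQ pending
 * and return; otherwise mark it active and fall through to the
 * handler.
 */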
#define MAYBE_MASK_IRQ(irq_num)						\
	testl	$IRQ_BIT(irq_num),iactive ;	/* lazy masking */	\
	je	1f ;			/* NOT currently active */	\
	orl	$IRQ_BIT(irq_num),_imen ;	/* set the mask bit */	\
	movl	_ioapic,%ecx ;			/* ioapic[0] addr */	\
	movl	$REDTBL_IDX(irq_num),(%ecx) ;	/* write the index */	\
	movl	IOAPIC_WINDOW(%ecx),%eax ;	/* current value */	\
	orl	$IOART_INTMASK,%eax ;		/* set the mask */	\
	movl	%eax,IOAPIC_WINDOW(%ecx) ;	/* new value */		\
	movl	$0, lapic_eoi ;						\
	orl	$IRQ_BIT(irq_num), _ipending ;				\
	REL_MPLOCK ;			/* SMP release global lock */	\
	popl	%es ;							\
	popl	%ds ;							\
	popal ;								\
	addl	$4+4,%esp ;	/* dummy error code & trap type */	\
	iret ;								\
;									\
	ALIGN_TEXT ;							\
1: ;									\
	orl	$IRQ_BIT(irq_num),iactive


#define MAYBE_UNMASK_IRQ(irq_num)					\
	cli ;	/* must unmask _imen and IO APIC atomically */		\
	andl	$~IRQ_BIT(irq_num),iactive ;				\
	testl	$IRQ_BIT(irq_num),_imen ;				\
	je	2f ;							\
	andl	$~IRQ_BIT(irq_num),_imen ;	/* clear mask bit */	\
	movl	_ioapic,%ecx ;			/* ioapic[0] addr */	\
	movl	$REDTBL_IDX(irq_num),(%ecx) ;	/* write the index */	\
	movl	IOAPIC_WINDOW(%ecx),%eax ;	/* current value */	\
	andl	$~IOART_INTMASK,%eax ;		/* clear the mask */	\
	movl	%eax,IOAPIC_WINDOW(%ecx) ;	/* new value */		\
2: ;									\
	sti ;	/* XXX _doreti repeats the cli/sti */


/*
 * Macros for interrupt entry, call to handler, and exit.
 */

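/*
 * FAST_INTR: call the handler on a minimal ("thin") trap frame with
 * the MP lock held, then EOI.  If the handler unmasked pending
 * interrupts, the thin frame is rebuilt as a full ("fat") frame and
 * control jumps to _doreti to process them.
 */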
#define	FAST_INTR(irq_num, vec_name)					\
	.text ;								\
	SUPERALIGN_TEXT ;						\
IDTVEC(vec_name) ;							\
	pushl	%eax ;		/* save only call-used registers */	\
	pushl	%ecx ;							\
	pushl	%edx ;							\
	pushl	%ds ;							\
	MAYBE_PUSHL_ES ;						\
	movl	$KDSEL,%eax ;						\
	movl	%ax,%ds ;						\
	MAYBE_MOVW_AX_ES ;						\
	FAKE_MCOUNT((4+ACTUALLY_PUSHED)*4(%esp)) ;			\
	GET_MPLOCK ;		/* SMP Spin lock */			\
	pushl	_intr_unit + (irq_num) * 4 ;				\
	call	*_intr_handler + (irq_num) * 4 ; /* do the work ASAP */ \
	movl	$0, lapic_eoi ;						\
	addl	$4,%esp ;						\
	incl	_cnt+V_INTR ;	/* book-keeping can wait */		\
	movl	_intr_countp + (irq_num) * 4,%eax ;			\
	incl	(%eax) ;						\
	movl	_cpl,%eax ;	/* unmasking pending HWIs or SWIs? */	\
	notl	%eax ;							\
	andl	_ipending,%eax ;					\
	jne	2f ; 		/* yes, maybe handle them */		\
1: ;									\
	MEXITCOUNT ;							\
	REL_MPLOCK ;		/* SMP release global lock */		\
	MAYBE_POPL_ES ;							\
	popl	%ds ;							\
	popl	%edx ;							\
	popl	%ecx ;							\
	popl	%eax ;							\
	iret ;								\
;									\
	ALIGN_TEXT ;							\
2: ;									\
	cmpb	$3,_intr_nesting_level ;	/* enough stack? */	\
	jae	1b ;		/* no, return */			\
	movl	_cpl,%eax ;						\
	/* XXX next line is probably unnecessary now. */		\
	movl	$HWI_MASK|SWI_MASK,_cpl ;	/* limit nesting ... */	\
	incb	_intr_nesting_level ;	/* ... really limit it ... */	\
	sti ;			/* to do this as early as possible */	\
	MAYBE_POPL_ES ;		/* discard most of thin frame ... */	\
	popl	%ecx ;		/* ... original %ds ... */		\
	popl	%edx ;							\
	xchgl	%eax,4(%esp) ;	/* orig %eax; save cpl */		\
	pushal ;		/* build fat frame (grrr) ... */	\
	pushl	%ecx ;		/* ... actually %ds ... */		\
	pushl	%es ;							\
	movl	$KDSEL,%eax ;						\
	movl	%ax,%es ;						\
	movl	(2+8+0)*4(%esp),%ecx ;	/* %ecx from thin frame ... */	\
	movl	%ecx,(2+6)*4(%esp) ;	/* ... to fat frame ... */	\
	movl	(2+8+1)*4(%esp),%eax ;	/* ... cpl from thin frame */	\
	pushl	%eax ;							\
	subl	$4,%esp ;	/* junk for unit number */		\
	MEXITCOUNT ;							\
	jmp	_doreti
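/*
 * INTR: build a full trap frame, lazily mask the IRQ, and EOI.  If
 * the IRQ is masked by the current cpl, the interrupt is marked
 * pending and dismissed; otherwise the handler runs at an elevated
 * cpl and exits through _doreti.
 */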
#define	INTR(irq_num, vec_name)						\
	.text ;								\
	SUPERALIGN_TEXT ;						\
IDTVEC(vec_name) ;							\
	pushl	$0 ;		/* dummy error code */			\
	pushl	$0 ;		/* dummy trap type */			\
	pushal ;							\
	pushl	%ds ;		/* save data and extra segments ... */	\
	pushl	%es ;							\
	movl	$KDSEL,%eax ;	/* ... and reload with kernel's ... */	\
	movl	%ax,%ds ;	/* ... early for obsolete reasons */	\
	movl	%ax,%es ;						\
	GET_MPLOCK ;		/* SMP Spin lock */			\
	MAYBE_MASK_IRQ(irq_num) ;					\
	movl	$0, lapic_eoi ;						\
	movl	_cpl,%eax ;						\
	testl	$IRQ_BIT(irq_num), %eax ;				\
	jne	3f ;							\
	incb	_intr_nesting_level ;					\
__CONCAT(Xresume,irq_num): ;						\
	FAKE_MCOUNT(12*4(%esp)) ;	/* XXX late to avoid dbl cnt */ \
	incl	_cnt+V_INTR ;	/* tally interrupts */			\
	movl	_intr_countp + (irq_num) * 4,%eax ;			\
	incl	(%eax) ;						\
	movl	_cpl,%eax ;						\
	pushl	%eax ;							\
	pushl	_intr_unit + (irq_num) * 4 ;				\
	orl	_intr_mask + (irq_num) * 4,%eax ;			\
	movl	%eax,_cpl ;						\
	sti ;								\
	call	*_intr_handler + (irq_num) * 4 ;			\
	MAYBE_UNMASK_IRQ(irq_num) ;					\
	MEXITCOUNT ;							\
	jmp	_doreti ;						\
;									\
	ALIGN_TEXT ;							\
3: ;									\
	/* XXX skip mcounting here to avoid double count */		\
	orl	$IRQ_BIT(irq_num), _ipending ;				\
	REL_MPLOCK ;		/* SMP release global lock */		\
	popl	%es ;							\
	popl	%ds ;							\
	popal ;								\
	addl	$4+4,%esp ;	/* dummy error code & trap type */	\
	iret


/*
 * Handle "spurious INTerrupts".
 * Notes:
 *  This is different from the "spurious INTerrupt" generated by an
 *   8259 PIC for missing INTs.  See the APIC documentation for details.
 *  This routine should NOT do an 'EOI' cycle.
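 *   (The spurious vector is delivered without setting an in-service
 *   bit in the local APIC, so there is nothing to EOI.)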
 */
	.text
	SUPERALIGN_TEXT
	.globl _Xspuriousint
_Xspuriousint:
#ifdef COUNT_SPURIOUS_INTS
	ss
	incl	_sihits
#endif

	/* No EOI cycle used here */

	iret


/*
 * Handle TLB shootdowns.
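 * A reload of %cr3 flushes every (non-global) TLB entry on this CPU,
 * which is the brute-force invalidation used here.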
 */
	.text
	SUPERALIGN_TEXT
	.globl	_Xinvltlb
_Xinvltlb:
	pushl	%eax

#ifdef COUNT_XINVLTLB_HITS
	ss
	movl	_cpuid, %eax
	ss
	incl	_xhits(,%eax,4)
#endif /* COUNT_XINVLTLB_HITS */

	movl	%cr3, %eax		/* invalidate the TLB */
	movl	%eax, %cr3

	ss				/* stack segment, avoid %ds load */
	movl	$0, lapic_eoi		/* End Of Interrupt to APIC */

	popl	%eax
	iret


/*
 * Executed by a CPU when it receives an Xcpustop IPI from another CPU:
 *
 *  - Signals its receipt.
 *  - Waits for permission to restart.
 *  - Signals its restart.
 */

	.text
	SUPERALIGN_TEXT
	.globl _Xcpustop
_Xcpustop:
	pushl	%eax
	pushl	%ds			/* save current data segment */

	movl	$KDSEL, %eax
	movl	%ax, %ds		/* use KERNEL data segment */

	movl	_cpuid, %eax

#ifdef COUNT_CSHITS
	incl	_cshits(,%eax,4)
#endif /* COUNT_CSHITS */

	ASMPOSTCODE_HI(0x1)

	lock
	btsl	%eax, _stopped_cpus	/* stopped_cpus |= (1<<id) */

	ASMPOSTCODE_HI(0x2)
1:
	btl	%eax, _started_cpus	/* while (!(started_cpus & (1<<id))) */
	jnc	1b

	ASMPOSTCODE_HI(0x3)

	lock
	btrl	%eax, _started_cpus	/* started_cpus &= ~(1<<id) */

	ASMPOSTCODE_HI(0x4)

	movl	$0, lapic_eoi		/* End Of Interrupt to APIC */

	popl	%ds			/* restore previous data segment */
	popl	%eax
	iret


MCOUNT_LABEL(bintr)
	FAST_INTR(0,fastintr0)
	FAST_INTR(1,fastintr1)
	FAST_INTR(2,fastintr2)
	FAST_INTR(3,fastintr3)
	FAST_INTR(4,fastintr4)
	FAST_INTR(5,fastintr5)
	FAST_INTR(6,fastintr6)
	FAST_INTR(7,fastintr7)
	FAST_INTR(8,fastintr8)
	FAST_INTR(9,fastintr9)
	FAST_INTR(10,fastintr10)
	FAST_INTR(11,fastintr11)
	FAST_INTR(12,fastintr12)
	FAST_INTR(13,fastintr13)
	FAST_INTR(14,fastintr14)
	FAST_INTR(15,fastintr15)
	FAST_INTR(16,fastintr16)
	FAST_INTR(17,fastintr17)
	FAST_INTR(18,fastintr18)
	FAST_INTR(19,fastintr19)
	FAST_INTR(20,fastintr20)
	FAST_INTR(21,fastintr21)
	FAST_INTR(22,fastintr22)
	FAST_INTR(23,fastintr23)
	INTR(0,intr0)
	INTR(1,intr1)
	INTR(2,intr2)
	INTR(3,intr3)
	INTR(4,intr4)
	INTR(5,intr5)
	INTR(6,intr6)
	INTR(7,intr7)
	INTR(8,intr8)
	INTR(9,intr9)
	INTR(10,intr10)
	INTR(11,intr11)
	INTR(12,intr12)
	INTR(13,intr13)
	INTR(14,intr14)
	INTR(15,intr15)
	INTR(16,intr16)
	INTR(17,intr17)
	INTR(18,intr18)
	INTR(19,intr19)
	INTR(20,intr20)
	INTR(21,intr21)
	INTR(22,intr22)
	INTR(23,intr23)
MCOUNT_LABEL(eintr)

	.data
ihandlers:			/* addresses of interrupt handlers */
				/* actually resumption addresses for HWI's */
	.long	Xresume0,  Xresume1,  Xresume2,  Xresume3
	.long	Xresume4,  Xresume5,  Xresume6,  Xresume7
	.long	Xresume8,  Xresume9,  Xresume10, Xresume11
	.long	Xresume12, Xresume13, Xresume14, Xresume15
	.long	Xresume16, Xresume17, Xresume18, Xresume19
	.long	Xresume20, Xresume21, Xresume22, Xresume23
	.long	swi_tty,   swi_net
	.long	0, 0, 0, 0
	.long	_softclock, swi_ast

imasks:				/* masks for interrupt handlers */
	.space	NHWI*4		/* padding; HWI masks are elsewhere */

	.long	SWI_TTY_MASK, SWI_NET_MASK
	.long	0, 0, 0, 0
	.long	SWI_CLOCK_MASK, SWI_AST_MASK

	.globl _ivectors
_ivectors:
	.long	_Xintr0,  _Xintr1,  _Xintr2,  _Xintr3
	.long	_Xintr4,  _Xintr5,  _Xintr6,  _Xintr7
	.long	_Xintr8,  _Xintr9,  _Xintr10, _Xintr11
	.long	_Xintr12, _Xintr13, _Xintr14, _Xintr15
	.long	_Xintr16, _Xintr17, _Xintr18, _Xintr19
	.long	_Xintr20, _Xintr21, _Xintr22, _Xintr23

/* active flag for lazy masking */
iactive:
	.long	0

#ifdef COUNT_SPURIOUS_INTS
	.globl	_sihits
_sihits:
	.long	0
#endif /* COUNT_SPURIOUS_INTS */

#ifdef COUNT_XINVLTLB_HITS
	.globl	_xhits
_xhits:
	.space	(NCPU * 4), 0
#endif /* COUNT_XINVLTLB_HITS */

/* variables used by stop_cpus()/restart_cpus()/Xcpustop */
	.globl _stopped_cpus, _started_cpus
_stopped_cpus:
	.long	0
_started_cpus:
	.long	0

#ifdef COUNT_CSHITS
	.globl	_cshits
_cshits:
	.space	(NCPU * 4), 0
#endif /* COUNT_CSHITS */


/*
 * Interrupt counters and names.  The format of these and the label names
 * must agree with what vmstat expects.  The tables are indexed by device
 * ids so that we don't have to move the names around as devices are
 * attached.
 */
#include "vector.h"
	.globl	_intrcnt, _eintrcnt
_intrcnt:
	.space	(NR_DEVICES + ICU_LEN) * 4
_eintrcnt:

	.globl	_intrnames, _eintrnames
_intrnames:
	.ascii	DEVICE_NAMES
	.asciz	"stray irq0"
	.asciz	"stray irq1"
	.asciz	"stray irq2"
	.asciz	"stray irq3"
	.asciz	"stray irq4"
	.asciz	"stray irq5"
	.asciz	"stray irq6"
	.asciz	"stray irq7"
	.asciz	"stray irq8"
	.asciz	"stray irq9"
	.asciz	"stray irq10"
	.asciz	"stray irq11"
	.asciz	"stray irq12"
	.asciz	"stray irq13"
	.asciz	"stray irq14"
	.asciz	"stray irq15"
	.asciz	"stray irq16"
	.asciz	"stray irq17"
	.asciz	"stray irq18"
	.asciz	"stray irq19"
	.asciz	"stray irq20"
	.asciz	"stray irq21"
	.asciz	"stray irq22"
	.asciz	"stray irq23"
_eintrnames:

	.text