apic_vector.s revision 27696
/*
 *	from: vector.s, 386BSD 0.1 unknown origin
 *	$Id: apic_vector.s,v 1.15 1997/07/25 22:20:11 smp Exp smp $
 */


#include <machine/smptests.h>		/** various counters */
#include "i386/isa/intr_machdep.h"

/* convert an absolute IRQ# into a bitmask */
#define IRQ_BIT(irq_num)	(1 << (irq_num))

/* make an index into the IO APIC from the IRQ# */
#define REDTBL_IDX(irq_num)	(0x10 + ((irq_num) * 2))
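
/*
 * For example, IRQ 10 gives IRQ_BIT(10) == 0x400 and REDTBL_IDX(10) ==
 * 0x24, the select-register index of the low 32 bits of redirection
 * table entry 10.
 */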

/*
 * 'lazy masking' code suggested by Bruce Evans <bde@zeta.org.au>
 */
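/*
 * The scheme, in rough C-like pseudocode (a sketch of the macros below,
 * not a literal translation):
 *
 *	entry:	if (atomic test-and-set of iactive bit was already set) {
 *			apic_imen |= IRQ_BIT(irq);	-- mask in IO APIC
 *			ipending |= IRQ_BIT(irq);	-- remember for later
 *			EOI; return from interrupt;
 *		}
 *	exit:	iactive &= ~IRQ_BIT(irq);
 *		if (apic_imen & IRQ_BIT(irq))
 *			clear the mask bits again;
 *
 * The IO APIC redirection entry is thus only rewritten when an IRQ
 * arrives while a previous instance is still being serviced.
 */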

#ifdef PEND_INTS

#define MAYBE_MASK_IRQ(irq_num)						\
	lock ;					/* MP-safe */		\
	btsl	$(irq_num),iactive ;		/* lazy masking */	\
	jnc	8f ;				/* NOT active */	\
7: ;									\
	IMASK_LOCK ;				/* enter critical reg */\
	orl	$IRQ_BIT(irq_num),_apic_imen ;	/* set the mask bit */	\
	movl	_ioapic,%ecx ;			/* ioapic[0] addr */	\
	movl	$REDTBL_IDX(irq_num),(%ecx) ;	/* write the index */	\
	movl	IOAPIC_WINDOW(%ecx),%eax ;	/* current value */	\
	orl	$IOART_INTMASK,%eax ;		/* set the mask */	\
	movl	%eax,IOAPIC_WINDOW(%ecx) ;	/* new value */		\
	orl	$IRQ_BIT(irq_num), _ipending ;	/* set _ipending bit */	\
	IMASK_UNLOCK ;				/* exit critical reg */	\
	movl	$0, lapic_eoi ;			/* do the EOI */	\
	popl	%es ;							\
	popl	%ds ;							\
	popal ;								\
	addl	$4+4,%esp ;						\
	iret ;								\
;									\
	ALIGN_TEXT ;							\
8: ;									\
	call	_try_mplock ;						\
	testl	%eax, %eax ;						\
	jz	7b				/* can't enter kernel */

#else /* PEND_INTS */

#define MAYBE_MASK_IRQ(irq_num)						\
	lock ;					/* MP-safe */		\
	btsl	$(irq_num),iactive ;		/* lazy masking */	\
	jnc	1f ;				/* NOT active */	\
	IMASK_LOCK ;				/* enter critical reg */\
	orl	$IRQ_BIT(irq_num),_apic_imen ;	/* set the mask bit */	\
	movl	_ioapic,%ecx ;			/* ioapic[0] addr */	\
	movl	$REDTBL_IDX(irq_num),(%ecx) ;	/* write the index */	\
	movl	IOAPIC_WINDOW(%ecx),%eax ;	/* current value */	\
	orl	$IOART_INTMASK,%eax ;		/* set the mask */	\
	movl	%eax,IOAPIC_WINDOW(%ecx) ;	/* new value */		\
	orl	$IRQ_BIT(irq_num), _ipending ;	/* set _ipending bit */	\
	movl	$0, lapic_eoi ;			/* do the EOI */	\
	IMASK_UNLOCK ;				/* exit critical reg */	\
	popl	%es ;							\
	popl	%ds ;							\
	popal ;								\
	addl	$4+4,%esp ;						\
	iret ;								\
;									\
	ALIGN_TEXT ;							\
1: ;									\
	GET_MPLOCK				/* SMP Spin lock */

#endif /* PEND_INTS */
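
/*
 * The two variants differ in how the giant lock is handled: with
 * PEND_INTS, an interrupt that cannot take the lock (_try_mplock
 * returns zero) is deferred through the same mask-and-mark-pending
 * path, instead of spinning in GET_MPLOCK while the IRQ is active.
 */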


#define MAYBE_UNMASK_IRQ(irq_num)					\
	cli ;	/* must unmask _apic_imen and IO APIC atomically */	\
	lock ;					/* MP-safe */		\
	andl	$~IRQ_BIT(irq_num),iactive ;				\
	IMASK_LOCK ;				/* enter critical reg */\
	testl	$IRQ_BIT(irq_num),_apic_imen ;				\
	je	9f ;							\
	andl	$~IRQ_BIT(irq_num),_apic_imen ;	/* clear mask bit */	\
	movl	_ioapic,%ecx ;			/* ioapic[0] addr */	\
	movl	$REDTBL_IDX(irq_num),(%ecx) ;	/* write the index */	\
	movl	IOAPIC_WINDOW(%ecx),%eax ;	/* current value */	\
	andl	$~IOART_INTMASK,%eax ;		/* clear the mask */	\
	movl	%eax,IOAPIC_WINDOW(%ecx) ;	/* new value */		\
9: ;									\
	IMASK_UNLOCK ;				/* exit critical reg */	\
	sti	/* XXX _doreti repeats the cli/sti */
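
/*
 * Note the ordering above: the iactive bit is cleared first, and the
 * IO APIC entry is unmasked only if this IRQ was actually masked (its
 * bit is set in _apic_imen) while the handler ran.  Interrupts stay
 * hard-disabled around the test so _apic_imen and the IO APIC register
 * cannot get out of sync.
 */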


/*
 * Macros for interrupt entry, call to handler, and exit.
 */
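
/*
 * In outline: FAST_INTR saves only the call-used registers and calls
 * its handler immediately, building a full trap frame afterwards only
 * if unmasked SWIs or HWIs became pending.  INTR always builds the
 * full frame, raises _cpl by the handler's interrupt mask, and exits
 * through _doreti.
 */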

#define	FAST_INTR(irq_num, vec_name)					\
	.text ;								\
	SUPERALIGN_TEXT ;						\
IDTVEC(vec_name) ;							\
	pushl	%eax ;		/* save only call-used registers */	\
	pushl	%ecx ;							\
	pushl	%edx ;							\
	pushl	%ds ;							\
	MAYBE_PUSHL_ES ;						\
	movl	$KDSEL,%eax ;						\
	movl	%ax,%ds ;						\
	MAYBE_MOVW_AX_ES ;						\
	FAKE_MCOUNT((4+ACTUALLY_PUSHED)*4(%esp)) ;			\
	GET_MPLOCK ;							\
	pushl	_intr_unit + (irq_num) * 4 ;				\
	call	*_intr_handler + (irq_num) * 4 ; /* do the work ASAP */ \
	movl	$0, lapic_eoi ;						\
	addl	$4,%esp ;						\
	incl	_cnt+V_INTR ;	/* book-keeping can wait */		\
	movl	_intr_countp + (irq_num) * 4,%eax ;			\
	incl	(%eax) ;						\
	movl	_cpl,%eax ;	/* unmasking pending HWIs or SWIs? */	\
	notl	%eax ;							\
	andl	_ipending,%eax ;					\
	jne	2f ; 		/* yes, maybe handle them */		\
1: ;									\
	MEXITCOUNT ;							\
	REL_MPLOCK ;		/* SMP release global lock */		\
	MAYBE_POPL_ES ;							\
	popl	%ds ;							\
	popl	%edx ;							\
	popl	%ecx ;							\
	popl	%eax ;							\
	iret ;								\
;									\
	ALIGN_TEXT ;							\
2: ;									\
	cmpb	$3,_intr_nesting_level ;	/* enough stack? */	\
	jae	1b ;		/* no, return */			\
	movl	_cpl,%eax ;						\
	/* XXX next line is probably unnecessary now. */		\
	movl	$HWI_MASK|SWI_MASK,_cpl ;	/* limit nesting ... */	\
	incb	_intr_nesting_level ;	/* ... really limit it ... */	\
	sti ;			/* to do this as early as possible */	\
	MAYBE_POPL_ES ;		/* discard most of thin frame ... */	\
	popl	%ecx ;		/* ... original %ds ... */		\
	popl	%edx ;							\
	xchgl	%eax,4(%esp) ;	/* orig %eax; save cpl */		\
	pushal ;		/* build fat frame (grrr) ... */	\
	pushl	%ecx ;		/* ... actually %ds ... */		\
	pushl	%es ;							\
	movl	$KDSEL,%eax ;						\
	movl	%ax,%es ;						\
	movl	(2+8+0)*4(%esp),%ecx ;	/* %ecx from thin frame ... */	\
	movl	%ecx,(2+6)*4(%esp) ;	/* ... to fat frame ... */	\
	movl	(2+8+1)*4(%esp),%eax ;	/* ... cpl from thin frame */	\
	pushl	%eax ;							\
	subl	$4,%esp ;	/* junk for unit number */		\
	MEXITCOUNT ;							\
	jmp	_doreti

#define	INTR(irq_num, vec_name)						\
	.text ;								\
	SUPERALIGN_TEXT ;						\
IDTVEC(vec_name) ;							\
	pushl	$0 ;		/* dummy error code */			\
	pushl	$0 ;		/* dummy trap type */			\
	pushal ;							\
	pushl	%ds ;		/* save data and extra segments ... */	\
	pushl	%es ;							\
	movl	$KDSEL,%eax ;	/* ... and reload with kernel's ... */	\
	movl	%ax,%ds ;	/* ... early for obsolete reasons */	\
	movl	%ax,%es ;						\
	MAYBE_MASK_IRQ(irq_num) ;					\
	movl	$0, lapic_eoi ;						\
	movl	_cpl,%eax ;						\
	testl	$IRQ_BIT(irq_num), %eax ;				\
	jne	3f ;							\
	incb	_intr_nesting_level ;					\
__CONCAT(Xresume,irq_num): ;						\
	FAKE_MCOUNT(12*4(%esp)) ;	/* XXX late to avoid dbl cnt */ \
	incl	_cnt+V_INTR ;	/* tally interrupts */			\
	movl	_intr_countp + (irq_num) * 4,%eax ;			\
	incl	(%eax) ;						\
	movl	_cpl,%eax ;						\
	pushl	%eax ;							\
	pushl	_intr_unit + (irq_num) * 4 ;				\
	orl	_intr_mask + (irq_num) * 4,%eax ;			\
	movl	%eax,_cpl ;						\
	sti ;								\
	call	*_intr_handler + (irq_num) * 4 ;			\
	MAYBE_UNMASK_IRQ(irq_num) ;					\
	MEXITCOUNT ;							\
	jmp	_doreti ;						\
;									\
	ALIGN_TEXT ;							\
3: ;									\
	/* XXX skip mcounting here to avoid double count */		\
	orl	$IRQ_BIT(irq_num), _ipending ;				\
	REL_MPLOCK ;		/* SMP release global lock */		\
	popl	%es ;							\
	popl	%ds ;							\
	popal ;								\
	addl	$4+4,%esp ;						\
	iret


/*
 * Handle "spurious INTerrupts".
 * Notes:
 *  This is different from the "spurious INTerrupt" generated by an
 *   8259 PIC for missing INTs.  See the APIC documentation for details.
 *  This routine should NOT do an 'EOI' cycle.
 */
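/*
 * (The local APIC does not set its in-service bit for the spurious
 *  vector, which is why no EOI may be issued here.)
 */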
	.text
	SUPERALIGN_TEXT
	.globl _Xspuriousint
_Xspuriousint:
#ifdef COUNT_SPURIOUS_INTS
	ss
	incl	_sihits
#endif

	/* No EOI cycle used here */

	iret


/*
 * Handle TLB shootdowns.
 */
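/*
 * Reloading %cr3 with its current value is the i386 idiom for flushing
 * the entire TLB, so only %eax needs to be preserved here.
 */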
	.text
	SUPERALIGN_TEXT
	.globl	_Xinvltlb
_Xinvltlb:
	pushl	%eax

#ifdef COUNT_XINVLTLB_HITS
	ss
	movl	_cpuid, %eax
	ss
	incl	_xhits(,%eax,4)
#endif /* COUNT_XINVLTLB_HITS */

	movl	%cr3, %eax		/* invalidate the TLB */
	movl	%eax, %cr3

	ss				/* stack segment, avoid %ds load */
	movl	$0, lapic_eoi		/* End Of Interrupt to APIC */

	popl	%eax
	iret


/*
 * Executed by a CPU when it receives an Xcpustop IPI from another CPU.
 *
 *  - Signals its receipt.
 *  - Waits for permission to restart.
 *  - Signals its restart.
 */
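
/*
 * The handshake with stop_cpus()/restart_cpus(), roughly:
 *
 *	stopped_cpus |= (1 << cpuid);		-- "I have stopped"
 *	while (!(started_cpus & (1 << cpuid)))
 *		;				-- spin until released
 *	started_cpus &= ~(1 << cpuid);		-- "I am restarting"
 *	EOI;
 */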

	.text
	SUPERALIGN_TEXT
	.globl _Xcpustop
_Xcpustop:
	pushl	%eax
	pushl	%ds			/* save current data segment */

	movl	$KDSEL, %eax
	movl	%ax, %ds		/* use KERNEL data segment */

	movl	_cpuid, %eax

#ifdef COUNT_CSHITS
	incl	_cshits(,%eax,4)
#endif /* COUNT_CSHITS */

	ASMPOSTCODE_HI(0x1)

	lock
	btsl	%eax, _stopped_cpus	/* stopped_cpus |= (1<<id) */

	ASMPOSTCODE_HI(0x2)
1:
	btl	%eax, _started_cpus	/* while (!(started_cpus & (1<<id))) */
	jnc	1b

	ASMPOSTCODE_HI(0x3)

	lock
	btrl	%eax, _started_cpus	/* started_cpus &= ~(1<<id) */

	ASMPOSTCODE_HI(0x4)

	movl	$0, lapic_eoi		/* End Of Interrupt to APIC */

	popl	%ds			/* restore previous data segment */
	popl	%eax
	iret


MCOUNT_LABEL(bintr)
	FAST_INTR(0,fastintr0)
	FAST_INTR(1,fastintr1)
	FAST_INTR(2,fastintr2)
	FAST_INTR(3,fastintr3)
	FAST_INTR(4,fastintr4)
	FAST_INTR(5,fastintr5)
	FAST_INTR(6,fastintr6)
	FAST_INTR(7,fastintr7)
	FAST_INTR(8,fastintr8)
	FAST_INTR(9,fastintr9)
	FAST_INTR(10,fastintr10)
	FAST_INTR(11,fastintr11)
	FAST_INTR(12,fastintr12)
	FAST_INTR(13,fastintr13)
	FAST_INTR(14,fastintr14)
	FAST_INTR(15,fastintr15)
	FAST_INTR(16,fastintr16)
	FAST_INTR(17,fastintr17)
	FAST_INTR(18,fastintr18)
	FAST_INTR(19,fastintr19)
	FAST_INTR(20,fastintr20)
	FAST_INTR(21,fastintr21)
	FAST_INTR(22,fastintr22)
	FAST_INTR(23,fastintr23)
	INTR(0,intr0)
	INTR(1,intr1)
	INTR(2,intr2)
	INTR(3,intr3)
	INTR(4,intr4)
	INTR(5,intr5)
	INTR(6,intr6)
	INTR(7,intr7)
	INTR(8,intr8)
	INTR(9,intr9)
	INTR(10,intr10)
	INTR(11,intr11)
	INTR(12,intr12)
	INTR(13,intr13)
	INTR(14,intr14)
	INTR(15,intr15)
	INTR(16,intr16)
	INTR(17,intr17)
	INTR(18,intr18)
	INTR(19,intr19)
	INTR(20,intr20)
	INTR(21,intr21)
	INTR(22,intr22)
	INTR(23,intr23)
MCOUNT_LABEL(eintr)

	.data
ihandlers:			/* addresses of interrupt handlers */
				/* actually resumption addresses for HWI's */
	.long	Xresume0,  Xresume1,  Xresume2,  Xresume3
	.long	Xresume4,  Xresume5,  Xresume6,  Xresume7
	.long	Xresume8,  Xresume9,  Xresume10, Xresume11
	.long	Xresume12, Xresume13, Xresume14, Xresume15
	.long	Xresume16, Xresume17, Xresume18, Xresume19
	.long	Xresume20, Xresume21, Xresume22, Xresume23
	.long	swi_tty,   swi_net
	.long	0, 0, 0, 0
	.long	_softclock, swi_ast

imasks:				/* masks for interrupt handlers */
	.space	NHWI*4		/* padding; HWI masks are elsewhere */

	.long	SWI_TTY_MASK, SWI_NET_MASK
	.long	0, 0, 0, 0
	.long	SWI_CLOCK_MASK, SWI_AST_MASK

	.globl _ivectors
_ivectors:
	.long	_Xintr0,  _Xintr1,  _Xintr2,  _Xintr3
	.long	_Xintr4,  _Xintr5,  _Xintr6,  _Xintr7
	.long	_Xintr8,  _Xintr9,  _Xintr10, _Xintr11
	.long	_Xintr12, _Xintr13, _Xintr14, _Xintr15
	.long	_Xintr16, _Xintr17, _Xintr18, _Xintr19
	.long	_Xintr20, _Xintr21, _Xintr22, _Xintr23

/* active flag for lazy masking */
iactive:
	.long	0

#ifdef COUNT_SPURIOUS_INTS
	.globl	_sihits
_sihits:
	.long	0
#endif /* COUNT_SPURIOUS_INTS */

#ifdef COUNT_XINVLTLB_HITS
	.globl	_xhits
_xhits:
	.space	(NCPU * 4), 0
#endif /* COUNT_XINVLTLB_HITS */

/* variables used by stop_cpus()/restart_cpus()/Xcpustop */
	.globl _stopped_cpus, _started_cpus
_stopped_cpus:
	.long	0
_started_cpus:
	.long	0

#ifdef COUNT_CSHITS
	.globl	_cshits
_cshits:
	.space	(NCPU * 4), 0
#endif /* COUNT_CSHITS */


/*
 * Interrupt counters and names.  The format of these and the label names
 * must agree with what vmstat expects.  The tables are indexed by device
 * ids so that we don't have to move the names around as devices are
 * attached.
 */
#include "vector.h"
	.globl	_intrcnt, _eintrcnt
_intrcnt:
	.space	(NR_DEVICES + ICU_LEN) * 4
_eintrcnt:

	.globl	_intrnames, _eintrnames
_intrnames:
	.ascii	DEVICE_NAMES
	.asciz	"stray irq0"
	.asciz	"stray irq1"
	.asciz	"stray irq2"
	.asciz	"stray irq3"
	.asciz	"stray irq4"
	.asciz	"stray irq5"
	.asciz	"stray irq6"
	.asciz	"stray irq7"
	.asciz	"stray irq8"
	.asciz	"stray irq9"
	.asciz	"stray irq10"
	.asciz	"stray irq11"
	.asciz	"stray irq12"
	.asciz	"stray irq13"
	.asciz	"stray irq14"
	.asciz	"stray irq15"
	.asciz	"stray irq16"
	.asciz	"stray irq17"
	.asciz	"stray irq18"
	.asciz	"stray irq19"
	.asciz	"stray irq20"
	.asciz	"stray irq21"
	.asciz	"stray irq22"
	.asciz	"stray irq23"
_eintrnames:

	.text
