apic_vector.s revision 27352
/*
 *	from: vector.s, 386BSD 0.1 unknown origin
 *	$Id: apic_vector.s,v 1.9 1997/07/13 00:18:33 smp Exp smp $
 */


#include <machine/smptests.h>	/** TEST_CPUSTOP */

/* convert an absolute IRQ# into a bitmask */
#define IRQ_BIT(irq_num)	(1 << (irq_num))

/* make an index into the IO APIC from the IRQ# */
#define REDTBL_IDX(irq_num)	(0x10 + ((irq_num) * 2))
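
/*
 * Each IO APIC redirection table entry is 64 bits wide and occupies
 * two consecutive 32-bit registers starting at index 0x10, so
 * REDTBL_IDX yields the index of the low half of an entry; e.g.
 * REDTBL_IDX(5) is 0x10 + 5 * 2 = 0x1a.  The mask bit
 * (IOART_INTMASK) lives in that low half, which is all the macros
 * below need to touch.
 */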

/*
 * 'lazy masking' code suggested by Bruce Evans <bde@zeta.org.au>
 */
#define MAYBE_MASK_IRQ(irq_num)						\
	testl	$IRQ_BIT(irq_num),iactive ;	/* lazy masking */	\
	je	1f ;			/* NOT currently active */	\
	orl	$IRQ_BIT(irq_num),_imen ;	/* set the mask bit */	\
	movl	_ioapic,%ecx ;			/* ioapic[0]addr */	\
	movl	$REDTBL_IDX(irq_num),(%ecx) ;	/* write the index */	\
	movl	IOAPIC_WINDOW(%ecx),%eax ;	/* current value */	\
	orl	$IOART_INTMASK,%eax ;		/* set the mask */	\
	movl	%eax,IOAPIC_WINDOW(%ecx) ;	/* new value */		\
	movl	$lapic_eoi, %eax ;					\
	movl	$0, (%eax) ;						\
	orl	$IRQ_BIT(irq_num), _ipending ;				\
	REL_MPLOCK ;			/* SMP release global lock */	\
	popl	%es ;							\
	popl	%ds ;							\
	popal ;								\
	addl	$4+4,%esp ;						\
	iret ;								\
;									\
	ALIGN_TEXT ;							\
1: ;									\
	orl	$IRQ_BIT(irq_num),iactive
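
/*
 * For illustration only, MAYBE_MASK_IRQ is roughly this C-like
 * pseudocode (helper names are illustrative, not real functions).
 * If a handler for the IRQ is already active, the interrupt is
 * masked in the IO APIC, marked pending, EOIed, and the trap frame
 * popped; otherwise the IRQ is marked active and the macro falls
 * through to the vector body:
 *
 *	if (iactive & IRQ_BIT(irq)) {
 *		imen |= IRQ_BIT(irq);			mask in _imen
 *		ioapic[0] = REDTBL_IDX(irq);		select entry
 *		ioapic[IOAPIC_WINDOW] |= IOART_INTMASK;	mask in IO APIC
 *		lapic_eoi = 0;				ack local APIC
 *		ipending |= IRQ_BIT(irq);		redeliver later
 *		REL_MPLOCK; pop frame; iret;
 *	}
 *	iactive |= IRQ_BIT(irq);
 */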


#define MAYBE_UNMASK_IRQ(irq_num)					\
	cli ;	/* must unmask _imen and icu atomically */		\
	andl	$~IRQ_BIT(irq_num),iactive ;				\
	testl	$IRQ_BIT(irq_num),_imen ;				\
	je	2f ;							\
	andl	$~IRQ_BIT(irq_num),_imen ;	/* clear mask bit */	\
	movl	_ioapic,%ecx ;			/* ioapic[0]addr */	\
	movl	$REDTBL_IDX(irq_num),(%ecx) ;	/* write the index */	\
	movl	IOAPIC_WINDOW(%ecx),%eax ;	/* current value */	\
	andl	$~IOART_INTMASK,%eax ;		/* clear the mask */	\
	movl	%eax,IOAPIC_WINDOW(%ecx) ;	/* new value */		\
2: ;									\
	sti ;	/* XXX _doreti repeats the cli/sti */
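
/*
 * For illustration only, the unmask path above is roughly this
 * pseudocode (helper names are illustrative):
 *
 *	disable_interrupts();			cli
 *	iactive &= ~IRQ_BIT(irq);
 *	if (imen & IRQ_BIT(irq)) {		was it lazily masked?
 *		imen &= ~IRQ_BIT(irq);
 *		ioapic[0] = REDTBL_IDX(irq);
 *		ioapic[IOAPIC_WINDOW] &= ~IOART_INTMASK;
 *	}
 *	enable_interrupts();			sti
 *
 * The cli/sti pair keeps the _imen update and the IO APIC write
 * atomic with respect to another interrupt arriving on this CPU.
 */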


/*
 * Macros for interrupt entry, call to handler, and exit.
 */

#define	FAST_INTR(irq_num, vec_name)					\
	.text ;								\
	SUPERALIGN_TEXT ;						\
IDTVEC(vec_name) ;							\
	pushl	%eax ;		/* save only call-used registers */	\
	pushl	%ecx ;							\
	pushl	%edx ;							\
	pushl	%ds ;							\
	MAYBE_PUSHL_ES ;						\
	movl	$KDSEL,%eax ;						\
	movl	%ax,%ds ;						\
	MAYBE_MOVW_AX_ES ;						\
	FAKE_MCOUNT((4+ACTUALLY_PUSHED)*4(%esp)) ;			\
	GET_MPLOCK ;		/* SMP Spin lock */			\
	pushl	_intr_unit + (irq_num) * 4 ;				\
	call	*_intr_handler + (irq_num) * 4 ; /* do the work ASAP */ \
	movl	$lapic_eoi, %eax ;					\
	movl	$0, (%eax) ;						\
	addl	$4,%esp ;						\
	incl	_cnt+V_INTR ;	/* book-keeping can wait */		\
	movl	_intr_countp + (irq_num) * 4,%eax ;			\
	incl	(%eax) ;						\
	movl	_cpl,%eax ;	/* unmasking pending HWIs or SWIs? */	\
	notl	%eax ;							\
	andl	_ipending,%eax ;					\
	jne	2f ; 		/* yes, maybe handle them */		\
1: ;									\
	MEXITCOUNT ;							\
	REL_MPLOCK ;		/* SMP release global lock */		\
	MAYBE_POPL_ES ;							\
	popl	%ds ;							\
	popl	%edx ;							\
	popl	%ecx ;							\
	popl	%eax ;							\
	iret ;								\
;									\
	ALIGN_TEXT ;							\
2: ;									\
	cmpb	$3,_intr_nesting_level ;	/* enough stack? */	\
	jae	1b ;		/* no, return */			\
	movl	_cpl,%eax ;						\
	/* XXX next line is probably unnecessary now. */		\
	movl	$HWI_MASK|SWI_MASK,_cpl ;	/* limit nesting ... */	\
	incb	_intr_nesting_level ;	/* ... really limit it ... */	\
	sti ;			/* to do this as early as possible */	\
	MAYBE_POPL_ES ;		/* discard most of thin frame ... */	\
	popl	%ecx ;		/* ... original %ds ... */		\
	popl	%edx ;							\
	xchgl	%eax,4(%esp) ;	/* orig %eax; save cpl */		\
	pushal ;		/* build fat frame (grrr) ... */	\
	pushl	%ecx ;		/* ... actually %ds ... */		\
	pushl	%es ;							\
	movl	$KDSEL,%eax ;						\
	movl	%ax,%es ;						\
	movl	(2+8+0)*4(%esp),%ecx ;	/* %ecx from thin frame ... */	\
	movl	%ecx,(2+6)*4(%esp) ;	/* ... to fat frame ... */	\
	movl	(2+8+1)*4(%esp),%eax ;	/* ... cpl from thin frame */	\
	pushl	%eax ;							\
	subl	$4,%esp ;	/* junk for unit number */		\
	MEXITCOUNT ;							\
	jmp	_doreti
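
/*
 * For illustration only: the FAST_INTR fast path saves just the
 * call-used registers, runs the handler with interrupts still
 * disabled, EOIs the local APIC, and iret's.  Roughly (pseudocode;
 * helper names are illustrative):
 *
 *	intr_handler[irq](intr_unit[irq]);	do the work ASAP
 *	lapic_eoi = 0;
 *	cnt.v_intr++;
 *	(*intr_countp[irq])++;
 *	if ((~cpl & ipending) == 0)
 *		return_from_interrupt();	the common case
 *
 * Only when the handler unmasked pending HWIs or SWIs (and the
 * nesting level is below the limit) is the thin frame rebuilt as a
 * full trap frame so the code can fall into _doreti.
 */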

#define	INTR(irq_num, vec_name)						\
	.text ;								\
	SUPERALIGN_TEXT ;						\
IDTVEC(vec_name) ;							\
	pushl	$0 ;		/* dummy error code */			\
	pushl	$0 ;		/* dummy trap type */			\
	pushal ;							\
	pushl	%ds ;		/* save data and extra segments ... */	\
	pushl	%es ;							\
	movl	$KDSEL,%eax ;	/* ... and reload with kernel's ... */	\
	movl	%ax,%ds ;	/* ... early for obsolete reasons */	\
	movl	%ax,%es ;						\
	GET_MPLOCK ;		/* SMP Spin lock */			\
	MAYBE_MASK_IRQ(irq_num) ;					\
	movl	$lapic_eoi, %eax ;					\
	movl	$0, (%eax) ;						\
	movl	_cpl,%eax ;						\
	testl	$IRQ_BIT(irq_num), %eax ;				\
	jne	3f ;							\
	incb	_intr_nesting_level ;					\
__CONCAT(Xresume,irq_num): ;						\
	FAKE_MCOUNT(12*4(%esp)) ;	/* XXX late to avoid dbl cnt */ \
	incl	_cnt+V_INTR ;	/* tally interrupts */			\
	movl	_intr_countp + (irq_num) * 4,%eax ;			\
	incl	(%eax) ;						\
	movl	_cpl,%eax ;						\
	pushl	%eax ;							\
	pushl	_intr_unit + (irq_num) * 4 ;				\
	orl	_intr_mask + (irq_num) * 4,%eax ;			\
	movl	%eax,_cpl ;						\
	sti ;								\
	call	*_intr_handler + (irq_num) * 4 ;			\
	MAYBE_UNMASK_IRQ(irq_num) ;					\
	MEXITCOUNT ;							\
	jmp	_doreti ;						\
;									\
	ALIGN_TEXT ;							\
3: ;									\
	/* XXX skip mcounting here to avoid double count */		\
	orl	$IRQ_BIT(irq_num), _ipending ;				\
	REL_MPLOCK ;		/* SMP release global lock */		\
	popl	%es ;							\
	popl	%ds ;							\
	popal ;								\
	addl	$4+4,%esp ;						\
	iret
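
/*
 * For illustration only: INTR builds a full trap frame and is
 * roughly this pseudocode (helper names are illustrative):
 *
 *	MAYBE_MASK_IRQ(irq);			lazy masking, above
 *	lapic_eoi = 0;
 *	if (cpl & IRQ_BIT(irq)) {		masked at current level?
 *		ipending |= IRQ_BIT(irq);	resume later via Xresume
 *		return_from_interrupt();
 *	}
 *	saved_cpl = cpl;
 *	cpl |= intr_mask[irq];			block this IRQ's group
 *	enable_interrupts();
 *	intr_handler[irq](intr_unit[irq]);
 *	MAYBE_UNMASK_IRQ(irq);
 *	goto doreti;				run pending ints, ASTs
 */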


/*
 * Handle "spurious INTerrupts".
 * Notes:
 *  This is different from the "spurious INTerrupt" generated by an
 *   8259 PIC for missing INTs.  See the APIC documentation for details.
 *  This routine should NOT do an 'EOI' cycle: the local APIC does not
 *   set an ISR bit for the spurious vector, so there is nothing to
 *   acknowledge.
 */
	.text
	SUPERALIGN_TEXT
	.globl _Xspuriousint
_Xspuriousint:
#ifdef COUNT_SPURIOUS_INTS
	ss
	incl	_sihits
#endif

	/* No EOI cycle used here */

	iret


/*
 * Handle TLB shootdowns.
 */
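/*
 * Note: reloading %cr3 with its current value below flushes this
 * CPU's non-global TLB entries, which is all the shootdown requires.
 */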
	.text
	SUPERALIGN_TEXT
	.globl	_Xinvltlb
_Xinvltlb:
	pushl	%eax

#ifdef COUNT_XINVLTLB_HITS
	ss
	movl	_cpuid, %eax
	ss
	incl	_xhits(,%eax,4)
#endif /* COUNT_XINVLTLB_HITS */

	movl	%cr3, %eax		/* invalidate the TLB */
	movl	%eax, %cr3

	movl	$lapic_eoi, %eax
	ss				/* stack segment, avoid %ds load */
	movl	$0, (%eax)		/* End Of Interrupt to APIC */

	popl	%eax
	iret


#ifdef  TEST_CPUSTOP

#include "i386/isa/intr_machdep.h"

/*
 * Executed by a CPU when it receives an Xcpustop IPI from another CPU:
 *
 *  - Signals its receipt.
 *  - Waits for permission to restart.
 *  - Signals its restart.
 */
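
/*
 * For illustration only, the stop/restart handshake below is
 * roughly (pseudocode):
 *
 *	atomic stopped_cpus |= (1 << cpuid);	signal receipt
 *	while ((started_cpus & (1 << cpuid)) == 0)
 *		;				spin until restarted
 *	atomic started_cpus &= ~(1 << cpuid);	signal restart
 *	lapic_eoi = 0;				ack the IPI
 */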

	.text
	SUPERALIGN_TEXT
	.globl _Xcpustop
_Xcpustop:
	pushl	%eax
	pushl	%ds			/* save current data segment */

	movl	$KDSEL, %eax
	movl	%ax, %ds		/* use KERNEL data segment */

	movl	_cpuid, %eax

#ifdef COUNT_CSHITS
	incl	_cshits(,%eax,4)
#endif /* COUNT_CSHITS */

	ASMPOSTCODE_HI(0x10)

	lock
	btsl	%eax, _stopped_cpus	/* stopped_cpus |= (1<<id) */

	ASMPOSTCODE_HI(0x20)
1:
	btl	%eax, _started_cpus	/* while (!(started_cpus & (1<<id))) */
	jnc	1b

	ASMPOSTCODE_HI(0x30)

	lock
	btrl	%eax, _started_cpus	/* started_cpus &= ~(1<<id) */

	ASMPOSTCODE_HI(0x40)

	movl	$lapic_eoi, %eax
	movl	$0, (%eax)		/* End Of Interrupt to APIC */

	popl	%ds			/* restore previous data segment */
	popl	%eax
	iret

#endif /* TEST_CPUSTOP */


MCOUNT_LABEL(bintr)
	FAST_INTR(0,fastintr0)
	FAST_INTR(1,fastintr1)
	FAST_INTR(2,fastintr2)
	FAST_INTR(3,fastintr3)
	FAST_INTR(4,fastintr4)
	FAST_INTR(5,fastintr5)
	FAST_INTR(6,fastintr6)
	FAST_INTR(7,fastintr7)
	FAST_INTR(8,fastintr8)
	FAST_INTR(9,fastintr9)
	FAST_INTR(10,fastintr10)
	FAST_INTR(11,fastintr11)
	FAST_INTR(12,fastintr12)
	FAST_INTR(13,fastintr13)
	FAST_INTR(14,fastintr14)
	FAST_INTR(15,fastintr15)
	FAST_INTR(16,fastintr16)
	FAST_INTR(17,fastintr17)
	FAST_INTR(18,fastintr18)
	FAST_INTR(19,fastintr19)
	FAST_INTR(20,fastintr20)
	FAST_INTR(21,fastintr21)
	FAST_INTR(22,fastintr22)
	FAST_INTR(23,fastintr23)
	INTR(0,intr0)
	INTR(1,intr1)
	INTR(2,intr2)
	INTR(3,intr3)
	INTR(4,intr4)
	INTR(5,intr5)
	INTR(6,intr6)
	INTR(7,intr7)
	INTR(8,intr8)
	INTR(9,intr9)
	INTR(10,intr10)
	INTR(11,intr11)
	INTR(12,intr12)
	INTR(13,intr13)
	INTR(14,intr14)
	INTR(15,intr15)
	INTR(16,intr16)
	INTR(17,intr17)
	INTR(18,intr18)
	INTR(19,intr19)
	INTR(20,intr20)
	INTR(21,intr21)
	INTR(22,intr22)
	INTR(23,intr23)
MCOUNT_LABEL(eintr)

	.data
ihandlers:			/* addresses of interrupt handlers */
				/* actually resumption addresses for HWI's */
	.long	Xresume0,  Xresume1,  Xresume2,  Xresume3
	.long	Xresume4,  Xresume5,  Xresume6,  Xresume7
	.long	Xresume8,  Xresume9,  Xresume10, Xresume11
	.long	Xresume12, Xresume13, Xresume14, Xresume15
	.long	Xresume16, Xresume17, Xresume18, Xresume19
	.long	Xresume20, Xresume21, Xresume22, Xresume23
	.long	swi_tty,   swi_net
	.long	0, 0, 0, 0
	.long	_softclock, swi_ast

imasks:				/* masks for interrupt handlers */
	.space	NHWI*4		/* padding; HWI masks are elsewhere */

	.long	SWI_TTY_MASK, SWI_NET_MASK
	.long	0, 0, 0, 0
	.long	SWI_CLOCK_MASK, SWI_AST_MASK

	.globl _ivectors
_ivectors:
	.long	_Xintr0,  _Xintr1,  _Xintr2,  _Xintr3
	.long	_Xintr4,  _Xintr5,  _Xintr6,  _Xintr7
	.long	_Xintr8,  _Xintr9,  _Xintr10, _Xintr11
	.long	_Xintr12, _Xintr13, _Xintr14, _Xintr15
	.long	_Xintr16, _Xintr17, _Xintr18, _Xintr19
	.long	_Xintr20, _Xintr21, _Xintr22, _Xintr23

/* active flag for lazy masking */
iactive:
	.long	0

#ifdef COUNT_SPURIOUS_INTS
	.globl	_sihits
_sihits:
	.long	0
#endif /* COUNT_SPURIOUS_INTS */

#ifdef COUNT_XINVLTLB_HITS
	.globl	_xhits
_xhits:
	.long	0
	.long	0
	.long	0
	.long	0
#endif /* COUNT_XINVLTLB_HITS */

#ifdef TEST_CPUSTOP

	.globl _stopped_cpus
_stopped_cpus:
	.long	0

	.globl _started_cpus
_started_cpus:
	.long	0

#ifdef COUNT_CSHITS
	.globl	_cshits
_cshits:
	.long	0
	.long	0
	.long	0
	.long	0
#endif /* COUNT_CSHITS */

#endif /* TEST_CPUSTOP */


/*
 * Interrupt counters and names.  The format of these and the label names
 * must agree with what vmstat expects.  The tables are indexed by device
 * ids so that we don't have to move the names around as devices are
 * attached.
 */
#include "vector.h"
	.globl	_intrcnt, _eintrcnt
_intrcnt:
	.space	(NR_DEVICES + ICU_LEN) * 4
_eintrcnt:

	.globl	_intrnames, _eintrnames
_intrnames:
	.ascii	DEVICE_NAMES
	.asciz	"stray irq0"
	.asciz	"stray irq1"
	.asciz	"stray irq2"
	.asciz	"stray irq3"
	.asciz	"stray irq4"
	.asciz	"stray irq5"
	.asciz	"stray irq6"
	.asciz	"stray irq7"
	.asciz	"stray irq8"
	.asciz	"stray irq9"
	.asciz	"stray irq10"
	.asciz	"stray irq11"
	.asciz	"stray irq12"
	.asciz	"stray irq13"
	.asciz	"stray irq14"
	.asciz	"stray irq15"
	.asciz	"stray irq16"
	.asciz	"stray irq17"
	.asciz	"stray irq18"
	.asciz	"stray irq19"
	.asciz	"stray irq20"
	.asciz	"stray irq21"
	.asciz	"stray irq22"
	.asciz	"stray irq23"
_eintrnames:

	.text
437