/*
 *	apic_vector.s	revision 27406
 *	from: vector.s, 386BSD 0.1 unknown origin
 *	$Id: apic_vector.s,v 1.10 1997/07/15 00:08:01 smp Exp smp $
 */


#include <machine/smptests.h>	/** TEST_CPUSTOP */

/* convert an absolute IRQ# into a bitmask */
#define IRQ_BIT(irq_num)	(1 << (irq_num))

/* make an index into the IO APIC from the IRQ# */
#define REDTBL_IDX(irq_num)	(0x10 + ((irq_num) * 2))
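
/*
 * Worked example (editorial note): for IRQ 10, IRQ_BIT(10) == 0x400
 * and REDTBL_IDX(10) == 0x24, the IO APIC select-register index of the
 * low half of redirection table entry 10 (the table starts at register
 * index 0x10 and each entry is two 32-bit registers wide).
 */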

/*
 * 'lazy masking' code suggested by Bruce Evans <bde@zeta.org.au>
 */
#define MAYBE_MASK_IRQ(irq_num)						\
	testl	$IRQ_BIT(irq_num),iactive ;	/* lazy masking */	\
	je	1f ;			/* NOT currently active */	\
	orl	$IRQ_BIT(irq_num),_imen ;	/* set the mask bit */	\
	movl	_ioapic,%ecx ;			/* ioapic[0]addr */	\
	movl	$REDTBL_IDX(irq_num),(%ecx) ;	/* write the index */	\
	movl	IOAPIC_WINDOW(%ecx),%eax ;	/* current value */	\
	orl	$IOART_INTMASK,%eax ;		/* set the mask */	\
	movl	%eax,IOAPIC_WINDOW(%ecx) ;	/* new value */		\
	movl	$0, lapic_eoi ;						\
	orl	$IRQ_BIT(irq_num), _ipending ;				\
	REL_MPLOCK ;			/* SMP release global lock */	\
	popl	%es ;							\
	popl	%ds ;							\
	popal ;								\
	addl	$4+4,%esp ;						\
	iret ;								\
;									\
	ALIGN_TEXT ;							\
1: ;									\
	orl	$IRQ_BIT(irq_num),iactive


#define MAYBE_UNMASK_IRQ(irq_num)					\
	cli ;	/* must unmask _imen and icu atomically */		\
	andl	$~IRQ_BIT(irq_num),iactive ;				\
	testl	$IRQ_BIT(irq_num),_imen ;				\
	je	2f ;							\
	andl	$~IRQ_BIT(irq_num),_imen ;	/* clear mask bit */	\
	movl	_ioapic,%ecx ;			/* ioapic[0]addr */	\
	movl	$REDTBL_IDX(irq_num),(%ecx) ;	/* write the index */	\
	movl	IOAPIC_WINDOW(%ecx),%eax ;	/* current value */	\
	andl	$~IOART_INTMASK,%eax ;		/* clear the mask */	\
	movl	%eax,IOAPIC_WINDOW(%ecx) ;	/* new value */		\
2: ;									\
	sti ;	/* XXX _doreti repeats the cli/sti */
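
/*
 * In C terms, the pair above behaves roughly as follows (a sketch for
 * illustration only; the names mirror the assembly symbols):
 *
 *	MAYBE_MASK_IRQ:
 *		if (iactive & IRQ_BIT(n)) {	// handler still running
 *			imen |= IRQ_BIT(n);	// mask in the IO APIC
 *			ipending |= IRQ_BIT(n);	// remember for later
 *			return;			// EOI and iret
 *		}
 *		iactive |= IRQ_BIT(n);		// mark handler active
 *
 *	MAYBE_UNMASK_IRQ:
 *		iactive &= ~IRQ_BIT(n);		// handler done
 *		if (imen & IRQ_BIT(n))
 *			imen &= ~IRQ_BIT(n);	// unmask in the IO APIC
 */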


/*
 * Macros for interrupt entry, call to handler, and exit.
 */

#define	FAST_INTR(irq_num, vec_name)					\
	.text ;								\
	SUPERALIGN_TEXT ;						\
IDTVEC(vec_name) ;							\
	pushl	%eax ;		/* save only call-used registers */	\
	pushl	%ecx ;							\
	pushl	%edx ;							\
	pushl	%ds ;							\
	MAYBE_PUSHL_ES ;						\
	movl	$KDSEL,%eax ;						\
	movl	%ax,%ds ;						\
	MAYBE_MOVW_AX_ES ;						\
	FAKE_MCOUNT((4+ACTUALLY_PUSHED)*4(%esp)) ;			\
	GET_MPLOCK ;		/* SMP Spin lock */			\
	pushl	_intr_unit + (irq_num) * 4 ;				\
	call	*_intr_handler + (irq_num) * 4 ; /* do the work ASAP */ \
	movl	$0, lapic_eoi ;						\
	addl	$4,%esp ;						\
	incl	_cnt+V_INTR ;	/* book-keeping can wait */		\
	movl	_intr_countp + (irq_num) * 4,%eax ;			\
	incl	(%eax) ;						\
	movl	_cpl,%eax ;	/* unmasking pending HWIs or SWIs? */	\
	notl	%eax ;							\
	andl	_ipending,%eax ;					\
	jne	2f ; 		/* yes, maybe handle them */		\
1: ;									\
	MEXITCOUNT ;							\
	REL_MPLOCK ;		/* SMP release global lock */		\
	MAYBE_POPL_ES ;							\
	popl	%ds ;							\
	popl	%edx ;							\
	popl	%ecx ;							\
	popl	%eax ;							\
	iret ;								\
;									\
	ALIGN_TEXT ;							\
2: ;									\
	cmpb	$3,_intr_nesting_level ;	/* enough stack? */	\
	jae	1b ;		/* no, return */			\
	movl	_cpl,%eax ;						\
	/* XXX next line is probably unnecessary now. */		\
	movl	$HWI_MASK|SWI_MASK,_cpl ;	/* limit nesting ... */	\
	incb	_intr_nesting_level ;	/* ... really limit it ... */	\
	sti ;			/* to do this as early as possible */	\
	MAYBE_POPL_ES ;		/* discard most of thin frame ... */	\
	popl	%ecx ;		/* ... original %ds ... */		\
	popl	%edx ;							\
	xchgl	%eax,4(%esp) ;	/* orig %eax; save cpl */		\
	pushal ;		/* build fat frame (grrr) ... */	\
	pushl	%ecx ;		/* ... actually %ds ... */		\
	pushl	%es ;							\
	movl	$KDSEL,%eax ;						\
	movl	%ax,%es ;						\
	movl	(2+8+0)*4(%esp),%ecx ;	/* %ecx from thin frame ... */	\
	movl	%ecx,(2+6)*4(%esp) ;	/* ... to fat frame ... */	\
	movl	(2+8+1)*4(%esp),%eax ;	/* ... cpl from thin frame */	\
	pushl	%eax ;							\
	subl	$4,%esp ;	/* junk for unit number */		\
	MEXITCOUNT ;							\
	jmp	_doreti
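
/*
 * Summary (editorial note): FAST_INTR saves only the call-used
 * registers, runs the handler at once while holding the MP lock, EOIs
 * the local APIC, and normally returns straight from this thin frame.
 * Only when (~cpl & ipending) shows unmasked interrupts pending does
 * it rebuild a fat trap frame and leave through _doreti instead.
 */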

#define	INTR(irq_num, vec_name)						\
	.text ;								\
	SUPERALIGN_TEXT ;						\
IDTVEC(vec_name) ;							\
	pushl	$0 ;		/* dummy error code */			\
	pushl	$0 ;		/* dummy trap type */			\
	pushal ;							\
	pushl	%ds ;		/* save data and extra segments ... */	\
	pushl	%es ;							\
	movl	$KDSEL,%eax ;	/* ... and reload with kernel's ... */	\
	movl	%ax,%ds ;	/* ... early for obsolete reasons */	\
	movl	%ax,%es ;						\
	GET_MPLOCK ;		/* SMP Spin lock */			\
	MAYBE_MASK_IRQ(irq_num) ;					\
	movl	$0, lapic_eoi ;						\
	movl	_cpl,%eax ;						\
	testl	$IRQ_BIT(irq_num), %eax ;				\
	jne	3f ;							\
	incb	_intr_nesting_level ;					\
__CONCAT(Xresume,irq_num): ;						\
	FAKE_MCOUNT(12*4(%esp)) ;	/* XXX late to avoid dbl cnt */ \
	incl	_cnt+V_INTR ;	/* tally interrupts */			\
	movl	_intr_countp + (irq_num) * 4,%eax ;			\
	incl	(%eax) ;						\
	movl	_cpl,%eax ;						\
	pushl	%eax ;							\
	pushl	_intr_unit + (irq_num) * 4 ;				\
	orl	_intr_mask + (irq_num) * 4,%eax ;			\
	movl	%eax,_cpl ;						\
	sti ;								\
	call	*_intr_handler + (irq_num) * 4 ;			\
	MAYBE_UNMASK_IRQ(irq_num) ;					\
	MEXITCOUNT ;							\
	jmp	_doreti ;						\
;									\
	ALIGN_TEXT ;							\
3: ;									\
	/* XXX skip mcounting here to avoid double count */		\
	orl	$IRQ_BIT(irq_num), _ipending ;				\
	REL_MPLOCK ;		/* SMP release global lock */		\
	popl	%es ;							\
	popl	%ds ;							\
	popal ;								\
	addl	$4+4,%esp ;						\
	iret
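
/*
 * Summary (editorial note): INTR builds a full trap frame up front.
 * If the IRQ's bit is set in _cpl the interrupt is currently masked,
 * so the code at 3: just records it in _ipending and returns; it is
 * restarted later at the Xresume label.  Otherwise _cpl is raised by
 * the IRQ's interrupt mask for the duration of the handler call, and
 * the normal exit runs through _doreti.
 */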


/*
 * Handle "spurious INTerrupts".
 * Notes:
 *  This is different from the "spurious INTerrupt" generated by an
 *   8259 PIC for missing INTs.  See the APIC documentation for details.
 *  This routine should NOT do an 'EOI' cycle.
 */
	.text
	SUPERALIGN_TEXT
	.globl _Xspuriousint
_Xspuriousint:
#ifdef COUNT_SPURIOUS_INTS
	ss
	incl	_sihits
#endif

	/* No EOI cycle used here */

	iret


/*
 * Handle TLB shootdowns.
 */
	.text
	SUPERALIGN_TEXT
	.globl	_Xinvltlb
_Xinvltlb:
	pushl	%eax

#ifdef COUNT_XINVLTLB_HITS
	ss
	movl	_cpuid, %eax
	ss
	incl	_xhits(,%eax,4)
#endif /* COUNT_XINVLTLB_HITS */

	movl	%cr3, %eax		/* invalidate the TLB */
	movl	%eax, %cr3

	ss				/* stack segment, avoid %ds load */
	movl	$0, lapic_eoi		/* End Of Interrupt to APIC */

	popl	%eax
	iret
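
/*
 * Note: on the i386, rewriting %cr3 (even with its old value) flushes
 * the TLB (all non-global entries), so the reload above is the entire
 * shootdown; no per-page invalidation is attempted here.
 */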


#ifdef  TEST_CPUSTOP

#include "i386/isa/intr_machdep.h"

/*
 * Executed by a CPU when it receives an Xcpustop IPI from another CPU:
 *
 *  - Signals its receipt.
 *  - Waits for permission to restart.
 *  - Signals its restart.
 */
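
/*
 * The handshake in C terms (a sketch for illustration; the real code
 * uses locked bit instructions on the shared masks declared below):
 *
 *	stopped_cpus |= (1 << cpuid);		// signal receipt
 *	while ((started_cpus & (1 << cpuid)) == 0)
 *		;				// wait for permission
 *	started_cpus &= ~(1 << cpuid);		// signal restart
 */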

	.text
	SUPERALIGN_TEXT
	.globl _Xcpustop
_Xcpustop:
	pushl	%eax
	pushl	%ds			/* save current data segment */

	movl	$KDSEL, %eax
	movl	%ax, %ds		/* use KERNEL data segment */

	movl	_cpuid, %eax

#ifdef COUNT_CSHITS
	incl	_cshits(,%eax,4)
#endif /* COUNT_CSHITS */

	ASMPOSTCODE_HI(0x10)

	lock
	btsl	%eax, _stopped_cpus	/* stopped_cpus |= (1<<id) */

	ASMPOSTCODE_HI(0x20)
1:
	btl	%eax, _started_cpus	/* while (!(started_cpus & (1<<id))) */
	jnc	1b

	ASMPOSTCODE_HI(0x30)

	lock
	btrl	%eax, _started_cpus	/* started_cpus &= ~(1<<id) */

	ASMPOSTCODE_HI(0x40)

	movl	$0, lapic_eoi		/* End Of Interrupt to APIC */

	popl	%ds			/* restore previous data segment */
	popl	%eax
	iret

#endif /* TEST_CPUSTOP */


MCOUNT_LABEL(bintr)
	FAST_INTR(0,fastintr0)
	FAST_INTR(1,fastintr1)
	FAST_INTR(2,fastintr2)
	FAST_INTR(3,fastintr3)
	FAST_INTR(4,fastintr4)
	FAST_INTR(5,fastintr5)
	FAST_INTR(6,fastintr6)
	FAST_INTR(7,fastintr7)
	FAST_INTR(8,fastintr8)
	FAST_INTR(9,fastintr9)
	FAST_INTR(10,fastintr10)
	FAST_INTR(11,fastintr11)
	FAST_INTR(12,fastintr12)
	FAST_INTR(13,fastintr13)
	FAST_INTR(14,fastintr14)
	FAST_INTR(15,fastintr15)
	FAST_INTR(16,fastintr16)
	FAST_INTR(17,fastintr17)
	FAST_INTR(18,fastintr18)
	FAST_INTR(19,fastintr19)
	FAST_INTR(20,fastintr20)
	FAST_INTR(21,fastintr21)
	FAST_INTR(22,fastintr22)
	FAST_INTR(23,fastintr23)
	INTR(0,intr0)
	INTR(1,intr1)
	INTR(2,intr2)
	INTR(3,intr3)
	INTR(4,intr4)
	INTR(5,intr5)
	INTR(6,intr6)
	INTR(7,intr7)
	INTR(8,intr8)
	INTR(9,intr9)
	INTR(10,intr10)
	INTR(11,intr11)
	INTR(12,intr12)
	INTR(13,intr13)
	INTR(14,intr14)
	INTR(15,intr15)
	INTR(16,intr16)
	INTR(17,intr17)
	INTR(18,intr18)
	INTR(19,intr19)
	INTR(20,intr20)
	INTR(21,intr21)
	INTR(22,intr22)
	INTR(23,intr23)
MCOUNT_LABEL(eintr)

	.data
ihandlers:			/* addresses of interrupt handlers */
				/* actually resumption addresses for HWI's */
	.long	Xresume0,  Xresume1,  Xresume2,  Xresume3
	.long	Xresume4,  Xresume5,  Xresume6,  Xresume7
	.long	Xresume8,  Xresume9,  Xresume10, Xresume11
	.long	Xresume12, Xresume13, Xresume14, Xresume15
	.long	Xresume16, Xresume17, Xresume18, Xresume19
	.long	Xresume20, Xresume21, Xresume22, Xresume23
	.long	swi_tty,   swi_net
	.long	0, 0, 0, 0
	.long	_softclock, swi_ast

imasks:				/* masks for interrupt handlers */
	.space	NHWI*4		/* padding; HWI masks are elsewhere */

	.long	SWI_TTY_MASK, SWI_NET_MASK
	.long	0, 0, 0, 0
	.long	SWI_CLOCK_MASK, SWI_AST_MASK

	.globl _ivectors
_ivectors:
	.long	_Xintr0,  _Xintr1,  _Xintr2,  _Xintr3
	.long	_Xintr4,  _Xintr5,  _Xintr6,  _Xintr7
	.long	_Xintr8,  _Xintr9,  _Xintr10, _Xintr11
	.long	_Xintr12, _Xintr13, _Xintr14, _Xintr15
	.long	_Xintr16, _Xintr17, _Xintr18, _Xintr19
	.long	_Xintr20, _Xintr21, _Xintr22, _Xintr23

/* active flag for lazy masking */
iactive:
	.long	0

#ifdef COUNT_SPURIOUS_INTS
	.globl	_sihits
_sihits:
	.long	0
#endif /* COUNT_SPURIOUS_INTS */

#ifdef COUNT_XINVLTLB_HITS
	.globl	_xhits
_xhits:
	.long	0
	.long	0
	.long	0
	.long	0
#endif /* COUNT_XINVLTLB_HITS */

#ifdef TEST_CPUSTOP

	.globl _stopped_cpus
_stopped_cpus:
	.long	0

	.globl _started_cpus
_started_cpus:
	.long	0

#ifdef COUNT_CSHITS
	.globl	_cshits
_cshits:
	.long	0
	.long	0
	.long	0
	.long	0
#endif /* COUNT_CSHITS */

#endif /* TEST_CPUSTOP */


/*
 * Interrupt counters and names.  The format of these and the label names
 * must agree with what vmstat expects.  The tables are indexed by device
 * ids so that we don't have to move the names around as devices are
 * attached.
 */
#include "vector.h"
	.globl	_intrcnt, _eintrcnt
_intrcnt:
	.space	(NR_DEVICES + ICU_LEN) * 4
_eintrcnt:

	.globl	_intrnames, _eintrnames
_intrnames:
	.ascii	DEVICE_NAMES
	.asciz	"stray irq0"
	.asciz	"stray irq1"
	.asciz	"stray irq2"
	.asciz	"stray irq3"
	.asciz	"stray irq4"
	.asciz	"stray irq5"
	.asciz	"stray irq6"
	.asciz	"stray irq7"
	.asciz	"stray irq8"
	.asciz	"stray irq9"
	.asciz	"stray irq10"
	.asciz	"stray irq11"
	.asciz	"stray irq12"
	.asciz	"stray irq13"
	.asciz	"stray irq14"
	.asciz	"stray irq15"
	.asciz	"stray irq16"
	.asciz	"stray irq17"
	.asciz	"stray irq18"
	.asciz	"stray irq19"
	.asciz	"stray irq20"
	.asciz	"stray irq21"
	.asciz	"stray irq22"
	.asciz	"stray irq23"
_eintrnames:

	.text