/*
 *	from: vector.s, 386BSD 0.1 unknown origin
 *	$Id: apic_vector.s,v 1.21 1997/08/10 20:47:53 smp Exp smp $
 */


#include <machine/apic.h>
#include <machine/smp.h>
#include <machine/smptests.h>		/** PEND_INTS, various counters */

#include "i386/isa/intr_machdep.h"


/* convert an absolute IRQ# into a bitmask */
#define IRQ_BIT(irq_num)	(1 << (irq_num))

/* make an index into the IO APIC from the IRQ# */
#define REDTBL_IDX(irq_num)	(0x10 + ((irq_num) * 2))
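
/*
 * Worked example (added): the IO APIC redirection table starts at
 * register 0x10 with two 32-bit registers per entry, so for IRQ 9,
 * IRQ_BIT(9) == 0x200 and REDTBL_IDX(9) == 0x10 + 9*2 == 0x22, the
 * index of the low dword of that IRQ's redirection entry.
 */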

/*
 * 'lazy masking' code suggested by Bruce Evans <bde@zeta.org.au>
 */

#ifdef PEND_INTS

/*
 * The first version fails because masked edge-triggered INTs are lost
 * by the IO APIC.  This version tests whether we are handling an edge-
 * or level-triggered INT.  Level-triggered INTs must still be masked,
 * as we don't clear the source and the EOI cycle would allow recursive
 * INTs to occur.
 */
#define MAYBE_MASK_IRQ(irq_num)						\
	lock ;					/* MP-safe */		\
	btsl	$(irq_num),iactive ;		/* lazy masking */	\
	jc	6f ;				/* already active */	\
	pushl	$_mp_lock ;			/* GIANT_LOCK */	\
	call	_MPtrylock ;			/* try to get lock */	\
	add	$4, %esp ;						\
	testl	%eax, %eax ;			/* did we get it? */	\
	jnz	8f ;				/* yes, enter kernel */	\
6: ;						/* active or locked */	\
	IMASK_LOCK ;				/* into critical reg */	\
	testl	$IRQ_BIT(irq_num),_apic_pin_trigger ;			\
	jz	7f ;				/* edge, don't mask */	\
	orl	$IRQ_BIT(irq_num),_apic_imen ;	/* set the mask bit */	\
	movl	_ioapic,%ecx ;			/* ioapic[0] addr */	\
	movl	$REDTBL_IDX(irq_num),(%ecx) ;	/* write the index */	\
	movl	IOAPIC_WINDOW(%ecx),%eax ;	/* current value */	\
	orl	$IOART_INTMASK,%eax ;		/* set the mask */	\
	movl	%eax,IOAPIC_WINDOW(%ecx) ;	/* new value */		\
7: ;									\
	lock ;					/* MP-safe */		\
	orl	$IRQ_BIT(irq_num), _ipending ;	/* set _ipending bit */	\
	IMASK_UNLOCK ;				/* exit critical reg */	\
	movl	$0, lapic_eoi ;			/* do the EOI */	\
	popl	%es ;							\
	popl	%ds ;							\
	popal ;								\
	addl	$4+4,%esp ;						\
	iret ;								\
;									\
	ALIGN_TEXT ;							\
8:
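
/*
 * A rough C sketch of the MAYBE_MASK_IRQ protocol above (added for
 * clarity; pseudo-code, the helper names are not real kernel functions):
 *
 *	if (test_and_set(&iactive, irq) || !MPtrylock(&mp_lock)) {
 *		if (apic_pin_trigger & IRQ_BIT(irq))	// level-triggered
 *			mask_irq_in_ioapic(irq);	// imen + IOART_INTMASK
 *		ipending |= IRQ_BIT(irq);		// replayed by _doreti
 *		lapic_eoi();
 *		return;					// iret, drop the frame
 *	}
 *	// mp_lock held and IRQ newly active: continue into the handler
 */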

#else /* PEND_INTS */

#define MAYBE_MASK_IRQ(irq_num)						\
	lock ;					/* MP-safe */		\
	btsl	$(irq_num),iactive ;		/* lazy masking */	\
	jnc	1f ;				/* NOT active */	\
	IMASK_LOCK ;				/* enter critical reg */\
	orl	$IRQ_BIT(irq_num),_apic_imen ;	/* set the mask bit */	\
	movl	_ioapic,%ecx ;			/* ioapic[0] addr */	\
	movl	$REDTBL_IDX(irq_num),(%ecx) ;	/* write the index */	\
	movl	IOAPIC_WINDOW(%ecx),%eax ;	/* current value */	\
	orl	$IOART_INTMASK,%eax ;		/* set the mask */	\
	movl	%eax,IOAPIC_WINDOW(%ecx) ;	/* new value */		\
	lock ;					/* MP-safe */		\
	orl	$IRQ_BIT(irq_num), _ipending ;	/* set _ipending bit */	\
	movl	$0, lapic_eoi ;			/* do the EOI */	\
	IMASK_UNLOCK ;				/* exit critical reg */	\
	popl	%es ;							\
	popl	%ds ;							\
	popal ;								\
	addl	$4+4,%esp ;						\
	iret ;								\
;									\
	ALIGN_TEXT ;							\
1: ;									\
	call	_get_mplock			/* SMP Spin lock */

#endif /* PEND_INTS */


#define MAYBE_UNMASK_IRQ(irq_num)					\
	cli ;	/* must unmask _apic_imen and IO APIC atomically */	\
	lock ;					/* MP-safe */		\
	andl	$~IRQ_BIT(irq_num),iactive ;				\
	IMASK_LOCK ;				/* enter critical reg */\
	testl	$IRQ_BIT(irq_num),_apic_imen ;				\
	je	9f ;							\
	andl	$~IRQ_BIT(irq_num),_apic_imen ;	/* clear mask bit */	\
	movl	_ioapic,%ecx ;			/* ioapic[0] addr */	\
	movl	$REDTBL_IDX(irq_num),(%ecx) ;	/* write the index */	\
	movl	IOAPIC_WINDOW(%ecx),%eax ;	/* current value */	\
	andl	$~IOART_INTMASK,%eax ;		/* clear the mask */	\
	movl	%eax,IOAPIC_WINDOW(%ecx) ;	/* new value */		\
9: ;									\
	IMASK_UNLOCK ;				/* exit critical reg */	\
	sti	/* XXX _doreti repeats the cli/sti */
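
/*
 * C sketch of MAYBE_UNMASK_IRQ above (added; pseudo-code, approximate):
 *
 *	disable_intr();			// cli: imen and IO APIC change together
 *	iactive &= ~IRQ_BIT(irq);
 *	if (apic_imen & IRQ_BIT(irq)) {	// masked earlier by MAYBE_MASK_IRQ?
 *		apic_imen &= ~IRQ_BIT(irq);
 *		unmask_irq_in_ioapic(irq);	// clear IOART_INTMASK
 *	}
 *	enable_intr();			// sti
 */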


/*
 * Macros for interrupt entry, call to handler, and exit.
 */

#define	FAST_INTR(irq_num, vec_name)					\
	.text ;								\
	SUPERALIGN_TEXT ;						\
IDTVEC(vec_name) ;							\
	pushl	%eax ;		/* save only call-used registers */	\
	pushl	%ecx ;							\
	pushl	%edx ;							\
	pushl	%ds ;							\
	MAYBE_PUSHL_ES ;						\
	movl	$KDSEL,%eax ;						\
	movl	%ax,%ds ;						\
	MAYBE_MOVW_AX_ES ;						\
	FAKE_MCOUNT((4+ACTUALLY_PUSHED)*4(%esp)) ;			\
	call	_get_isrlock ;						\
	pushl	_intr_unit + (irq_num) * 4 ;				\
	call	*_intr_handler + (irq_num) * 4 ; /* do the work ASAP */ \
	movl	$0, lapic_eoi ;						\
	addl	$4,%esp ;						\
	incl	_cnt+V_INTR ;	/* book-keeping can wait */		\
	movl	_intr_countp + (irq_num) * 4,%eax ;			\
	incl	(%eax) ;						\
	movl	_cpl,%eax ;	/* unmasking pending HWIs or SWIs? */	\
	notl	%eax ;							\
	andl	_ipending,%eax ;					\
	jne	2f ; 		/* yes, maybe handle them */		\
1: ;									\
	MEXITCOUNT ;							\
	pushl	$_mp_lock ;	/* GIANT_LOCK */			\
	call	_MPrellock ;						\
	add	$4, %esp ;						\
	MAYBE_POPL_ES ;							\
	popl	%ds ;							\
	popl	%edx ;							\
	popl	%ecx ;							\
	popl	%eax ;							\
	iret ;								\
;									\
	ALIGN_TEXT ;							\
2: ;									\
	cmpb	$3,_intr_nesting_level ;	/* enough stack? */	\
	jae	1b ;		/* no, return */			\
	movl	_cpl,%eax ;						\
	/* XXX next line is probably unnecessary now. */		\
	movl	$HWI_MASK|SWI_MASK,_cpl ;	/* limit nesting ... */	\
	incb	_intr_nesting_level ;	/* ... really limit it ... */	\
	sti ;			/* to do this as early as possible */	\
	MAYBE_POPL_ES ;		/* discard most of thin frame ... */	\
	popl	%ecx ;		/* ... original %ds ... */		\
	popl	%edx ;							\
	xchgl	%eax,4(%esp) ;	/* orig %eax; save cpl */		\
	pushal ;		/* build fat frame (grrr) ... */	\
	pushl	%ecx ;		/* ... actually %ds ... */		\
	pushl	%es ;							\
	movl	$KDSEL,%eax ;						\
	movl	%ax,%es ;						\
	movl	(2+8+0)*4(%esp),%ecx ;	/* %ecx from thin frame ... */	\
	movl	%ecx,(2+6)*4(%esp) ;	/* ... to fat frame ... */	\
	movl	(2+8+1)*4(%esp),%eax ;	/* ... cpl from thin frame */	\
	pushl	%eax ;							\
	subl	$4,%esp ;	/* junk for unit number */		\
	MEXITCOUNT ;							\
	jmp	_doreti
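
/*
 * FAST_INTR flow in rough C (added; pseudo-code, approximate; the real
 * code also rebuilds the thin frame into a fat trap frame for _doreti):
 *
 *	save_call_used_regs();			// %eax/%ecx/%edx, %ds/%es
 *	get_isrlock();
 *	intr_handler[irq](intr_unit[irq]);	// run the handler ASAP
 *	lapic_eoi();
 *	cnt.v_intr++; (*intr_countp[irq])++;
 *	if ((~cpl & ipending) && intr_nesting_level < 3)
 *		jump_to_doreti();		// service pending ints
 *	else {
 *		MPrellock(&mp_lock);
 *		restore_regs_and_iret();
 *	}
 */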

#define	INTR(irq_num, vec_name)						\
	.text ;								\
	SUPERALIGN_TEXT ;						\
IDTVEC(vec_name) ;							\
	pushl	$0 ;		/* dummy error code */			\
	pushl	$0 ;		/* dummy trap type */			\
	pushal ;							\
	pushl	%ds ;		/* save data and extra segments ... */	\
	pushl	%es ;							\
	movl	$KDSEL,%eax ;	/* ... and reload with kernel's ... */	\
	movl	%ax,%ds ;	/* ... early for obsolete reasons */	\
	movl	%ax,%es ;						\
	MAYBE_MASK_IRQ(irq_num) ;					\
	movl	$0, lapic_eoi ;						\
	movl	_cpl,%eax ;						\
	testl	$IRQ_BIT(irq_num), %eax ;				\
	jne	3f ;							\
	incb	_intr_nesting_level ;					\
__CONCAT(Xresume,irq_num): ;						\
	FAKE_MCOUNT(12*4(%esp)) ;	/* XXX late to avoid dbl cnt */ \
	incl	_cnt+V_INTR ;	/* tally interrupts */			\
	movl	_intr_countp + (irq_num) * 4,%eax ;			\
	incl	(%eax) ;						\
	movl	_cpl,%eax ;						\
	pushl	%eax ;							\
	pushl	_intr_unit + (irq_num) * 4 ;				\
	orl	_intr_mask + (irq_num) * 4,%eax ;			\
	movl	%eax,_cpl ;						\
	sti ;								\
	call	*_intr_handler + (irq_num) * 4 ;			\
	MAYBE_UNMASK_IRQ(irq_num) ;					\
	MEXITCOUNT ;							\
	jmp	_doreti ;						\
;									\
	ALIGN_TEXT ;							\
3: ;									\
	/* XXX skip mcounting here to avoid double count */		\
	lock ;					/* MP-safe */		\
	orl	$IRQ_BIT(irq_num), _ipending ;				\
	pushl	$_mp_lock ;			/* GIANT_LOCK */	\
	call	_MPrellock ;						\
	add	$4, %esp ;						\
	popl	%es ;							\
	popl	%ds ;							\
	popal ;								\
	addl	$4+4,%esp ;						\
	iret
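
/*
 * INTR flow in rough C (added; pseudo-code, approximate):
 *
 *	push_trap_frame();			// dummy err/type, pushal, segs
 *	MAYBE_MASK_IRQ(irq);			// lazy mask + grab mp_lock
 *	lapic_eoi();
 *	if (cpl & IRQ_BIT(irq)) {		// blocked by current priority:
 *		ipending |= IRQ_BIT(irq);	// note it, drop mp_lock, iret;
 *		return;				// _doreti later resumes at
 *	}					// Xresume<irq_num>
 *	cpl |= intr_mask[irq];			// raise the priority level
 *	enable_intr();				// sti
 *	intr_handler[irq](intr_unit[irq]);
 *	MAYBE_UNMASK_IRQ(irq);
 *	doreti();				// restores cpl, replays ipending
 */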


/*
 * Handle "spurious INTerrupts".
 * Notes:
 *  This is different from the "spurious INTerrupt" generated by an
 *   8259 PIC for missing INTs.  See the APIC documentation for details.
 *  This routine should NOT do an 'EOI' cycle: the local APIC never sets
 *   its in-service (ISR) bit for the spurious vector, so an EOI here
 *   could wrongly terminate some other in-service interrupt.
 */
	.text
	SUPERALIGN_TEXT
	.globl _Xspuriousint
_Xspuriousint:
#ifdef COUNT_SPURIOUS_INTS
	ss
	incl	_sihits
#endif

	/* No EOI cycle used here */

	iret


/*
 * Handle TLB shootdowns.
 */
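
/*
 * Added note: reloading %cr3 below flushes all (non-global) TLB entries,
 * which is the entire effect wanted from this IPI; the ss: overrides let
 * the handler run without loading a known-good %ds.
 */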
	.text
	SUPERALIGN_TEXT
	.globl	_Xinvltlb
_Xinvltlb:
	pushl	%eax

#ifdef COUNT_XINVLTLB_HITS
	ss
	movl	_cpuid, %eax
	ss
	incl	_xhits(,%eax,4)
#endif /* COUNT_XINVLTLB_HITS */

	movl	%cr3, %eax		/* invalidate the TLB */
	movl	%eax, %cr3

	ss				/* stack segment, avoid %ds load */
	movl	$0, lapic_eoi		/* End Of Interrupt to APIC */

	popl	%eax
	iret


/*
 * Executed by a CPU when it receives an Xcpustop IPI from another CPU:
 *
 *  - Signals its receipt.
 *  - Waits for permission to restart.
 *  - Signals its restart.
 */
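
/*
 * The handshake in rough C (added; pseudo-code, approximate):
 *
 *	stopped_cpus |= (1 << cpuid);		// atomic: signal receipt
 *	while (!(started_cpus & (1 << cpuid)))	// spin for permission
 *		;
 *	started_cpus &= ~(1 << cpuid);		// atomic: signal restart
 *	lapic_eoi();
 */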

	.text
	SUPERALIGN_TEXT
	.globl _Xcpustop
_Xcpustop:
	pushl	%eax
	pushl	%ds			/* save current data segment */

	movl	$KDSEL, %eax
	movl	%ax, %ds		/* use KERNEL data segment */

	movl	_cpuid, %eax

#ifdef COUNT_CSHITS
	incl	_cshits(,%eax,4)
#endif /* COUNT_CSHITS */

	ASMPOSTCODE_HI(0x1)

	lock
	btsl	%eax, _stopped_cpus	/* stopped_cpus |= (1<<id) */

	ASMPOSTCODE_HI(0x2)
1:
	btl	%eax, _started_cpus	/* while (!(started_cpus & (1<<id))) */
	jnc	1b

	ASMPOSTCODE_HI(0x3)

	lock
	btrl	%eax, _started_cpus	/* started_cpus &= ~(1<<id) */

	ASMPOSTCODE_HI(0x4)

	movl	$0, lapic_eoi		/* End Of Interrupt to APIC */

	popl	%ds			/* restore previous data segment */
	popl	%eax
	iret


MCOUNT_LABEL(bintr)
	FAST_INTR(0,fastintr0)
	FAST_INTR(1,fastintr1)
	FAST_INTR(2,fastintr2)
	FAST_INTR(3,fastintr3)
	FAST_INTR(4,fastintr4)
	FAST_INTR(5,fastintr5)
	FAST_INTR(6,fastintr6)
	FAST_INTR(7,fastintr7)
	FAST_INTR(8,fastintr8)
	FAST_INTR(9,fastintr9)
	FAST_INTR(10,fastintr10)
	FAST_INTR(11,fastintr11)
	FAST_INTR(12,fastintr12)
	FAST_INTR(13,fastintr13)
	FAST_INTR(14,fastintr14)
	FAST_INTR(15,fastintr15)
	FAST_INTR(16,fastintr16)
	FAST_INTR(17,fastintr17)
	FAST_INTR(18,fastintr18)
	FAST_INTR(19,fastintr19)
	FAST_INTR(20,fastintr20)
	FAST_INTR(21,fastintr21)
	FAST_INTR(22,fastintr22)
	FAST_INTR(23,fastintr23)
	INTR(0,intr0)
	INTR(1,intr1)
	INTR(2,intr2)
	INTR(3,intr3)
	INTR(4,intr4)
	INTR(5,intr5)
	INTR(6,intr6)
	INTR(7,intr7)
	INTR(8,intr8)
	INTR(9,intr9)
	INTR(10,intr10)
	INTR(11,intr11)
	INTR(12,intr12)
	INTR(13,intr13)
	INTR(14,intr14)
	INTR(15,intr15)
	INTR(16,intr16)
	INTR(17,intr17)
	INTR(18,intr18)
	INTR(19,intr19)
	INTR(20,intr20)
	INTR(21,intr21)
	INTR(22,intr22)
	INTR(23,intr23)
MCOUNT_LABEL(eintr)

	.data
ihandlers:			/* addresses of interrupt handlers */
				/* actually resumption addresses for HWI's */
	.long	Xresume0,  Xresume1,  Xresume2,  Xresume3
	.long	Xresume4,  Xresume5,  Xresume6,  Xresume7
	.long	Xresume8,  Xresume9,  Xresume10, Xresume11
	.long	Xresume12, Xresume13, Xresume14, Xresume15
	.long	Xresume16, Xresume17, Xresume18, Xresume19
	.long	Xresume20, Xresume21, Xresume22, Xresume23
	.long	swi_tty,   swi_net
	.long	0, 0, 0, 0
	.long	_softclock, swi_ast

imasks:				/* masks for interrupt handlers */
	.space	NHWI*4		/* padding; HWI masks are elsewhere */

	.long	SWI_TTY_MASK, SWI_NET_MASK
	.long	0, 0, 0, 0
	.long	SWI_CLOCK_MASK, SWI_AST_MASK

	.globl _ivectors
_ivectors:
	.long	_Xintr0,  _Xintr1,  _Xintr2,  _Xintr3
	.long	_Xintr4,  _Xintr5,  _Xintr6,  _Xintr7
	.long	_Xintr8,  _Xintr9,  _Xintr10, _Xintr11
	.long	_Xintr12, _Xintr13, _Xintr14, _Xintr15
	.long	_Xintr16, _Xintr17, _Xintr18, _Xintr19
	.long	_Xintr20, _Xintr21, _Xintr22, _Xintr23

/* active flag for lazy masking */
iactive:
	.long	0

#ifdef COUNT_SPURIOUS_INTS
	.globl	_sihits
_sihits:
	.long	0
#endif /* COUNT_SPURIOUS_INTS */

#ifdef COUNT_XINVLTLB_HITS
	.globl	_xhits
_xhits:
	.space	(NCPU * 4), 0
#endif /* COUNT_XINVLTLB_HITS */

/* variables used by stop_cpus()/restart_cpus()/Xcpustop */
	.globl _stopped_cpus, _started_cpus
_stopped_cpus:
	.long	0
_started_cpus:
	.long	0

#ifdef COUNT_CSHITS
	.globl	_cshits
_cshits:
	.space	(NCPU * 4), 0
#endif /* COUNT_CSHITS */

#ifdef PEND_INTS
	.globl	_apic_pin_trigger
_apic_pin_trigger:
	.space	(NAPIC * 4), 0
#endif /* PEND_INTS */


/*
 * Interrupt counters and names.  The format of these and the label names
 * must agree with what vmstat expects.  The tables are indexed by device
 * ids so that we don't have to move the names around as devices are
 * attached.
 */
#include "vector.h"
	.globl	_intrcnt, _eintrcnt
_intrcnt:
	.space	(NR_DEVICES + ICU_LEN) * 4
_eintrcnt:

	.globl	_intrnames, _eintrnames
_intrnames:
	.ascii	DEVICE_NAMES
	.asciz	"stray irq0"
	.asciz	"stray irq1"
	.asciz	"stray irq2"
	.asciz	"stray irq3"
	.asciz	"stray irq4"
	.asciz	"stray irq5"
	.asciz	"stray irq6"
	.asciz	"stray irq7"
	.asciz	"stray irq8"
	.asciz	"stray irq9"
	.asciz	"stray irq10"
	.asciz	"stray irq11"
	.asciz	"stray irq12"
	.asciz	"stray irq13"
	.asciz	"stray irq14"
	.asciz	"stray irq15"
	.asciz	"stray irq16"
	.asciz	"stray irq17"
	.asciz	"stray irq18"
	.asciz	"stray irq19"
	.asciz	"stray irq20"
	.asciz	"stray irq21"
	.asciz	"stray irq22"
	.asciz	"stray irq23"
_eintrnames:

	.text
483