/*
 *	from: vector.s, 386BSD 0.1 unknown origin
 *	$Id: apic_vector.s,v 1.18 1997/07/30 22:46:49 smp Exp smp $
 */


#include <machine/smp.h>
#include <machine/smptests.h>		/** PEND_INTS, various counters */
#include "i386/isa/intr_machdep.h"

/* convert an absolute IRQ# into a bitmask */
#define IRQ_BIT(irq_num)	(1 << (irq_num))

/* make an index into the IO APIC from the IRQ# */
#define REDTBL_IDX(irq_num)	(0x10 + ((irq_num) * 2))
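/*
 * Example: for IRQ 5, IRQ_BIT(5) == 0x20 and REDTBL_IDX(5) == 0x1a.
 * Each IO APIC pin has a 64-bit redirection entry stored as two 32-bit
 * registers starting at register index 0x10, hence the "* 2"; the low
 * half, selected here, holds the vector and the INTMASK bit.
 */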

/*
 * 'lazy masking' code suggested by Bruce Evans <bde@zeta.org.au>
 */
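/*
 * The idea: don't touch the IO APIC mask register on every interrupt.
 * An IRQ is only marked busy in the 'iactive' bitmap; the pin is masked
 * in hardware only when a second instance arrives while the first is
 * still being serviced (and, without PEND_INTS, whenever the IRQ is
 * found already active, regardless of trigger mode).
 */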

#ifdef PEND_INTS

/*
 * The first version failed because masked edge-triggered INTs are lost
 * by the IO APIC.  This version tests whether we are handling an edge-
 * or a level-triggered INT.  Level-triggered INTs must still be masked,
 * since we don't clear the source, and the EOI cycle would allow
 * recursive INTs to occur.
 */
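/*
 * In rough C terms the entry logic below is (an illustrative sketch
 * only; mask_ioapic_pin() is shorthand for the index/window sequence
 * in the macro, not a real function):
 *
 *	if (test_and_set(&iactive, irq) || !TRY_ISRLOCK(irq)) {
 *		IMASK_LOCK();
 *		if (apic_pin_trigger & IRQ_BIT(irq))
 *			mask_ioapic_pin(irq);		-- level-triggered
 *		ipending |= IRQ_BIT(irq);		-- replay via doreti
 *		IMASK_UNLOCK();
 *		lapic_eoi();
 *		return;					-- pop frame, iret
 *	}
 *	-- else: fall into the kernel at 8: with the ISR lock held
 */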
#define MAYBE_MASK_IRQ(irq_num)						\
	lock ;					/* MP-safe */		\
	btsl	$(irq_num),iactive ;		/* lazy masking */	\
	jc	6f ;				/* already active */	\
	TRY_ISRLOCK(irq_num) ;			/* try to get lock */	\
	testl	%eax, %eax ;			/* did we get it? */	\
	jnz	8f ;				/* yes, enter kernel */	\
6: ;						/* active or locked */	\
	IMASK_LOCK ;				/* into critical reg */	\
	testl	$IRQ_BIT(irq_num),_apic_pin_trigger ;			\
	jz	7f ;				/* edge, don't mask */	\
	orl	$IRQ_BIT(irq_num),_apic_imen ;	/* set the mask bit */	\
	movl	_ioapic,%ecx ;			/* ioapic[0] addr */	\
	movl	$REDTBL_IDX(irq_num),(%ecx) ;	/* write the index */	\
	movl	IOAPIC_WINDOW(%ecx),%eax ;	/* current value */	\
	orl	$IOART_INTMASK,%eax ;		/* set the mask */	\
	movl	%eax,IOAPIC_WINDOW(%ecx) ;	/* new value */		\
7: ;									\
	orl	$IRQ_BIT(irq_num), _ipending ;	/* set _ipending bit */	\
	IMASK_UNLOCK ;				/* exit critical reg */	\
	movl	$0, lapic_eoi ;			/* do the EOI */	\
	popl	%es ;							\
	popl	%ds ;							\
	popal ;								\
	addl	$4+4,%esp ;						\
	iret ;								\
;									\
	ALIGN_TEXT ;							\
8:

#else /* PEND_INTS */

#define MAYBE_MASK_IRQ(irq_num)						\
	lock ;					/* MP-safe */		\
	btsl	$(irq_num),iactive ;		/* lazy masking */	\
	jnc	1f ;				/* NOT active */	\
	IMASK_LOCK ;				/* enter critical reg */\
	orl	$IRQ_BIT(irq_num),_apic_imen ;	/* set the mask bit */	\
	movl	_ioapic,%ecx ;			/* ioapic[0] addr */	\
	movl	$REDTBL_IDX(irq_num),(%ecx) ;	/* write the index */	\
	movl	IOAPIC_WINDOW(%ecx),%eax ;	/* current value */	\
	orl	$IOART_INTMASK,%eax ;		/* set the mask */	\
	movl	%eax,IOAPIC_WINDOW(%ecx) ;	/* new value */		\
	orl	$IRQ_BIT(irq_num), _ipending ;	/* set _ipending bit */	\
	movl	$0, lapic_eoi ;			/* do the EOI */	\
	IMASK_UNLOCK ;				/* exit critical reg */	\
	popl	%es ;							\
	popl	%ds ;							\
	popal ;								\
	addl	$4+4,%esp ;						\
	iret ;								\
;									\
	ALIGN_TEXT ;							\
1: ;									\
	GET_MPLOCK				/* SMP Spin lock */

#endif /* PEND_INTS */


#define MAYBE_UNMASK_IRQ(irq_num)					\
	cli ;	/* must unmask _apic_imen and IO APIC atomically */	\
	lock ;					/* MP-safe */		\
	andl	$~IRQ_BIT(irq_num),iactive ;				\
	IMASK_LOCK ;				/* enter critical reg */\
	testl	$IRQ_BIT(irq_num),_apic_imen ;				\
	je	9f ;							\
	andl	$~IRQ_BIT(irq_num),_apic_imen ;	/* clear mask bit */	\
	movl	_ioapic,%ecx ;			/* ioapic[0] addr */	\
	movl	$REDTBL_IDX(irq_num),(%ecx) ;	/* write the index */	\
	movl	IOAPIC_WINDOW(%ecx),%eax ;	/* current value */	\
	andl	$~IOART_INTMASK,%eax ;		/* clear the mask */	\
	movl	%eax,IOAPIC_WINDOW(%ecx) ;	/* new value */		\
9: ;									\
	IMASK_UNLOCK ;				/* exit critical reg */	\
	sti	/* XXX _doreti repeats the cli/sti */
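
/*
 * Note: the bit in iactive is cleared first, then any hardware mask is
 * undone.  If _apic_imen shows the pin was never masked (the common,
 * uncontended case) the IO APIC is not touched at all; both checks sit
 * inside IMASK_LOCK so _apic_imen and the redirection entry cannot get
 * out of sync.
 */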


/*
 * Macros for interrupt entry, call to handler, and exit.
 */

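/*
 * FAST_INTR is the light-weight path: it saves only the call-used
 * registers, runs the handler immediately and EOIs, building a full
 * trap frame only if unmasked SWIs/HWIs became pending meanwhile.
 * INTR is the normal path: it builds a complete trap frame up front,
 * raises _cpl by the handler's interrupt mask, and exits via _doreti.
 */
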
#define	FAST_INTR(irq_num, vec_name)					\
	.text ;								\
	SUPERALIGN_TEXT ;						\
IDTVEC(vec_name) ;							\
	pushl	%eax ;		/* save only call-used registers */	\
	pushl	%ecx ;							\
	pushl	%edx ;							\
	pushl	%ds ;							\
	MAYBE_PUSHL_ES ;						\
	movl	$KDSEL,%eax ;						\
	movl	%ax,%ds ;						\
	MAYBE_MOVW_AX_ES ;						\
	FAKE_MCOUNT((4+ACTUALLY_PUSHED)*4(%esp)) ;			\
	GET_ISRLOCK(irq_num) ;						\
	pushl	_intr_unit + (irq_num) * 4 ;				\
	call	*_intr_handler + (irq_num) * 4 ; /* do the work ASAP */ \
	movl	$0, lapic_eoi ;						\
	addl	$4,%esp ;						\
	incl	_cnt+V_INTR ;	/* book-keeping can wait */		\
	movl	_intr_countp + (irq_num) * 4,%eax ;			\
	incl	(%eax) ;						\
	movl	_cpl,%eax ;	/* unmasking pending HWIs or SWIs? */	\
	notl	%eax ;							\
	andl	_ipending,%eax ;					\
	jne	2f ; 		/* yes, maybe handle them */		\
1: ;									\
	MEXITCOUNT ;							\
	REL_ISRLOCK(irq_num) ;						\
	MAYBE_POPL_ES ;							\
	popl	%ds ;							\
	popl	%edx ;							\
	popl	%ecx ;							\
	popl	%eax ;							\
	iret ;								\
;									\
	ALIGN_TEXT ;							\
2: ;									\
	cmpb	$3,_intr_nesting_level ;	/* enough stack? */	\
	jae	1b ;		/* no, return */			\
	movl	_cpl,%eax ;						\
	/* XXX next line is probably unnecessary now. */		\
	movl	$HWI_MASK|SWI_MASK,_cpl ;	/* limit nesting ... */	\
	incb	_intr_nesting_level ;	/* ... really limit it ... */	\
	sti ;			/* to do this as early as possible */	\
	MAYBE_POPL_ES ;		/* discard most of thin frame ... */	\
	popl	%ecx ;		/* ... original %ds ... */		\
	popl	%edx ;							\
	xchgl	%eax,4(%esp) ;	/* orig %eax; save cpl */		\
	pushal ;		/* build fat frame (grrr) ... */	\
	pushl	%ecx ;		/* ... actually %ds ... */		\
	pushl	%es ;							\
	movl	$KDSEL,%eax ;						\
	movl	%ax,%es ;						\
	movl	(2+8+0)*4(%esp),%ecx ;	/* %ecx from thin frame ... */	\
	movl	%ecx,(2+6)*4(%esp) ;	/* ... to fat frame ... */	\
	movl	(2+8+1)*4(%esp),%eax ;	/* ... cpl from thin frame */	\
	pushl	%eax ;							\
	subl	$4,%esp ;	/* junk for unit number */		\
	MEXITCOUNT ;							\
	jmp	_doreti

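/*
 * The Xresume label inside INTR below is the re-entry point _doreti
 * uses to run an interrupt that was pended in _ipending: by then a
 * trap frame already exists, so the resume point sits just past the
 * frame-building entry code.
 */
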
#define	INTR(irq_num, vec_name)						\
	.text ;								\
	SUPERALIGN_TEXT ;						\
IDTVEC(vec_name) ;							\
	pushl	$0 ;		/* dummy error code */			\
	pushl	$0 ;		/* dummy trap type */			\
	pushal ;							\
	pushl	%ds ;		/* save data and extra segments ... */	\
	pushl	%es ;							\
	movl	$KDSEL,%eax ;	/* ... and reload with kernel's ... */	\
	movl	%ax,%ds ;	/* ... early for obsolete reasons */	\
	movl	%ax,%es ;						\
	MAYBE_MASK_IRQ(irq_num) ;					\
	movl	$0, lapic_eoi ;						\
	movl	_cpl,%eax ;						\
	testl	$IRQ_BIT(irq_num), %eax ;				\
	jne	3f ;							\
	incb	_intr_nesting_level ;					\
__CONCAT(Xresume,irq_num): ;						\
	FAKE_MCOUNT(12*4(%esp)) ;	/* XXX late to avoid dbl cnt */ \
	incl	_cnt+V_INTR ;	/* tally interrupts */			\
	movl	_intr_countp + (irq_num) * 4,%eax ;			\
	incl	(%eax) ;						\
	movl	_cpl,%eax ;						\
	pushl	%eax ;							\
	pushl	_intr_unit + (irq_num) * 4 ;				\
	orl	_intr_mask + (irq_num) * 4,%eax ;			\
	movl	%eax,_cpl ;						\
	sti ;								\
	call	*_intr_handler + (irq_num) * 4 ;			\
	MAYBE_UNMASK_IRQ(irq_num) ;					\
	MEXITCOUNT ;							\
	jmp	_doreti ;						\
;									\
	ALIGN_TEXT ;							\
3: ;									\
	/* XXX skip mcounting here to avoid double count */		\
	orl	$IRQ_BIT(irq_num), _ipending ;				\
	REL_ISRLOCK(irq_num) ;						\
	popl	%es ;							\
	popl	%ds ;							\
	popal ;								\
	addl	$4+4,%esp ;						\
	iret

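/*
 * The 3: path above handles an IRQ that arrives while _cpl already
 * masks it: the bit is simply set in _ipending and the entry returns;
 * once the blocking code lowers _cpl, the interrupt is replayed through
 * the Xresume address recorded in the ihandlers table below.
 */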

/*
 * Handle "spurious INTerrupts".
 * Notes:
 *  This is different from the "spurious INTerrupt" generated by an
 *   8259 PIC for missing INTs.  See the APIC documentation for details.
 *  This routine should NOT do an 'EOI' cycle.
 */
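/*
 * (As we understand the local APIC, a spurious-vector delivery does not
 * set a bit in the in-service register, so there is nothing to EOI;
 * issuing one could instead complete some other in-service interrupt.)
 */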
	.text
	SUPERALIGN_TEXT
	.globl _Xspuriousint
_Xspuriousint:
#ifdef COUNT_SPURIOUS_INTS
	ss
	incl	_sihits
#endif

	/* No EOI cycle used here */

	iret


/*
 * Handle TLB shootdowns.
 */
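/*
 * A CPU that changes a page mapping sends this IPI to the other CPUs;
 * writing %cr3 back to itself below flushes the receiving CPU's TLB.
 * Only %ss is known to hold a kernel segment at this point, hence the
 * ss prefixes instead of loading %ds.
 */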
	.text
	SUPERALIGN_TEXT
	.globl	_Xinvltlb
_Xinvltlb:
	pushl	%eax

#ifdef COUNT_XINVLTLB_HITS
	ss
	movl	_cpuid, %eax
	ss
	incl	_xhits(,%eax,4)
#endif /* COUNT_XINVLTLB_HITS */

	movl	%cr3, %eax		/* invalidate the TLB */
	movl	%eax, %cr3

	ss				/* stack segment, avoid %ds load */
	movl	$0, lapic_eoi		/* End Of Interrupt to APIC */

	popl	%eax
	iret


/*
 * Executed by a CPU when it receives an Xcpustop IPI from another CPU:
 *
 *  - Signals its receipt.
 *  - Waits for permission to restart.
 *  - Signals its restart.
 */
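/*
 * The stopping side presumably spins until the target's bit appears in
 * _stopped_cpus, and later releases it by setting the same bit in
 * _started_cpus; the handler acknowledges by clearing that bit again.
 * (A sketch inferred from this handler, not from the C side.)
 */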

	.text
	SUPERALIGN_TEXT
	.globl _Xcpustop
_Xcpustop:
	pushl	%eax
	pushl	%ds			/* save current data segment */

	movl	$KDSEL, %eax
	movl	%ax, %ds		/* use KERNEL data segment */

	movl	_cpuid, %eax

#ifdef COUNT_CSHITS
	incl	_cshits(,%eax,4)
#endif /* COUNT_CSHITS */

	ASMPOSTCODE_HI(0x1)

	lock
	btsl	%eax, _stopped_cpus	/* stopped_cpus |= (1<<id) */

	ASMPOSTCODE_HI(0x2)
1:
	btl	%eax, _started_cpus	/* while (!(started_cpus & (1<<id))) */
	jnc	1b

	ASMPOSTCODE_HI(0x3)

	lock
	btrl	%eax, _started_cpus	/* started_cpus &= ~(1<<id) */

	ASMPOSTCODE_HI(0x4)

	movl	$0, lapic_eoi		/* End Of Interrupt to APIC */

	popl	%ds			/* restore previous data segment */
	popl	%eax
	iret


MCOUNT_LABEL(bintr)
	FAST_INTR(0,fastintr0)
	FAST_INTR(1,fastintr1)
	FAST_INTR(2,fastintr2)
	FAST_INTR(3,fastintr3)
	FAST_INTR(4,fastintr4)
	FAST_INTR(5,fastintr5)
	FAST_INTR(6,fastintr6)
	FAST_INTR(7,fastintr7)
	FAST_INTR(8,fastintr8)
	FAST_INTR(9,fastintr9)
	FAST_INTR(10,fastintr10)
	FAST_INTR(11,fastintr11)
	FAST_INTR(12,fastintr12)
	FAST_INTR(13,fastintr13)
	FAST_INTR(14,fastintr14)
	FAST_INTR(15,fastintr15)
	FAST_INTR(16,fastintr16)
	FAST_INTR(17,fastintr17)
	FAST_INTR(18,fastintr18)
	FAST_INTR(19,fastintr19)
	FAST_INTR(20,fastintr20)
	FAST_INTR(21,fastintr21)
	FAST_INTR(22,fastintr22)
	FAST_INTR(23,fastintr23)
	INTR(0,intr0)
	INTR(1,intr1)
	INTR(2,intr2)
	INTR(3,intr3)
	INTR(4,intr4)
	INTR(5,intr5)
	INTR(6,intr6)
	INTR(7,intr7)
	INTR(8,intr8)
	INTR(9,intr9)
	INTR(10,intr10)
	INTR(11,intr11)
	INTR(12,intr12)
	INTR(13,intr13)
	INTR(14,intr14)
	INTR(15,intr15)
	INTR(16,intr16)
	INTR(17,intr17)
	INTR(18,intr18)
	INTR(19,intr19)
	INTR(20,intr20)
	INTR(21,intr21)
	INTR(22,intr22)
	INTR(23,intr23)
MCOUNT_LABEL(eintr)

	.data
ihandlers:			/* addresses of interrupt handlers */
				/* actually resumption addresses for HWI's */
	.long	Xresume0,  Xresume1,  Xresume2,  Xresume3
	.long	Xresume4,  Xresume5,  Xresume6,  Xresume7
	.long	Xresume8,  Xresume9,  Xresume10, Xresume11
	.long	Xresume12, Xresume13, Xresume14, Xresume15
	.long	Xresume16, Xresume17, Xresume18, Xresume19
	.long	Xresume20, Xresume21, Xresume22, Xresume23
	.long	swi_tty,   swi_net
	.long	0, 0, 0, 0
	.long	_softclock, swi_ast

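/*
 * Layout: 24 hardware resume points, then the software interrupt
 * handlers with four unused slots between swi_net and _softclock.
 * imasks below lines up with this table entry for entry, which is why
 * it pads past the hardware IRQs whose masks live elsewhere.
 */
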
imasks:				/* masks for interrupt handlers */
	.space	NHWI*4		/* padding; HWI masks are elsewhere */

	.long	SWI_TTY_MASK, SWI_NET_MASK
	.long	0, 0, 0, 0
	.long	SWI_CLOCK_MASK, SWI_AST_MASK

	.globl _ivectors
_ivectors:
	.long	_Xintr0,  _Xintr1,  _Xintr2,  _Xintr3
	.long	_Xintr4,  _Xintr5,  _Xintr6,  _Xintr7
	.long	_Xintr8,  _Xintr9,  _Xintr10, _Xintr11
	.long	_Xintr12, _Xintr13, _Xintr14, _Xintr15
	.long	_Xintr16, _Xintr17, _Xintr18, _Xintr19
	.long	_Xintr20, _Xintr21, _Xintr22, _Xintr23

/* active flag for lazy masking */
iactive:
	.long	0

#ifdef COUNT_SPURIOUS_INTS
	.globl	_sihits
_sihits:
	.long	0
#endif /* COUNT_SPURIOUS_INTS */

#ifdef COUNT_XINVLTLB_HITS
	.globl	_xhits
_xhits:
	.space	(NCPU * 4), 0
#endif /* COUNT_XINVLTLB_HITS */

/* variables used by stop_cpus()/restart_cpus()/Xcpustop */
	.globl _stopped_cpus, _started_cpus
_stopped_cpus:
	.long	0
_started_cpus:
	.long	0

#ifdef COUNT_CSHITS
	.globl	_cshits
_cshits:
	.space	(NCPU * 4), 0
#endif /* COUNT_CSHITS */

#ifdef PEND_INTS
	.globl	_apic_pin_trigger
_apic_pin_trigger:
	.space	(NAPIC * 4), 0
#endif /* PEND_INTS */


/*
 * Interrupt counters and names.  The format of these and the label names
 * must agree with what vmstat expects.  The tables are indexed by device
 * ids so that we don't have to move the names around as devices are
 * attached.
 */
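/*
 * _intrcnt holds one 32-bit counter per entry and _intrnames the
 * matching packed list of NUL-terminated names; vmstat reads both from
 * kernel memory and pairs them by position, so the two tables must
 * stay the same length and order.
 */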
#include "vector.h"
	.globl	_intrcnt, _eintrcnt
_intrcnt:
	.space	(NR_DEVICES + ICU_LEN) * 4
_eintrcnt:

	.globl	_intrnames, _eintrnames
_intrnames:
	.ascii	DEVICE_NAMES
	.asciz	"stray irq0"
	.asciz	"stray irq1"
	.asciz	"stray irq2"
	.asciz	"stray irq3"
	.asciz	"stray irq4"
	.asciz	"stray irq5"
	.asciz	"stray irq6"
	.asciz	"stray irq7"
	.asciz	"stray irq8"
	.asciz	"stray irq9"
	.asciz	"stray irq10"
	.asciz	"stray irq11"
	.asciz	"stray irq12"
	.asciz	"stray irq13"
	.asciz	"stray irq14"
	.asciz	"stray irq15"
	.asciz	"stray irq16"
	.asciz	"stray irq17"
	.asciz	"stray irq18"
	.asciz	"stray irq19"
	.asciz	"stray irq20"
	.asciz	"stray irq21"
	.asciz	"stray irq22"
	.asciz	"stray irq23"
_eintrnames:

	.text
