apic_vector.s revision 28487
/*
 *	from: vector.s, 386BSD 0.1 unknown origin
 *	$Id: apic_vector.s,v 1.24 1997/08/21 04:52:30 smp Exp smp $
 */


#include <machine/apic.h>
#include <machine/smp.h>
#include <machine/smptests.h>			/** various things... */

#include "i386/isa/intr_machdep.h"


#ifdef FAST_SIMPLELOCK

#define GET_FAST_INTR_LOCK						\
	pushl	$_fast_intr_lock ;		/* address of lock */	\
	call	_s_lock ;			/* MP-safe */		\
	addl	$4,%esp

#define REL_FAST_INTR_LOCK						\
	pushl	$_fast_intr_lock ;		/* address of lock */	\
	call	_s_unlock ;			/* MP-safe */		\
	addl	$4,%esp

#else /* FAST_SIMPLELOCK */

#define GET_FAST_INTR_LOCK						\
	call	_get_isrlock

#define REL_FAST_INTR_LOCK						\
	pushl	$_mp_lock ;	/* GIANT_LOCK */			\
	call	_MPrellock ;						\
	addl	$4, %esp

#endif /* FAST_SIMPLELOCK */
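
/*
 * Without FAST_SIMPLELOCK the "fast interrupt lock" is the giant MP
 * lock itself: REL_FAST_INTR_LOCK hands _mp_lock to _MPrellock, so the
 * lock taken by _get_isrlock is presumably that same lock, and fast
 * interrupts serialize against the rest of the kernel.
 */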

/* convert an absolute IRQ# into a bitmask */
#define IRQ_BIT(irq_num)	(1 << (irq_num))

/* make an index into the IO APIC from the IRQ# */
#define REDTBL_IDX(irq_num)	(0x10 + ((irq_num) * 2))
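
/*
 * For example, for irq_num 10: IRQ_BIT(10) == 0x400, and
 * REDTBL_IDX(10) == 0x10 + 10 * 2 == 0x24, the low dword of I/O APIC
 * redirection table entry 10 (entries are 64 bits wide, two 32-bit
 * registers each, starting at register index 0x10).
 */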


/*
 * Macros for interrupt entry, call to handler, and exit.
 */

#ifdef FAST_WITHOUTCPL

/*
 * Fast interrupt entry: call the handler as soon as possible, saving
 * only the call-used registers and doing no cpl masking.
 */
#define	FAST_INTR(irq_num, vec_name)					\
	.text ;								\
	SUPERALIGN_TEXT ;						\
IDTVEC(vec_name) ;							\
	pushl	%eax ;		/* save only call-used registers */	\
	pushl	%ecx ;							\
	pushl	%edx ;							\
	pushl	%ds ;							\
	MAYBE_PUSHL_ES ;						\
	movl	$KDSEL,%eax ;						\
	movl	%ax,%ds ;						\
	MAYBE_MOVW_AX_ES ;						\
	FAKE_MCOUNT((4+ACTUALLY_PUSHED)*4(%esp)) ;			\
	GET_FAST_INTR_LOCK ;						\
	pushl	_intr_unit + (irq_num) * 4 ;				\
	call	*_intr_handler + (irq_num) * 4 ; /* do the work ASAP */ \
	addl	$4, %esp ;						\
	movl	$0, lapic_eoi ;						\
	lock ;								\
	incl	_cnt+V_INTR ;	/* book-keeping can wait */		\
	movl	_intr_countp + (irq_num) * 4, %eax ;			\
	lock ;								\
	incl	(%eax) ;						\
	MEXITCOUNT ;							\
	REL_FAST_INTR_LOCK ;						\
	MAYBE_POPL_ES ;							\
	popl	%ds ;							\
	popl	%edx ;							\
	popl	%ecx ;							\
	popl	%eax ;							\
	iret

#else

#define	FAST_INTR(irq_num, vec_name)					\
	.text ;								\
	SUPERALIGN_TEXT ;						\
IDTVEC(vec_name) ;							\
	pushl	%eax ;		/* save only call-used registers */	\
	pushl	%ecx ;							\
	pushl	%edx ;							\
	pushl	%ds ;							\
	MAYBE_PUSHL_ES ;						\
	movl	$KDSEL,%eax ;						\
	movl	%ax,%ds ;						\
	MAYBE_MOVW_AX_ES ;						\
	FAKE_MCOUNT((4+ACTUALLY_PUSHED)*4(%esp)) ;			\
	GET_FAST_INTR_LOCK ;						\
	pushl	_intr_unit + (irq_num) * 4 ;				\
	call	*_intr_handler + (irq_num) * 4 ; /* do the work ASAP */ \
	addl	$4,%esp ;						\
	movl	$0, lapic_eoi ;						\
	incl	_cnt+V_INTR ;	/* book-keeping can wait */		\
	movl	_intr_countp + (irq_num) * 4,%eax ;			\
	incl	(%eax) ;						\
	movl	_cpl,%eax ;	/* unmasking pending HWIs or SWIs? */	\
	notl	%eax ;							\
	andl	_ipending,%eax ;					\
	jne	2f ; 		/* yes, maybe handle them */		\
1: ;									\
	MEXITCOUNT ;							\
	REL_FAST_INTR_LOCK ;						\
	MAYBE_POPL_ES ;							\
	popl	%ds ;							\
	popl	%edx ;							\
	popl	%ecx ;							\
	popl	%eax ;							\
	iret ;								\
;									\
	ALIGN_TEXT ;							\
2: ;									\
	cmpb	$3,_intr_nesting_level ;	/* enough stack? */	\
	jae	1b ;		/* no, return */			\
	movl	_cpl,%eax ;						\
	/* XXX next line is probably unnecessary now. */		\
	movl	$HWI_MASK|SWI_MASK,_cpl ;	/* limit nesting ... */	\
	incb	_intr_nesting_level ;	/* ... really limit it ... */	\
	sti ;			/* to do this as early as possible */	\
	MAYBE_POPL_ES ;		/* discard most of thin frame ... */	\
	popl	%ecx ;		/* ... original %ds ... */		\
	popl	%edx ;							\
	xchgl	%eax,4(%esp) ;	/* orig %eax; save cpl */		\
	pushal ;		/* build fat frame (grrr) ... */	\
	pushl	%ecx ;		/* ... actually %ds ... */		\
	pushl	%es ;							\
	movl	$KDSEL,%eax ;						\
	movl	%ax,%es ;						\
	movl	(2+8+0)*4(%esp),%ecx ;	/* %ecx from thin frame ... */	\
	movl	%ecx,(2+6)*4(%esp) ;	/* ... to fat frame ... */	\
	movl	(2+8+1)*4(%esp),%eax ;	/* ... cpl from thin frame */	\
	pushl	%eax ;							\
	subl	$4,%esp ;	/* junk for unit number */		\
	MEXITCOUNT ;							\
	jmp	_doreti
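
/*
 * In the 2: path above the "thin" frame is converted into the fat
 * trap frame _doreti expects: the cpl held in %eax is swapped with
 * the saved %eax, pushal rebuilds the full register set, the original
 * %ds and %ecx are patched into their slots in the fat frame, and the
 * saved cpl plus a junk unit-number word are pushed on top.
 */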

#endif /** FAST_WITHOUTCPL */


/*
 * Macros to build and tear down the full trap frame used by the
 * normal (non-fast) interrupt stubs.  The two dummy words stand in
 * for the error code and trap type; the final addl in POP_FRAME
 * discards them again.
 */
#define PUSH_FRAME							\
	pushl	$0 ;		/* dummy error code */			\
	pushl	$0 ;		/* dummy trap type */			\
	pushal ;							\
	pushl	%ds ;		/* save data and extra segments ... */	\
	pushl	%es

#define POP_FRAME							\
	popl	%es ;							\
	popl	%ds ;							\
	popal ;								\
	addl	$4+4,%esp

/*
 * Test to see whether we are handling an edge- or a level-triggered INT.
 *  Level-triggered INTs must still be masked as we don't clear the source,
 *  and the EOI cycle would cause redundant INTs to occur.
 */
#define MASK_LEVEL_IRQ(irq_num)						\
	IMASK_LOCK ;				/* into critical reg */	\
	testl	$IRQ_BIT(irq_num), _apic_pin_trigger ;			\
	jz	8f ;				/* edge, don't mask */	\
	orl	$IRQ_BIT(irq_num), _apic_imen ;	/* set the mask bit */	\
	movl	_ioapic, %ecx ;			/* ioapic[0] addr */	\
	movl	$REDTBL_IDX(irq_num), (%ecx) ;	/* write the index */	\
	movl	IOAPIC_WINDOW(%ecx), %eax ;	/* current value */	\
	orl	$IOART_INTMASK, %eax ;		/* set the mask */	\
	movl	%eax, IOAPIC_WINDOW(%ecx) ;	/* new value */		\
8: ;									\
	IMASK_UNLOCK

/*
 * Test to see if the source is currently masked; clear it if so.
 */
#define UNMASK_IRQ(irq_num)						\
	IMASK_LOCK ;				/* into critical reg */	\
	testl	$IRQ_BIT(irq_num), _apic_imen ;				\
	je	9f ;							\
	andl	$~IRQ_BIT(irq_num), _apic_imen ;/* clear mask bit */	\
	movl	_ioapic,%ecx ;			/* ioapic[0] addr */	\
	movl	$REDTBL_IDX(irq_num),(%ecx) ;	/* write the index */	\
	movl	IOAPIC_WINDOW(%ecx),%eax ;	/* current value */	\
	andl	$~IOART_INTMASK,%eax ;		/* clear the mask */	\
	movl	%eax,IOAPIC_WINDOW(%ecx) ;	/* new value */		\
9: ;									\
	IMASK_UNLOCK
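
/*
 * Both macros above use the I/O APIC's indirect access protocol: the
 * register index (here REDTBL_IDX(irq_num), e.g. 0x16 for IRQ 3) is
 * stored to the select register at the chip's base address, and the
 * selected register is then read or written through the data window
 * at offset IOAPIC_WINDOW.
 */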

#define	INTR(irq_num, vec_name)						\
	.text ;								\
	SUPERALIGN_TEXT ;						\
IDTVEC(vec_name) ;							\
	PUSH_FRAME ;							\
	movl	$KDSEL, %eax ;	/* reload with kernel's data segment */	\
	movl	%ax, %ds ;						\
	movl	%ax, %es ;						\
;									\
	lock ;					/* MP-safe */		\
	btsl	$(irq_num), iactive ;		/* lazy masking */	\
	jc	1f ;				/* already active */	\
;									\
	ISR_TRYLOCK ;		/* XXX this is going away... */		\
	testl	%eax, %eax ;			/* did we get it? */	\
	jz	1f ;				/* no */		\
;									\
	CPL_LOCK ;				/* MP-safe */		\
	testl	$IRQ_BIT(irq_num), _cpl ;				\
	jne	2f ;							\
	orl	$IRQ_BIT(irq_num), _cil ;				\
	CPL_UNLOCK ;							\
;									\
	movl	$0, lapic_eoi ;			/* XXX too soon? */	\
	incb	_intr_nesting_level ;					\
__CONCAT(Xresume,irq_num): ;						\
	FAKE_MCOUNT(12*4(%esp)) ;		/* XXX avoid dbl cnt */ \
	lock ;	incl	_cnt+V_INTR ;		/* tally interrupts */	\
	movl	_intr_countp + (irq_num) * 4, %eax ;			\
	lock ;	incl	(%eax) ;					\
;									\
	CPL_LOCK ;				/* MP-safe */		\
	movl	_cpl, %eax ;						\
	pushl	%eax ;							\
	orl	_intr_mask + (irq_num) * 4, %eax ;			\
	movl	%eax, _cpl ;						\
	CPL_UNLOCK ;							\
;									\
	pushl	_intr_unit + (irq_num) * 4 ;				\
	sti ;								\
	call	*_intr_handler + (irq_num) * 4 ;			\
	cli ;								\
;									\
	lock ;	andl	$~IRQ_BIT(irq_num), iactive ;			\
	UNMASK_IRQ(irq_num) ;						\
	sti ;				/* doreti repeats cli/sti */	\
	MEXITCOUNT ;							\
	jmp	_doreti ;						\
;									\
	ALIGN_TEXT ;							\
1: ;						/* active or locked */	\
	MASK_LEVEL_IRQ(irq_num) ;					\
	movl	$0, lapic_eoi ;			/* do the EOI */	\
;									\
	CPL_LOCK ;				/* MP-safe */		\
	orl	$IRQ_BIT(irq_num), _ipending ;				\
	CPL_UNLOCK ;							\
;									\
	POP_FRAME ;							\
	iret ;								\
;									\
	ALIGN_TEXT ;							\
2: ;						/* masked by cpl */	\
	CPL_UNLOCK ;							\
	ISR_RELLOCK ;		/* XXX this is going away... */		\
	jmp	1b
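
/*
 * Flow summary for INTR(): the iactive bit gives lazy masking; the
 * pin is left unmasked until the IRQ fires while it is already being
 * serviced, while the ISR lock is unavailable, or while the IRQ is
 * masked in _cpl.  In those cases path 1: masks the pin if it is
 * level-triggered, EOIs, and records the IRQ in _ipending so the
 * handler can be run later from _doreti.
 */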


/*
 * Handle "spurious INTerrupts".
 * Notes:
 *  This differs from the "spurious INTerrupt" generated by an
 *   8259 PIC for missing INTs.  See the APIC documentation for details.
 *  This routine should NOT do an 'EOI' cycle.
 */
	.text
	SUPERALIGN_TEXT
	.globl _Xspuriousint
_Xspuriousint:

	/* No EOI cycle used here */

	iret


/*
 * Handle TLB shootdowns.
 */
	.text
	SUPERALIGN_TEXT
	.globl	_Xinvltlb
_Xinvltlb:
	pushl	%eax

#ifdef COUNT_XINVLTLB_HITS
	ss
	movl	_cpuid, %eax
	ss
	incl	_xhits(,%eax,4)
#endif /* COUNT_XINVLTLB_HITS */

	movl	%cr3, %eax		/* invalidate the TLB */
	movl	%eax, %cr3

	ss				/* stack segment, avoid %ds load */
	movl	$0, lapic_eoi		/* End Of Interrupt to APIC */

	popl	%eax
	iret
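
/*
 * Writing %cr3 back to itself flushes the TLB; the ss: overrides let
 * the handler touch memory without reloading %ds, since only %eax is
 * saved here.
 */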


/*
 * Executed by a CPU when it receives an Xcpustop IPI from another CPU:
 *
 *  - Signals its receipt.
 *  - Waits for permission to restart.
 *  - Signals its restart.
 */

	.text
	SUPERALIGN_TEXT
	.globl _Xcpustop
_Xcpustop:
	pushl	%eax
	pushl	%ds			/* save current data segment */

	movl	$KDSEL, %eax
	movl	%ax, %ds		/* use KERNEL data segment */

	movl	_cpuid, %eax

	ASMPOSTCODE_HI(0x1)

	lock
	btsl	%eax, _stopped_cpus	/* stopped_cpus |= (1<<id) */

	ASMPOSTCODE_HI(0x2)
1:
	btl	%eax, _started_cpus	/* while (!(started_cpus & (1<<id))) */
	jnc	1b

	ASMPOSTCODE_HI(0x3)

	lock
	btrl	%eax, _started_cpus	/* started_cpus &= ~(1<<id) */

	ASMPOSTCODE_HI(0x4)

	movl	$0, lapic_eoi		/* End Of Interrupt to APIC */

	popl	%ds			/* restore previous data segment */
	popl	%eax
	iret
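
/*
 * The handshake as seen from this side: the stopped CPU sets its bit
 * in _stopped_cpus, spins until restart_cpus() sets the matching bit
 * in _started_cpus, then clears that bit to acknowledge the restart.
 * Presumably stop_cpus() waits on _stopped_cpus and restart_cpus()
 * clears it; see the variable declarations below.
 */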


MCOUNT_LABEL(bintr)
	FAST_INTR(0,fastintr0)
	FAST_INTR(1,fastintr1)
	FAST_INTR(2,fastintr2)
	FAST_INTR(3,fastintr3)
	FAST_INTR(4,fastintr4)
	FAST_INTR(5,fastintr5)
	FAST_INTR(6,fastintr6)
	FAST_INTR(7,fastintr7)
	FAST_INTR(8,fastintr8)
	FAST_INTR(9,fastintr9)
	FAST_INTR(10,fastintr10)
	FAST_INTR(11,fastintr11)
	FAST_INTR(12,fastintr12)
	FAST_INTR(13,fastintr13)
	FAST_INTR(14,fastintr14)
	FAST_INTR(15,fastintr15)
	FAST_INTR(16,fastintr16)
	FAST_INTR(17,fastintr17)
	FAST_INTR(18,fastintr18)
	FAST_INTR(19,fastintr19)
	FAST_INTR(20,fastintr20)
	FAST_INTR(21,fastintr21)
	FAST_INTR(22,fastintr22)
	FAST_INTR(23,fastintr23)
	INTR(0,intr0)
	INTR(1,intr1)
	INTR(2,intr2)
	INTR(3,intr3)
	INTR(4,intr4)
	INTR(5,intr5)
	INTR(6,intr6)
	INTR(7,intr7)
	INTR(8,intr8)
	INTR(9,intr9)
	INTR(10,intr10)
	INTR(11,intr11)
	INTR(12,intr12)
	INTR(13,intr13)
	INTR(14,intr14)
	INTR(15,intr15)
	INTR(16,intr16)
	INTR(17,intr17)
	INTR(18,intr18)
	INTR(19,intr19)
	INTR(20,intr20)
	INTR(21,intr21)
	INTR(22,intr22)
	INTR(23,intr23)
MCOUNT_LABEL(eintr)

	.data
ihandlers:			/* addresses of interrupt handlers */
				/* actually resumption addresses for HWIs */
	.long	Xresume0,  Xresume1,  Xresume2,  Xresume3
	.long	Xresume4,  Xresume5,  Xresume6,  Xresume7
	.long	Xresume8,  Xresume9,  Xresume10, Xresume11
	.long	Xresume12, Xresume13, Xresume14, Xresume15
	.long	Xresume16, Xresume17, Xresume18, Xresume19
	.long	Xresume20, Xresume21, Xresume22, Xresume23
	.long	swi_tty,   swi_net
	.long	0, 0, 0, 0
	.long	_softclock, swi_ast

imasks:				/* masks for interrupt handlers */
	.space	NHWI*4		/* padding; HWI masks are elsewhere */

	.long	SWI_TTY_MASK, SWI_NET_MASK
	.long	0, 0, 0, 0
	.long	SWI_CLOCK_MASK, SWI_AST_MASK

	.globl _ivectors
_ivectors:
	.long	_Xintr0,  _Xintr1,  _Xintr2,  _Xintr3
	.long	_Xintr4,  _Xintr5,  _Xintr6,  _Xintr7
	.long	_Xintr8,  _Xintr9,  _Xintr10, _Xintr11
	.long	_Xintr12, _Xintr13, _Xintr14, _Xintr15
	.long	_Xintr16, _Xintr17, _Xintr18, _Xintr19
	.long	_Xintr20, _Xintr21, _Xintr22, _Xintr23

/* active flag for lazy masking */
iactive:
	.long	0

#ifdef COUNT_XINVLTLB_HITS
	.globl	_xhits
_xhits:
	.space	(NCPU * 4), 0
#endif /* COUNT_XINVLTLB_HITS */

/* variables used by stop_cpus()/restart_cpus()/Xcpustop */
	.globl _stopped_cpus, _started_cpus
_stopped_cpus:
	.long	0
_started_cpus:
	.long	0

	.globl	_apic_pin_trigger
_apic_pin_trigger:
	.space	(NAPIC * 4), 0


/*
 * Interrupt counters and names.  The format of these and the label names
 * must agree with what vmstat expects.  The tables are indexed by device
 * ids so that we don't have to move the names around as devices are
 * attached.
 */
#include "vector.h"
	.globl	_intrcnt, _eintrcnt
_intrcnt:
	.space	(NR_DEVICES + ICU_LEN) * 4
_eintrcnt:

	.globl	_intrnames, _eintrnames
_intrnames:
	.ascii	DEVICE_NAMES
	.asciz	"stray irq0"
	.asciz	"stray irq1"
	.asciz	"stray irq2"
	.asciz	"stray irq3"
	.asciz	"stray irq4"
	.asciz	"stray irq5"
	.asciz	"stray irq6"
	.asciz	"stray irq7"
	.asciz	"stray irq8"
	.asciz	"stray irq9"
	.asciz	"stray irq10"
	.asciz	"stray irq11"
	.asciz	"stray irq12"
	.asciz	"stray irq13"
	.asciz	"stray irq14"
	.asciz	"stray irq15"
	.asciz	"stray irq16"
	.asciz	"stray irq17"
	.asciz	"stray irq18"
	.asciz	"stray irq19"
	.asciz	"stray irq20"
	.asciz	"stray irq21"
	.asciz	"stray irq22"
	.asciz	"stray irq23"
_eintrnames:

	.text