/* apic_vector.s revision 28641 */
1/*
2 *	from: vector.s, 386BSD 0.1 unknown origin
3 *	$Id: apic_vector.s,v 1.27 1997/08/23 05:15:12 smp Exp smp $
4 */
5
6
7#include <machine/apic.h>
8#include <machine/smp.h>
9#include <machine/smptests.h>			/** various things... */
10
11#include "i386/isa/intr_machdep.h"
12
13
/*
 * AVCPL_LOCK/AVCPL_UNLOCK bracket accesses to the shared cpl/cil/ipending
 * state in the macros below.  They expand to the real CPL spinlock macros
 * only for SMP kernels built with REAL_AVCPL; otherwise they are no-ops.
 */
14#if defined(SMP) && defined(REAL_AVCPL)
15
16#define AVCPL_LOCK	CPL_LOCK
17#define AVCPL_UNLOCK	CPL_UNLOCK
18
19#else
20
21#define AVCPL_LOCK
22#define AVCPL_UNLOCK
23
24#endif
25
/*
 * Locking used by the FAST_INTR stubs.
 *
 * FAST_SIMPLELOCK: take/release a dedicated simplelock
 * (_fast_intr_lock) via the MP-safe _s_lock/_s_unlock helpers.
 * Otherwise: acquire the ISR lock on entry (_get_isrlock) and release
 * the giant MP lock (_mp_lock) via _MPrellock on exit.
 */
26#ifdef FAST_SIMPLELOCK
27
28#define GET_FAST_INTR_LOCK						\
29	pushl	$_fast_intr_lock ;		/* address of lock */	\
30	call	_s_lock ;			/* MP-safe */		\
31	addl	$4,%esp
32
33#define REL_FAST_INTR_LOCK						\
34	pushl	$_fast_intr_lock ;		/* address of lock */	\
35	call	_s_unlock ;			/* MP-safe */		\
36	addl	$4,%esp
37
38#else /* FAST_SIMPLELOCK */
39
40#define GET_FAST_INTR_LOCK						\
41	call	_get_isrlock
42
43#define REL_FAST_INTR_LOCK						\
44	pushl	$_mp_lock ;	/* GIANT_LOCK */			\
45	call	_MPrellock ;						\
46	addl	$4, %esp	/* explicit size, matching the other arms */
47
48#endif /* FAST_SIMPLELOCK */
49
50/* convert an absolute IRQ# into a bitmask */
51#define IRQ_BIT(irq_num)	(1 << (irq_num))
52
53/* make an index into the IO APIC from the IRQ# */
/* redirection table entries are two 32-bit registers each, based at 0x10 */
54#define REDTBL_IDX(irq_num)	(0x10 + ((irq_num) * 2))
55
56
57/*
58 * Macros for interrupt entry, call to handler, and exit.
59 */
60
61#ifdef FAST_WITHOUTCPL
62
63/*
 * Fast interrupt entry, no-cpl variant: save only the call-used
 * registers plus %ds (and maybe %es), load the kernel data selector,
 * take the fast-interrupt lock, call the unit's handler immediately,
 * EOI the local APIC, bump the statistics counters and iret.  Unlike
 * the variant below, no pending-interrupt check is done on the way out.
64 */
65#define	FAST_INTR(irq_num, vec_name)					\
66	.text ;								\
67	SUPERALIGN_TEXT ;						\
68IDTVEC(vec_name) ;							\
69	pushl	%eax ;		/* save only call-used registers */	\
70	pushl	%ecx ;							\
71	pushl	%edx ;							\
72	pushl	%ds ;							\
73	MAYBE_PUSHL_ES ;						\
74	movl	$KDSEL,%eax ;						\
75	movl	%ax,%ds ;						\
76	MAYBE_MOVW_AX_ES ;						\
77	FAKE_MCOUNT((4+ACTUALLY_PUSHED)*4(%esp)) ;			\
78	GET_FAST_INTR_LOCK ;						\
79	pushl	_intr_unit + (irq_num) * 4 ;				\
80	call	*_intr_handler + (irq_num) * 4 ; /* do the work ASAP */ \
81	addl	$4, %esp ;						\
82	movl	$0, lapic_eoi ;						\
83	lock ; 								\
84	incl	_cnt+V_INTR ;	/* book-keeping can wait */		\
85	movl	_intr_countp + (irq_num) * 4, %eax ;			\
86	lock ; 								\
87	incl	(%eax) ;						\
88	MEXITCOUNT ;							\
89	REL_FAST_INTR_LOCK ;						\
90	MAYBE_POPL_ES ;							\
91	popl	%ds ;							\
92	popl	%edx ;							\
93	popl	%ecx ;							\
94	popl	%eax ;							\
95	iret
96
97#else /* FAST_WITHOUTCPL */
98
/*
 * Fast interrupt entry with cpl handling: identical to the variant
 * above through the handler call and bookkeeping, but afterwards checks
 * _ipending against ~_cpl.  If unmasked interrupts are pending and the
 * nesting level allows (label 2:), the thin frame is converted in place
 * to a full trap frame and control exits via _doreti; otherwise the
 * stub unwinds normally (label 1:) and irets.
 */
99#define	FAST_INTR(irq_num, vec_name)					\
100	.text ;								\
101	SUPERALIGN_TEXT ;						\
102IDTVEC(vec_name) ;							\
103	pushl	%eax ;		/* save only call-used registers */	\
104	pushl	%ecx ;							\
105	pushl	%edx ;							\
106	pushl	%ds ;							\
107	MAYBE_PUSHL_ES ;						\
108	movl	$KDSEL, %eax ;						\
109	movl	%ax, %ds ;						\
110	MAYBE_MOVW_AX_ES ;						\
111	FAKE_MCOUNT((4+ACTUALLY_PUSHED)*4(%esp)) ;			\
112	GET_FAST_INTR_LOCK ;						\
113	pushl	_intr_unit + (irq_num) * 4 ;				\
114	call	*_intr_handler + (irq_num) * 4 ; /* do the work ASAP */ \
115	addl	$4, %esp ;						\
116	movl	$0, lapic_eoi ;						\
117	lock ; 								\
118	incl	_cnt+V_INTR ;	/* book-keeping can wait */		\
119	movl	_intr_countp + (irq_num) * 4,%eax ;			\
120	lock ; 								\
121	incl	(%eax) ;						\
122	movl	_cpl, %eax ;	/* unmasking pending HWIs or SWIs? */	\
123	notl	%eax ;							\
124	andl	_ipending, %eax ;					\
125	jne	2f ; 		/* yes, maybe handle them */		\
1261: ;									\
127	MEXITCOUNT ;							\
128	REL_FAST_INTR_LOCK ;						\
129	MAYBE_POPL_ES ;							\
130	popl	%ds ;							\
131	popl	%edx ;							\
132	popl	%ecx ;							\
133	popl	%eax ;							\
134	iret ;								\
135;									\
136	ALIGN_TEXT ;							\
1372: ;									\
138	cmpb	$3, _intr_nesting_level ;	/* enough stack? */	\
139	jae	1b ;		/* no, return */			\
140	movl	_cpl, %eax ;						\
141	/* XXX next line is probably unnecessary now. */		\
142	movl	$HWI_MASK|SWI_MASK, _cpl ;	/* limit nesting ... */	\
143	lock ; 								\
144	incb	_intr_nesting_level ;	/* ... really limit it ... */	\
145	sti ;			/* to do this as early as possible */	\
146	MAYBE_POPL_ES ;		/* discard most of thin frame ... */	\
147	popl	%ecx ;		/* ... original %ds ... */		\
148	popl	%edx ;							\
149	xchgl	%eax, 4(%esp) ;	/* orig %eax; save cpl */		\
150	pushal ;		/* build fat frame (grrr) ... */	\
151	pushl	%ecx ;		/* ... actually %ds ... */		\
152	pushl	%es ;							\
153	movl	$KDSEL, %eax ;						\
154	movl	%ax, %es ;						\
155	movl	(2+8+0)*4(%esp), %ecx ;	/* %ecx from thin frame ... */	\
156	movl	%ecx, (2+6)*4(%esp) ;	/* ... to fat frame ... */	\
157	movl	(2+8+1)*4(%esp), %eax ;	/* ... cpl from thin frame */	\
158	pushl	%eax ;							\
159	subl	$4, %esp ;	/* junk for unit number */		\
160	MEXITCOUNT ;							\
161	jmp	_doreti
162
163#endif /** FAST_WITHOUTCPL */
164
165
166/*
167 *
 * PUSH_FRAME/POP_FRAME build and tear down a full trap frame: dummy
 * error code and trap type words, all general registers (pushal),
 * then the %ds and %es segment registers.  POP_FRAME undoes this,
 * discarding the two dummy words with the final addl.
168 */
169#define PUSH_FRAME							\
170	pushl	$0 ;		/* dummy error code */			\
171	pushl	$0 ;		/* dummy trap type */			\
172	pushal ;							\
173	pushl	%ds ;		/* save data and extra segments ... */	\
174	pushl	%es
175
176#define POP_FRAME							\
177	popl	%es ;							\
178	popl	%ds ;							\
179	popal ;								\
180	addl	$4+4,%esp
181
182/*
183 * Test to see whether we are handling an edge or level triggered INT.
184 *  Level-triggered INTs must still be masked as we don't clear the source,
185 *  and the EOI cycle would cause redundant INTs to occur.
 *
 *  For level-triggered pins this sets the bit in _apic_imen and the
 *  INTMASK bit of the pin's redirection entry, programmed through the
 *  ioapic[0] index/window register pair.  Clobbers %eax and %ecx
 *  (and flags); local label 8: is the edge-triggered skip target.
186 */
187#define MASK_LEVEL_IRQ(irq_num)						\
188	IMASK_LOCK ;				/* into critical reg */	\
189	testl	$IRQ_BIT(irq_num), _apic_pin_trigger ;			\
190	jz	8f ;				/* edge, don't mask */	\
191	orl	$IRQ_BIT(irq_num), _apic_imen ;	/* set the mask bit */	\
192	movl	_ioapic, %ecx ;			/* ioapic[0] addr */	\
193	movl	$REDTBL_IDX(irq_num), (%ecx) ;	/* write the index */	\
194	movl	IOAPIC_WINDOW(%ecx), %eax ;	/* current value */	\
195	orl	$IOART_INTMASK, %eax ;		/* set the mask */	\
196	movl	%eax, IOAPIC_WINDOW(%ecx) ;	/* new value */		\
1978: ;									\
198	IMASK_UNLOCK
199
200/*
201 * Test to see if the source is currently masked, clear if so.
 *
 *  Inverse of MASK_LEVEL_IRQ: if the IRQ's bit is set in _apic_imen,
 *  clear it and clear the INTMASK bit in the pin's redirection entry.
 *  Clobbers %eax and %ecx (and flags); local label 9: is the
 *  already-unmasked skip target.
202 */
203#define UNMASK_IRQ(irq_num)					\
204	IMASK_LOCK ;				/* into critical reg */	\
205	testl	$IRQ_BIT(irq_num), _apic_imen ;				\
206	je	9f ;							\
207	andl	$~IRQ_BIT(irq_num), _apic_imen ;/* clear mask bit */	\
208	movl	_ioapic,%ecx ;			/* ioapic[0]addr */	\
209	movl	$REDTBL_IDX(irq_num),(%ecx) ;	/* write the index */	\
210	movl	IOAPIC_WINDOW(%ecx),%eax ;	/* current value */	\
211	andl	$~IOART_INTMASK,%eax ;		/* clear the mask */	\
212	movl	%eax,IOAPIC_WINDOW(%ecx) ;	/* new value */		\
2139: ;									\
214	IMASK_UNLOCK
215
/*
 * Normal (non-fast) interrupt entry for IRQ irq_num.
 *
 * Builds a full trap frame and uses "lazy masking": instead of masking
 * the pin up front, the IRQ's bit is set in the iactive bitmap.  The
 * interrupt is deferred (label 1:) when the source is already active,
 * when the ISR lock cannot be taken, or when it is masked by the
 * current cpl (label 2:) -- in those cases the pin is masked if level
 * triggered, the bit is recorded in _ipending and the stub irets.  On
 * the normal path the handler runs with interrupts enabled and control
 * exits through _doreti.  __CONCAT(Xresume,irq_num) is the resumption
 * entry used by the ihandlers table below.
 */
216#define	INTR(irq_num, vec_name)						\
217	.text ;								\
218	SUPERALIGN_TEXT ;						\
219IDTVEC(vec_name) ;							\
220	PUSH_FRAME ;							\
221	movl	$KDSEL, %eax ;	/* reload with kernel's data segment */	\
222	movl	%ax, %ds ;						\
223	movl	%ax, %es ;						\
224;									\
225	lock ;					/* MP-safe */		\
226	btsl	$(irq_num), iactive ;		/* lazy masking */	\
227	jc	1f ;				/* already active */	\
228;									\
229	ISR_TRYLOCK ;		/* XXX this is going away... */		\
230	testl	%eax, %eax ;			/* did we get it? */	\
231	jz	1f ;				/* no */		\
232;									\
233	AVCPL_LOCK ;				/* MP-safe */		\
234	testl	$IRQ_BIT(irq_num), _cpl ;				\
235	jne	2f ;				/* this INT masked */	\
236	orl	$IRQ_BIT(irq_num), _cil ;				\
237	AVCPL_UNLOCK ;							\
238;									\
239	movl	$0, lapic_eoi ;			/* XXX too soon? */	\
240	incb	_intr_nesting_level ;					\
241__CONCAT(Xresume,irq_num): ;						\
242	FAKE_MCOUNT(12*4(%esp)) ;		/* XXX avoid dbl cnt */ \
243	lock ;	incl	_cnt+V_INTR ;		/* tally interrupts */	\
244	movl	_intr_countp + (irq_num) * 4, %eax ;			\
245	lock ;	incl	(%eax) ;					\
246;									\
247	AVCPL_LOCK ;				/* MP-safe */		\
248	movl	_cpl, %eax ;						\
249	pushl	%eax ;							\
250	orl	_intr_mask + (irq_num) * 4, %eax ;			\
251	movl	%eax, _cpl ;						\
252	AVCPL_UNLOCK ;							\
253;									\
254	pushl	_intr_unit + (irq_num) * 4 ;				\
255	sti ;								\
256	call	*_intr_handler + (irq_num) * 4 ;			\
257	cli ;								\
258;									\
259	lock ;	andl	$~IRQ_BIT(irq_num), iactive ;			\
260	UNMASK_IRQ(irq_num) ;						\
261	sti ;				/* doreti repeats cli/sti */	\
262	MEXITCOUNT ;							\
263	jmp	_doreti ;						\
264;									\
265	ALIGN_TEXT ;							\
2661: ;						/* active or locked */	\
267	MASK_LEVEL_IRQ(irq_num) ;					\
268	movl	$0, lapic_eoi ;			/* do the EOI */	\
269;									\
270	AVCPL_LOCK ;				/* MP-safe */		\
271	orl	$IRQ_BIT(irq_num), _ipending ;				\
272	AVCPL_UNLOCK ;							\
273;									\
274	POP_FRAME ;							\
275	iret ;								\
276;									\
277	ALIGN_TEXT ;							\
2782: ;						/* masked by cpl */	\
279	AVCPL_UNLOCK ;							\
280	ISR_RELLOCK ;		/* XXX this is going away... */		\
281	jmp	1b
282
283
284/*
285 * Handle "spurious INTerrupts".
286 * Notes:
287 *  This is different than the "spurious INTerrupt" generated by an
288 *   8259 PIC for missing INTs.  See the APIC documentation for details.
289 *  This routine should NOT do an 'EOI' cycle.
 *   (Per the APIC documentation the spurious vector is delivered with
 *    no in-service bit set, so an EOI here would acknowledge some
 *    other, unrelated, in-service interrupt.)
290 */
291	.text
292	SUPERALIGN_TEXT
293	.globl _Xspuriousint
294_Xspuriousint:
295
296	/* No EOI cycle used here */
297
298	iret
299
300
301/*
302 * Handle TLB shootdowns.
 *
 * IPI handler: flush this CPU's TLB by reloading %cr3.  Only %eax is
 * touched, so only that register is saved; the "ss" segment-override
 * prefixes allow reading kernel data without reloading %ds.
303 */
304	.text
305	SUPERALIGN_TEXT
306	.globl	_Xinvltlb
307_Xinvltlb:
308	pushl	%eax
309
310#ifdef COUNT_XINVLTLB_HITS
	/* bump this CPU's shootdown counter; ss override, %ds not loaded */
311	ss
312	movl	_cpuid, %eax
313	ss
314	incl	_xhits(,%eax,4)
315#endif /* COUNT_XINVLTLB_HITS */
316
317	movl	%cr3, %eax		/* invalidate the TLB */
318	movl	%eax, %cr3		/* writing %cr3 flushes the TLB */
319
320	ss				/* stack segment, avoid %ds load */
321	movl	$0, lapic_eoi		/* End Of Interrupt to APIC */
322
323	popl	%eax
324	iret
324	iret
325
326
327/*
328 * Executed by a CPU when it receives an Xcpustop IPI from another CPU,
329 *
330 *  - Signals its receipt.
331 *  - Waits for permission to restart.
332 *  - Signals its restart.
333 */
334
335	.text
336	SUPERALIGN_TEXT
337	.globl _Xcpustop
338_Xcpustop:
339	pushl	%eax
340	pushl	%ds			/* save current data segment */
341
342	movl	$KDSEL, %eax
343	movl	%ax, %ds		/* use KERNEL data segment */
344
345	movl	_cpuid, %eax		/* %eax = this CPU's id (bit #) */
346
347	ASMPOSTCODE_HI(0x1)		/* progress markers for debugging */
348
349	lock
350	btsl	%eax, _stopped_cpus	/* stopped_cpus |= (1<<id) */
351
352	ASMPOSTCODE_HI(0x2)
3531:
354	btl	%eax, _started_cpus	/* while (!(started_cpus & (1<<id))) */
355	jnc	1b
356
357	ASMPOSTCODE_HI(0x3)
358
359	lock
360	btrl	%eax, _started_cpus	/* started_cpus &= ~(1<<id) */
361
362	ASMPOSTCODE_HI(0x4)
363
364	movl	$0, lapic_eoi		/* End Of Interrupt to APIC */
365
366	popl	%ds			/* restore previous data segment */
367	popl	%eax
368	iret
369
370
371MCOUNT_LABEL(bintr)
	/*
	 * Instantiate one fast-interrupt stub and one normal-interrupt
	 * stub for each of the 24 I/O APIC pins.  The MCOUNT_LABEL
	 * bintr/eintr pair brackets them for profiling.
	 */
372	FAST_INTR(0,fastintr0)
373	FAST_INTR(1,fastintr1)
374	FAST_INTR(2,fastintr2)
375	FAST_INTR(3,fastintr3)
376	FAST_INTR(4,fastintr4)
377	FAST_INTR(5,fastintr5)
378	FAST_INTR(6,fastintr6)
379	FAST_INTR(7,fastintr7)
380	FAST_INTR(8,fastintr8)
381	FAST_INTR(9,fastintr9)
382	FAST_INTR(10,fastintr10)
383	FAST_INTR(11,fastintr11)
384	FAST_INTR(12,fastintr12)
385	FAST_INTR(13,fastintr13)
386	FAST_INTR(14,fastintr14)
387	FAST_INTR(15,fastintr15)
388	FAST_INTR(16,fastintr16)
389	FAST_INTR(17,fastintr17)
390	FAST_INTR(18,fastintr18)
391	FAST_INTR(19,fastintr19)
392	FAST_INTR(20,fastintr20)
393	FAST_INTR(21,fastintr21)
394	FAST_INTR(22,fastintr22)
395	FAST_INTR(23,fastintr23)
396	INTR(0,intr0)
397	INTR(1,intr1)
398	INTR(2,intr2)
399	INTR(3,intr3)
400	INTR(4,intr4)
401	INTR(5,intr5)
402	INTR(6,intr6)
403	INTR(7,intr7)
404	INTR(8,intr8)
405	INTR(9,intr9)
406	INTR(10,intr10)
407	INTR(11,intr11)
408	INTR(12,intr12)
409	INTR(13,intr13)
410	INTR(14,intr14)
411	INTR(15,intr15)
412	INTR(16,intr16)
413	INTR(17,intr17)
414	INTR(18,intr18)
415	INTR(19,intr19)
416	INTR(20,intr20)
417	INTR(21,intr21)
418	INTR(22,intr22)
419	INTR(23,intr23)
420MCOUNT_LABEL(eintr)
421
422	.data
423ihandlers:			/* addresses of interrupt handlers */
424				/* actually resumption addresses for HWI's */
425	.long	Xresume0,  Xresume1,  Xresume2,  Xresume3
426	.long	Xresume4,  Xresume5,  Xresume6,  Xresume7
427	.long	Xresume8,  Xresume9,  Xresume10, Xresume11
428	.long	Xresume12, Xresume13, Xresume14, Xresume15
429	.long	Xresume16, Xresume17, Xresume18, Xresume19
430	.long	Xresume20, Xresume21, Xresume22, Xresume23
	/* SWI handler slots follow the 24 HWI slots; zeros are unused */
431	.long	swi_tty,   swi_net
432	.long	0, 0, 0, 0
433	.long	_softclock, swi_ast
434
435imasks:				/* masks for interrupt handlers */
436	.space	NHWI*4		/* padding; HWI masks are elsewhere */
437
438	.long	SWI_TTY_MASK, SWI_NET_MASK
439	.long	0, 0, 0, 0
440	.long	SWI_CLOCK_MASK, SWI_AST_MASK
441
	/* table of the INTR() entry points, indexed by IRQ number */
442	.globl _ivectors
443_ivectors:
444	.long	_Xintr0,  _Xintr1,  _Xintr2,  _Xintr3
445	.long	_Xintr4,  _Xintr5,  _Xintr6,  _Xintr7
446	.long	_Xintr8,  _Xintr9,  _Xintr10, _Xintr11
447	.long	_Xintr12, _Xintr13, _Xintr14, _Xintr15
448	.long	_Xintr16, _Xintr17, _Xintr18, _Xintr19
449	.long	_Xintr20, _Xintr21, _Xintr22, _Xintr23
450
451/* active flag for lazy masking */
	/* one bit per IRQ; set with lock btsl / cleared with lock andl in INTR() */
452iactive:
453	.long	0
454
455#ifdef COUNT_XINVLTLB_HITS
	/* per-CPU TLB-shootdown hit counters (one 32-bit slot per CPU) */
456	.globl	_xhits
457_xhits:
458	.space	(NCPU * 4), 0
459#endif /* COUNT_XINVLTLB_HITS */
460
461/* variables used by stop_cpus()/restart_cpus()/Xcpustop */
	/* CPU bitmasks, one bit per CPU id */
462	.globl _stopped_cpus, _started_cpus
463_stopped_cpus:
464	.long	0
465_started_cpus:
466	.long	0
467
	/* per-IO APIC bitmask of level-triggered pins (see MASK_LEVEL_IRQ) */
468	.globl	_apic_pin_trigger
469_apic_pin_trigger:
470	.space	(NAPIC * 4), 0
471
472
473/*
474 * Interrupt counters and names.  The format of these and the label names
475 * must agree with what vmstat expects.  The tables are indexed by device
476 * ids so that we don't have to move the names around as devices are
477 * attached.
478 */
479#include "vector.h"
480	.globl	_intrcnt, _eintrcnt
481_intrcnt:
	/* one 32-bit counter per device plus one per possible stray IRQ */
482	.space	(NR_DEVICES + ICU_LEN) * 4
483_eintrcnt:
484
485	.globl	_intrnames, _eintrnames
486_intrnames:
487	.ascii	DEVICE_NAMES
	/* one "stray" name per IRQ, matching the counter slots above */
488	.asciz	"stray irq0"
489	.asciz	"stray irq1"
490	.asciz	"stray irq2"
491	.asciz	"stray irq3"
492	.asciz	"stray irq4"
493	.asciz	"stray irq5"
494	.asciz	"stray irq6"
495	.asciz	"stray irq7"
496	.asciz	"stray irq8"
497	.asciz	"stray irq9"
498	.asciz	"stray irq10"
499	.asciz	"stray irq11"
500	.asciz	"stray irq12"
501	.asciz	"stray irq13"
502	.asciz	"stray irq14"
503	.asciz	"stray irq15"
504	.asciz	"stray irq16"
505	.asciz	"stray irq17"
506	.asciz	"stray irq18"
507	.asciz	"stray irq19"
508	.asciz	"stray irq20"
509	.asciz	"stray irq21"
510	.asciz	"stray irq22"
511	.asciz	"stray irq23"
512_eintrnames:
513
514	.text
515