apic_vector.s revision 28921
/*
 *	from: vector.s, 386BSD 0.1 unknown origin
 *	$Id: apic_vector.s,v 1.33 1997/08/30 01:23:40 smp Exp smp $
 */


#include <machine/apic.h>
#include <machine/smp.h>
#include <machine/smptests.h>			/** various things... */

#include "i386/isa/intr_machdep.h"


#ifdef REAL_AVCPL

#define AVCPL_LOCK	CPL_LOCK
#define AVCPL_UNLOCK	CPL_UNLOCK

#else /* REAL_AVCPL */

#define AVCPL_LOCK
#define AVCPL_UNLOCK

#endif /* REAL_AVCPL */

#ifdef FAST_SIMPLELOCK

#define GET_FAST_INTR_LOCK						\
	pushl	$_fast_intr_lock ;		/* address of lock */	\
	call	_s_lock ;			/* MP-safe */		\
	addl	$4,%esp

#define REL_FAST_INTR_LOCK						\
	pushl	$_fast_intr_lock ;		/* address of lock */	\
	call	_s_unlock ;			/* MP-safe */		\
	addl	$4,%esp

#else /* FAST_SIMPLELOCK */

#define GET_FAST_INTR_LOCK						\
	call	_get_isrlock

#define REL_FAST_INTR_LOCK						\
	pushl	$_mp_lock ;	/* GIANT_LOCK */			\
	call	_MPrellock ;						\
	add	$4, %esp

#endif /* FAST_SIMPLELOCK */
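
/*
 * Note: with FAST_SIMPLELOCK the fast vectors take a private simple lock
 * (_fast_intr_lock); otherwise they take and release the giant MP lock
 * (_mp_lock).
 */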

/* convert an absolute IRQ# into a bitmask */
#define IRQ_BIT(irq_num)	(1 << (irq_num))

/* make an index into the IO APIC from the IRQ# */
#define REDTBL_IDX(irq_num)	(0x10 + ((irq_num) * 2))
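
/*
 * For example, IRQ 10 gives IRQ_BIT(10) == 0x00000400 and
 * REDTBL_IDX(10) == 0x24, the low half of redirection table entry 10
 * (the redirection table starts at register index 0x10, two registers
 * per entry).
 */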


/*
 * Macros for interrupt entry, call to handler, and exit.
 */

#ifdef FAST_WITHOUTCPL

/*
 * Fast interrupt entry: call the handler as soon as possible, then EOI
 * the local APIC; bookkeeping is done after the handler has run.
 */
#define	FAST_INTR(irq_num, vec_name)					\
	.text ;								\
	SUPERALIGN_TEXT ;						\
IDTVEC(vec_name) ;							\
	pushl	%eax ;		/* save only call-used registers */	\
	pushl	%ecx ;							\
	pushl	%edx ;							\
	pushl	%ds ;							\
	MAYBE_PUSHL_ES ;						\
	movl	$KDSEL, %eax ;						\
	movl	%ax, %ds ;						\
	MAYBE_MOVW_AX_ES ;						\
	FAKE_MCOUNT((4+ACTUALLY_PUSHED)*4(%esp)) ;			\
	pushl	_intr_unit + (irq_num) * 4 ;				\
	GET_FAST_INTR_LOCK ;						\
	call	*_intr_handler + (irq_num) * 4 ; /* do the work ASAP */ \
	REL_FAST_INTR_LOCK ;						\
	addl	$4, %esp ;						\
	movl	$0, lapic_eoi ;						\
	lock ;								\
	incl	_cnt+V_INTR ;	/* book-keeping can wait */		\
	movl	_intr_countp + (irq_num) * 4, %eax ;			\
	lock ;								\
	incl	(%eax) ;						\
	MEXITCOUNT ;							\
	MAYBE_POPL_ES ;							\
	popl	%ds ;							\
	popl	%edx ;							\
	popl	%ecx ;							\
	popl	%eax ;							\
	iret

#else /* FAST_WITHOUTCPL */

#define	FAST_INTR(irq_num, vec_name)					\
	.text ;								\
	SUPERALIGN_TEXT ;						\
IDTVEC(vec_name) ;							\
	pushl	%eax ;		/* save only call-used registers */	\
	pushl	%ecx ;							\
	pushl	%edx ;							\
	pushl	%ds ;							\
	MAYBE_PUSHL_ES ;						\
	movl	$KDSEL, %eax ;						\
	movl	%ax, %ds ;						\
	MAYBE_MOVW_AX_ES ;						\
	FAKE_MCOUNT((4+ACTUALLY_PUSHED)*4(%esp)) ;			\
	GET_FAST_INTR_LOCK ;						\
	pushl	_intr_unit + (irq_num) * 4 ;				\
	call	*_intr_handler + (irq_num) * 4 ; /* do the work ASAP */ \
	addl	$4, %esp ;						\
	movl	$0, lapic_eoi ;						\
	lock ;								\
	incl	_cnt+V_INTR ;	/* book-keeping can wait */		\
	movl	_intr_countp + (irq_num) * 4, %eax ;			\
	lock ;								\
	incl	(%eax) ;						\
	movl	_cpl, %eax ;	/* unmasking pending HWIs or SWIs? */	\
	notl	%eax ;							\
	andl	_ipending, %eax ;					\
	jne	2f ;		/* yes, maybe handle them */		\
1: ;									\
	MEXITCOUNT ;							\
	REL_FAST_INTR_LOCK ;						\
	MAYBE_POPL_ES ;							\
	popl	%ds ;							\
	popl	%edx ;							\
	popl	%ecx ;							\
	popl	%eax ;							\
	iret ;								\
;									\
	ALIGN_TEXT ;							\
2: ;									\
	cmpb	$3, _intr_nesting_level ;	/* enough stack? */	\
	jae	1b ;		/* no, return */			\
	movl	_cpl, %eax ;						\
	/* XXX next line is probably unnecessary now. */		\
	movl	$HWI_MASK|SWI_MASK, _cpl ;	/* limit nesting ... */	\
	lock ;								\
	incb	_intr_nesting_level ;	/* ... really limit it ... */	\
	sti ;			/* to do this as early as possible */	\
	MAYBE_POPL_ES ;		/* discard most of thin frame ... */	\
	popl	%ecx ;		/* ... original %ds ... */		\
	popl	%edx ;							\
	xchgl	%eax, 4(%esp) ;	/* orig %eax; save cpl */		\
	pushal ;		/* build fat frame (grrr) ... */	\
	pushl	%ecx ;		/* ... actually %ds ... */		\
	pushl	%es ;							\
	movl	$KDSEL, %eax ;						\
	movl	%ax, %es ;						\
	movl	(2+8+0)*4(%esp), %ecx ;	/* %ecx from thin frame ... */	\
	movl	%ecx, (2+6)*4(%esp) ;	/* ... to fat frame ... */	\
	movl	(2+8+1)*4(%esp), %eax ;	/* ... cpl from thin frame */	\
	pushl	%eax ;							\
	subl	$4, %esp ;	/* junk for unit number */		\
	MEXITCOUNT ;							\
	jmp	_doreti

#endif /** FAST_WITHOUTCPL */


/*
 * Trap frame setup/teardown for the INTR vectors below.
 */
#define PUSH_FRAME							\
	pushl	$0 ;		/* dummy error code */			\
	pushl	$0 ;		/* dummy trap type */			\
	pushal ;							\
	pushl	%ds ;		/* save data and extra segments ... */	\
	pushl	%es

#define POP_FRAME							\
	popl	%es ;							\
	popl	%ds ;							\
	popal ;								\
	addl	$4+4,%esp
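
/*
 * Illustrative stack layout after PUSH_FRAME (top of stack first):
 *	%es, %ds, %edi, %esi, %ebp, %esp, %ebx, %edx, %ecx, %eax,
 *	dummy trap type, dummy error code, then the hardware-pushed
 *	%eip/%cs/%eflags.  The saved %eip therefore sits at 12*4(%esp),
 *	which is the offset FAKE_MCOUNT uses in the INTR macros below.
 */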

/*
 * Test to see whether we are handling an edge or level triggered INT.
 *  Level-triggered INTs must still be masked as we don't clear the source,
 *  and the EOI cycle would cause redundant INTs to occur.
 */
#define MASK_LEVEL_IRQ(irq_num)						\
	IMASK_LOCK ;				/* into critical reg */	\
	testl	$IRQ_BIT(irq_num), _apic_pin_trigger ;			\
	jz	8f ;				/* edge, don't mask */	\
	orl	$IRQ_BIT(irq_num), _apic_imen ;	/* set the mask bit */	\
	movl	_ioapic, %ecx ;			/* ioapic[0] addr */	\
	movl	$REDTBL_IDX(irq_num), (%ecx) ;	/* write the index */	\
	movl	IOAPIC_WINDOW(%ecx), %eax ;	/* current value */	\
	orl	$IOART_INTMASK, %eax ;		/* set the mask */	\
	movl	%eax, IOAPIC_WINDOW(%ecx) ;	/* new value */		\
8: ;									\
	IMASK_UNLOCK

/*
 * Test to see if the source is currently masked, and clear the mask if so.
 */
#define UNMASK_IRQ(irq_num)						\
	IMASK_LOCK ;				/* into critical reg */	\
	testl	$IRQ_BIT(irq_num), _apic_imen ;				\
	je	9f ;							\
	andl	$~IRQ_BIT(irq_num), _apic_imen ;/* clear mask bit */	\
	movl	_ioapic, %ecx ;			/* ioapic[0] addr */	\
	movl	$REDTBL_IDX(irq_num), (%ecx) ;	/* write the index */	\
	movl	IOAPIC_WINDOW(%ecx), %eax ;	/* current value */	\
	andl	$~IOART_INTMASK, %eax ;		/* clear the mask */	\
	movl	%eax, IOAPIC_WINDOW(%ecx) ;	/* new value */		\
9: ;									\
	IMASK_UNLOCK
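
/*
 * Note: the I/O APIC is programmed indirectly, as above: the register
 * index is written through the select register at the I/O APIC base
 * address, and the data is then read or written through the data window
 * at offset IOAPIC_WINDOW from that base.
 */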

#ifdef INTR_SIMPLELOCK

#define	INTR(irq_num, vec_name)						\
	.text ;								\
	SUPERALIGN_TEXT ;						\
IDTVEC(vec_name) ;							\
	PUSH_FRAME ;							\
	movl	$KDSEL, %eax ;	/* reload with kernel's data segment */	\
	movl	%ax, %ds ;						\
	movl	%ax, %es ;						\
;									\
	lock ;					/* MP-safe */		\
	btsl	$(irq_num), iactive ;		/* lazy masking */	\
	jc	1f ;				/* already active */	\
;									\
	ISR_TRYLOCK ;		/* XXX this is going away... */		\
	testl	%eax, %eax ;			/* did we get it? */	\
	jz	1f ;				/* no */		\
;									\
	AVCPL_LOCK ;				/* MP-safe */		\
	testl	$IRQ_BIT(irq_num), _cpl ;				\
	jne	2f ;				/* this INT masked */	\
	testl	$IRQ_BIT(irq_num), _cml ;				\
	jne	2f ;				/* this INT masked */	\
	orl	$IRQ_BIT(irq_num), _cil ;				\
	AVCPL_UNLOCK ;							\
;									\
	movl	$0, lapic_eoi ;			/* XXX too soon? */	\
	incb	_intr_nesting_level ;					\
__CONCAT(Xresume,irq_num): ;						\
	FAKE_MCOUNT(12*4(%esp)) ;		/* XXX avoid dbl cnt */ \
	lock ;	incl	_cnt+V_INTR ;		/* tally interrupts */	\
	movl	_intr_countp + (irq_num) * 4, %eax ;			\
	lock ;	incl	(%eax) ;					\
;									\
	AVCPL_LOCK ;				/* MP-safe */		\
	movl	_cml, %eax ;						\
	pushl	%eax ;							\
	orl	_intr_mask + (irq_num) * 4, %eax ;			\
	movl	%eax, _cml ;						\
	AVCPL_UNLOCK ;							\
;									\
	pushl	_intr_unit + (irq_num) * 4 ;				\
	sti ;								\
	call	*_intr_handler + (irq_num) * 4 ;			\
	cli ;								\
;									\
	lock ;	andl	$~IRQ_BIT(irq_num), iactive ;			\
	lock ;	andl	$~IRQ_BIT(irq_num), _cil ;			\
	UNMASK_IRQ(irq_num) ;						\
	sti ;				/* doreti repeats cli/sti */	\
	MEXITCOUNT ;							\
	jmp	_doreti ;						\
;									\
	ALIGN_TEXT ;							\
1: ;						/* active or locked */	\
	MASK_LEVEL_IRQ(irq_num) ;					\
	movl	$0, lapic_eoi ;			/* do the EOI */	\
;									\
	AVCPL_LOCK ;				/* MP-safe */		\
	orl	$IRQ_BIT(irq_num), _ipending ;				\
	AVCPL_UNLOCK ;							\
;									\
	POP_FRAME ;							\
	iret ;								\
;									\
	ALIGN_TEXT ;							\
2: ;						/* masked by cpl|cml */	\
	AVCPL_UNLOCK ;							\
	ISR_RELLOCK ;		/* XXX this is going away... */		\
	jmp	1b

#else /* INTR_SIMPLELOCK */

#define	INTR(irq_num, vec_name)						\
	.text ;								\
	SUPERALIGN_TEXT ;						\
IDTVEC(vec_name) ;							\
	PUSH_FRAME ;							\
	movl	$KDSEL, %eax ;	/* reload with kernel's data segment */	\
	movl	%ax, %ds ;						\
	movl	%ax, %es ;						\
;									\
	lock ;					/* MP-safe */		\
	btsl	$(irq_num), iactive ;		/* lazy masking */	\
	jc	1f ;				/* already active */	\
;									\
	ISR_TRYLOCK ;		/* XXX this is going away... */		\
	testl	%eax, %eax ;			/* did we get it? */	\
	jz	1f ;				/* no */		\
;									\
	AVCPL_LOCK ;				/* MP-safe */		\
	testl	$IRQ_BIT(irq_num), _cpl ;				\
	jne	2f ;				/* this INT masked */	\
	AVCPL_UNLOCK ;							\
;									\
	movl	$0, lapic_eoi ;			/* XXX too soon? */	\
	incb	_intr_nesting_level ;					\
__CONCAT(Xresume,irq_num): ;						\
	FAKE_MCOUNT(12*4(%esp)) ;		/* XXX avoid dbl cnt */ \
	lock ;	incl	_cnt+V_INTR ;		/* tally interrupts */	\
	movl	_intr_countp + (irq_num) * 4, %eax ;			\
	lock ;	incl	(%eax) ;					\
;									\
	AVCPL_LOCK ;				/* MP-safe */		\
	movl	_cpl, %eax ;						\
	pushl	%eax ;							\
	orl	_intr_mask + (irq_num) * 4, %eax ;			\
	movl	%eax, _cpl ;						\
	AVCPL_UNLOCK ;							\
;									\
	pushl	_intr_unit + (irq_num) * 4 ;				\
	sti ;								\
	call	*_intr_handler + (irq_num) * 4 ;			\
	cli ;								\
;									\
	lock ;	andl	$~IRQ_BIT(irq_num), iactive ;			\
	UNMASK_IRQ(irq_num) ;						\
	sti ;				/* doreti repeats cli/sti */	\
	MEXITCOUNT ;							\
	jmp	_doreti ;						\
;									\
	ALIGN_TEXT ;							\
1: ;						/* active or locked */	\
	MASK_LEVEL_IRQ(irq_num) ;					\
	movl	$0, lapic_eoi ;			/* do the EOI */	\
;									\
	AVCPL_LOCK ;				/* MP-safe */		\
	orl	$IRQ_BIT(irq_num), _ipending ;				\
	AVCPL_UNLOCK ;							\
;									\
	POP_FRAME ;							\
	iret ;								\
;									\
	ALIGN_TEXT ;							\
2: ;						/* masked by cpl */	\
	AVCPL_UNLOCK ;							\
	ISR_RELLOCK ;		/* XXX this is going away... */		\
	jmp	1b

#endif /* INTR_SIMPLELOCK */


/*
 * Handle "spurious INTerrupts".
 * Notes:
 *  This is different from the "spurious INTerrupt" generated by an
 *   8259 PIC for missing INTs.  See the APIC documentation for details.
 *  This routine should NOT do an 'EOI' cycle.
 */
	.text
	SUPERALIGN_TEXT
	.globl _Xspuriousint
_Xspuriousint:

	/* No EOI cycle used here */

	iret


/*
 * Handle TLB shootdowns.
 */
	.text
	SUPERALIGN_TEXT
	.globl	_Xinvltlb
_Xinvltlb:
	pushl	%eax

#ifdef COUNT_XINVLTLB_HITS
	ss
	movl	_cpuid, %eax
	ss
	incl	_xhits(,%eax,4)
#endif /* COUNT_XINVLTLB_HITS */

	movl	%cr3, %eax		/* invalidate the TLB */
	movl	%eax, %cr3

	ss				/* stack segment, avoid %ds load */
	movl	$0, lapic_eoi		/* End Of Interrupt to APIC */

	popl	%eax
	iret


/*
 * Executed by a CPU when it receives an Xcpustop IPI from another CPU:
 *
 *  - Signals its receipt.
 *  - Waits for permission to restart.
 *  - Signals its restart.
 */

	.text
	SUPERALIGN_TEXT
	.globl _Xcpustop
_Xcpustop:
	pushl	%eax
	pushl	%ds			/* save current data segment */

	movl	$KDSEL, %eax
	movl	%ax, %ds		/* use KERNEL data segment */

	movl	_cpuid, %eax

	lock
	btsl	%eax, _stopped_cpus	/* stopped_cpus |= (1<<id) */
1:
	btl	%eax, _started_cpus	/* while (!(started_cpus & (1<<id))) */
	jnc	1b

	lock
	btrl	%eax, _started_cpus	/* started_cpus &= ~(1<<id) */

	movl	$0, lapic_eoi		/* End Of Interrupt to APIC */

	popl	%ds			/* restore previous data segment */
	popl	%eax
	iret


MCOUNT_LABEL(bintr)
	FAST_INTR(0,fastintr0)
	FAST_INTR(1,fastintr1)
	FAST_INTR(2,fastintr2)
	FAST_INTR(3,fastintr3)
	FAST_INTR(4,fastintr4)
	FAST_INTR(5,fastintr5)
	FAST_INTR(6,fastintr6)
	FAST_INTR(7,fastintr7)
	FAST_INTR(8,fastintr8)
	FAST_INTR(9,fastintr9)
	FAST_INTR(10,fastintr10)
	FAST_INTR(11,fastintr11)
	FAST_INTR(12,fastintr12)
	FAST_INTR(13,fastintr13)
	FAST_INTR(14,fastintr14)
	FAST_INTR(15,fastintr15)
	FAST_INTR(16,fastintr16)
	FAST_INTR(17,fastintr17)
	FAST_INTR(18,fastintr18)
	FAST_INTR(19,fastintr19)
	FAST_INTR(20,fastintr20)
	FAST_INTR(21,fastintr21)
	FAST_INTR(22,fastintr22)
	FAST_INTR(23,fastintr23)
	INTR(0,intr0)
	INTR(1,intr1)
	INTR(2,intr2)
	INTR(3,intr3)
	INTR(4,intr4)
	INTR(5,intr5)
	INTR(6,intr6)
	INTR(7,intr7)
	INTR(8,intr8)
	INTR(9,intr9)
	INTR(10,intr10)
	INTR(11,intr11)
	INTR(12,intr12)
	INTR(13,intr13)
	INTR(14,intr14)
	INTR(15,intr15)
	INTR(16,intr16)
	INTR(17,intr17)
	INTR(18,intr18)
	INTR(19,intr19)
	INTR(20,intr20)
	INTR(21,intr21)
	INTR(22,intr22)
	INTR(23,intr23)
MCOUNT_LABEL(eintr)

	.data
ihandlers:			/* addresses of interrupt handlers */
				/* actually resumption addresses for HWIs */
	.long	Xresume0,  Xresume1,  Xresume2,  Xresume3
	.long	Xresume4,  Xresume5,  Xresume6,  Xresume7
	.long	Xresume8,  Xresume9,  Xresume10, Xresume11
	.long	Xresume12, Xresume13, Xresume14, Xresume15
	.long	Xresume16, Xresume17, Xresume18, Xresume19
	.long	Xresume20, Xresume21, Xresume22, Xresume23
	.long	swi_tty,   swi_net
	.long	0, 0, 0, 0
	.long	_softclock, swi_ast

imasks:				/* masks for interrupt handlers */
	.space	NHWI*4		/* padding; HWI masks are elsewhere */

	.long	SWI_TTY_MASK, SWI_NET_MASK
	.long	0, 0, 0, 0
	.long	SWI_CLOCK_MASK, SWI_AST_MASK

	.globl _ivectors
_ivectors:
	.long	_Xintr0,  _Xintr1,  _Xintr2,  _Xintr3
	.long	_Xintr4,  _Xintr5,  _Xintr6,  _Xintr7
	.long	_Xintr8,  _Xintr9,  _Xintr10, _Xintr11
	.long	_Xintr12, _Xintr13, _Xintr14, _Xintr15
	.long	_Xintr16, _Xintr17, _Xintr18, _Xintr19
	.long	_Xintr20, _Xintr21, _Xintr22, _Xintr23

/* active flag for lazy masking */
iactive:
	.long	0
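
/*
 * Note on lazy masking: bit i of iactive is set (btsl in INTR above)
 * while the handler for IRQ i is running.  A second interrupt on the
 * same IRQ finds the bit set, masks the source in the I/O APIC (if it
 * is level-triggered) and records it in _ipending instead of
 * re-entering the handler.
 */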

#ifdef COUNT_XINVLTLB_HITS
	.globl	_xhits
_xhits:
	.space	(NCPU * 4), 0
#endif /* COUNT_XINVLTLB_HITS */

/* variables used by stop_cpus()/restart_cpus()/Xcpustop */
	.globl _stopped_cpus, _started_cpus
_stopped_cpus:
	.long	0
_started_cpus:
	.long	0

	.globl	_apic_pin_trigger
_apic_pin_trigger:
	.space	(NAPIC * 4), 0


/*
 * Interrupt counters and names.  The format of these and the label names
 * must agree with what vmstat expects.  The tables are indexed by device
 * ids so that we don't have to move the names around as devices are
 * attached.
 */
#include "vector.h"
	.globl	_intrcnt, _eintrcnt
_intrcnt:
	.space	(NR_DEVICES + ICU_LEN) * 4
_eintrcnt:

	.globl	_intrnames, _eintrnames
_intrnames:
	.ascii	DEVICE_NAMES
	.asciz	"stray irq0"
	.asciz	"stray irq1"
	.asciz	"stray irq2"
	.asciz	"stray irq3"
	.asciz	"stray irq4"
	.asciz	"stray irq5"
	.asciz	"stray irq6"
	.asciz	"stray irq7"
	.asciz	"stray irq8"
	.asciz	"stray irq9"
	.asciz	"stray irq10"
	.asciz	"stray irq11"
	.asciz	"stray irq12"
	.asciz	"stray irq13"
	.asciz	"stray irq14"
	.asciz	"stray irq15"
	.asciz	"stray irq16"
	.asciz	"stray irq17"
	.asciz	"stray irq18"
	.asciz	"stray irq19"
	.asciz	"stray irq20"
	.asciz	"stray irq21"
	.asciz	"stray irq22"
	.asciz	"stray irq23"
_eintrnames:

	.text