/* apic_vector.s revision 29677 */
/*
 *	from: vector.s, 386BSD 0.1 unknown origin
 *	$Id: apic_vector.s,v 1.21 1997/09/07 22:02:36 fsmp Exp $
 */
5
6
#include <machine/apic.h>
#include <machine/smp.h>

#include "i386/isa/intr_machdep.h"
11
12
/*
 * Lock helpers bracketing the fast-interrupt handler call.
 * With FAST_SIMPLELOCK a private spin lock (_fast_intr_lock) is used;
 * otherwise the global ISR (giant) lock is acquired and released.
 */
#ifdef FAST_SIMPLELOCK

#define GET_FAST_INTR_LOCK						\
	pushl	$_fast_intr_lock ;		/* address of lock */	\
	call	_s_lock ;			/* MP-safe */		\
	addl	$4,%esp

#define REL_FAST_INTR_LOCK						\
	pushl	$_fast_intr_lock ;		/* address of lock */	\
	call	_s_unlock ;			/* MP-safe */		\
	addl	$4,%esp

#else /* FAST_SIMPLELOCK */

#define GET_FAST_INTR_LOCK						\
	call	_get_isrlock

#define REL_FAST_INTR_LOCK						\
	pushl	$_mp_lock ;	/* GIANT_LOCK */			\
	call	_MPrellock ;						\
	add	$4, %esp

#endif /* FAST_SIMPLELOCK */
36
/* convert an absolute IRQ# into a bitmask */
#define IRQ_BIT(irq_num)	(1 << (irq_num))

/*
 * Make an index into the IO APIC from the IRQ#: the redirection
 * table begins at register 0x10 with two 32-bit registers per pin.
 */
#define REDTBL_IDX(irq_num)	(0x10 + ((irq_num) * 2))
42
43
/*
 * Macros for interrupt entry, call to handler, and exit.
 */
47
#ifdef FAST_WITHOUTCPL

/*
 * FAST_INTR(irq_num, vec_name): "fast" interrupt IDT entry point.
 *  Saves only the call-used registers plus %ds (and maybe %es),
 *  switches to the kernel data segment, calls the handler with its
 *  unit pointer as the sole argument (under the fast-interrupt
 *  lock), EOIs the local APIC, bumps the interrupt counters, then
 *  restores and irets.  No cpl/ipending processing in this flavor.
 */
#define	FAST_INTR(irq_num, vec_name)					\
	.text ;								\
	SUPERALIGN_TEXT ;						\
IDTVEC(vec_name) ;							\
	pushl	%eax ;		/* save only call-used registers */	\
	pushl	%ecx ;							\
	pushl	%edx ;							\
	pushl	%ds ;							\
	MAYBE_PUSHL_ES ;						\
	movl	$KDSEL,%eax ;						\
	movl	%ax,%ds ;						\
	MAYBE_MOVW_AX_ES ;						\
	FAKE_MCOUNT((4+ACTUALLY_PUSHED)*4(%esp)) ;			\
	pushl	_intr_unit + (irq_num) * 4 ;	/* handler argument */	\
	GET_FAST_INTR_LOCK ;						\
	call	*_intr_handler + (irq_num) * 4 ; /* do the work ASAP */ \
	REL_FAST_INTR_LOCK ;						\
	addl	$4, %esp ;	/* discard the unit argument */		\
	movl	$0, lapic_eoi ;						\
	lock ; 								\
	incl	_cnt+V_INTR ;	/* book-keeping can wait */		\
	movl	_intr_countp + (irq_num) * 4, %eax ;			\
	lock ; 								\
	incl	(%eax) ;						\
	MEXITCOUNT ;							\
	MAYBE_POPL_ES ;							\
	popl	%ds ;							\
	popl	%edx ;							\
	popl	%ecx ;							\
	popl	%eax ;							\
	iret
83
#else /* FAST_WITHOUTCPL */

/*
 * FAST_INTR(irq_num, vec_name): cpl-aware "fast" interrupt entry.
 *  Saves only the call-used registers, runs the handler under the
 *  fast-interrupt lock and EOIs the local APIC; afterwards it checks
 *  for interrupts pending in _ipending that are not masked by _cpl.
 *  If any (and _intr_nesting_level permits) the thin frame is
 *  rebuilt into a full trap frame and control exits via _doreti so
 *  they can be serviced; otherwise it simply irets.
 */
#define	FAST_INTR(irq_num, vec_name)					\
	.text ;								\
	SUPERALIGN_TEXT ;						\
IDTVEC(vec_name) ;							\
	pushl	%eax ;		/* save only call-used registers */	\
	pushl	%ecx ;							\
	pushl	%edx ;							\
	pushl	%ds ;							\
	MAYBE_PUSHL_ES ;						\
	movl	$KDSEL, %eax ;						\
	movl	%ax, %ds ;						\
	MAYBE_MOVW_AX_ES ;						\
	FAKE_MCOUNT((4+ACTUALLY_PUSHED)*4(%esp)) ;			\
	GET_FAST_INTR_LOCK ;						\
	pushl	_intr_unit + (irq_num) * 4 ;	/* handler argument */	\
	call	*_intr_handler + (irq_num) * 4 ; /* do the work ASAP */ \
	addl	$4, %esp ;	/* discard the unit argument */		\
	movl	$0, lapic_eoi ;						\
	lock ; 								\
	incl	_cnt+V_INTR ;	/* book-keeping can wait */		\
	movl	_intr_countp + (irq_num) * 4,%eax ;			\
	lock ; 								\
	incl	(%eax) ;						\
	movl	_cpl, %eax ;	/* unmasking pending HWIs or SWIs? */	\
	notl	%eax ;							\
	andl	_ipending, %eax ;					\
	jne	2f ; 		/* yes, maybe handle them */		\
1: ;									\
	MEXITCOUNT ;							\
	REL_FAST_INTR_LOCK ;						\
	MAYBE_POPL_ES ;							\
	popl	%ds ;							\
	popl	%edx ;							\
	popl	%ecx ;							\
	popl	%eax ;							\
	iret ;								\
;									\
	ALIGN_TEXT ;							\
2: ;									\
	cmpb	$3, _intr_nesting_level ;	/* enough stack? */	\
	jae	1b ;		/* no, return */			\
	movl	_cpl, %eax ;						\
	/* XXX next line is probably unnecessary now. */		\
	movl	$HWI_MASK|SWI_MASK, _cpl ;	/* limit nesting ... */	\
	lock ; 								\
	incb	_intr_nesting_level ;	/* ... really limit it ... */	\
	sti ;			/* to do this as early as possible */	\
	MAYBE_POPL_ES ;		/* discard most of thin frame ... */	\
	popl	%ecx ;		/* ... original %ds ... */		\
	popl	%edx ;							\
	xchgl	%eax, 4(%esp) ;	/* orig %eax; save cpl */		\
	pushal ;		/* build fat frame (grrr) ... */	\
	pushl	%ecx ;		/* ... actually %ds ... */		\
	pushl	%es ;							\
	movl	$KDSEL, %eax ;						\
	movl	%ax, %es ;						\
	movl	(2+8+0)*4(%esp), %ecx ;	/* %ecx from thin frame ... */	\
	movl	%ecx, (2+6)*4(%esp) ;	/* ... to fat frame ... */	\
	movl	(2+8+1)*4(%esp), %eax ;	/* ... cpl from thin frame */	\
	pushl	%eax ;							\
	subl	$4, %esp ;	/* junk for unit number */		\
	MEXITCOUNT ;							\
	jmp	_doreti

#endif /** FAST_WITHOUTCPL */
151
152
/*
 * Build the body of a full trap frame: dummy error code and trap
 * type, all general registers, then the data and extra segments.
 */
#define PUSH_FRAME							\
	pushl	$0 ;		/* dummy error code */			\
	pushl	$0 ;		/* dummy trap type */			\
	pushal ;							\
	pushl	%ds ;		/* save data and extra segments ... */	\
	pushl	%es
162
/* Unwind PUSH_FRAME; the addl discards the dummy err/trap words. */
#define POP_FRAME							\
	popl	%es ;							\
	popl	%ds ;							\
	popal ;								\
	addl	$4+4,%esp
168
/*
 * Test to see whether we are handling an edge or level triggered INT.
 *  Level-triggered INTs must still be masked as we don't clear the source,
 *  and the EOI cycle would cause redundant INTs to occur.
 *  The mask is recorded both in _apic_imen and in the pin's IO APIC
 *  redirection entry.  Clobbers %eax and %ecx.
 */
#define MASK_LEVEL_IRQ(irq_num)						\
	testl	$IRQ_BIT(irq_num), _apic_pin_trigger ;			\
	jz	8f ;				/* edge, don't mask */	\
	IMASK_LOCK ;				/* into critical reg */	\
	orl	$IRQ_BIT(irq_num), _apic_imen ;	/* set the mask bit */	\
	movl	_ioapic, %ecx ;			/* ioapic[0] addr */	\
	movl	$REDTBL_IDX(irq_num), (%ecx) ;	/* write the index */	\
	movl	IOAPIC_WINDOW(%ecx), %eax ;	/* current value */	\
	orl	$IOART_INTMASK, %eax ;		/* set the mask */	\
	movl	%eax, IOAPIC_WINDOW(%ecx) ;	/* new value */		\
8:
186
/*
 * Test to see if the source is currently masked, clear if so.
 *  Clears the bit both in _apic_imen and in the pin's IO APIC
 *  redirection entry.  Clobbers %eax and %ecx.
 */
#define UNMASK_IRQ(irq_num)					\
	IMASK_LOCK ;				/* into critical reg */	\
	testl	$IRQ_BIT(irq_num), _apic_imen ;				\
	je	9f ;				/* not masked */	\
	andl	$~IRQ_BIT(irq_num), _apic_imen ;/* clear mask bit */	\
	movl	_ioapic,%ecx ;			/* ioapic[0]addr */	\
	movl	$REDTBL_IDX(irq_num),(%ecx) ;	/* write the index */	\
	movl	IOAPIC_WINDOW(%ecx),%eax ;	/* current value */	\
	andl	$~IOART_INTMASK,%eax ;		/* clear the mask */	\
	movl	%eax,IOAPIC_WINDOW(%ecx) ;	/* new value */		\
9: ;									\
	IMASK_UNLOCK
202
/*
 * ISR lock wrappers for the INTR() macro.  With INTR_SIMPLELOCK the
 * entry/exit lock ops are empty and the ISR lock is taken late
 * (LATELOCK), just before jumping to _doreti; otherwise the lock is
 * try-acquired on entry (branching to 1f on failure) and released
 * on the masked path (DELOCK).
 */
#ifdef INTR_SIMPLELOCK
#define ENLOCK
#define DELOCK
#define LATELOCK call	_get_isrlock
#else
#define ENLOCK \
	ISR_TRYLOCK ;		/* XXX this is going away... */		\
	testl	%eax, %eax ;			/* did we get it? */	\
	jz	1f
#define DELOCK	ISR_RELLOCK
#define LATELOCK
#endif
215
#ifdef CPL_AND_CML

/*
 * INTR(irq_num, vec_name): normal interrupt entry, CPL_AND_CML flavor.
 *  Builds a full trap frame, sets this IRQ's iactive bit for lazy
 *  masking (bailing out if already active), takes the ISR lock via
 *  ENLOCK and tests _cpl/_cml; a masked IRQ is deferred by setting
 *  its _ipending bit.  Otherwise the handler runs with interrupts
 *  enabled and _cml raised by the IRQ's mask; afterwards iactive and
 *  _cil are cleared, the pin is unmasked and control leaves via
 *  _doreti.
 */
#define	INTR(irq_num, vec_name)						\
	.text ;								\
	SUPERALIGN_TEXT ;						\
/* _XintrNN: entry point used by IDT/HWIs & splz_unpend via _vec[]. */	\
IDTVEC(vec_name) ;							\
	PUSH_FRAME ;							\
	movl	$KDSEL, %eax ;	/* reload with kernel's data segment */	\
	movl	%ax, %ds ;						\
	movl	%ax, %es ;						\
;									\
	lock ;					/* MP-safe */		\
	btsl	$(irq_num), iactive ;		/* lazy masking */	\
	jc	1f ;				/* already active */	\
;									\
	ENLOCK ;							\
;									\
	AVCPL_LOCK ;				/* MP-safe */		\
	testl	$IRQ_BIT(irq_num), _cpl ;				\
	jne	2f ;				/* this INT masked */	\
	testl	$IRQ_BIT(irq_num), _cml ;				\
	jne	2f ;				/* this INT masked */	\
	orl	$IRQ_BIT(irq_num), _cil ;				\
	AVCPL_UNLOCK ;							\
;									\
	movl	$0, lapic_eoi ;			/* XXX too soon? */	\
	incb	_intr_nesting_level ;					\
;	 								\
  /* entry point used by doreti_unpend for HWIs. */			\
__CONCAT(Xresume,irq_num): ;						\
	FAKE_MCOUNT(12*4(%esp)) ;		/* XXX avoid dbl cnt */ \
	lock ;	incl	_cnt+V_INTR ;		/* tally interrupts */	\
	movl	_intr_countp + (irq_num) * 4, %eax ;			\
	lock ;	incl	(%eax) ;					\
;									\
	AVCPL_LOCK ;				/* MP-safe */		\
	movl	_cml, %eax ;						\
	pushl	%eax ;			/* save previous cml */		\
	orl	_intr_mask + (irq_num) * 4, %eax ;			\
	movl	%eax, _cml ;						\
	AVCPL_UNLOCK ;							\
;									\
	pushl	_intr_unit + (irq_num) * 4 ;				\
	incl	_inside_intr ;						\
	sti ;								\
	call	*_intr_handler + (irq_num) * 4 ;			\
	cli ;								\
	decl	_inside_intr ;						\
;									\
	lock ;	andl $~IRQ_BIT(irq_num), iactive ;			\
	lock ;	andl $~IRQ_BIT(irq_num), _cil ;				\
	UNMASK_IRQ(irq_num) ;						\
	sti ;				/* doreti repeats cli/sti */	\
	MEXITCOUNT ;							\
	LATELOCK ;							\
	jmp	_doreti ;						\
;									\
	ALIGN_TEXT ;							\
1: ;						/* active or locked */	\
	MASK_LEVEL_IRQ(irq_num) ;					\
	movl	$0, lapic_eoi ;			/* do the EOI */	\
;									\
	AVCPL_LOCK ;				/* MP-safe */		\
	orl	$IRQ_BIT(irq_num), _ipending ;				\
	AVCPL_UNLOCK ;							\
;									\
	POP_FRAME ;							\
	iret ;								\
;									\
	ALIGN_TEXT ;							\
2: ;						/* masked by cpl|cml */	\
	AVCPL_UNLOCK ;							\
	DELOCK ;		/* XXX this is going away... */		\
	jmp	1b
291
#else /* CPL_AND_CML */

/*
 * INTR(irq_num, vec_name): normal interrupt entry point.
 *  Builds a full trap frame, sets this IRQ's iactive bit for lazy
 *  masking (bailing out if already active), try-acquires the ISR
 *  lock and tests _cpl; a masked or blocked IRQ is deferred by
 *  setting its _ipending bit.  Otherwise the handler runs with
 *  interrupts enabled and _cpl raised by the IRQ's mask; afterwards
 *  iactive is cleared, the pin is unmasked and control leaves via
 *  _doreti.
 */
#define	INTR(irq_num, vec_name)						\
	.text ;								\
	SUPERALIGN_TEXT ;						\
/* _XintrNN: entry point used by IDT/HWIs & splz_unpend via _vec[]. */	\
IDTVEC(vec_name) ;							\
	PUSH_FRAME ;							\
	movl	$KDSEL, %eax ;	/* reload with kernel's data segment */	\
	movl	%ax, %ds ;						\
	movl	%ax, %es ;						\
;									\
	lock ;					/* MP-safe */		\
	btsl	$(irq_num), iactive ;		/* lazy masking */	\
	jc	1f ;				/* already active */	\
;									\
	ISR_TRYLOCK ;		/* XXX this is going away... */		\
	testl	%eax, %eax ;			/* did we get it? */	\
	jz	1f ;				/* no */		\
;									\
	AVCPL_LOCK ;				/* MP-safe */		\
	testl	$IRQ_BIT(irq_num), _cpl ;				\
	jne	2f ;				/* this INT masked */	\
	AVCPL_UNLOCK ;							\
;									\
	movl	$0, lapic_eoi ;			/* XXX too soon? */	\
	incb	_intr_nesting_level ;					\
;	 								\
  /* entry point used by doreti_unpend for HWIs. */			\
__CONCAT(Xresume,irq_num): ;						\
	FAKE_MCOUNT(12*4(%esp)) ;		/* XXX avoid dbl cnt */ \
	lock ;	incl	_cnt+V_INTR ;		/* tally interrupts */	\
	movl	_intr_countp + (irq_num) * 4, %eax ;			\
	lock ;	incl	(%eax) ;					\
;									\
	AVCPL_LOCK ;				/* MP-safe */		\
	movl	_cpl, %eax ;						\
	pushl	%eax ;			/* save previous cpl */		\
	orl	_intr_mask + (irq_num) * 4, %eax ;			\
	movl	%eax, _cpl ;						\
	AVCPL_UNLOCK ;							\
;									\
	pushl	_intr_unit + (irq_num) * 4 ;				\
	sti ;								\
	call	*_intr_handler + (irq_num) * 4 ;			\
	cli ;								\
;									\
	lock ;	andl	$~IRQ_BIT(irq_num), iactive ;			\
	UNMASK_IRQ(irq_num) ;						\
	sti ;				/* doreti repeats cli/sti */	\
	MEXITCOUNT ;							\
	jmp	_doreti ;						\
;									\
	ALIGN_TEXT ;							\
1: ;						/* active or locked */	\
	MASK_LEVEL_IRQ(irq_num) ;					\
	movl	$0, lapic_eoi ;			/* do the EOI */	\
;									\
	AVCPL_LOCK ;				/* MP-safe */		\
	orl	$IRQ_BIT(irq_num), _ipending ;				\
	AVCPL_UNLOCK ;							\
;									\
	POP_FRAME ;							\
	iret ;								\
;									\
	ALIGN_TEXT ;							\
2: ;						/* masked by cpl */	\
	AVCPL_UNLOCK ;							\
	ISR_RELLOCK ;		/* XXX this is going away... */		\
	jmp	1b

#endif /* CPL_AND_CML */
364
365
/*
 * Handle "spurious INTerrupts".
 * Notes:
 *  This is different than the "spurious INTerrupt" generated by an
 *   8259 PIC for missing INTs.  See the APIC documentation for details.
 *  This routine should NOT do an 'EOI' cycle.
 */
	.text
	SUPERALIGN_TEXT
	.globl _Xspuriousint
_Xspuriousint:

	/* No EOI cycle used here; just return to the interrupted code. */

	iret
381
382
/*
 * Handle TLB shootdowns.
 *
 * Reloading %cr3 invalidates the TLB.  Only %eax is saved here, so
 * kernel data is reached via %ss-prefixed accesses rather than
 * loading %ds.
 */
	.text
	SUPERALIGN_TEXT
	.globl	_Xinvltlb
_Xinvltlb:
	pushl	%eax

#ifdef COUNT_XINVLTLB_HITS
	ss				/* per-CPU shootdown counter */
	movl	_cpuid, %eax
	ss
	incl	_xhits(,%eax,4)
#endif /* COUNT_XINVLTLB_HITS */

	movl	%cr3, %eax		/* invalidate the TLB */
	movl	%eax, %cr3

	ss				/* stack segment, avoid %ds load */
	movl	$0, lapic_eoi		/* End Of Interrupt to APIC */

	popl	%eax
	iret
407
408
/*
 * Executed by a CPU when it receives an Xcpustop IPI from another CPU:
 *
 *  - Signals its receipt.
 *  - Waits for permission to restart.
 *  - Signals its restart.
 */
416
	.text
	SUPERALIGN_TEXT
	.globl _Xcpustop
_Xcpustop:
	pushl	%eax
	pushl	%ds			/* save current data segment */

	movl	$KDSEL, %eax
	movl	%ax, %ds		/* use KERNEL data segment */

	movl	_cpuid, %eax		/* this CPU's bit number */

	lock
	btsl	%eax, _stopped_cpus	/* stopped_cpus |= (1<<id) */
1:
	btl	%eax, _started_cpus	/* while (!(started_cpus & (1<<id))) */
	jnc	1b			/* spin until restarted */

	lock
	btrl	%eax, _started_cpus	/* started_cpus &= ~(1<<id) */

	movl	$0, lapic_eoi		/* End Of Interrupt to APIC */

	popl	%ds			/* restore previous data segment */
	popl	%eax
	iret
443
444
/*
 * Instantiate the IDT entry points for the 24 APIC hardware
 * interrupts, fast and normal flavors, bracketed by MCOUNT labels
 * for kernel profiling.
 */
MCOUNT_LABEL(bintr)
	FAST_INTR(0,fastintr0)
	FAST_INTR(1,fastintr1)
	FAST_INTR(2,fastintr2)
	FAST_INTR(3,fastintr3)
	FAST_INTR(4,fastintr4)
	FAST_INTR(5,fastintr5)
	FAST_INTR(6,fastintr6)
	FAST_INTR(7,fastintr7)
	FAST_INTR(8,fastintr8)
	FAST_INTR(9,fastintr9)
	FAST_INTR(10,fastintr10)
	FAST_INTR(11,fastintr11)
	FAST_INTR(12,fastintr12)
	FAST_INTR(13,fastintr13)
	FAST_INTR(14,fastintr14)
	FAST_INTR(15,fastintr15)
	FAST_INTR(16,fastintr16)
	FAST_INTR(17,fastintr17)
	FAST_INTR(18,fastintr18)
	FAST_INTR(19,fastintr19)
	FAST_INTR(20,fastintr20)
	FAST_INTR(21,fastintr21)
	FAST_INTR(22,fastintr22)
	FAST_INTR(23,fastintr23)
	INTR(0,intr0)
	INTR(1,intr1)
	INTR(2,intr2)
	INTR(3,intr3)
	INTR(4,intr4)
	INTR(5,intr5)
	INTR(6,intr6)
	INTR(7,intr7)
	INTR(8,intr8)
	INTR(9,intr9)
	INTR(10,intr10)
	INTR(11,intr11)
	INTR(12,intr12)
	INTR(13,intr13)
	INTR(14,intr14)
	INTR(15,intr15)
	INTR(16,intr16)
	INTR(17,intr17)
	INTR(18,intr18)
	INTR(19,intr19)
	INTR(20,intr20)
	INTR(21,intr21)
	INTR(22,intr22)
	INTR(23,intr23)
MCOUNT_LABEL(eintr)
495
	.data
/*
 * Addresses of interrupt handlers.
 *  XresumeNN: Resumption addresses for HWIs.
 */
	.globl _ihandlers
_ihandlers:
ihandlers:
/*
 * 24 HWI resume entries.
 * used by:
 *  ipl.s:	doreti_unpend
 */
	.long	Xresume0,  Xresume1,  Xresume2,  Xresume3
	.long	Xresume4,  Xresume5,  Xresume6,  Xresume7
	.long	Xresume8,  Xresume9,  Xresume10, Xresume11
	.long	Xresume12, Xresume13, Xresume14, Xresume15
	.long	Xresume16, Xresume17, Xresume18, Xresume19
	.long	Xresume20, Xresume21, Xresume22, Xresume23
/*
 * Software interrupt handlers, indexed above the HWI entries.
 * used by:
 *  ipl.s:	doreti_unpend
 *  apic_ipl.s:	splz_unpend
 */
	.long	swi_tty, swi_net, dummycamisr, dummycamisr
	.long	0, 0
	.long	_softclock, swi_ast

imasks:				/* masks for interrupt handlers */
	.space	NHWI*4		/* padding; HWI masks are elsewhere */

	.long	SWI_TTY_MASK, SWI_NET_MASK
	.long	SWI_CAMNET_MASK, SWI_CAMBIO_MASK
	.long	0, 0
	.long	SWI_CLOCK_MASK, SWI_AST_MASK
530
/*
 * IDT vector entry points for the HWIs.
 *
 * used by:
 *   i386/isa/clock.c:		setup Xintr8254
 */
	.globl _ivectors
_ivectors:
	.long	_Xintr0,  _Xintr1,  _Xintr2,  _Xintr3
	.long	_Xintr4,  _Xintr5,  _Xintr6,  _Xintr7
	.long	_Xintr8,  _Xintr9,  _Xintr10, _Xintr11
	.long	_Xintr12, _Xintr13, _Xintr14, _Xintr15
	.long	_Xintr16, _Xintr17, _Xintr18, _Xintr19
	.long	_Xintr20, _Xintr21, _Xintr22, _Xintr23

/* active flag for lazy masking; bit N set while IRQ N is being handled */
iactive:
	.long	0

#ifdef COUNT_XINVLTLB_HITS
/* per-CPU counters of TLB-shootdown IPIs received (see _Xinvltlb) */
	.globl	_xhits
_xhits:
	.space	(NCPU * 4), 0
#endif /* COUNT_XINVLTLB_HITS */

/* variables used by stop_cpus()/restart_cpus()/Xcpustop */
	.globl _stopped_cpus, _started_cpus
_stopped_cpus:
	.long	0
_started_cpus:
	.long	0

/* per-IO-APIC bitmaps of level-triggered pins (tested by MASK_LEVEL_IRQ) */
	.globl	_apic_pin_trigger
_apic_pin_trigger:
	.space	(NAPIC * 4), 0
566
567
/*
 * Interrupt counters and names.  The format of these and the label names
 * must agree with what vmstat expects.  The tables are indexed by device
 * ids so that we don't have to move the names around as devices are
 * attached.
 */
#include "vector.h"
	.globl	_intrcnt, _eintrcnt
_intrcnt:
	.space	(NR_DEVICES + ICU_LEN) * 4	/* one counter per source */
_eintrcnt:

/* NUL-separated name strings, parallel to _intrcnt */
	.globl	_intrnames, _eintrnames
_intrnames:
	.ascii	DEVICE_NAMES
	.asciz	"stray irq0"
	.asciz	"stray irq1"
	.asciz	"stray irq2"
	.asciz	"stray irq3"
	.asciz	"stray irq4"
	.asciz	"stray irq5"
	.asciz	"stray irq6"
	.asciz	"stray irq7"
	.asciz	"stray irq8"
	.asciz	"stray irq9"
	.asciz	"stray irq10"
	.asciz	"stray irq11"
	.asciz	"stray irq12"
	.asciz	"stray irq13"
	.asciz	"stray irq14"
	.asciz	"stray irq15"
	.asciz	"stray irq16"
	.asciz	"stray irq17"
	.asciz	"stray irq18"
	.asciz	"stray irq19"
	.asciz	"stray irq20"
	.asciz	"stray irq21"
	.asciz	"stray irq22"
	.asciz	"stray irq23"
_eintrnames:

	.text
610