apic_vector.s revision 83366
/*
 *	from: vector.s, 386BSD 0.1 unknown origin
 * $FreeBSD: head/sys/i386/i386/apic_vector.s 83366 2001-09-12 08:38:13Z julian $
 */


#include <machine/apic.h>
#include <machine/smp.h>

#include "i386/isa/intr_machdep.h"

/* convert an absolute IRQ# into a bitmask */
#define IRQ_BIT(irq_num)	(1 << (irq_num))

/* make an index into the IO APIC from the IRQ# */
#define REDTBL_IDX(irq_num)	(0x10 + ((irq_num) * 2))
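
/*
 * Worked example: each IO APIC redirection-table entry is a pair of
 * 32-bit registers, and the table starts at register index 0x10, so
 * REDTBL_IDX(5) = 0x10 + 5 * 2 = 0x1a selects the low dword of the
 * entry for IRQ 5.
 */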

/*
 * Macros to push and pop an interrupt trap frame: a dummy error code
 * and trap type, the eight pushal registers, and the %ds/%es/%fs
 * segment registers.
 */
#define PUSH_FRAME							\
	pushl	$0 ;		/* dummy error code */			\
	pushl	$0 ;		/* dummy trap type */			\
	pushal ;							\
	pushl	%ds ;		/* save data and extra segments ... */	\
	pushl	%es ;							\
	pushl	%fs

#define POP_FRAME							\
	popl	%fs ;							\
	popl	%es ;							\
	popl	%ds ;							\
	popal ;								\
	addl	$4+4,%esp
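
/*
 * After PUSH_FRAME the kernel stack holds, top down: %fs, %es, %ds,
 * the eight pushal registers, the dummy trap type and error code, and
 * finally the hardware interrupt frame.  The saved %eip therefore sits
 * at 13*4(%esp), which is the offset the FAKE_MCOUNT() invocations
 * below rely on.  POP_FRAME's final "addl $4+4,%esp" discards the two
 * dummy words.
 */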

/*
 * Macros for interrupt entry, call to handler, and exit.
 */

#define	FAST_INTR(irq_num, vec_name)					\
	.text ;								\
	SUPERALIGN_TEXT ;						\
IDTVEC(vec_name) ;							\
	PUSH_FRAME ;							\
	movl	$KDSEL,%eax ;						\
	mov	%ax,%ds ;						\
	mov	%ax,%es ;						\
	movl	$KPSEL,%eax ;						\
	mov	%ax,%fs ;						\
	FAKE_MCOUNT(13*4(%esp)) ;					\
	movl	PCPU(CURTHREAD),%ebx ;					\
	incl	TD_INTR_NESTING_LEVEL(%ebx) ;				\
	pushl	intr_unit + (irq_num) * 4 ;				\
	call	*intr_handler + (irq_num) * 4 ;	/* do the work ASAP */	\
	addl	$4, %esp ;						\
	movl	$0, lapic+LA_EOI ;					\
	lock ;								\
	incl	cnt+V_INTR ;	/* book-keeping can wait */		\
	movl	intr_countp + (irq_num) * 4, %eax ;			\
	lock ;								\
	incl	(%eax) ;						\
	decl	TD_INTR_NESTING_LEVEL(%ebx) ;				\
	MEXITCOUNT ;							\
	jmp	doreti
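
/*
 * A hedged C sketch of what FAST_INTR expands to; the array names
 * mirror the symbols used above, the helper names are illustrative:
 *
 *	void
 *	Xfastintr_N(void)		// interrupts stay disabled
 *	{
 *		curthread->td_intr_nesting_level++;
 *		(*intr_handler[N])(intr_unit[N]);	// do the work ASAP
 *		lapic.eoi = 0;				// ack the local APIC
 *		atomically_increment(&cnt.v_intr);	// book-keeping
 *		atomically_increment(intr_countp[N]);
 *		curthread->td_intr_nesting_level--;
 *		// ... then fall into doreti to catch any pending ASTs
 *	}
 */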

#define IOAPICADDR(irq_num) CNAME(int_to_apicintpin) + 16 * (irq_num) + 8
#define REDIRIDX(irq_num) CNAME(int_to_apicintpin) + 16 * (irq_num) + 12
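
/*
 * These offsets reach into the 16-byte-per-IRQ int_to_apicintpin[]
 * table: judging from the uses below, the field at offset 8 is the
 * mapped address of the IRQ's IO APIC and the field at offset 12 is
 * its redirection-table index (the struct itself lives in the MD
 * interrupt code).
 */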

#define MASK_IRQ(irq_num)						\
	IMASK_LOCK ;				/* into critical reg */	\
	testl	$IRQ_BIT(irq_num), apic_imen ;				\
	jne	7f ;			/* masked, don't mask */	\
	orl	$IRQ_BIT(irq_num), apic_imen ;	/* set the mask bit */	\
	movl	IOAPICADDR(irq_num), %ecx ;	/* ioapic addr */	\
	movl	REDIRIDX(irq_num), %eax ;	/* get the index */	\
	movl	%eax, (%ecx) ;			/* write the index */	\
	movl	IOAPIC_WINDOW(%ecx), %eax ;	/* current value */	\
	orl	$IOART_INTMASK, %eax ;		/* set the mask */	\
	movl	%eax, IOAPIC_WINDOW(%ecx) ;	/* new value */		\
7: ;						/* already masked */	\
	IMASK_UNLOCK
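
/*
 * A hedged C sketch of MASK_IRQ; the IO APIC is programmed indirectly,
 * by storing a register index at its base address and then accessing
 * the selected register through the data window at IOAPIC_WINDOW:
 *
 *	volatile u_int *ioapic = IOAPICADDR(irq);
 *	IMASK_LOCK();
 *	if ((apic_imen & IRQ_BIT(irq)) == 0) {	// not already masked
 *		apic_imen |= IRQ_BIT(irq);
 *		ioapic[0] = REDIRIDX(irq);	// select redirection reg
 *		ioapic[IOAPIC_WINDOW / 4] |= IOART_INTMASK;
 *	}
 *	IMASK_UNLOCK();
 */
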
/*
 * Test to see whether we are handling an edge or level triggered INT.
 *  Level-triggered INTs must still be masked as we don't clear the source,
 *  and the EOI cycle would cause redundant INTs to occur.
 */
#define MASK_LEVEL_IRQ(irq_num)						\
	testl	$IRQ_BIT(irq_num), apic_pin_trigger ;			\
	jz	9f ;				/* edge, don't mask */	\
	MASK_IRQ(irq_num) ;						\
9:


#ifdef APIC_INTR_REORDER
#define EOI_IRQ(irq_num)						\
	movl	apic_isrbit_location + 8 * (irq_num), %eax ;		\
	movl	(%eax), %eax ;						\
	testl	apic_isrbit_location + 4 + 8 * (irq_num), %eax ;	\
	jz	9f ;				/* not active */	\
	movl	$0, lapic+LA_EOI ;					\
9:

#else
#define EOI_IRQ(irq_num)						\
	testl	$IRQ_BIT(irq_num), lapic+LA_ISR1;			\
	jz	9f	;			/* not active */	\
	movl	$0, lapic+LA_EOI;					\
9:
#endif
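
/*
 * The simple variant above relies on hardware IRQ irq_num being
 * assigned vector 0x20 + irq_num: LA_ISR1 is the local APIC's
 * in-service register word covering vectors 32-63, so bit irq_num of
 * that word is set exactly while this IRQ is being serviced, and the
 * EOI is only written when the source is in fact active.
 */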


/*
 * Test to see if the source is currently masked, clear if so.
 */
#define UNMASK_IRQ(irq_num)						\
	IMASK_LOCK ;				/* into critical reg */	\
	testl	$IRQ_BIT(irq_num), apic_imen ;				\
	je	7f ;			/* bit clear, not masked */	\
	andl	$~IRQ_BIT(irq_num), apic_imen ;	/* clear mask bit */	\
	movl	IOAPICADDR(irq_num), %ecx ;	/* ioapic addr */	\
	movl	REDIRIDX(irq_num), %eax ;	/* get the index */	\
	movl	%eax, (%ecx) ;			/* write the index */	\
	movl	IOAPIC_WINDOW(%ecx), %eax ;	/* current value */	\
	andl	$~IOART_INTMASK, %eax ;		/* clear the mask */	\
	movl	%eax, IOAPIC_WINDOW(%ecx) ;	/* new value */		\
7: ;						/* already unmasked */	\
	IMASK_UNLOCK

/*
 * Slow, threaded interrupts.
 *
 * XXX Most of the parameters here are obsolete.  Fix this when we're
 * done.
 * XXX we really shouldn't return via doreti if we just schedule the
 * interrupt handler and don't run anything.  We could just do an
 * iret.  FIXME.
 */
#define	INTR(irq_num, vec_name, maybe_extra_ipending)			\
	.text ;								\
	SUPERALIGN_TEXT ;						\
/* _XintrNN: entry point used by IDT/HWIs via _vec[]. */		\
IDTVEC(vec_name) ;							\
	PUSH_FRAME ;							\
	movl	$KDSEL, %eax ;	/* reload with kernel's data segment */	\
	mov	%ax, %ds ;						\
	mov	%ax, %es ;						\
	movl	$KPSEL, %eax ;						\
	mov	%ax, %fs ;						\
;									\
	maybe_extra_ipending ;						\
;									\
	MASK_LEVEL_IRQ(irq_num) ;					\
	EOI_IRQ(irq_num) ;						\
0: ;									\
	movl	PCPU(CURTHREAD),%ebx ;					\
	incl	TD_INTR_NESTING_LEVEL(%ebx) ;				\
;									\
  /* entry point used by doreti_unpend for HWIs. */			\
__CONCAT(Xresume,irq_num): ;						\
	FAKE_MCOUNT(13*4(%esp)) ;		/* XXX avoid dbl cnt */ \
	pushl	$irq_num;			/* pass the IRQ */	\
	call	sched_ithd ;						\
	addl	$4, %esp ;		/* discard the parameter */	\
;									\
	decl	TD_INTR_NESTING_LEVEL(%ebx) ;				\
	MEXITCOUNT ;							\
	jmp	doreti
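
/*
 * A hedged C sketch of the threaded path; unlike FAST_INTR, the vector
 * only masks/EOIs the source and wakes the interrupt thread, which
 * runs the handler later in thread context (the helper names are
 * illustrative stand-ins for the macro bodies above):
 *
 *	void
 *	Xintr_N(void)
 *	{
 *		if (level_triggered(N))		// MASK_LEVEL_IRQ
 *			mask_ioapic_pin(N);
 *		if (in_service(N))		// EOI_IRQ
 *			lapic.eoi = 0;
 *		curthread->td_intr_nesting_level++;
 *		sched_ithd(N);			// schedule the ithread
 *		curthread->td_intr_nesting_level--;
 *		// ... then return via doreti (see the XXX above)
 *	}
 */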

/*
 * Handle "spurious INTerrupts".
 * Notes:
 *  This is different from the "spurious INTerrupt" generated by an
 *   8259 PIC for missing INTs.  See the APIC documentation for details.
 *  This routine should NOT do an 'EOI' cycle.
 */
	.text
	SUPERALIGN_TEXT
	.globl Xspuriousint
Xspuriousint:

	/* No EOI cycle used here */

	iret

/*
 * Handle TLB shootdowns.
 */
	.text
	SUPERALIGN_TEXT
	.globl	Xinvltlb
Xinvltlb:
	pushl	%eax

#ifdef COUNT_XINVLTLB_HITS
	pushl	%fs
	movl	$KPSEL, %eax
	mov	%ax, %fs
	movl	PCPU(CPUID), %eax
	popl	%fs
	ss
	incl	_xhits(,%eax,4)
#endif /* COUNT_XINVLTLB_HITS */

	movl	%cr3, %eax		/* invalidate the TLB */
	movl	%eax, %cr3

	ss				/* stack segment, avoid %ds load */
	movl	$0, lapic+LA_EOI	/* End Of Interrupt to APIC */

	popl	%eax
	iret
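
/*
 * Writing %cr3 back to itself flushes every non-global TLB entry;
 * it is roughly the assembly form of the C idiom load_cr3(rcr3()).
 */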

/*
 * Forward hardclock to another CPU.  Pushes a trapframe and calls
 * forwarded_hardclock().
 */
	.text
	SUPERALIGN_TEXT
	.globl Xhardclock
Xhardclock:
	PUSH_FRAME
	movl	$KDSEL, %eax	/* reload with kernel's data segment */
	mov	%ax, %ds
	mov	%ax, %es
	movl	$KPSEL, %eax
	mov	%ax, %fs

	movl	$0, lapic+LA_EOI	/* End Of Interrupt to APIC */

	movl	PCPU(CURTHREAD),%ebx
	incl	TD_INTR_NESTING_LEVEL(%ebx)
	call	forwarded_hardclock
	decl	TD_INTR_NESTING_LEVEL(%ebx)
	MEXITCOUNT
	jmp	doreti

/*
 * Forward statclock to another CPU.  Pushes a trapframe and calls
 * forwarded_statclock().
 */
	.text
	SUPERALIGN_TEXT
	.globl Xstatclock
Xstatclock:
	PUSH_FRAME
	movl	$KDSEL, %eax	/* reload with kernel's data segment */
	mov	%ax, %ds
	mov	%ax, %es
	movl	$KPSEL, %eax
	mov	%ax, %fs

	movl	$0, lapic+LA_EOI	/* End Of Interrupt to APIC */

	FAKE_MCOUNT(13*4(%esp))
	movl	PCPU(CURTHREAD),%ebx
	incl	TD_INTR_NESTING_LEVEL(%ebx)
	call	forwarded_statclock
	decl	TD_INTR_NESTING_LEVEL(%ebx)
	MEXITCOUNT
	jmp	doreti

/*
 * Executed by a CPU when it receives an Xcpuast IPI from another CPU.
 *
 * The other CPU has already executed aston() or need_resched() on our
 * current process, so we simply need to ack the interrupt and return
 * via doreti to run ast().
 */

	.text
	SUPERALIGN_TEXT
	.globl Xcpuast
Xcpuast:
	PUSH_FRAME
	movl	$KDSEL, %eax
	mov	%ax, %ds		/* use KERNEL data segment */
	mov	%ax, %es
	movl	$KPSEL, %eax
	mov	%ax, %fs

	movl	$0, lapic+LA_EOI	/* End Of Interrupt to APIC */

	FAKE_MCOUNT(13*4(%esp))

	MEXITCOUNT
	jmp	doreti

/*
 * Executed by a CPU when it receives an Xcpustop IPI from another CPU.
 *
 *  - Signals its receipt.
 *  - Waits for permission to restart.
 *  - Signals its restart.
 */
	.text
	SUPERALIGN_TEXT
	.globl Xcpustop
Xcpustop:
	pushl	%ebp
	movl	%esp, %ebp
	pushl	%eax
	pushl	%ecx
	pushl	%edx
	pushl	%ds			/* save current data segment */
	pushl	%fs

	movl	$KDSEL, %eax
	mov	%ax, %ds		/* use KERNEL data segment */
	movl	$KPSEL, %eax
	mov	%ax, %fs

	movl	$0, lapic+LA_EOI	/* End Of Interrupt to APIC */

	movl	PCPU(CPUID), %eax
	imull	$PCB_SIZE, %eax
	leal	CNAME(stoppcbs)(%eax), %eax
	pushl	%eax
	call	CNAME(savectx)		/* Save process context */
	addl	$4, %esp

	movl	PCPU(CPUID), %eax

	lock
	btsl	%eax, CNAME(stopped_cpus) /* stopped_cpus |= (1<<id) */
1:
	btl	%eax, CNAME(started_cpus) /* while (!(started_cpus & (1<<id))) */
	jnc	1b

	lock
	btrl	%eax, CNAME(started_cpus) /* started_cpus &= ~(1<<id) */
	lock
	btrl	%eax, CNAME(stopped_cpus) /* stopped_cpus &= ~(1<<id) */

	test	%eax, %eax		/* CPU 0? */
	jnz	2f			/* no, skip the restart function */

	movl	CNAME(cpustop_restartfunc), %eax
	test	%eax, %eax
	jz	2f
	movl	$0, CNAME(cpustop_restartfunc)	/* One-shot */

	call	*%eax
2:
	popl	%fs
	popl	%ds			/* restore previous data segment */
	popl	%edx
	popl	%ecx
	popl	%eax
	movl	%ebp, %esp
	popl	%ebp
	iret
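
/*
 * The stop/restart handshake above, as a hedged C sketch (mask is
 * 1 << cpuid; the atomic_* names stand in for the lock-prefixed bit
 * operations):
 *
 *	savectx(&stoppcbs[cpuid]);
 *	atomic_set_bit(&stopped_cpus, cpuid);	// signal receipt
 *	while (!(started_cpus & mask))		// wait for permission
 *		;
 *	atomic_clear_bit(&started_cpus, cpuid);	// signal restart
 *	atomic_clear_bit(&stopped_cpus, cpuid);
 *	if (cpuid == 0 && cpustop_restartfunc != NULL) {
 *		func = cpustop_restartfunc;
 *		cpustop_restartfunc = NULL;	// one-shot
 *		(*func)();
 *	}
 */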


MCOUNT_LABEL(bintr)
	FAST_INTR(0,fastintr0)
	FAST_INTR(1,fastintr1)
	FAST_INTR(2,fastintr2)
	FAST_INTR(3,fastintr3)
	FAST_INTR(4,fastintr4)
	FAST_INTR(5,fastintr5)
	FAST_INTR(6,fastintr6)
	FAST_INTR(7,fastintr7)
	FAST_INTR(8,fastintr8)
	FAST_INTR(9,fastintr9)
	FAST_INTR(10,fastintr10)
	FAST_INTR(11,fastintr11)
	FAST_INTR(12,fastintr12)
	FAST_INTR(13,fastintr13)
	FAST_INTR(14,fastintr14)
	FAST_INTR(15,fastintr15)
	FAST_INTR(16,fastintr16)
	FAST_INTR(17,fastintr17)
	FAST_INTR(18,fastintr18)
	FAST_INTR(19,fastintr19)
	FAST_INTR(20,fastintr20)
	FAST_INTR(21,fastintr21)
	FAST_INTR(22,fastintr22)
	FAST_INTR(23,fastintr23)
	FAST_INTR(24,fastintr24)
	FAST_INTR(25,fastintr25)
	FAST_INTR(26,fastintr26)
	FAST_INTR(27,fastintr27)
	FAST_INTR(28,fastintr28)
	FAST_INTR(29,fastintr29)
	FAST_INTR(30,fastintr30)
	FAST_INTR(31,fastintr31)
#define	CLKINTR_PENDING	movl $1,CNAME(clkintr_pending)
/* Threaded interrupts */
	INTR(0,intr0, CLKINTR_PENDING)
	INTR(1,intr1,)
	INTR(2,intr2,)
	INTR(3,intr3,)
	INTR(4,intr4,)
	INTR(5,intr5,)
	INTR(6,intr6,)
	INTR(7,intr7,)
	INTR(8,intr8,)
	INTR(9,intr9,)
	INTR(10,intr10,)
	INTR(11,intr11,)
	INTR(12,intr12,)
	INTR(13,intr13,)
	INTR(14,intr14,)
	INTR(15,intr15,)
	INTR(16,intr16,)
	INTR(17,intr17,)
	INTR(18,intr18,)
	INTR(19,intr19,)
	INTR(20,intr20,)
	INTR(21,intr21,)
	INTR(22,intr22,)
	INTR(23,intr23,)
	INTR(24,intr24,)
	INTR(25,intr25,)
	INTR(26,intr26,)
	INTR(27,intr27,)
	INTR(28,intr28,)
	INTR(29,intr29,)
	INTR(30,intr30,)
	INTR(31,intr31,)
MCOUNT_LABEL(eintr)

/*
 * Executed by a CPU when it receives a RENDEZVOUS IPI from another CPU.
 *
 * - Calls the generic rendezvous action function.
 */
	.text
	SUPERALIGN_TEXT
	.globl	Xrendezvous
Xrendezvous:
	PUSH_FRAME
	movl	$KDSEL, %eax
	mov	%ax, %ds		/* use KERNEL data segment */
	mov	%ax, %es
	movl	$KPSEL, %eax
	mov	%ax, %fs

	call	smp_rendezvous_action

	movl	$0, lapic+LA_EOI	/* End Of Interrupt to APIC */
	POP_FRAME
	iret


	.data

#ifdef COUNT_XINVLTLB_HITS
	.globl	_xhits
_xhits:
	.space	(NCPU * 4), 0
#endif /* COUNT_XINVLTLB_HITS */

	.globl	apic_pin_trigger
apic_pin_trigger:
	.long	0

	.text