/* apic_vector.s, revision 84733 */
1/*
2 *	from: vector.s, 386BSD 0.1 unknown origin
3 * $FreeBSD: head/sys/i386/i386/apic_vector.s 84733 2001-10-09 19:54:52Z iedowse $
4 */
5
6
7#include <machine/apic.h>
8#include <machine/smp.h>
9
10#include "i386/isa/intr_machdep.h"
11
/* convert an absolute IRQ# into a bitmask */
#define IRQ_BIT(irq_num)	(1 << (irq_num))

/* make an index into the IO APIC redirection table from the IRQ# (2 regs/pin) */
#define REDTBL_IDX(irq_num)	(0x10 + ((irq_num) * 2))

/*
 * PUSH_FRAME/POP_FRAME: build and tear down a trapframe around an
 * interrupt.  PUSH_FRAME pushes dummy error-code and trap-type words so
 * the layout matches a CPU exception frame, then saves the general
 * registers and the %ds/%es/%fs segment registers.  POP_FRAME restores
 * them in reverse order; the final addl discards the two dummy words.
 */
#define PUSH_FRAME							\
	pushl	$0 ;		/* dummy error code */			\
	pushl	$0 ;		/* dummy trap type */			\
	pushal ;							\
	pushl	%ds ;		/* save data and extra segments ... */	\
	pushl	%es ;							\
	pushl	%fs

#define POP_FRAME							\
	popl	%fs ;							\
	popl	%es ;							\
	popl	%ds ;							\
	popal ;								\
	addl	$4+4,%esp	/* discard dummy trap type/error code */
35
/*
 * Macros for interrupt entry, call to handler, and exit.
 */

/*
 * FAST_INTR(irq_num, vec_name): entry point for a "fast" interrupt.
 * The handler is called immediately from interrupt context, the local
 * APIC is EOIed, and the statistics counters are bumped afterwards
 * ("book-keeping can wait").  Exits through doreti.
 */
#define	FAST_INTR(irq_num, vec_name)					\
	.text ;								\
	SUPERALIGN_TEXT ;						\
IDTVEC(vec_name) ;							\
	PUSH_FRAME ;							\
	movl	$KDSEL,%eax ;	/* reload with kernel data selector */	\
	mov	%ax,%ds ;						\
	mov	%ax,%es ;						\
	movl	$KPSEL,%eax ;	/* per-cpu data via %fs */		\
	mov	%ax,%fs ;						\
	FAKE_MCOUNT(13*4(%esp)) ;					\
	movl	PCPU(CURTHREAD),%ebx ;					\
	incl	TD_INTR_NESTING_LEVEL(%ebx) ;				\
	pushl	intr_unit + (irq_num) * 4 ;	/* handler argument */	\
	call	*intr_handler + (irq_num) * 4 ;	/* do the work ASAP */	\
	addl	$4, %esp ;						\
	movl	$0, lapic+LA_EOI ;	/* EOI the local APIC */	\
	lock ; 								\
	incl	cnt+V_INTR ;	/* book-keeping can wait */		\
	movl	intr_countp + (irq_num) * 4, %eax ;			\
	lock ; 								\
	incl	(%eax) ;	/* per-IRQ counter */			\
	decl	TD_INTR_NESTING_LEVEL(%ebx) ;				\
	MEXITCOUNT ;							\
	jmp	doreti
65
/*
 * IOAPICADDR/REDIRIDX: offsets into the int_to_apicintpin[] table
 * (16 bytes per entry) yielding the IO APIC register-select address
 * and the redirection-table index for a given IRQ.
 */
#define IOAPICADDR(irq_num) CNAME(int_to_apicintpin) + 16 * (irq_num) + 8
#define REDIRIDX(irq_num) CNAME(int_to_apicintpin) + 16 * (irq_num) + 12

/*
 * MASK_IRQ(irq_num): set the IRQ's bit in apic_imen and the INTMASK
 * bit in its IO APIC redirection entry, unless it is already masked.
 * Runs under IMASK_LOCK.
 */
#define MASK_IRQ(irq_num)						\
	IMASK_LOCK ;				/* into critical reg */	\
	testl	$IRQ_BIT(irq_num), apic_imen ;				\
	jne	7f ;			/* masked, don't mask */	\
	orl	$IRQ_BIT(irq_num), apic_imen ;	/* set the mask bit */	\
	movl	IOAPICADDR(irq_num), %ecx ;	/* ioapic addr */	\
	movl	REDIRIDX(irq_num), %eax ;	/* get the index */	\
	movl	%eax, (%ecx) ;			/* write the index */	\
	movl	IOAPIC_WINDOW(%ecx), %eax ;	/* current value */	\
	orl	$IOART_INTMASK, %eax ;		/* set the mask */	\
	movl	%eax, IOAPIC_WINDOW(%ecx) ;	/* new value */		\
7: ;						/* already masked */	\
	IMASK_UNLOCK
/*
 * Test to see whether we are handling an edge or level triggered INT.
 *  Level-triggered INTs must still be masked as we don't clear the source,
 *  and the EOI cycle would cause redundant INTs to occur.
 *  (apic_pin_trigger has a bit set for each level-triggered pin.)
 */
#define MASK_LEVEL_IRQ(irq_num)						\
	testl	$IRQ_BIT(irq_num), apic_pin_trigger ;			\
	jz	9f ;				/* edge, don't mask */	\
	MASK_IRQ(irq_num) ;						\
9:
92
93
/*
 * EOI_IRQ(irq_num): EOI the local APIC, but only if the interrupt's
 * in-service (ISR) bit is actually set, so we do not accidentally EOI
 * some other interrupt.  The APIC_INTR_REORDER variant looks the ISR
 * word/bit up via the precomputed apic_isrbit_location[] table.
 */
#ifdef APIC_INTR_REORDER
#define EOI_IRQ(irq_num)						\
	movl	apic_isrbit_location + 8 * (irq_num), %eax ;		\
	movl	(%eax), %eax ;		/* load ISR word */		\
	testl	apic_isrbit_location + 4 + 8 * (irq_num), %eax ;	\
	jz	9f ;				/* not active */	\
	movl	$0, lapic+LA_EOI ;					\
9:

#else
#define EOI_IRQ(irq_num)						\
	testl	$IRQ_BIT(irq_num), lapic+LA_ISR1;			\
	jz	9f	;			/* not active */	\
	movl	$0, lapic+LA_EOI;					\
9:
#endif
110
111
/*
 * UNMASK_IRQ(irq_num): test to see if the source is currently masked,
 * clear if so — both the IRQ's bit in apic_imen and the INTMASK bit in
 * its IO APIC redirection entry.  Runs under IMASK_LOCK.
 *
 * Fixed: referenced the underscore-prefixed `_apic_imen`, but the
 * symbol used everywhere else in this file (see MASK_IRQ) is
 * `apic_imen`; the old name would not resolve at link time.
 */
#define UNMASK_IRQ(irq_num)					\
	IMASK_LOCK ;				/* into critical reg */	\
	testl	$IRQ_BIT(irq_num), apic_imen ;				\
	je	7f ;			/* bit clear, not masked */	\
	andl	$~IRQ_BIT(irq_num), apic_imen ;/* clear mask bit */	\
	movl	IOAPICADDR(irq_num), %ecx ;	/* ioapic addr */	\
	movl	REDIRIDX(irq_num), %eax ;	/* get the index */	\
	movl	%eax, (%ecx) ;			/* write the index */	\
	movl	IOAPIC_WINDOW(%ecx), %eax ;	/* current value */	\
	andl	$~IOART_INTMASK, %eax ;		/* clear the mask */	\
	movl	%eax, IOAPIC_WINDOW(%ecx) ;	/* new value */		\
7: ;						/* already unmasked */	\
	IMASK_UNLOCK
128
/*
 * Slow, threaded interrupts: mask (if level-triggered) and EOI the
 * source, then hand the IRQ to sched_ithd() to run the interrupt
 * thread.
 *
 * XXX Most of the parameters here are obsolete.  Fix this when we're
 * done.
 * XXX we really shouldn't return via doreti if we just schedule the
 * interrupt handler and don't run anything.  We could just do an
 * iret.  FIXME.
 */
#define	INTR(irq_num, vec_name, maybe_extra_ipending)			\
	.text ;								\
	SUPERALIGN_TEXT ;						\
/* XintrNN: entry point used by IDT/HWIs via vec[]. */			\
IDTVEC(vec_name) ;							\
	PUSH_FRAME ;							\
	movl	$KDSEL, %eax ;	/* reload with kernel's data segment */	\
	mov	%ax, %ds ;						\
	mov	%ax, %es ;						\
	movl	$KPSEL, %eax ;	/* per-cpu data via %fs */		\
	mov	%ax, %fs ;						\
;									\
	maybe_extra_ipending ;	/* e.g. CLKINTR_PENDING for IRQ 0 */	\
;									\
	MASK_LEVEL_IRQ(irq_num) ;					\
	EOI_IRQ(irq_num) ;						\
0: ;									\
	movl	PCPU(CURTHREAD),%ebx ;					\
	incl	TD_INTR_NESTING_LEVEL(%ebx) ;				\
;	 								\
	FAKE_MCOUNT(13*4(%esp)) ;		/* XXX avoid dbl cnt */ \
	pushl	$irq_num;			/* pass the IRQ */	\
	call	sched_ithd ;						\
	addl	$4, %esp ;		/* discard the parameter */	\
;									\
	decl	TD_INTR_NESTING_LEVEL(%ebx) ;				\
	MEXITCOUNT ;							\
	jmp	doreti
166
/*
 * Handle "spurious INTerrupts".
 * Notes:
 *  This is different than the "spurious INTerrupt" generated by an
 *   8259 PIC for missing INTs.  See the APIC documentation for details.
 *  This routine should NOT do an 'EOI' cycle.
 */
	.text
	SUPERALIGN_TEXT
	.globl Xspuriousint
Xspuriousint:

	/* No EOI cycle used here */

	iret
182
/*
 * Handle TLB shootdowns.
 *
 * Flushes the entire TLB by reloading %cr3.  Only %eax is touched, so
 * no full trapframe is built; memory references use an %ss override so
 * %ds need not be reloaded.
 */
	.text
	SUPERALIGN_TEXT
	.globl	Xinvltlb
Xinvltlb:
	pushl	%eax

#ifdef COUNT_XINVLTLB_HITS
	pushl	%fs
	movl	$KPSEL, %eax		/* per-cpu data via %fs */
	mov	%ax, %fs
	movl	PCPU(CPUID), %eax
	popl	%fs
	ss				/* avoid %ds load, see below */
	incl	_xhits(,%eax,4)
#endif /* COUNT_XINVLTLB_HITS */

	movl	%cr3, %eax		/* invalidate the TLB */
	movl	%eax, %cr3

	ss				/* stack segment, avoid %ds load */
	movl	$0, lapic+LA_EOI	/* End Of Interrupt to APIC */

	popl	%eax
	iret
210
/*
 * Forward hardclock to another CPU.  Pushes a trapframe and calls
 * forwarded_hardclock().
 */
	.text
	SUPERALIGN_TEXT
	.globl Xhardclock
Xhardclock:
	PUSH_FRAME
	movl	$KDSEL, %eax	/* reload with kernel's data segment */
	mov	%ax, %ds
	mov	%ax, %es
	movl	$KPSEL, %eax	/* per-cpu data via %fs */
	mov	%ax, %fs

	movl	$0, lapic+LA_EOI	/* End Of Interrupt to APIC */

	movl	PCPU(CURTHREAD),%ebx
	incl	TD_INTR_NESTING_LEVEL(%ebx)	/* mark interrupt context */
	call	forwarded_hardclock
	decl	TD_INTR_NESTING_LEVEL(%ebx)
	MEXITCOUNT
	jmp	doreti			/* common interrupt return path */
234
/*
 * Forward statclock to another CPU.  Pushes a trapframe and calls
 * forwarded_statclock().
 */
	.text
	SUPERALIGN_TEXT
	.globl Xstatclock
Xstatclock:
	PUSH_FRAME
	movl	$KDSEL, %eax	/* reload with kernel's data segment */
	mov	%ax, %ds
	mov	%ax, %es
	movl	$KPSEL, %eax	/* per-cpu data via %fs */
	mov	%ax, %fs

	movl	$0, lapic+LA_EOI	/* End Of Interrupt to APIC */

	FAKE_MCOUNT(13*4(%esp))
	movl	PCPU(CURTHREAD),%ebx
	incl	TD_INTR_NESTING_LEVEL(%ebx)	/* mark interrupt context */
	call	forwarded_statclock
	decl	TD_INTR_NESTING_LEVEL(%ebx)
	MEXITCOUNT
	jmp	doreti			/* common interrupt return path */
259
/*
 * Executed by a CPU when it receives an Xcpuast IPI from another CPU,
 *
 * The other CPU has already executed aston() or need_resched() on our
 * current process, so we simply need to ack the interrupt and return
 * via doreti to run ast().
 */

	.text
	SUPERALIGN_TEXT
	.globl Xcpuast
Xcpuast:
	PUSH_FRAME
	movl	$KDSEL, %eax
	mov	%ax, %ds		/* use KERNEL data segment */
	mov	%ax, %es
	movl	$KPSEL, %eax		/* per-cpu data via %fs */
	mov	%ax, %fs

	movl	$0, lapic+LA_EOI	/* End Of Interrupt to APIC */

	FAKE_MCOUNT(13*4(%esp))

	MEXITCOUNT
	jmp	doreti			/* doreti runs the AST */
285
/*
 * Executed by a CPU when it receives an Xcpustop IPI from another CPU,
 *
 *  - Saves our context in stoppcbs[cpuid].
 *  - Signals its receipt (sets our bit in stopped_cpus).
 *  - Waits for permission to restart (our bit in started_cpus).
 *  - Signals its restart (clears both bits again).
 *
 * Uses a minimal hand-built frame (no PUSH_FRAME): only the registers
 * actually used here are saved.
 */
	.text
	SUPERALIGN_TEXT
	.globl Xcpustop
Xcpustop:
	pushl	%ebp
	movl	%esp, %ebp
	pushl	%eax
	pushl	%ecx
	pushl	%edx
	pushl	%ds			/* save current data segment */
	pushl	%fs

	movl	$KDSEL, %eax
	mov	%ax, %ds		/* use KERNEL data segment */
	movl	$KPSEL, %eax		/* per-cpu data via %fs */
	mov	%ax, %fs

	movl	$0, lapic+LA_EOI	/* End Of Interrupt to APIC */

	movl	PCPU(CPUID), %eax
	imull	$PCB_SIZE, %eax		/* index into stoppcbs[] */
	leal	CNAME(stoppcbs)(%eax), %eax
	pushl	%eax
	call	CNAME(savectx)		/* Save process context */
	addl	$4, %esp

	movl	PCPU(CPUID), %eax	/* %eax = our cpu id below */

	lock
	btsl	%eax, CNAME(stopped_cpus) /* stopped_cpus |= (1<<id) */
1:
	btl	%eax, CNAME(started_cpus) /* while (!(started_cpus & (1<<id))) */
	jnc	1b

	lock
	btrl	%eax, CNAME(started_cpus) /* started_cpus &= ~(1<<id) */
	lock
	btrl	%eax, CNAME(stopped_cpus) /* stopped_cpus &= ~(1<<id) */

	test	%eax, %eax		/* only cpu 0 runs the restart func */
	jnz	2f

	movl	CNAME(cpustop_restartfunc), %eax
	test	%eax, %eax		/* anything registered? */
	jz	2f
	movl	$0, CNAME(cpustop_restartfunc)	/* One-shot */

	call	*%eax
2:
	popl	%fs
	popl	%ds			/* restore previous data segment */
	popl	%edx
	popl	%ecx
	popl	%eax
	movl	%ebp, %esp
	popl	%ebp
	iret
350
351
/*
 * Instantiate the entry points for IRQs 0-31.  The MCOUNT_LABELs
 * bracket the range for the profiler.
 */
MCOUNT_LABEL(bintr)
	FAST_INTR(0,fastintr0)
	FAST_INTR(1,fastintr1)
	FAST_INTR(2,fastintr2)
	FAST_INTR(3,fastintr3)
	FAST_INTR(4,fastintr4)
	FAST_INTR(5,fastintr5)
	FAST_INTR(6,fastintr6)
	FAST_INTR(7,fastintr7)
	FAST_INTR(8,fastintr8)
	FAST_INTR(9,fastintr9)
	FAST_INTR(10,fastintr10)
	FAST_INTR(11,fastintr11)
	FAST_INTR(12,fastintr12)
	FAST_INTR(13,fastintr13)
	FAST_INTR(14,fastintr14)
	FAST_INTR(15,fastintr15)
	FAST_INTR(16,fastintr16)
	FAST_INTR(17,fastintr17)
	FAST_INTR(18,fastintr18)
	FAST_INTR(19,fastintr19)
	FAST_INTR(20,fastintr20)
	FAST_INTR(21,fastintr21)
	FAST_INTR(22,fastintr22)
	FAST_INTR(23,fastintr23)
	FAST_INTR(24,fastintr24)
	FAST_INTR(25,fastintr25)
	FAST_INTR(26,fastintr26)
	FAST_INTR(27,fastintr27)
	FAST_INTR(28,fastintr28)
	FAST_INTR(29,fastintr29)
	FAST_INTR(30,fastintr30)
	FAST_INTR(31,fastintr31)
/* IRQ 0 (clock) additionally records a pending clock interrupt */
#define	CLKINTR_PENDING	movl $1,CNAME(clkintr_pending)
/* Threaded interrupts */
	INTR(0,intr0, CLKINTR_PENDING)
	INTR(1,intr1,)
	INTR(2,intr2,)
	INTR(3,intr3,)
	INTR(4,intr4,)
	INTR(5,intr5,)
	INTR(6,intr6,)
	INTR(7,intr7,)
	INTR(8,intr8,)
	INTR(9,intr9,)
	INTR(10,intr10,)
	INTR(11,intr11,)
	INTR(12,intr12,)
	INTR(13,intr13,)
	INTR(14,intr14,)
	INTR(15,intr15,)
	INTR(16,intr16,)
	INTR(17,intr17,)
	INTR(18,intr18,)
	INTR(19,intr19,)
	INTR(20,intr20,)
	INTR(21,intr21,)
	INTR(22,intr22,)
	INTR(23,intr23,)
	INTR(24,intr24,)
	INTR(25,intr25,)
	INTR(26,intr26,)
	INTR(27,intr27,)
	INTR(28,intr28,)
	INTR(29,intr29,)
	INTR(30,intr30,)
	INTR(31,intr31,)
MCOUNT_LABEL(eintr)
420
/*
 * Executed by a CPU when it receives a RENDEZVOUS IPI from another CPU.
 *
 * - Calls the generic rendezvous action function.
 */
	.text
	SUPERALIGN_TEXT
	.globl	Xrendezvous
Xrendezvous:
	PUSH_FRAME
	movl	$KDSEL, %eax
	mov	%ax, %ds		/* use KERNEL data segment */
	mov	%ax, %es
	movl	$KPSEL, %eax		/* per-cpu data via %fs */
	mov	%ax, %fs

	call	smp_rendezvous_action

	movl	$0, lapic+LA_EOI	/* End Of Interrupt to APIC */
	POP_FRAME
	iret
442
443
	.data

#ifdef COUNT_XINVLTLB_HITS
	.globl	_xhits			/* per-CPU TLB shootdown counters */
_xhits:
	.space	(NCPU * 4), 0
#endif /* COUNT_XINVLTLB_HITS */

/* bitmask of level-triggered interrupt pins, tested by MASK_LEVEL_IRQ */
	.globl	apic_pin_trigger
apic_pin_trigger:
	.long	0

	.text
457