/*
 *	from: vector.s, 386BSD 0.1 unknown origin
 * $FreeBSD: head/sys/i386/i386/apic_vector.s 88322 2001-12-20 23:48:31Z jhb $
 */


#include <machine/apic.h>
#include <machine/smp.h>

/* convert an absolute IRQ# into a bitmask */
#define IRQ_BIT(irq_num)	(1 << (irq_num))

/* make an index into the IO APIC from the IRQ# (2 registers per redirection entry) */
#define REDTBL_IDX(irq_num)	(0x10 + ((irq_num) * 2))

/*
 * Macros to push and pop a full trap frame (dummy error code and trap
 * type, all general registers, and the data/extra/per-CPU segments) on
 * interrupt entry and exit.
 */
#define PUSH_FRAME							\
	pushl	$0 ;		/* dummy error code */			\
	pushl	$0 ;		/* dummy trap type */			\
	pushal ;							\
	pushl	%ds ;		/* save data and extra segments ... */	\
	pushl	%es ;							\
	pushl	%fs

#define POP_FRAME							\
	popl	%fs ;							\
	popl	%es ;							\
	popl	%ds ;							\
	popal ;								\
	addl	$4+4,%esp	/* discard dummy trap type + error code */

/*
 * Macros for interrupt entry, call to handler, and exit.
 *
 * FAST_INTR: entry point for a "fast" interrupt.  Runs the handler
 * directly at interrupt time (no scheduling), EOIs the local APIC,
 * then does the interrupt bookkeeping and returns via doreti.
 */

#define	FAST_INTR(irq_num, vec_name)					\
	.text ;								\
	SUPERALIGN_TEXT ;						\
IDTVEC(vec_name) ;							\
	PUSH_FRAME ;							\
	movl	$KDSEL,%eax ;		/* kernel data/extra segments */ \
	mov	%ax,%ds ;						\
	mov	%ax,%es ;						\
	movl	$KPSEL,%eax ;		/* per-CPU data via %fs */	\
	mov	%ax,%fs ;						\
	FAKE_MCOUNT(13*4(%esp)) ;					\
	movl	PCPU(CURTHREAD),%ebx ;					\
	incl	TD_INTR_NESTING_LEVEL(%ebx) ;				\
	pushl	intr_unit + (irq_num) * 4 ;				\
	call	*intr_handler + (irq_num) * 4 ;	/* do the work ASAP */	\
	addl	$4, %esp ;						\
	movl	$0, lapic+LA_EOI ;	/* End Of Interrupt to APIC */	\
	lock ; 								\
	incl	cnt+V_INTR ;	/* book-keeping can wait */		\
	movl	intr_countp + (irq_num) * 4, %eax ;			\
	lock ; 								\
	incl	(%eax) ;						\
	decl	TD_INTR_NESTING_LEVEL(%ebx) ;				\
	MEXITCOUNT ;							\
	jmp	doreti

/*
 * Address of the IO APIC register-select window and the redirection
 * table index for a given IRQ.  NOTE(review): assumes each
 * int_to_apicintpin[] entry is 16 bytes with the apic address at
 * offset 8 and the redirection index at offset 12 -- confirm against
 * the struct definition if that layout changes.
 */
#define IOAPICADDR(irq_num) CNAME(int_to_apicintpin) + 16 * (irq_num) + 8
#define REDIRIDX(irq_num) CNAME(int_to_apicintpin) + 16 * (irq_num) + 12

/*
 * Mask an IRQ at the IO APIC if it is not masked already; tracks the
 * mask state in apic_imen.  Must be called with the ICU lock idiom.
 */
#define MASK_IRQ(irq_num)						\
	ICU_LOCK ;				/* into critical reg */	\
	testl	$IRQ_BIT(irq_num), apic_imen ;				\
	jne	7f ;			/* masked, don't mask */	\
	orl	$IRQ_BIT(irq_num), apic_imen ;	/* set the mask bit */	\
	movl	IOAPICADDR(irq_num), %ecx ;	/* ioapic addr */	\
	movl	REDIRIDX(irq_num), %eax ;	/* get the index */	\
	movl	%eax, (%ecx) ;			/* write the index */	\
	movl	IOAPIC_WINDOW(%ecx), %eax ;	/* current value */	\
	orl	$IOART_INTMASK, %eax ;		/* set the mask */	\
	movl	%eax, IOAPIC_WINDOW(%ecx) ;	/* new value */		\
7: ;						/* already masked */	\
	ICU_UNLOCK
/*
 * Test to see whether we are handling an edge or level triggered INT.
 *  Level-triggered INTs must still be masked as we don't clear the source,
 *  and the EOI cycle would cause redundant INTs to occur.
 */
#define MASK_LEVEL_IRQ(irq_num)						\
	testl	$IRQ_BIT(irq_num), apic_pin_trigger ;			\
	jz	9f ;				/* edge, don't mask */	\
	MASK_IRQ(irq_num) ;						\
9:


#ifdef APIC_INTR_REORDER
/* EOI the local APIC only if this IRQ's in-service bit is set. */
#define EOI_IRQ(irq_num)						\
	movl	apic_isrbit_location + 8 * (irq_num), %eax ;		\
	movl	(%eax), %eax ;						\
	testl	apic_isrbit_location + 4 + 8 * (irq_num), %eax ;	\
	jz	9f ;				/* not active */	\
	movl	$0, lapic+LA_EOI ;					\
9:

#else
/* EOI the local APIC only if the IRQ is active in ISR1. */
#define EOI_IRQ(irq_num)						\
	testl	$IRQ_BIT(irq_num), lapic+LA_ISR1;			\
	jz	9f	;			/* not active */	\
	movl	$0, lapic+LA_EOI;					\
9:
#endif


/*
 * Test to see if the source is currently masked, clear if so.
 * Symmetric with MASK_IRQ above: both track mask state in apic_imen
 * (the original inconsistently used `_apic_imen' here, an obsolete
 * underscore-prefixed alias that no other reference in this file uses).
 */
#define UNMASK_IRQ(irq_num)						\
	ICU_LOCK ;				/* into critical reg */	\
	testl	$IRQ_BIT(irq_num), apic_imen ;				\
	je	7f ;			/* bit clear, not masked */	\
	andl	$~IRQ_BIT(irq_num), apic_imen ;	/* clear mask bit */	\
	movl	IOAPICADDR(irq_num), %ecx ;	/* ioapic addr */	\
	movl	REDIRIDX(irq_num), %eax ;	/* get the index */	\
	movl	%eax, (%ecx) ;			/* write the index */	\
	movl	IOAPIC_WINDOW(%ecx), %eax ;	/* current value */	\
	andl	$~IOART_INTMASK, %eax ;		/* clear the mask */	\
	movl	%eax, IOAPIC_WINDOW(%ecx) ;	/* new value */		\
7: ;						/* already unmasked */	\
	ICU_UNLOCK

/*
 * Slow, threaded interrupts.
 *
 * XXX Most of the parameters here are obsolete.  Fix this when we're
 * done.
 * XXX we really shouldn't return via doreti if we just schedule the
 * interrupt handler and don't run anything.  We could just do an
 * iret.  FIXME.
 */
#define	INTR(irq_num, vec_name, maybe_extra_ipending)			\
	.text ;								\
	SUPERALIGN_TEXT ;						\
/* XintrNN: entry point used by the IDT for hardware interrupts */	\
IDTVEC(vec_name) ;							\
	PUSH_FRAME ;							\
	movl	$KDSEL, %eax ;	/* reload with kernel's data segment */	\
	mov	%ax, %ds ;						\
	mov	%ax, %es ;						\
	movl	$KPSEL, %eax ;						\
	mov	%ax, %fs ;						\
;									\
	maybe_extra_ipending ;						\
;									\
	MASK_LEVEL_IRQ(irq_num) ;					\
	EOI_IRQ(irq_num) ;						\
0: ;									\
	movl	PCPU(CURTHREAD),%ebx ;					\
	incl	TD_INTR_NESTING_LEVEL(%ebx) ;				\
;	 								\
	FAKE_MCOUNT(13*4(%esp)) ;		/* XXX avoid dbl cnt */ \
	pushl	$irq_num;			/* pass the IRQ */	\
	call	sched_ithd ;						\
	addl	$4, %esp ;		/* discard the parameter */	\
;									\
	decl	TD_INTR_NESTING_LEVEL(%ebx) ;				\
	MEXITCOUNT ;							\
	jmp	doreti

/*
 * Handle "spurious INTerrupts".
 * Notes:
 *  This is different than the "spurious INTerrupt" generated by an
 *   8259 PIC for missing INTs.  See the APIC documentation for details.
 *  This routine should NOT do an 'EOI' cycle.
 */
	.text
	SUPERALIGN_TEXT
	.globl Xspuriousint
Xspuriousint:

	/* No EOI cycle used here */

	iret

/*
 * Handle TLB shootdowns: reload %cr3 to flush this CPU's TLB, then EOI.
 */
	.text
	SUPERALIGN_TEXT
	.globl	Xinvltlb
Xinvltlb:
	pushl	%eax

#ifdef COUNT_XINVLTLB_HITS
	pushl	%fs
	movl	$KPSEL, %eax		/* per-CPU data to fetch CPUID */
	mov	%ax, %fs
	movl	PCPU(CPUID), %eax
	popl	%fs
	ss
	incl	_xhits(,%eax,4)
#endif /* COUNT_XINVLTLB_HITS */

	movl	%cr3, %eax		/* invalidate the TLB */
	movl	%eax, %cr3

	ss				/* stack segment, avoid %ds load */
	movl	$0, lapic+LA_EOI	/* End Of Interrupt to APIC */

	popl	%eax
	iret

/*
 * Forward hardclock to another CPU.  Pushes a trapframe and calls
 * forwarded_hardclock().
 */
	.text
	SUPERALIGN_TEXT
	.globl Xhardclock
Xhardclock:
	PUSH_FRAME
	movl	$KDSEL, %eax	/* reload with kernel's data segment */
	mov	%ax, %ds
	mov	%ax, %es
	movl	$KPSEL, %eax
	mov	%ax, %fs

	movl	$0, lapic+LA_EOI	/* End Of Interrupt to APIC */

	movl	PCPU(CURTHREAD),%ebx
	incl	TD_INTR_NESTING_LEVEL(%ebx)
	call	forwarded_hardclock
	decl	TD_INTR_NESTING_LEVEL(%ebx)
	MEXITCOUNT
	jmp	doreti

/*
 * Forward statclock to another CPU.  Pushes a trapframe and calls
 * forwarded_statclock().
 */
	.text
	SUPERALIGN_TEXT
	.globl Xstatclock
Xstatclock:
	PUSH_FRAME
	movl	$KDSEL, %eax	/* reload with kernel's data segment */
	mov	%ax, %ds
	mov	%ax, %es
	movl	$KPSEL, %eax
	mov	%ax, %fs

	movl	$0, lapic+LA_EOI	/* End Of Interrupt to APIC */

	FAKE_MCOUNT(13*4(%esp))
	movl	PCPU(CURTHREAD),%ebx
	incl	TD_INTR_NESTING_LEVEL(%ebx)
	call	forwarded_statclock
	decl	TD_INTR_NESTING_LEVEL(%ebx)
	MEXITCOUNT
	jmp	doreti

/*
 * Executed by a CPU when it receives an Xcpuast IPI from another CPU,
 *
 * The other CPU has already executed aston() or need_resched() on our
 * current process, so we simply need to ack the interrupt and return
 * via doreti to run ast().
 */

	.text
	SUPERALIGN_TEXT
	.globl Xcpuast
Xcpuast:
	PUSH_FRAME
	movl	$KDSEL, %eax
	mov	%ax, %ds		/* use KERNEL data segment */
	mov	%ax, %es
	movl	$KPSEL, %eax
	mov	%ax, %fs

	movl	$0, lapic+LA_EOI	/* End Of Interrupt to APIC */

	FAKE_MCOUNT(13*4(%esp))

	MEXITCOUNT
	jmp	doreti

/*
 * Executed by a CPU when it receives an Xcpustop IPI from another CPU,
 *
 *  - Signals its receipt.
 *  - Waits for permission to restart.
 *  - Signals its restart.
 */
	.text
	SUPERALIGN_TEXT
	.globl Xcpustop
Xcpustop:
	pushl	%ebp
	movl	%esp, %ebp
	pushl	%eax
	pushl	%ecx
	pushl	%edx
	pushl	%ds			/* save current data segment */
	pushl	%fs

	movl	$KDSEL, %eax
	mov	%ax, %ds		/* use KERNEL data segment */
	movl	$KPSEL, %eax
	mov	%ax, %fs

	movl	$0, lapic+LA_EOI	/* End Of Interrupt to APIC */

	movl	PCPU(CPUID), %eax
	imull	$PCB_SIZE, %eax
	leal	CNAME(stoppcbs)(%eax), %eax
	pushl	%eax
	call	CNAME(savectx)		/* Save process context */
	addl	$4, %esp

	movl	PCPU(CPUID), %eax

	lock
	btsl	%eax, CNAME(stopped_cpus) /* stopped_cpus |= (1<<id) */
1:
	btl	%eax, CNAME(started_cpus) /* while (!(started_cpus & (1<<id))) */
	jnc	1b

	lock
	btrl	%eax, CNAME(started_cpus) /* started_cpus &= ~(1<<id) */
	lock
	btrl	%eax, CNAME(stopped_cpus) /* stopped_cpus &= ~(1<<id) */

	test	%eax, %eax		/* %eax is still CPUID: only CPU 0 */
	jnz	2f			/*  runs the restart function */

	movl	CNAME(cpustop_restartfunc), %eax
	test	%eax, %eax
	jz	2f
	movl	$0, CNAME(cpustop_restartfunc)	/* One-shot */

	call	*%eax
2:
	popl	%fs
	popl	%ds			/* restore previous data segment */
	popl	%edx
	popl	%ecx
	popl	%eax
	movl	%ebp, %esp
	popl	%ebp
	iret


/* Instantiate the fast and threaded interrupt entry points for IRQs 0-31. */
MCOUNT_LABEL(bintr)
	FAST_INTR(0,fastintr0)
	FAST_INTR(1,fastintr1)
	FAST_INTR(2,fastintr2)
	FAST_INTR(3,fastintr3)
	FAST_INTR(4,fastintr4)
	FAST_INTR(5,fastintr5)
	FAST_INTR(6,fastintr6)
	FAST_INTR(7,fastintr7)
	FAST_INTR(8,fastintr8)
	FAST_INTR(9,fastintr9)
	FAST_INTR(10,fastintr10)
	FAST_INTR(11,fastintr11)
	FAST_INTR(12,fastintr12)
	FAST_INTR(13,fastintr13)
	FAST_INTR(14,fastintr14)
	FAST_INTR(15,fastintr15)
	FAST_INTR(16,fastintr16)
	FAST_INTR(17,fastintr17)
	FAST_INTR(18,fastintr18)
	FAST_INTR(19,fastintr19)
	FAST_INTR(20,fastintr20)
	FAST_INTR(21,fastintr21)
	FAST_INTR(22,fastintr22)
	FAST_INTR(23,fastintr23)
	FAST_INTR(24,fastintr24)
	FAST_INTR(25,fastintr25)
	FAST_INTR(26,fastintr26)
	FAST_INTR(27,fastintr27)
	FAST_INTR(28,fastintr28)
	FAST_INTR(29,fastintr29)
	FAST_INTR(30,fastintr30)
	FAST_INTR(31,fastintr31)
#define	CLKINTR_PENDING	movl $1,CNAME(clkintr_pending)
/* Threaded interrupts */
	INTR(0,intr0, CLKINTR_PENDING)
	INTR(1,intr1,)
	INTR(2,intr2,)
	INTR(3,intr3,)
	INTR(4,intr4,)
	INTR(5,intr5,)
	INTR(6,intr6,)
	INTR(7,intr7,)
	INTR(8,intr8,)
	INTR(9,intr9,)
	INTR(10,intr10,)
	INTR(11,intr11,)
	INTR(12,intr12,)
	INTR(13,intr13,)
	INTR(14,intr14,)
	INTR(15,intr15,)
	INTR(16,intr16,)
	INTR(17,intr17,)
	INTR(18,intr18,)
	INTR(19,intr19,)
	INTR(20,intr20,)
	INTR(21,intr21,)
	INTR(22,intr22,)
	INTR(23,intr23,)
	INTR(24,intr24,)
	INTR(25,intr25,)
	INTR(26,intr26,)
	INTR(27,intr27,)
	INTR(28,intr28,)
	INTR(29,intr29,)
	INTR(30,intr30,)
	INTR(31,intr31,)
MCOUNT_LABEL(eintr)

/*
 * Executed by a CPU when it receives a RENDEZVOUS IPI from another CPU.
 *
 * - Calls the generic rendezvous action function.
 */
	.text
	SUPERALIGN_TEXT
	.globl	Xrendezvous
Xrendezvous:
	PUSH_FRAME
	movl	$KDSEL, %eax
	mov	%ax, %ds		/* use KERNEL data segment */
	mov	%ax, %es
	movl	$KPSEL, %eax
	mov	%ax, %fs

	call	smp_rendezvous_action

	movl	$0, lapic+LA_EOI	/* End Of Interrupt to APIC */
	POP_FRAME
	iret


442	.data
443
444#ifdef COUNT_XINVLTLB_HITS
445	.globl	_xhits
446_xhits:
447	.space	(NCPU * 4), 0
448#endif /* COUNT_XINVLTLB_HITS */
449
450	.globl	apic_pin_trigger
451apic_pin_trigger:
452	.long	0
453
454	.text
455