/*
 *	from: vector.s, 386BSD 0.1 unknown origin
 * $FreeBSD: head/sys/i386/i386/apic_vector.s 93264 2002-03-27 05:39:23Z dillon $
 */


#include <machine/apic.h>
#include <machine/smp.h>

/* convert an absolute IRQ# into a bitmask */
#define IRQ_BIT(irq_num)	(1 << (irq_num))

/* make an index into the IO APIC from the IRQ# (2 registers per entry) */
#define REDTBL_IDX(irq_num)	(0x10 + ((irq_num) * 2))

/*
 * Build a trap frame on the stack: dummy error code and trap type,
 * all general registers, then the data/extra/fs segment registers.
 * Layout matches what doreti expects to unwind.
 */
#define PUSH_FRAME							\
	pushl	$0 ;		/* dummy error code */			\
	pushl	$0 ;		/* dummy trap type */			\
	pushal ;		/* 8 ints */				\
	pushl	%ds ;		/* save data and extra segments ... */	\
	pushl	%es ;							\
	pushl	%fs

/*
 * Fake the frame that an interrupt would have pushed, so FAST_UNPEND
 * can call an interrupt handler directly.  The eflags/cs/eip triple is
 * followed by dummy error code/trap type and 11 uninitialized slots
 * standing in for the register/segment saves of PUSH_FRAME.
 */
#define PUSH_DUMMY							\
	pushfl ;		/* eflags */				\
	pushl	%cs ;		/* cs */				\
	pushl	12(%esp) ;	/* original caller eip */		\
	pushl	$0 ;		/* dummy error code */			\
	pushl	$0 ;		/* dummy trap type */			\
	subl	$11*4,%esp ;

/* unwind the frame built by PUSH_FRAME */
#define POP_FRAME							\
	popl	%fs ;							\
	popl	%es ;							\
	popl	%ds ;							\
	popal ;								\
	addl	$4+4,%esp	/* discard trap type + error code */

/* discard the 16-dword frame built by PUSH_DUMMY */
#define POP_DUMMY							\
	addl	$16*4,%esp

/*
 * Per-IRQ I/O APIC register address and redirection-table index, read
 * from int_to_apicintpin[] (16-byte entries; fields at offsets 8/12).
 */
#define IOAPICADDR(irq_num) CNAME(int_to_apicintpin) + 16 * (irq_num) + 8
#define REDIRIDX(irq_num) CNAME(int_to_apicintpin) + 16 * (irq_num) + 12

/*
 * Mask an interrupt source in the I/O APIC unless it is already
 * masked: record it in apic_imen, then set the mask bit in the pin's
 * redirection-table entry via the index/window register pair.
 */
#define MASK_IRQ(irq_num)						\
	ICU_LOCK ;				/* into critical reg */	\
	testl	$IRQ_BIT(irq_num), apic_imen ;				\
	jne	7f ;			/* masked, don't mask */	\
	orl	$IRQ_BIT(irq_num), apic_imen ;	/* set the mask bit */	\
	movl	IOAPICADDR(irq_num), %ecx ;	/* ioapic addr */	\
	movl	REDIRIDX(irq_num), %eax ;	/* get the index */	\
	movl	%eax, (%ecx) ;			/* write the index */	\
	movl	IOAPIC_WINDOW(%ecx), %eax ;	/* current value */	\
	orl	$IOART_INTMASK, %eax ;		/* set the mask */	\
	movl	%eax, IOAPIC_WINDOW(%ecx) ;	/* new value */		\
7: ;						/* already masked */	\
	ICU_UNLOCK
/*
 * Test to see whether we are handling an edge or level triggered INT.
 *  Level-triggered INTs must still be masked as we don't clear the source,
 *  and the EOI cycle would cause redundant INTs to occur.
 */
#define MASK_LEVEL_IRQ(irq_num)						\
	testl	$IRQ_BIT(irq_num), apic_pin_trigger ;			\
	jz	9f ;				/* edge, don't mask */	\
	MASK_IRQ(irq_num) ;						\
9:


#ifdef APIC_INTR_REORDER
/*
 * EOI the local APIC only if this IRQ is actually in service.
 * apic_isrbit_location[] holds, per IRQ (8 bytes each), a pointer to
 * the relevant ISR word (offset 0) and the bit mask to test (offset 4).
 */
#define EOI_IRQ(irq_num)						\
	movl	apic_isrbit_location + 8 * (irq_num), %eax ;		\
	movl	(%eax), %eax ;						\
	testl	apic_isrbit_location + 4 + 8 * (irq_num), %eax ;	\
	jz	9f ;				/* not active */	\
	movl	$0, lapic+LA_EOI ;					\
9:

#else
/* EOI only if the IRQ's bit is set in the local APIC ISR */
#define EOI_IRQ(irq_num)						\
	testl	$IRQ_BIT(irq_num), lapic+LA_ISR1;			\
	jz	9f	;			/* not active */	\
	movl	$0, lapic+LA_EOI;					\
9:
#endif


/*
 * Test to see if the source is currently masked, clear if so.
 * Mirror image of MASK_IRQ: clear the apic_imen bit and the mask bit
 * in the pin's redirection-table entry.
 */
#define UNMASK_IRQ(irq_num)					\
	ICU_LOCK ;				/* into critical reg */	\
	testl	$IRQ_BIT(irq_num), apic_imen ;				\
	je	7f ;			/* bit clear, not masked */	\
	andl	$~IRQ_BIT(irq_num), apic_imen ;/* clear mask bit */	\
	movl	IOAPICADDR(irq_num), %ecx ;	/* ioapic addr */	\
	movl	REDIRIDX(irq_num), %eax ;	/* get the index */	\
	movl	%eax, (%ecx) ;			/* write the index */	\
	movl	IOAPIC_WINDOW(%ecx), %eax ;	/* current value */	\
	andl	$~IOART_INTMASK, %eax ;		/* clear the mask */	\
	movl	%eax, IOAPIC_WINDOW(%ecx) ;	/* new value */		\
7: ;						/* already unmasked */	\
	ICU_UNLOCK

/*
 * Test to see whether we are handling an edge or level triggered INT.
 *  Level-triggered INTs have to be unmasked.
 */
#define UNMASK_LEVEL_IRQ(irq_num)					\
	testl	$IRQ_BIT(irq_num), apic_pin_trigger ;			\
	jz	9f ;			/* edge, don't unmask */	\
	UNMASK_IRQ(irq_num) ;						\
9:

/*
 * Macros for interrupt entry, call to handler, and exit.
 */

/*
 * Fast interrupt: run the handler immediately at interrupt time.
 * If the current thread is in a critical section, instead mark the
 * IRQ pending in FPENDING (masking level-triggered sources) and let
 * unpend() restart it later via FAST_UNPEND.
 */
#define	FAST_INTR(irq_num, vec_name)					\
	.text ;								\
	SUPERALIGN_TEXT ;						\
IDTVEC(vec_name) ;							\
	PUSH_FRAME ;							\
	movl	$KDSEL,%eax ;						\
	mov	%ax,%ds ;						\
	mov	%ax,%es ;						\
	movl	$KPSEL,%eax ;						\
	mov	%ax,%fs ;						\
	FAKE_MCOUNT(13*4(%esp)) ;					\
	movl	PCPU(CURTHREAD),%ebx ;					\
	cmpl	$0,TD_CRITNEST(%ebx) ;					\
	je	1f ;							\
;									\
	movl	$1,PCPU(INT_PENDING) ;	/* defer: critical section */	\
	orl	$IRQ_BIT(irq_num),PCPU(FPENDING) ;			\
	MASK_LEVEL_IRQ(irq_num) ;					\
	movl	$0, lapic+LA_EOI ;					\
	jmp	10f ;							\
1: ;									\
	incl	TD_CRITNEST(%ebx) ;					\
	incl	TD_INTR_NESTING_LEVEL(%ebx) ;				\
	pushl	intr_unit + (irq_num) * 4 ;				\
	call	*intr_handler + (irq_num) * 4 ;	/* do the work ASAP */	\
	addl	$4, %esp ;						\
	movl	$0, lapic+LA_EOI ;					\
	lock ; 								\
	incl	cnt+V_INTR ;	/* book-keeping can wait */		\
	movl	intr_countp + (irq_num) * 4, %eax ;			\
	lock ; 								\
	incl	(%eax) ;						\
	decl	TD_CRITNEST(%ebx) ;					\
	cmpl	$0,PCPU(INT_PENDING) ;					\
	je	2f ;							\
;									\
	call	unpend ;						\
2: ;									\
	decl	TD_INTR_NESTING_LEVEL(%ebx) ;				\
10: ;									\
	MEXITCOUNT ;							\
	jmp	doreti

/*
 * Restart a fast interrupt that was held up by a critical section.
 * This routine is called from unpend().  unpend() ensures we are
 * in a critical section and deals with the interrupt nesting level
 * for us.  If we previously masked the irq, we have to unmask it.
 *
 * We have a choice.  We can regenerate the irq using the 'int'
 * instruction or we can create a dummy frame and call the interrupt
 * handler directly.  I've chosen to use the dummy-frame method.
 */
#define	FAST_UNPEND(irq_num, vec_name)					\
	.text ;								\
	SUPERALIGN_TEXT ;						\
IDTVEC(vec_name) ;							\
;									\
	pushl	%ebp ;							\
	movl	%esp, %ebp ;						\
	PUSH_DUMMY ;							\
	pushl	intr_unit + (irq_num) * 4 ;				\
	call	*intr_handler + (irq_num) * 4 ;	/* do the work ASAP */	\
	addl	$4, %esp ;						\
	lock ; 								\
	incl	cnt+V_INTR ;	/* book-keeping can wait */		\
	movl	intr_countp + (irq_num) * 4, %eax ;			\
	lock ; 								\
	incl	(%eax) ;						\
	UNMASK_LEVEL_IRQ(irq_num) ;					\
	POP_DUMMY ;							\
	popl %ebp ;							\
	ret


/*
 * Slow, threaded interrupts: mask + EOI the source, then schedule the
 * interrupt thread via sched_ithd().  If the current thread is in a
 * critical section the IRQ is only recorded in IPENDING for unpend().
 *
 * XXX Most of the parameters here are obsolete.  Fix this when we're
 * done.
 * XXX we really shouldn't return via doreti if we just schedule the
 * interrupt handler and don't run anything.  We could just do an
 * iret.  FIXME.
 */
#define	INTR(irq_num, vec_name, maybe_extra_ipending)			\
	.text ;								\
	SUPERALIGN_TEXT ;						\
/* _XintrNN: entry point used by IDT/HWIs via _vec[]. */		\
IDTVEC(vec_name) ;							\
	PUSH_FRAME ;							\
	movl	$KDSEL, %eax ;	/* reload with kernel's data segment */	\
	mov	%ax, %ds ;						\
	mov	%ax, %es ;						\
	movl	$KPSEL, %eax ;						\
	mov	%ax, %fs ;						\
;									\
	maybe_extra_ipending ;						\
;									\
	MASK_LEVEL_IRQ(irq_num) ;					\
	EOI_IRQ(irq_num) ;						\
;									\
	movl	PCPU(CURTHREAD),%ebx ;					\
	cmpl	$0,TD_CRITNEST(%ebx) ;					\
	je	1f ;							\
	movl	$1,PCPU(INT_PENDING) ;	/* defer: critical section */	\
	orl	$IRQ_BIT(irq_num),PCPU(IPENDING) ;			\
	jmp	10f ;							\
1: ;									\
	incl	TD_INTR_NESTING_LEVEL(%ebx) ;				\
;	 								\
	FAKE_MCOUNT(13*4(%esp)) ;		/* XXX avoid dbl cnt */ \
	cmpl	$0,PCPU(INT_PENDING) ;					\
	je	9f ;							\
	call	unpend ;						\
9: ;									\
	pushl	$irq_num;			/* pass the IRQ */	\
	call	sched_ithd ;						\
	addl	$4, %esp ;		/* discard the parameter */	\
;									\
	decl	TD_INTR_NESTING_LEVEL(%ebx) ;				\
10: ;									\
	MEXITCOUNT ;							\
	jmp	doreti

/*
 * Handle "spurious INTerrupts".
 * Notes:
 *  This is different than the "spurious INTerrupt" generated by an
 *   8259 PIC for missing INTs.  See the APIC documentation for details.
 *  This routine should NOT do an 'EOI' cycle.
 */
	.text
	SUPERALIGN_TEXT
	.globl Xspuriousint
Xspuriousint:

	/* No EOI cycle used here */

	iret

/*
 * Handle TLB shootdowns: reload %cr3 to flush the TLB, then EOI.
 * Only %eax is saved/restored; memory accesses use an explicit %ss
 * override so we never have to load a kernel %ds.
 */
	.text
	SUPERALIGN_TEXT
	.globl	Xinvltlb
Xinvltlb:
	pushl	%eax

#ifdef COUNT_XINVLTLB_HITS
	pushl	%fs
	movl	$KPSEL, %eax
	mov	%ax, %fs
	movl	PCPU(CPUID), %eax
	popl	%fs
	ss
	incl	_xhits(,%eax,4)
#endif /* COUNT_XINVLTLB_HITS */

	movl	%cr3, %eax		/* invalidate the TLB */
	movl	%eax, %cr3

	ss				/* stack segment, avoid %ds load */
	movl	$0, lapic+LA_EOI	/* End Of Interrupt to APIC */

	popl	%eax
	iret

/*
 * Forward hardclock to another CPU.  Pushes a trapframe and calls
 * forwarded_hardclock().  In a critical section the event is only
 * recorded in SPENDING (bit 0) for later delivery by unpend().
 */
	.text
	SUPERALIGN_TEXT
	.globl Xhardclock
Xhardclock:
	PUSH_FRAME
	movl	$KDSEL, %eax	/* reload with kernel's data segment */
	mov	%ax, %ds
	mov	%ax, %es
	movl	$KPSEL, %eax
	mov	%ax, %fs

	movl	$0, lapic+LA_EOI	/* End Of Interrupt to APIC */

	movl	PCPU(CURTHREAD),%ebx
	cmpl	$0,TD_CRITNEST(%ebx)
	je	1f
	movl	$1,PCPU(INT_PENDING)
	orl	$1,PCPU(SPENDING);
	jmp	10f
1:
	incl	TD_INTR_NESTING_LEVEL(%ebx)
	call	forwarded_hardclock
	decl	TD_INTR_NESTING_LEVEL(%ebx)
10:
	MEXITCOUNT
	jmp	doreti

/*
 * Forward statclock to another CPU.  Pushes a trapframe and calls
 * forwarded_statclock().  In a critical section the event is only
 * recorded in SPENDING (bit 1) for later delivery by unpend().
 */
	.text
	SUPERALIGN_TEXT
	.globl Xstatclock
Xstatclock:
	PUSH_FRAME
	movl	$KDSEL, %eax	/* reload with kernel's data segment */
	mov	%ax, %ds
	mov	%ax, %es
	movl	$KPSEL, %eax
	mov	%ax, %fs

	movl	$0, lapic+LA_EOI	/* End Of Interrupt to APIC */

	FAKE_MCOUNT(13*4(%esp))

	movl	PCPU(CURTHREAD),%ebx
	cmpl	$0,TD_CRITNEST(%ebx)
	je	1f
	movl	$1,PCPU(INT_PENDING)
	orl	$2,PCPU(SPENDING);
	jmp	10f
1:
	incl	TD_INTR_NESTING_LEVEL(%ebx)
	call	forwarded_statclock
	decl	TD_INTR_NESTING_LEVEL(%ebx)
10:
	MEXITCOUNT
	jmp	doreti

/*
 * Executed by a CPU when it receives an Xcpuast IPI from another CPU,
 *
 * The other CPU has already executed aston() or need_resched() on our
 * current process, so we simply need to ack the interrupt and return
 * via doreti to run ast().
 */

	.text
	SUPERALIGN_TEXT
	.globl Xcpuast
Xcpuast:
	PUSH_FRAME
	movl	$KDSEL, %eax
	mov	%ax, %ds		/* use KERNEL data segment */
	mov	%ax, %es
	movl	$KPSEL, %eax
	mov	%ax, %fs

	movl	$0, lapic+LA_EOI	/* End Of Interrupt to APIC */

	FAKE_MCOUNT(13*4(%esp))

	MEXITCOUNT
	jmp	doreti

/*
 * Executed by a CPU when it receives an Xcpustop IPI from another CPU,
 *
 *  - Signals its receipt.
 *  - Waits for permission to restart.
 *  - Signals its restart.
 *
 * On restart, only CPU 0 (%eax == 0 below) runs and then clears the
 * one-shot cpustop_restartfunc hook, if one was set.
 */
	.text
	SUPERALIGN_TEXT
	.globl Xcpustop
Xcpustop:
	pushl	%ebp
	movl	%esp, %ebp
	pushl	%eax
	pushl	%ecx
	pushl	%edx
	pushl	%ds			/* save current data segment */
	pushl	%fs

	movl	$KDSEL, %eax
	mov	%ax, %ds		/* use KERNEL data segment */
	movl	$KPSEL, %eax
	mov	%ax, %fs

	movl	$0, lapic+LA_EOI	/* End Of Interrupt to APIC */

	movl	PCPU(CPUID), %eax
	imull	$PCB_SIZE, %eax
	leal	CNAME(stoppcbs)(%eax), %eax
	pushl	%eax
	call	CNAME(savectx)		/* Save process context */
	addl	$4, %esp

	movl	PCPU(CPUID), %eax

	lock
	btsl	%eax, CNAME(stopped_cpus) /* stopped_cpus |= (1<<id) */
1:
	btl	%eax, CNAME(started_cpus) /* while (!(started_cpus & (1<<id))) */
	jnc	1b

	lock
	btrl	%eax, CNAME(started_cpus) /* started_cpus &= ~(1<<id) */
	lock
	btrl	%eax, CNAME(stopped_cpus) /* stopped_cpus &= ~(1<<id) */

	test	%eax, %eax		/* only CPU 0 runs the hook */
	jnz	2f

	movl	CNAME(cpustop_restartfunc), %eax
	test	%eax, %eax
	jz	2f
	movl	$0, CNAME(cpustop_restartfunc)	/* One-shot */

	call	*%eax
2:
	popl	%fs
	popl	%ds			/* restore previous data segment */
	popl	%edx
	popl	%ecx
	popl	%eax
	movl	%ebp, %esp
	popl	%ebp
	iret


/*
 * Instantiate the 32 fast-interrupt, threaded-interrupt, and
 * fast-unpend entry points, bracketed by mcount labels for profiling.
 */
MCOUNT_LABEL(bintr)
	FAST_INTR(0,fastintr0)
	FAST_INTR(1,fastintr1)
	FAST_INTR(2,fastintr2)
	FAST_INTR(3,fastintr3)
	FAST_INTR(4,fastintr4)
	FAST_INTR(5,fastintr5)
	FAST_INTR(6,fastintr6)
	FAST_INTR(7,fastintr7)
	FAST_INTR(8,fastintr8)
	FAST_INTR(9,fastintr9)
	FAST_INTR(10,fastintr10)
	FAST_INTR(11,fastintr11)
	FAST_INTR(12,fastintr12)
	FAST_INTR(13,fastintr13)
	FAST_INTR(14,fastintr14)
	FAST_INTR(15,fastintr15)
	FAST_INTR(16,fastintr16)
	FAST_INTR(17,fastintr17)
	FAST_INTR(18,fastintr18)
	FAST_INTR(19,fastintr19)
	FAST_INTR(20,fastintr20)
	FAST_INTR(21,fastintr21)
	FAST_INTR(22,fastintr22)
	FAST_INTR(23,fastintr23)
	FAST_INTR(24,fastintr24)
	FAST_INTR(25,fastintr25)
	FAST_INTR(26,fastintr26)
	FAST_INTR(27,fastintr27)
	FAST_INTR(28,fastintr28)
	FAST_INTR(29,fastintr29)
	FAST_INTR(30,fastintr30)
	FAST_INTR(31,fastintr31)
#define	CLKINTR_PENDING	movl $1,CNAME(clkintr_pending)
/* Threaded interrupts */
	INTR(0,intr0, CLKINTR_PENDING)
	INTR(1,intr1,)
	INTR(2,intr2,)
	INTR(3,intr3,)
	INTR(4,intr4,)
	INTR(5,intr5,)
	INTR(6,intr6,)
	INTR(7,intr7,)
	INTR(8,intr8,)
	INTR(9,intr9,)
	INTR(10,intr10,)
	INTR(11,intr11,)
	INTR(12,intr12,)
	INTR(13,intr13,)
	INTR(14,intr14,)
	INTR(15,intr15,)
	INTR(16,intr16,)
	INTR(17,intr17,)
	INTR(18,intr18,)
	INTR(19,intr19,)
	INTR(20,intr20,)
	INTR(21,intr21,)
	INTR(22,intr22,)
	INTR(23,intr23,)
	INTR(24,intr24,)
	INTR(25,intr25,)
	INTR(26,intr26,)
	INTR(27,intr27,)
	INTR(28,intr28,)
	INTR(29,intr29,)
	INTR(30,intr30,)
	INTR(31,intr31,)

	FAST_UNPEND(0,fastunpend0)
	FAST_UNPEND(1,fastunpend1)
	FAST_UNPEND(2,fastunpend2)
	FAST_UNPEND(3,fastunpend3)
	FAST_UNPEND(4,fastunpend4)
	FAST_UNPEND(5,fastunpend5)
	FAST_UNPEND(6,fastunpend6)
	FAST_UNPEND(7,fastunpend7)
	FAST_UNPEND(8,fastunpend8)
	FAST_UNPEND(9,fastunpend9)
	FAST_UNPEND(10,fastunpend10)
	FAST_UNPEND(11,fastunpend11)
	FAST_UNPEND(12,fastunpend12)
	FAST_UNPEND(13,fastunpend13)
	FAST_UNPEND(14,fastunpend14)
	FAST_UNPEND(15,fastunpend15)
	FAST_UNPEND(16,fastunpend16)
	FAST_UNPEND(17,fastunpend17)
	FAST_UNPEND(18,fastunpend18)
	FAST_UNPEND(19,fastunpend19)
	FAST_UNPEND(20,fastunpend20)
	FAST_UNPEND(21,fastunpend21)
	FAST_UNPEND(22,fastunpend22)
	FAST_UNPEND(23,fastunpend23)
	FAST_UNPEND(24,fastunpend24)
	FAST_UNPEND(25,fastunpend25)
	FAST_UNPEND(26,fastunpend26)
	FAST_UNPEND(27,fastunpend27)
	FAST_UNPEND(28,fastunpend28)
	FAST_UNPEND(29,fastunpend29)
	FAST_UNPEND(30,fastunpend30)
	FAST_UNPEND(31,fastunpend31)
MCOUNT_LABEL(eintr)

/*
 * Executed by a CPU when it receives a RENDEZVOUS IPI from another CPU.
 *
 * - Calls the generic rendezvous action function.
 */
	.text
	SUPERALIGN_TEXT
	.globl	Xrendezvous
Xrendezvous:
	PUSH_FRAME
	movl	$KDSEL, %eax
	mov	%ax, %ds		/* use KERNEL data segment */
	mov	%ax, %es
	movl	$KPSEL, %eax
	mov	%ax, %fs

	call	smp_rendezvous_action

	movl	$0, lapic+LA_EOI	/* End Of Interrupt to APIC */
	POP_FRAME
	iret


570
571	.data
572
573#ifdef COUNT_XINVLTLB_HITS
574	.globl	_xhits
575_xhits:
576	.space	(NCPU * 4), 0
577#endif /* COUNT_XINVLTLB_HITS */
578
579	.globl	apic_pin_trigger
580apic_pin_trigger:
581	.long	0
582
583	.text
584