/* apic_vector.s, revision 91315 */
/*
 *	from: vector.s, 386BSD 0.1 unknown origin
 * $FreeBSD: head/sys/i386/i386/apic_vector.s 91315 2002-02-26 17:06:21Z dillon $
 */


#include <machine/apic.h>
#include <machine/smp.h>

/* convert an absolute IRQ# into a bitmask */
#define IRQ_BIT(irq_num)	(1 << (irq_num))

/* make an index into the IO APIC from the IRQ# (redirection table entries are register pairs starting at 0x10) */
#define REDTBL_IDX(irq_num)	(0x10 + ((irq_num) * 2))

16/*
17 *
18 */
19#define PUSH_FRAME							\
20	pushl	$0 ;		/* dummy error code */			\
21	pushl	$0 ;		/* dummy trap type */			\
22	pushal ;		/* 8 ints */				\
23	pushl	%ds ;		/* save data and extra segments ... */	\
24	pushl	%es ;							\
25	pushl	%fs
26
/*
 * PUSH_DUMMY fabricates an interrupt-style frame without an actual
 * interrupt: eflags/cs/dummy-eip form a fake iret frame, then dummy
 * error code and trap type.  The subl reserves the 11 slots that
 * PUSH_FRAME would have filled (8 pushal words + ds/es/fs) without
 * saving anything into them.  Discarded wholesale by POP_DUMMY.
 */
#define PUSH_DUMMY							\
	pushfl ;		/* eflags */				\
	pushl	%cs ;		/* cs */				\
	pushl	$0 ;		/* dummy eip */				\
	pushl	$0 ;		/* dummy error code */			\
	pushl	$0 ;		/* dummy trap type */			\
	subl	$11*4,%esp ;

/*
 * POP_FRAME unwinds a PUSH_FRAME: restore segment registers and the
 * general registers, then skip the dummy trap type and error code.
 */
#define POP_FRAME							\
	popl	%fs ;							\
	popl	%es ;							\
	popl	%ds ;							\
	popal ;								\
	addl	$4+4,%esp	/* dummy trap type + error code */

/* Discard an entire PUSH_DUMMY frame: 11 reserved slots + trap type,
 * error code, eip, cs and eflags = 16 words. */
#define POP_DUMMY							\
	addl	$16*4,%esp

/* Per-IRQ fields of the int_to_apicintpin[] table (16-byte entries):
 * offset 8 = I/O APIC base address, offset 12 = redirection register index. */
#define IOAPICADDR(irq_num) CNAME(int_to_apicintpin) + 16 * (irq_num) + 8
#define REDIRIDX(irq_num) CNAME(int_to_apicintpin) + 16 * (irq_num) + 12

/*
 * Mask an IRQ at the I/O APIC, under ICU_LOCK.  If the bit is already
 * set in apic_imen the hardware is left untouched; otherwise set the
 * mask bit in apic_imen and in the pin's redirection table entry
 * (select the register through the index window, then RMW the value).
 */
#define MASK_IRQ(irq_num)						\
	ICU_LOCK ;				/* into critical reg */	\
	testl	$IRQ_BIT(irq_num), apic_imen ;				\
	jne	7f ;			/* masked, don't mask */	\
	orl	$IRQ_BIT(irq_num), apic_imen ;	/* set the mask bit */	\
	movl	IOAPICADDR(irq_num), %ecx ;	/* ioapic addr */	\
	movl	REDIRIDX(irq_num), %eax ;	/* get the index */	\
	movl	%eax, (%ecx) ;			/* write the index */	\
	movl	IOAPIC_WINDOW(%ecx), %eax ;	/* current value */	\
	orl	$IOART_INTMASK, %eax ;		/* set the mask */	\
	movl	%eax, IOAPIC_WINDOW(%ecx) ;	/* new value */		\
7: ;						/* already masked */	\
	ICU_UNLOCK
/*
 * Test to see whether we are handling an edge or level triggered INT.
 *  Level-triggered INTs must still be masked as we don't clear the source,
 *  and the EOI cycle would cause redundant INTs to occur.
 */
#define MASK_LEVEL_IRQ(irq_num)						\
	testl	$IRQ_BIT(irq_num), apic_pin_trigger ;			\
	jz	9f ;				/* edge, don't mask */	\
	MASK_IRQ(irq_num) ;						\
9:


#ifdef APIC_INTR_REORDER
/*
 * Send an EOI to the local APIC, but only if this IRQ's in-service bit
 * is actually set.  With interrupt reordering the ISR word/bit location
 * for each IRQ is looked up in apic_isrbit_location[] (8-byte entries:
 * pointer at +0, bitmask at +4).
 */
#define EOI_IRQ(irq_num)						\
	movl	apic_isrbit_location + 8 * (irq_num), %eax ;		\
	movl	(%eax), %eax ;						\
	testl	apic_isrbit_location + 4 + 8 * (irq_num), %eax ;	\
	jz	9f ;				/* not active */	\
	movl	$0, lapic+LA_EOI ;					\
9:

#else
/* Non-reordered variant: test the bit directly in LAPIC ISR word 1. */
#define EOI_IRQ(irq_num)						\
	testl	$IRQ_BIT(irq_num), lapic+LA_ISR1;			\
	jz	9f	;			/* not active */	\
	movl	$0, lapic+LA_EOI;					\
9:
#endif


/*
 * Test to see if the source is currently masked, clear if so.
 * Mirror image of MASK_IRQ: under ICU_LOCK, clear the bit in apic_imen
 * and clear IOART_INTMASK in the pin's redirection table entry.
 */
#define UNMASK_IRQ(irq_num)					\
	ICU_LOCK ;				/* into critical reg */	\
	testl	$IRQ_BIT(irq_num), apic_imen ;				\
	je	7f ;			/* bit clear, not masked */	\
	andl	$~IRQ_BIT(irq_num), apic_imen ;/* clear mask bit */	\
	movl	IOAPICADDR(irq_num), %ecx ;	/* ioapic addr */	\
	movl	REDIRIDX(irq_num), %eax ;	/* get the index */	\
	movl	%eax, (%ecx) ;			/* write the index */	\
	movl	IOAPIC_WINDOW(%ecx), %eax ;	/* current value */	\
	andl	$~IOART_INTMASK, %eax ;		/* clear the mask */	\
	movl	%eax, IOAPIC_WINDOW(%ecx) ;	/* new value */		\
7: ;						/* already unmasked */	\
	ICU_UNLOCK

/*
 * Test to see whether we are handling an edge or level triggered INT.
 *  Level-triggered INTs have to be unmasked.
 */
#define UNMASK_LEVEL_IRQ(irq_num)					\
	testl	$IRQ_BIT(irq_num), apic_pin_trigger ;			\
	jz	9f ;			/* edge, don't unmask */	\
	UNMASK_IRQ(irq_num) ;						\
9:

/*
 * Macros for interrupt entry, call to handler, and exit.
 *
 * FAST_INTR: entry for a "fast" hardware interrupt.  Build a trap
 * frame, load kernel segments, then:
 *  - if the current thread is in a critical section, just mark the
 *    interrupt pending (INT_PENDING / FPENDING bits), mask a
 *    level-triggered source, EOI, and leave via doreti;
 *  - otherwise run the handler immediately inside a critical section,
 *    EOI, bump the statistics counters, and run unpend() if anything
 *    became pending while we had TD_CRITNEST raised.
 */

#define	FAST_INTR(irq_num, vec_name)					\
	.text ;								\
	SUPERALIGN_TEXT ;						\
IDTVEC(vec_name) ;							\
	PUSH_FRAME ;							\
	movl	$KDSEL,%eax ;						\
	mov	%ax,%ds ;						\
	mov	%ax,%es ;						\
	movl	$KPSEL,%eax ;						\
	mov	%ax,%fs ;						\
	FAKE_MCOUNT(13*4(%esp)) ;					\
	movl	PCPU(CURTHREAD),%ebx ;					\
	cmpl	$0,TD_CRITNEST(%ebx) ;					\
	je	1f ;							\
;									\
	movl	$1,PCPU(INT_PENDING) ;					\
	orl	$IRQ_BIT(irq_num),PCPU(FPENDING) ;			\
	MASK_LEVEL_IRQ(irq_num) ;					\
	movl	$0, lapic+LA_EOI ;					\
	jmp	10f ;							\
1: ;									\
	incl	TD_CRITNEST(%ebx) ;					\
	incl	TD_INTR_NESTING_LEVEL(%ebx) ;				\
	pushl	intr_unit + (irq_num) * 4 ;				\
	call	*intr_handler + (irq_num) * 4 ;	/* do the work ASAP */	\
	addl	$4, %esp ;						\
	movl	$0, lapic+LA_EOI ;					\
	lock ; 								\
	incl	cnt+V_INTR ;	/* book-keeping can wait */		\
	movl	intr_countp + (irq_num) * 4, %eax ;			\
	lock ; 								\
	incl	(%eax) ;						\
	decl	TD_CRITNEST(%ebx) ;					\
	cmpl	$0,PCPU(INT_PENDING) ;					\
	je	2f ;							\
;									\
	call	unpend ;						\
2: ;									\
	decl	TD_INTR_NESTING_LEVEL(%ebx) ;				\
10: ;									\
	MEXITCOUNT ;							\
	jmp	doreti

/*
 * Restart a fast interrupt that was held up by a critical section.
 * This routine is called from unpend().  unpend() ensures we are
 * in a critical section and deals with the interrupt nesting level
 * for us.  If we previously masked the irq, we have to unmask it.
 *
 * We have a choice.  We can regenerate the irq using the 'int'
 * instruction or we can create a dummy frame and call the interrupt
 * handler directly.  I've chosen to use the dummy-frame method.
 */
#define	FAST_UNPEND(irq_num, vec_name)					\
	.text ;								\
	SUPERALIGN_TEXT ;						\
IDTVEC(vec_name) ;							\
;									\
	PUSH_DUMMY ;							\
	pushl	intr_unit + (irq_num) * 4 ;				\
	call	*intr_handler + (irq_num) * 4 ;	/* do the work ASAP */	\
	addl	$4, %esp ;						\
	lock ; 								\
	incl	cnt+V_INTR ;	/* book-keeping can wait */		\
	movl	intr_countp + (irq_num) * 4, %eax ;			\
	lock ; 								\
	incl	(%eax) ;						\
	UNMASK_LEVEL_IRQ(irq_num) ;					\
	POP_DUMMY ;							\
	ret ;								\


/*
 * Slow, threaded interrupts.
 *
 * Mask a level-triggered source and EOI it, then either mark the
 * interrupt pending (if inside a critical section) or schedule the
 * interrupt thread via sched_ithd().  Always exits through doreti.
 *
 * XXX Most of the parameters here are obsolete.  Fix this when we're
 * done.
 * XXX we really shouldn't return via doreti if we just schedule the
 * interrupt handler and don't run anything.  We could just do an
 * iret.  FIXME.
 */
#define	INTR(irq_num, vec_name, maybe_extra_ipending)			\
	.text ;								\
	SUPERALIGN_TEXT ;						\
/* _XintrNN: entry point used by IDT/HWIs via _vec[]. */		\
IDTVEC(vec_name) ;							\
	PUSH_FRAME ;							\
	movl	$KDSEL, %eax ;	/* reload with kernel's data segment */	\
	mov	%ax, %ds ;						\
	mov	%ax, %es ;						\
	movl	$KPSEL, %eax ;						\
	mov	%ax, %fs ;						\
;									\
	maybe_extra_ipending ;						\
;									\
	MASK_LEVEL_IRQ(irq_num) ;					\
	EOI_IRQ(irq_num) ;						\
;									\
	movl	PCPU(CURTHREAD),%ebx ;					\
	cmpl	$0,TD_CRITNEST(%ebx) ;					\
	je	1f ;							\
	movl	$1,PCPU(INT_PENDING) ;					\
	orl	$IRQ_BIT(irq_num),PCPU(IPENDING) ;			\
	jmp	10f ;							\
1: ;									\
	incl	TD_INTR_NESTING_LEVEL(%ebx) ;				\
;	 								\
	FAKE_MCOUNT(13*4(%esp)) ;		/* XXX avoid dbl cnt */ \
	cmpl	$0,PCPU(INT_PENDING) ;					\
	je	9f ;							\
	call	unpend ;						\
9: ;									\
	pushl	$irq_num;			/* pass the IRQ */	\
	call	sched_ithd ;						\
	addl	$4, %esp ;		/* discard the parameter */	\
;									\
	decl	TD_INTR_NESTING_LEVEL(%ebx) ;				\
10: ;									\
	MEXITCOUNT ;							\
	jmp	doreti

/*
 * Handle "spurious INTerrupts".
 * Notes:
 *  This is different than the "spurious INTerrupt" generated by an
 *   8259 PIC for missing INTs.  See the APIC documentation for details.
 *  This routine should NOT do an 'EOI' cycle.
 */
	.text
	SUPERALIGN_TEXT
	.globl Xspuriousint
Xspuriousint:

	/* No EOI cycle used here */

	iret

/*
 * Global address space TLB shootdown.
 * Reloads %cr3 to flush the whole TLB, EOIs, and signals completion
 * by incrementing smp_tlb_wait.
 */
	.text
	SUPERALIGN_TEXT
	.globl	Xinvltlb
Xinvltlb:
	pushl	%eax
	pushl	%ds
	movl	$KDSEL, %eax		/* Kernel data selector */
	mov	%ax, %ds

#ifdef COUNT_XINVLTLB_HITS
	pushl	%fs
	movl	$KPSEL, %eax		/* Private space selector */
	mov	%ax, %fs
	movl	PCPU(CPUID), %eax
	popl	%fs
	incl	xhits_gbl(,%eax,4)
#endif /* COUNT_XINVLTLB_HITS */

	movl	%cr3, %eax		/* invalidate the TLB */
	movl	%eax, %cr3

	movl	$0, lapic+LA_EOI	/* End Of Interrupt to APIC */

	lock
	incl	smp_tlb_wait		/* tell initiator we are done */

	popl	%ds
	popl	%eax
	iret

/*
 * Single page TLB shootdown: invlpg the address in smp_tlb_addr1,
 * EOI, and signal completion via smp_tlb_wait.
 */
	.text
	SUPERALIGN_TEXT
	.globl	Xinvlpg
Xinvlpg:
	pushl	%eax
	pushl	%ds
	movl	$KDSEL, %eax		/* Kernel data selector */
	mov	%ax, %ds

#ifdef COUNT_XINVLTLB_HITS
	pushl	%fs
	movl	$KPSEL, %eax		/* Private space selector */
	mov	%ax, %fs
	movl	PCPU(CPUID), %eax
	popl	%fs
	ss				/* NOTE(review): stray %ss override on the incl below; Xinvltlb lacks it — confirm intent */
	incl	xhits_pg(,%eax,4)
#endif /* COUNT_XINVLTLB_HITS */

	movl	smp_tlb_addr1, %eax
	invlpg	(%eax)			/* invalidate single page */

	movl	$0, lapic+LA_EOI	/* End Of Interrupt to APIC */

	lock
	incl	smp_tlb_wait

	popl	%ds
	popl	%eax
	iret

/*
 * Page range TLB shootdown: invlpg each page from smp_tlb_addr1
 * (inclusive) up to smp_tlb_addr2 (exclusive), then EOI and signal
 * completion via smp_tlb_wait.
 */
	.text
	SUPERALIGN_TEXT
	.globl	Xinvlrng
Xinvlrng:
	pushl	%eax
	pushl	%edx
	pushl	%ds
	movl	$KDSEL, %eax		/* Kernel data selector */
	mov	%ax, %ds

#ifdef COUNT_XINVLTLB_HITS
	pushl	%fs
	movl	$KPSEL, %eax		/* Private space selector */
	mov	%ax, %fs
	movl	PCPU(CPUID), %eax
	popl	%fs
	incl	xhits_rng(,%eax,4)
#endif /* COUNT_XINVLTLB_HITS */

	movl	smp_tlb_addr1, %edx	/* edx = current page */
	movl	smp_tlb_addr2, %eax	/* eax = end address */
1:	invlpg	(%edx)			/* invalidate single page */
	addl	$PAGE_SIZE, %edx
	cmpl	%edx, %eax
	jb	1b

	movl	$0, lapic+LA_EOI	/* End Of Interrupt to APIC */

	lock
	incl	smp_tlb_wait

	popl	%ds
	popl	%edx
	popl	%eax
	iret

/*
 * Forward hardclock to another CPU.  Pushes a trapframe and calls
 * forwarded_hardclock().  If we are in a critical section, just set
 * INT_PENDING and bit 0 of SPENDING instead and let doreti/unpend
 * deal with it later.
 */
	.text
	SUPERALIGN_TEXT
	.globl Xhardclock
Xhardclock:
	PUSH_FRAME
	movl	$KDSEL, %eax	/* reload with kernel's data segment */
	mov	%ax, %ds
	mov	%ax, %es
	movl	$KPSEL, %eax
	mov	%ax, %fs

	movl	$0, lapic+LA_EOI	/* End Of Interrupt to APIC */

	movl	PCPU(CURTHREAD),%ebx
	cmpl	$0,TD_CRITNEST(%ebx)
	je	1f
	movl	$1,PCPU(INT_PENDING)
	orl	$1,PCPU(SPENDING);
	jmp	10f
1:
	incl	TD_INTR_NESTING_LEVEL(%ebx)
	call	forwarded_hardclock
	decl	TD_INTR_NESTING_LEVEL(%ebx)
10:
	MEXITCOUNT
	jmp	doreti

/*
 * Forward statclock to another CPU.  Pushes a trapframe and calls
 * forwarded_statclock().  If we are in a critical section, just set
 * INT_PENDING and bit 1 of SPENDING instead.
 */
	.text
	SUPERALIGN_TEXT
	.globl Xstatclock
Xstatclock:
	PUSH_FRAME
	movl	$KDSEL, %eax	/* reload with kernel's data segment */
	mov	%ax, %ds
	mov	%ax, %es
	movl	$KPSEL, %eax
	mov	%ax, %fs

	movl	$0, lapic+LA_EOI	/* End Of Interrupt to APIC */

	FAKE_MCOUNT(13*4(%esp))

	movl	PCPU(CURTHREAD),%ebx
	cmpl	$0,TD_CRITNEST(%ebx)
	je	1f
	movl	$1,PCPU(INT_PENDING)
	orl	$2,PCPU(SPENDING);
	jmp	10f
1:
	incl	TD_INTR_NESTING_LEVEL(%ebx)
	call	forwarded_statclock
	decl	TD_INTR_NESTING_LEVEL(%ebx)
10:
	MEXITCOUNT
	jmp	doreti

/*
 * Executed by a CPU when it receives an Xcpuast IPI from another CPU,
 *
 * The other CPU has already executed aston() or need_resched() on our
 * current process, so we simply need to ack the interrupt and return
 * via doreti to run ast().
 */

	.text
	SUPERALIGN_TEXT
	.globl Xcpuast
Xcpuast:
	PUSH_FRAME
	movl	$KDSEL, %eax
	mov	%ax, %ds		/* use KERNEL data segment */
	mov	%ax, %es
	movl	$KPSEL, %eax
	mov	%ax, %fs

	movl	$0, lapic+LA_EOI	/* End Of Interrupt to APIC */

	FAKE_MCOUNT(13*4(%esp))

	MEXITCOUNT
	jmp	doreti

/*
 * Executed by a CPU when it receives an Xcpustop IPI from another CPU,
 *
 *  - Signals its receipt.
 *  - Waits for permission to restart.
 *  - Signals its restart.
 *
 * Saves a minimal register set plus the process context (savectx into
 * stoppcbs[cpuid]), then spins until our bit appears in started_cpus.
 * CPU 0 additionally runs cpustop_restartfunc (one-shot) on restart.
 */
	.text
	SUPERALIGN_TEXT
	.globl Xcpustop
Xcpustop:
	pushl	%ebp
	movl	%esp, %ebp
	pushl	%eax
	pushl	%ecx
	pushl	%edx
	pushl	%ds			/* save current data segment */
	pushl	%fs

	movl	$KDSEL, %eax
	mov	%ax, %ds		/* use KERNEL data segment */
	movl	$KPSEL, %eax
	mov	%ax, %fs

	movl	$0, lapic+LA_EOI	/* End Of Interrupt to APIC */

	movl	PCPU(CPUID), %eax
	imull	$PCB_SIZE, %eax
	leal	CNAME(stoppcbs)(%eax), %eax
	pushl	%eax
	call	CNAME(savectx)		/* Save process context */
	addl	$4, %esp

	movl	PCPU(CPUID), %eax

	lock
	btsl	%eax, CNAME(stopped_cpus) /* stopped_cpus |= (1<<id) */
1:
	btl	%eax, CNAME(started_cpus) /* while (!(started_cpus & (1<<id))) */
	jnc	1b

	lock
	btrl	%eax, CNAME(started_cpus) /* started_cpus &= ~(1<<id) */
	lock
	btrl	%eax, CNAME(stopped_cpus) /* stopped_cpus &= ~(1<<id) */

	test	%eax, %eax		/* only CPU 0 runs the restart func */
	jnz	2f

	movl	CNAME(cpustop_restartfunc), %eax
	test	%eax, %eax
	jz	2f
	movl	$0, CNAME(cpustop_restartfunc)	/* One-shot */

	call	*%eax
2:
	popl	%fs
	popl	%ds			/* restore previous data segment */
	popl	%edx
	popl	%ecx
	popl	%eax
	movl	%ebp, %esp
	popl	%ebp
	iret


/*
 * Instantiate the fast, threaded, and unpend vectors for IRQs 0-31.
 * MCOUNT_LABEL(bintr)/(eintr) bracket them for the profiler.
 */
MCOUNT_LABEL(bintr)
	FAST_INTR(0,fastintr0)
	FAST_INTR(1,fastintr1)
	FAST_INTR(2,fastintr2)
	FAST_INTR(3,fastintr3)
	FAST_INTR(4,fastintr4)
	FAST_INTR(5,fastintr5)
	FAST_INTR(6,fastintr6)
	FAST_INTR(7,fastintr7)
	FAST_INTR(8,fastintr8)
	FAST_INTR(9,fastintr9)
	FAST_INTR(10,fastintr10)
	FAST_INTR(11,fastintr11)
	FAST_INTR(12,fastintr12)
	FAST_INTR(13,fastintr13)
	FAST_INTR(14,fastintr14)
	FAST_INTR(15,fastintr15)
	FAST_INTR(16,fastintr16)
	FAST_INTR(17,fastintr17)
	FAST_INTR(18,fastintr18)
	FAST_INTR(19,fastintr19)
	FAST_INTR(20,fastintr20)
	FAST_INTR(21,fastintr21)
	FAST_INTR(22,fastintr22)
	FAST_INTR(23,fastintr23)
	FAST_INTR(24,fastintr24)
	FAST_INTR(25,fastintr25)
	FAST_INTR(26,fastintr26)
	FAST_INTR(27,fastintr27)
	FAST_INTR(28,fastintr28)
	FAST_INTR(29,fastintr29)
	FAST_INTR(30,fastintr30)
	FAST_INTR(31,fastintr31)
#define	CLKINTR_PENDING	movl $1,CNAME(clkintr_pending)
/* Threaded interrupts */
	INTR(0,intr0, CLKINTR_PENDING)
	INTR(1,intr1,)
	INTR(2,intr2,)
	INTR(3,intr3,)
	INTR(4,intr4,)
	INTR(5,intr5,)
	INTR(6,intr6,)
	INTR(7,intr7,)
	INTR(8,intr8,)
	INTR(9,intr9,)
	INTR(10,intr10,)
	INTR(11,intr11,)
	INTR(12,intr12,)
	INTR(13,intr13,)
	INTR(14,intr14,)
	INTR(15,intr15,)
	INTR(16,intr16,)
	INTR(17,intr17,)
	INTR(18,intr18,)
	INTR(19,intr19,)
	INTR(20,intr20,)
	INTR(21,intr21,)
	INTR(22,intr22,)
	INTR(23,intr23,)
	INTR(24,intr24,)
	INTR(25,intr25,)
	INTR(26,intr26,)
	INTR(27,intr27,)
	INTR(28,intr28,)
	INTR(29,intr29,)
	INTR(30,intr30,)
	INTR(31,intr31,)

	FAST_UNPEND(0,fastunpend0)
	FAST_UNPEND(1,fastunpend1)
	FAST_UNPEND(2,fastunpend2)
	FAST_UNPEND(3,fastunpend3)
	FAST_UNPEND(4,fastunpend4)
	FAST_UNPEND(5,fastunpend5)
	FAST_UNPEND(6,fastunpend6)
	FAST_UNPEND(7,fastunpend7)
	FAST_UNPEND(8,fastunpend8)
	FAST_UNPEND(9,fastunpend9)
	FAST_UNPEND(10,fastunpend10)
	FAST_UNPEND(11,fastunpend11)
	FAST_UNPEND(12,fastunpend12)
	FAST_UNPEND(13,fastunpend13)
	FAST_UNPEND(14,fastunpend14)
	FAST_UNPEND(15,fastunpend15)
	FAST_UNPEND(16,fastunpend16)
	FAST_UNPEND(17,fastunpend17)
	FAST_UNPEND(18,fastunpend18)
	FAST_UNPEND(19,fastunpend19)
	FAST_UNPEND(20,fastunpend20)
	FAST_UNPEND(21,fastunpend21)
	FAST_UNPEND(22,fastunpend22)
	FAST_UNPEND(23,fastunpend23)
	FAST_UNPEND(24,fastunpend24)
	FAST_UNPEND(25,fastunpend25)
	FAST_UNPEND(26,fastunpend26)
	FAST_UNPEND(27,fastunpend27)
	FAST_UNPEND(28,fastunpend28)
	FAST_UNPEND(29,fastunpend29)
	FAST_UNPEND(30,fastunpend30)
	FAST_UNPEND(31,fastunpend31)
MCOUNT_LABEL(eintr)

/*
 * Executed by a CPU when it receives a RENDEZVOUS IPI from another CPU.
 *
 * - Calls the generic rendezvous action function.
 */
	.text
	SUPERALIGN_TEXT
	.globl	Xrendezvous
Xrendezvous:
	PUSH_FRAME
	movl	$KDSEL, %eax
	mov	%ax, %ds		/* use KERNEL data segment */
	mov	%ax, %es
	movl	$KPSEL, %eax
	mov	%ax, %fs

	call	smp_rendezvous_action

	movl	$0, lapic+LA_EOI	/* End Of Interrupt to APIC */
	POP_FRAME
	iret


646	.data
647
648	.globl	apic_pin_trigger
649apic_pin_trigger:
650	.long	0
651
652	.text
653