/*
 *	from: vector.s, 386BSD 0.1 unknown origin
 * $FreeBSD: head/sys/i386/i386/apic_vector.s 99862 2002-07-12 07:56:11Z peter $
 */
5
6
7#include <machine/apic.h>
8#include <machine/smp.h>
9
/* convert an absolute IRQ# into a bitmask (for apic_imen, *PENDING, etc.) */
#define IRQ_BIT(irq_num)	(1 << (irq_num))

/*
 * Make an I/O APIC register index from the IRQ#: redirection table
 * entries start at register 0x10 and occupy two 32-bit registers each.
 */
#define REDTBL_IDX(irq_num)	(0x10 + ((irq_num) * 2))
15
/*
 * Push a full trap frame: dummy error code and trap type, the eight
 * general registers (pushal), and the %ds/%es/%fs data segments.
 * This is the frame layout that doreti unwinds on exit.
 */
#define PUSH_FRAME							\
	pushl	$0 ;		/* dummy error code */			\
	pushl	$0 ;		/* dummy trap type */			\
	pushal ;		/* 8 ints */				\
	pushl	%ds ;		/* save data and extra segments ... */	\
	pushl	%es ;							\
	pushl	%fs
26
/*
 * Build a fake 16-word trap frame for FAST_UNPEND: synthesize
 * eflags/cs/eip from current state, push dummy error code and trap
 * type, then just reserve (not fill) the 11 register/segment slots a
 * real PUSH_FRAME would save — their contents are discarded by
 * POP_DUMMY and never restored into registers.
 */
#define PUSH_DUMMY							\
	pushfl ;		/* eflags */				\
	pushl	%cs ;		/* cs */				\
	pushl	12(%esp) ;	/* original caller eip */		\
	pushl	$0 ;		/* dummy error code */			\
	pushl	$0 ;		/* dummy trap type */			\
	subl	$11*4,%esp ;
34
/*
 * Unwind a PUSH_FRAME frame: restore the segment registers and the
 * eight general registers, then discard the dummy trap type and
 * error code.
 */
#define POP_FRAME							\
	popl	%fs ;							\
	popl	%es ;							\
	popl	%ds ;							\
	popal ;								\
	addl	$4+4,%esp	/* dummy trap type + error code */

/* Discard the entire 16-word PUSH_DUMMY frame; restores nothing. */
#define POP_DUMMY							\
	addl	$16*4,%esp
44
/*
 * Byte offsets into the int_to_apicintpin[] table (16 bytes/entry):
 * offset 8 = I/O APIC register-select address for the pin, offset 12 =
 * redirection-table register index for the pin.
 * NOTE(review): field meanings inferred from MASK_IRQ/UNMASK_IRQ usage
 * below — confirm against the struct definition elsewhere.
 */
#define IOAPICADDR(irq_num) CNAME(int_to_apicintpin) + 16 * (irq_num) + 8
#define REDIRIDX(irq_num) CNAME(int_to_apicintpin) + 16 * (irq_num) + 12
47
/*
 * Mask the IRQ at its I/O APIC pin (set IOART_INTMASK in the
 * redirection entry) and record it in apic_imen, unless the bit in
 * apic_imen shows it is already masked.  Runs under ICU_LOCK;
 * clobbers %eax and %ecx.
 */
#define MASK_IRQ(irq_num)						\
	ICU_LOCK ;				/* into critical reg */	\
	testl	$IRQ_BIT(irq_num), apic_imen ;				\
	jne	7f ;			/* masked, don't mask */	\
	orl	$IRQ_BIT(irq_num), apic_imen ;	/* set the mask bit */	\
	movl	IOAPICADDR(irq_num), %ecx ;	/* ioapic addr */	\
	movl	REDIRIDX(irq_num), %eax ;	/* get the index */	\
	movl	%eax, (%ecx) ;			/* write the index */	\
	movl	IOAPIC_WINDOW(%ecx), %eax ;	/* current value */	\
	orl	$IOART_INTMASK, %eax ;		/* set the mask */	\
	movl	%eax, IOAPIC_WINDOW(%ecx) ;	/* new value */		\
7: ;						/* already masked */	\
	ICU_UNLOCK
/*
 * Test to see whether we are handling an edge or level triggered INT.
 *  Level-triggered INTs must still be masked as we don't clear the source,
 *  and the EOI cycle would cause redundant INTs to occur.
 *  (apic_pin_trigger has one bit set per level-triggered pin.)
 */
#define MASK_LEVEL_IRQ(irq_num)						\
	testl	$IRQ_BIT(irq_num), apic_pin_trigger ;			\
	jz	9f ;				/* edge, don't mask */	\
	MASK_IRQ(irq_num) ;						\
9:
71
72
/*
 * EOI the local APIC for this IRQ, but only if the IRQ's bit is set
 * in the in-service register (otherwise skip the EOI entirely).
 * The APIC_INTR_REORDER variant fetches the ISR word address and the
 * bit mask from the apic_isrbit_location[] table (8 bytes per IRQ:
 * pointer at +0, mask at +4); the default variant tests the bit
 * directly in ISR word 1 (LA_ISR1).  Clobbers %eax (reorder variant).
 */
#ifdef APIC_INTR_REORDER
#define EOI_IRQ(irq_num)						\
	movl	apic_isrbit_location + 8 * (irq_num), %eax ;		\
	movl	(%eax), %eax ;						\
	testl	apic_isrbit_location + 4 + 8 * (irq_num), %eax ;	\
	jz	9f ;				/* not active */	\
	movl	$0, lapic+LA_EOI ;					\
9:

#else
#define EOI_IRQ(irq_num)						\
	testl	$IRQ_BIT(irq_num), lapic+LA_ISR1;			\
	jz	9f	;			/* not active */	\
	movl	$0, lapic+LA_EOI;					\
9:
#endif
89
90
/*
 * Test to see if the source is currently masked, clear if so.
 * Mirror image of MASK_IRQ: clear the bit in apic_imen and clear
 * IOART_INTMASK in the pin's redirection entry.  Runs under
 * ICU_LOCK; clobbers %eax and %ecx.
 */
#define UNMASK_IRQ(irq_num)					\
	ICU_LOCK ;				/* into critical reg */	\
	testl	$IRQ_BIT(irq_num), apic_imen ;				\
	je	7f ;			/* bit clear, not masked */	\
	andl	$~IRQ_BIT(irq_num), apic_imen ;/* clear mask bit */	\
	movl	IOAPICADDR(irq_num), %ecx ;	/* ioapic addr */	\
	movl	REDIRIDX(irq_num), %eax ;	/* get the index */	\
	movl	%eax, (%ecx) ;			/* write the index */	\
	movl	IOAPIC_WINDOW(%ecx), %eax ;	/* current value */	\
	andl	$~IOART_INTMASK, %eax ;		/* clear the mask */	\
	movl	%eax, IOAPIC_WINDOW(%ecx) ;	/* new value */		\
7: ;						/* already unmasked */	\
	ICU_UNLOCK
107
/*
 * Test to see whether we are handling an edge or level triggered INT.
 *  Level-triggered INTs have to be unmasked.  (Edge-triggered pins are
 *  never masked by MASK_LEVEL_IRQ, so there is nothing to undo.)
 */
#define UNMASK_LEVEL_IRQ(irq_num)					\
	testl	$IRQ_BIT(irq_num), apic_pin_trigger ;			\
	jz	9f ;			/* edge, don't unmask */	\
	UNMASK_IRQ(irq_num) ;						\
9:
117
/*
 * Macros for interrupt entry, call to handler, and exit.
 *
 * FAST_INTR(irq_num, vec_name): entry point for a "fast" (non-threaded)
 * interrupt.  If the interrupted thread is inside a critical section
 * the handler may not run now: record the irq in the per-cpu FPENDING
 * mask, set INT_PENDING, mask the pin (level-triggered only) and EOI,
 * then leave via doreti — the deferred handler runs later via
 * FAST_UNPEND.  Otherwise call the handler immediately inside a
 * critical section, EOI, update the interrupt counters, and run any
 * interrupts that became pending meanwhile (i386_unpend) before
 * exiting through doreti.
 */
#define	FAST_INTR(irq_num, vec_name)					\
	.text ;								\
	SUPERALIGN_TEXT ;						\
IDTVEC(vec_name) ;							\
	PUSH_FRAME ;							\
	movl	$KDSEL,%eax ;		/* reload kernel data segs */	\
	mov	%ax,%ds ;						\
	mov	%ax,%es ;						\
	movl	$KPSEL,%eax ;		/* per-cpu private space */	\
	mov	%ax,%fs ;						\
	FAKE_MCOUNT(13*4(%esp)) ;	/* profiling hook */		\
	movl	PCPU(CURTHREAD),%ebx ;					\
	cmpl	$0,TD_CRITNEST(%ebx) ;	/* in a critical section? */	\
	je	1f ;							\
;									\
	movl	$1,PCPU(INT_PENDING) ;	/* defer: note fast irq */	\
	orl	$IRQ_BIT(irq_num),PCPU(FPENDING) ;			\
	MASK_LEVEL_IRQ(irq_num) ;					\
	movl	$0, lapic+LA_EOI ;					\
	jmp	10f ;							\
1: ;									\
	incl	TD_CRITNEST(%ebx) ;					\
	incl	TD_INTR_NESTING_LEVEL(%ebx) ;				\
	pushl	intr_unit + (irq_num) * 4 ;				\
	call	*intr_handler + (irq_num) * 4 ;	/* do the work ASAP */	\
	addl	$4, %esp ;						\
	movl	$0, lapic+LA_EOI ;					\
	lock ; 								\
	incl	cnt+V_INTR ;	/* book-keeping can wait */		\
	movl	intr_countp + (irq_num) * 4, %eax ;			\
	lock ; 								\
	incl	(%eax) ;						\
	decl	TD_CRITNEST(%ebx) ;					\
	cmpl	$0,PCPU(INT_PENDING) ;	/* anything queued up? */	\
	je	2f ;							\
;									\
	call	i386_unpend ;						\
2: ;									\
	decl	TD_INTR_NESTING_LEVEL(%ebx) ;				\
10: ;									\
	MEXITCOUNT ;							\
	jmp	doreti
164
/*
 * Restart a fast interrupt that was held up by a critical section.
 * This routine is called from unpend().  unpend() ensures we are
 * in a critical section and deals with the interrupt nesting level
 * for us.  If we previously masked the irq, we have to unmask it.
 *
 * We have a choice.  We can regenerate the irq using the 'int'
 * instruction or we can create a dummy frame and call the interrupt
 * handler directly.  I've chosen to use the dummy-frame method.
 *
 * Clobbers %eax (via the handler's C calling convention, caller-saved
 * registers generally); %ebp is preserved explicitly.
 */
#define	FAST_UNPEND(irq_num, vec_name)					\
	.text ;								\
	SUPERALIGN_TEXT ;						\
IDTVEC(vec_name) ;							\
;									\
	pushl	%ebp ;							\
	movl	%esp, %ebp ;						\
	PUSH_DUMMY ;			/* fake trap frame for handler */ \
	pushl	intr_unit + (irq_num) * 4 ;				\
	call	*intr_handler + (irq_num) * 4 ;	/* do the work ASAP */	\
	addl	$4, %esp ;						\
	lock ; 								\
	incl	cnt+V_INTR ;	/* book-keeping can wait */		\
	movl	intr_countp + (irq_num) * 4, %eax ;			\
	lock ; 								\
	incl	(%eax) ;						\
	UNMASK_LEVEL_IRQ(irq_num) ;	/* undo mask done at deferral */ \
	POP_DUMMY ;							\
	popl %ebp ;							\
	ret ;								\

196
/*
 * Slow, threaded interrupts.
 *
 * Mask the source (level-triggered only) and conditionally EOI, then
 * either schedule the interrupt thread via sched_ithd() or, when the
 * interrupted thread is in a critical section, just record the irq in
 * the per-cpu IPENDING mask for later delivery.  Exits through doreti
 * either way.
 *
 * XXX Most of the parameters here are obsolete.  Fix this when we're
 * done.
 * XXX we really shouldn't return via doreti if we just schedule the
 * interrupt handler and don't run anything.  We could just do an
 * iret.  FIXME.
 */
#define	INTR(irq_num, vec_name, maybe_extra_ipending)			\
	.text ;								\
	SUPERALIGN_TEXT ;						\
/* _XintrNN: entry point used by IDT/HWIs via _vec[]. */		\
IDTVEC(vec_name) ;							\
	PUSH_FRAME ;							\
	movl	$KDSEL, %eax ;	/* reload with kernel's data segment */	\
	mov	%ax, %ds ;						\
	mov	%ax, %es ;						\
	movl	$KPSEL, %eax ;						\
	mov	%ax, %fs ;						\
;									\
	maybe_extra_ipending ;	/* e.g. CLKINTR_PENDING for irq 0 */	\
;									\
	MASK_LEVEL_IRQ(irq_num) ;					\
	EOI_IRQ(irq_num) ;						\
;									\
	movl	PCPU(CURTHREAD),%ebx ;					\
	cmpl	$0,TD_CRITNEST(%ebx) ;	/* in a critical section? */	\
	je	1f ;							\
	movl	$1,PCPU(INT_PENDING) ;	/* defer scheduling */		\
	orl	$IRQ_BIT(irq_num),PCPU(IPENDING) ;			\
	jmp	10f ;							\
1: ;									\
	incl	TD_INTR_NESTING_LEVEL(%ebx) ;				\
;	 								\
	FAKE_MCOUNT(13*4(%esp)) ;		/* XXX avoid dbl cnt */ \
	cmpl	$0,PCPU(INT_PENDING) ;	/* run deferred ints first */	\
	je	9f ;							\
	call	i386_unpend ;						\
9: ;									\
	pushl	$irq_num;			/* pass the IRQ */	\
	call	sched_ithd ;						\
	addl	$4, %esp ;		/* discard the parameter */	\
;									\
	decl	TD_INTR_NESTING_LEVEL(%ebx) ;				\
10: ;									\
	MEXITCOUNT ;							\
	jmp	doreti
245
/*
 * Handle "spurious INTerrupts".
 * Notes:
 *  This is different than the "spurious INTerrupt" generated by an
 *   8259 PIC for missing INTs.  See the APIC documentation for details.
 *  This routine should NOT do an 'EOI' cycle.
 */
	.text
	SUPERALIGN_TEXT
	.globl Xspuriousint
Xspuriousint:

	/* No EOI cycle used here: nothing to acknowledge, just return */

	iret
261
/*
 * Global address space TLB shootdown.
 *
 * IPI handler: flush this CPU's entire TLB by reloading %cr3, EOI,
 * and atomically bump smp_tlb_wait so the initiating CPU can tell
 * when every target has finished.
 */
	.text
	SUPERALIGN_TEXT
	.globl	Xinvltlb
Xinvltlb:
	pushl	%eax
	pushl	%ds
	movl	$KDSEL, %eax		/* Kernel data selector */
	mov	%ax, %ds

#ifdef COUNT_XINVLTLB_HITS
	pushl	%fs
	movl	$KPSEL, %eax		/* Private space selector */
	mov	%ax, %fs
	movl	PCPU(CPUID), %eax
	popl	%fs
	incl	xhits_gbl(,%eax,4)	/* per-cpu hit counter */
#endif /* COUNT_XINVLTLB_HITS */

	movl	%cr3, %eax		/* invalidate the TLB */
	movl	%eax, %cr3		/* (reloading %cr3 flushes it) */

	movl	$0, lapic+LA_EOI	/* End Of Interrupt to APIC */

	lock
	incl	smp_tlb_wait		/* signal completion to initiator */

	popl	%ds
	popl	%eax
	iret
294
/*
 * Single page TLB shootdown
 *
 * IPI handler: invlpg the page at smp_tlb_addr1 on this CPU, EOI,
 * and atomically bump smp_tlb_wait to signal completion.
 */
	.text
	SUPERALIGN_TEXT
	.globl	Xinvlpg
Xinvlpg:
	pushl	%eax
	pushl	%ds
	movl	$KDSEL, %eax		/* Kernel data selector */
	mov	%ax, %ds

#ifdef COUNT_XINVLTLB_HITS
	pushl	%fs
	movl	$KPSEL, %eax		/* Private space selector */
	mov	%ax, %fs
	movl	PCPU(CPUID), %eax
	popl	%fs
	incl	xhits_pg(,%eax,4)	/* per-cpu hit counter */
#endif /* COUNT_XINVLTLB_HITS */

	movl	smp_tlb_addr1, %eax
	invlpg	(%eax)			/* invalidate single page */

	movl	$0, lapic+LA_EOI	/* End Of Interrupt to APIC */

	lock
	incl	smp_tlb_wait		/* signal completion to initiator */

	popl	%ds
	popl	%eax
	iret
327
/*
 * Page range TLB shootdown.
 *
 * IPI handler: invlpg every page in [smp_tlb_addr1, smp_tlb_addr2)
 * on this CPU, EOI, and atomically bump smp_tlb_wait to signal
 * completion to the initiating CPU.
 *
 * Fix: the loop-termination compare had its AT&T operands swapped
 * ("cmpl %edx, %eax ; jb 1b" branches while addr2 < cursor), which
 * made the loop exit after invalidating only the first page of the
 * range.  The compare must be "cmpl %eax, %edx" so jb loops while
 * cursor < addr2.
 */
	.text
	SUPERALIGN_TEXT
	.globl	Xinvlrng
Xinvlrng:
	pushl	%eax
	pushl	%edx
	pushl	%ds
	movl	$KDSEL, %eax		/* Kernel data selector */
	mov	%ax, %ds

#ifdef COUNT_XINVLTLB_HITS
	pushl	%fs
	movl	$KPSEL, %eax		/* Private space selector */
	mov	%ax, %fs
	movl	PCPU(CPUID), %eax
	popl	%fs
	incl	xhits_rng(,%eax,4)	/* per-cpu hit counter */
#endif /* COUNT_XINVLTLB_HITS */

	movl	smp_tlb_addr1, %edx	/* %edx = cursor */
	movl	smp_tlb_addr2, %eax	/* %eax = end (exclusive) */
1:	invlpg	(%edx)			/* invalidate single page */
	addl	$PAGE_SIZE, %edx
	cmpl	%eax, %edx		/* loop while cursor < end */
	jb	1b

	movl	$0, lapic+LA_EOI	/* End Of Interrupt to APIC */

	lock
	incl	smp_tlb_wait		/* signal completion to initiator */

	popl	%ds
	popl	%edx
	popl	%eax
	iret
366
/*
 * Forward hardclock to another CPU.  Pushes a trapframe and calls
 * forwarded_hardclock().
 *
 * If the target thread is in a critical section, defer instead:
 * set INT_PENDING and bit 0 of the per-cpu SPENDING mask so the
 * unpend machinery delivers it later.
 */
	.text
	SUPERALIGN_TEXT
	.globl Xhardclock
Xhardclock:
	PUSH_FRAME
	movl	$KDSEL, %eax	/* reload with kernel's data segment */
	mov	%ax, %ds
	mov	%ax, %es
	movl	$KPSEL, %eax
	mov	%ax, %fs

	movl	$0, lapic+LA_EOI	/* End Of Interrupt to APIC */

	movl	PCPU(CURTHREAD),%ebx
	cmpl	$0,TD_CRITNEST(%ebx)	/* in a critical section? */
	je	1f
	movl	$1,PCPU(INT_PENDING)
	orl	$1,PCPU(SPENDING);	/* hardclock = SPENDING bit 0 */
	jmp	10f
1:
	incl	TD_INTR_NESTING_LEVEL(%ebx)
	call	forwarded_hardclock
	decl	TD_INTR_NESTING_LEVEL(%ebx)
10:
	MEXITCOUNT
	jmp	doreti
397
/*
 * Forward statclock to another CPU.  Pushes a trapframe and calls
 * forwarded_statclock().
 *
 * If the target thread is in a critical section, defer instead:
 * set INT_PENDING and bit 1 of the per-cpu SPENDING mask so the
 * unpend machinery delivers it later.
 */
	.text
	SUPERALIGN_TEXT
	.globl Xstatclock
Xstatclock:
	PUSH_FRAME
	movl	$KDSEL, %eax	/* reload with kernel's data segment */
	mov	%ax, %ds
	mov	%ax, %es
	movl	$KPSEL, %eax
	mov	%ax, %fs

	movl	$0, lapic+LA_EOI	/* End Of Interrupt to APIC */

	FAKE_MCOUNT(13*4(%esp))

	movl	PCPU(CURTHREAD),%ebx
	cmpl	$0,TD_CRITNEST(%ebx)	/* in a critical section? */
	je	1f
	movl	$1,PCPU(INT_PENDING)
	orl	$2,PCPU(SPENDING);	/* statclock = SPENDING bit 1 */
	jmp	10f
1:
	incl	TD_INTR_NESTING_LEVEL(%ebx)
	call	forwarded_statclock
	decl	TD_INTR_NESTING_LEVEL(%ebx)
10:
	MEXITCOUNT
	jmp	doreti
430
/*
 * Executed by a CPU when it receives an Xcpuast IPI from another CPU,
 *
 * The other CPU has already executed aston() or need_resched() on our
 * current process, so we simply need to ack the interrupt and return
 * via doreti to run ast().
 */

	.text
	SUPERALIGN_TEXT
	.globl Xcpuast
Xcpuast:
	PUSH_FRAME
	movl	$KDSEL, %eax
	mov	%ax, %ds		/* use KERNEL data segment */
	mov	%ax, %es
	movl	$KPSEL, %eax
	mov	%ax, %fs

	movl	$0, lapic+LA_EOI	/* End Of Interrupt to APIC */

	FAKE_MCOUNT(13*4(%esp))

	MEXITCOUNT
	jmp	doreti			/* doreti notices and runs ast() */
456
/*
 * Executed by a CPU when it receives an Xcpustop IPI from another CPU,
 *
 *  - Signals its receipt.
 *  - Waits for permission to restart.
 *  - Signals its restart.
 *
 * Saves this CPU's context into its stoppcbs[] slot, spins until the
 * initiator sets our bit in started_cpus, then clears both rendezvous
 * bits and (on the BSP only) runs any one-shot cpustop_restartfunc.
 * Only the registers actually used here (%eax/%ecx/%edx, %ds, %fs,
 * %ebp) are saved and restored.
 */
	.text
	SUPERALIGN_TEXT
	.globl Xcpustop
Xcpustop:
	pushl	%ebp
	movl	%esp, %ebp
	pushl	%eax
	pushl	%ecx
	pushl	%edx
	pushl	%ds			/* save current data segment */
	pushl	%fs

	movl	$KDSEL, %eax
	mov	%ax, %ds		/* use KERNEL data segment */
	movl	$KPSEL, %eax
	mov	%ax, %fs

	movl	$0, lapic+LA_EOI	/* End Of Interrupt to APIC */

	movl	PCPU(CPUID), %eax
	imull	$PCB_SIZE, %eax		/* index our stoppcbs[] slot */
	leal	CNAME(stoppcbs)(%eax), %eax
	pushl	%eax
	call	CNAME(savectx)		/* Save process context */
	addl	$4, %esp

	movl	PCPU(CPUID), %eax	/* %eax = our cpu id from here on */

	lock
	btsl	%eax, CNAME(stopped_cpus) /* stopped_cpus |= (1<<id) */
1:
	btl	%eax, CNAME(started_cpus) /* while (!(started_cpus & (1<<id))) */
	jnc	1b

	lock
	btrl	%eax, CNAME(started_cpus) /* started_cpus &= ~(1<<id) */
	lock
	btrl	%eax, CNAME(stopped_cpus) /* stopped_cpus &= ~(1<<id) */

	test	%eax, %eax		/* BSP (cpu 0)? */
	jnz	2f

	movl	CNAME(cpustop_restartfunc), %eax
	test	%eax, %eax		/* restart hook registered? */
	jz	2f
	movl	$0, CNAME(cpustop_restartfunc)	/* One-shot */

	call	*%eax
2:
	popl	%fs
	popl	%ds			/* restore previous data segment */
	popl	%edx
	popl	%ecx
	popl	%eax
	movl	%ebp, %esp
	popl	%ebp
	iret
521
522
/* Start of the profiled hardware-interrupt region (ends at eintr). */
MCOUNT_LABEL(bintr)
/* Fast (non-threaded) interrupt entry points for IRQs 0-31. */
	FAST_INTR(0,fastintr0)
	FAST_INTR(1,fastintr1)
	FAST_INTR(2,fastintr2)
	FAST_INTR(3,fastintr3)
	FAST_INTR(4,fastintr4)
	FAST_INTR(5,fastintr5)
	FAST_INTR(6,fastintr6)
	FAST_INTR(7,fastintr7)
	FAST_INTR(8,fastintr8)
	FAST_INTR(9,fastintr9)
	FAST_INTR(10,fastintr10)
	FAST_INTR(11,fastintr11)
	FAST_INTR(12,fastintr12)
	FAST_INTR(13,fastintr13)
	FAST_INTR(14,fastintr14)
	FAST_INTR(15,fastintr15)
	FAST_INTR(16,fastintr16)
	FAST_INTR(17,fastintr17)
	FAST_INTR(18,fastintr18)
	FAST_INTR(19,fastintr19)
	FAST_INTR(20,fastintr20)
	FAST_INTR(21,fastintr21)
	FAST_INTR(22,fastintr22)
	FAST_INTR(23,fastintr23)
	FAST_INTR(24,fastintr24)
	FAST_INTR(25,fastintr25)
	FAST_INTR(26,fastintr26)
	FAST_INTR(27,fastintr27)
	FAST_INTR(28,fastintr28)
	FAST_INTR(29,fastintr29)
	FAST_INTR(30,fastintr30)
	FAST_INTR(31,fastintr31)
/* IRQ0 (the clock) additionally flags clkintr_pending on entry. */
#define	CLKINTR_PENDING	movl $1,CNAME(clkintr_pending)
/* Threaded interrupts: entry points for IRQs 0-31 (scheduled via sched_ithd) */
	INTR(0,intr0, CLKINTR_PENDING)
	INTR(1,intr1,)
	INTR(2,intr2,)
	INTR(3,intr3,)
	INTR(4,intr4,)
	INTR(5,intr5,)
	INTR(6,intr6,)
	INTR(7,intr7,)
	INTR(8,intr8,)
	INTR(9,intr9,)
	INTR(10,intr10,)
	INTR(11,intr11,)
	INTR(12,intr12,)
	INTR(13,intr13,)
	INTR(14,intr14,)
	INTR(15,intr15,)
	INTR(16,intr16,)
	INTR(17,intr17,)
	INTR(18,intr18,)
	INTR(19,intr19,)
	INTR(20,intr20,)
	INTR(21,intr21,)
	INTR(22,intr22,)
	INTR(23,intr23,)
	INTR(24,intr24,)
	INTR(25,intr25,)
	INTR(26,intr26,)
	INTR(27,intr27,)
	INTR(28,intr28,)
	INTR(29,intr29,)
	INTR(30,intr30,)
	INTR(31,intr31,)
590
/* Deferred-fast-interrupt restart points for IRQs 0-31 (see FAST_UNPEND). */
	FAST_UNPEND(0,fastunpend0)
	FAST_UNPEND(1,fastunpend1)
	FAST_UNPEND(2,fastunpend2)
	FAST_UNPEND(3,fastunpend3)
	FAST_UNPEND(4,fastunpend4)
	FAST_UNPEND(5,fastunpend5)
	FAST_UNPEND(6,fastunpend6)
	FAST_UNPEND(7,fastunpend7)
	FAST_UNPEND(8,fastunpend8)
	FAST_UNPEND(9,fastunpend9)
	FAST_UNPEND(10,fastunpend10)
	FAST_UNPEND(11,fastunpend11)
	FAST_UNPEND(12,fastunpend12)
	FAST_UNPEND(13,fastunpend13)
	FAST_UNPEND(14,fastunpend14)
	FAST_UNPEND(15,fastunpend15)
	FAST_UNPEND(16,fastunpend16)
	FAST_UNPEND(17,fastunpend17)
	FAST_UNPEND(18,fastunpend18)
	FAST_UNPEND(19,fastunpend19)
	FAST_UNPEND(20,fastunpend20)
	FAST_UNPEND(21,fastunpend21)
	FAST_UNPEND(22,fastunpend22)
	FAST_UNPEND(23,fastunpend23)
	FAST_UNPEND(24,fastunpend24)
	FAST_UNPEND(25,fastunpend25)
	FAST_UNPEND(26,fastunpend26)
	FAST_UNPEND(27,fastunpend27)
	FAST_UNPEND(28,fastunpend28)
	FAST_UNPEND(29,fastunpend29)
	FAST_UNPEND(30,fastunpend30)
	FAST_UNPEND(31,fastunpend31)
/* End of the profiled hardware-interrupt region. */
MCOUNT_LABEL(eintr)
624
/*
 * Executed by a CPU when it receives a RENDEZVOUS IPI from another CPU.
 *
 * - Calls the generic rendezvous action function.
 *
 * Unlike the other IPI handlers above, this one returns with a plain
 * iret rather than through doreti.
 */
	.text
	SUPERALIGN_TEXT
	.globl	Xrendezvous
Xrendezvous:
	PUSH_FRAME
	movl	$KDSEL, %eax
	mov	%ax, %ds		/* use KERNEL data segment */
	mov	%ax, %es
	movl	$KPSEL, %eax
	mov	%ax, %fs

	call	smp_rendezvous_action

	movl	$0, lapic+LA_EOI	/* End Of Interrupt to APIC */
	POP_FRAME
	iret
646
647
	.data

/*
 * Bitmask with one bit set per level-triggered I/O APIC pin; consulted
 * by MASK_LEVEL_IRQ/UNMASK_LEVEL_IRQ above.  Starts all-edge (zero).
 */
	.globl	apic_pin_trigger
apic_pin_trigger:
	.long	0

	.text
655