/*
 *	from: vector.s, 386BSD 0.1 unknown origin
 * $FreeBSD: head/sys/i386/i386/apic_vector.s 91328 2002-02-26 20:33:41Z dillon $
 */


#include <machine/apic.h>
#include <machine/smp.h>

/* convert an absolute IRQ# into a bitmask */
#define IRQ_BIT(irq_num)	(1 << (irq_num))

/* make an index into the IO APIC from the IRQ# */
#define REDTBL_IDX(irq_num)	(0x10 + ((irq_num) * 2))
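/*
 * For example, REDTBL_IDX(5) == 0x10 + 5 * 2 == 0x1a, the register index
 * of the low 32 bits of redirection table entry 5.
 */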

/*
 * Macros to push and pop a trap-style frame: a dummy error code and trap
 * type, the general registers, and the %ds/%es/%fs segment registers.
 */
#define PUSH_FRAME							\
	pushl	$0 ;		/* dummy error code */			\
	pushl	$0 ;		/* dummy trap type */			\
	pushal ;							\
	pushl	%ds ;		/* save data and extra segments ... */	\
	pushl	%es ;							\
	pushl	%fs

#define POP_FRAME							\
	popl	%fs ;							\
	popl	%es ;							\
	popl	%ds ;							\
	popal ;								\
	addl	$4+4,%esp

/*
 * Macros for interrupt entry, call to handler, and exit.
 */

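/*
 * FAST_INTR vectors call the handler directly from interrupt context: load
 * the kernel segment registers, enter a critical section, bump the current
 * thread's interrupt nesting level, call the handler with its intr_unit
 * argument, write the local APIC EOI, and only then update the interrupt
 * counters before leaving through doreti.
 */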
#define	FAST_INTR(irq_num, vec_name)					\
	.text ;								\
	SUPERALIGN_TEXT ;						\
IDTVEC(vec_name) ;							\
	PUSH_FRAME ;							\
	movl	$KDSEL,%eax ;						\
	mov	%ax,%ds ;						\
	mov	%ax,%es ;						\
	movl	$KPSEL,%eax ;						\
	mov	%ax,%fs ;						\
	FAKE_MCOUNT(13*4(%esp)) ;					\
	call	critical_enter ;					\
	movl	PCPU(CURTHREAD),%ebx ;					\
	incl	TD_INTR_NESTING_LEVEL(%ebx) ;				\
	pushl	intr_unit + (irq_num) * 4 ;				\
	call	*intr_handler + (irq_num) * 4 ;	/* do the work ASAP */	\
	addl	$4, %esp ;						\
	movl	$0, lapic+LA_EOI ;					\
	lock ;								\
	incl	cnt+V_INTR ;	/* book-keeping can wait */		\
	movl	intr_countp + (irq_num) * 4, %eax ;			\
	lock ;								\
	incl	(%eax) ;						\
	decl	TD_INTR_NESTING_LEVEL(%ebx) ;				\
	call	critical_exit ;						\
	MEXITCOUNT ;							\
	jmp	doreti

#define IOAPICADDR(irq_num) CNAME(int_to_apicintpin) + 16 * (irq_num) + 8
#define REDIRIDX(irq_num) CNAME(int_to_apicintpin) + 16 * (irq_num) + 12

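/*
 * Each entry of int_to_apicintpin[] is 16 bytes; IOAPICADDR and REDIRIDX
 * fetch the I/O APIC base address and redirection-entry register index for
 * an IRQ.  The I/O APIC is programmed indirectly: the index is written to
 * the select register at the base address and the entry is then read and
 * written through the data window at IOAPIC_WINDOW.  MASK_IRQ sets
 * IOART_INTMASK in the entry (and records the IRQ in apic_imen) unless the
 * source is already masked.
 */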
#define MASK_IRQ(irq_num)						\
	ICU_LOCK ;				/* into critical reg */	\
	testl	$IRQ_BIT(irq_num), apic_imen ;				\
	jne	7f ;			/* masked, don't mask */	\
	orl	$IRQ_BIT(irq_num), apic_imen ;	/* set the mask bit */	\
	movl	IOAPICADDR(irq_num), %ecx ;	/* ioapic addr */	\
	movl	REDIRIDX(irq_num), %eax ;	/* get the index */	\
	movl	%eax, (%ecx) ;			/* write the index */	\
	movl	IOAPIC_WINDOW(%ecx), %eax ;	/* current value */	\
	orl	$IOART_INTMASK, %eax ;		/* set the mask */	\
	movl	%eax, IOAPIC_WINDOW(%ecx) ;	/* new value */		\
7: ;						/* already masked */	\
	ICU_UNLOCK
/*
 * Test to see whether we are handling an edge or level triggered INT.
 *  Level-triggered INTs must still be masked as we don't clear the source,
 *  and the EOI cycle would cause redundant INTs to occur.
 */
#define MASK_LEVEL_IRQ(irq_num)						\
	testl	$IRQ_BIT(irq_num), apic_pin_trigger ;			\
	jz	9f ;				/* edge, don't mask */	\
	MASK_IRQ(irq_num) ;						\
9:


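/*
 * EOI_IRQ issues the local APIC EOI only if this interrupt's in-service
 * bit is still set (looked up via apic_isrbit_location[] when
 * APIC_INTR_REORDER is defined, otherwise tested directly in LA_ISR1);
 * if the bit is already clear the EOI is skipped.
 */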
#ifdef APIC_INTR_REORDER
#define EOI_IRQ(irq_num)						\
	movl	apic_isrbit_location + 8 * (irq_num), %eax ;		\
	movl	(%eax), %eax ;						\
	testl	apic_isrbit_location + 4 + 8 * (irq_num), %eax ;	\
	jz	9f ;				/* not active */	\
	movl	$0, lapic+LA_EOI ;					\
9:

#else
#define EOI_IRQ(irq_num)						\
	testl	$IRQ_BIT(irq_num), lapic+LA_ISR1;			\
	jz	9f	;			/* not active */	\
	movl	$0, lapic+LA_EOI;					\
9:
#endif


/*
 * Test to see if the source is currently masked, clear if so.
 */
#define UNMASK_IRQ(irq_num)					\
	ICU_LOCK ;				/* into critical reg */	\
	testl	$IRQ_BIT(irq_num), apic_imen ;				\
	je	7f ;			/* bit clear, not masked */	\
	andl	$~IRQ_BIT(irq_num), apic_imen ;	/* clear mask bit */	\
	movl	IOAPICADDR(irq_num), %ecx ;	/* ioapic addr */	\
	movl	REDIRIDX(irq_num), %eax ;	/* get the index */	\
	movl	%eax, (%ecx) ;			/* write the index */	\
	movl	IOAPIC_WINDOW(%ecx), %eax ;	/* current value */	\
	andl	$~IOART_INTMASK, %eax ;		/* clear the mask */	\
	movl	%eax, IOAPIC_WINDOW(%ecx) ;	/* new value */		\
7: ;						/* already unmasked */	\
	ICU_UNLOCK

/*
 * Slow, threaded interrupts.
 *
 * XXX Most of the parameters here are obsolete.  Fix this when we're
 * done.
 * XXX we really shouldn't return via doreti if we just schedule the
 * interrupt handler and don't run anything.  We could just do an
 * iret.  FIXME.
 */
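/*
 * INTR only schedules the interrupt thread.  Level-triggered sources are
 * masked in the I/O APIC first (MASK_LEVEL_IRQ), the local APIC is EOId
 * (EOI_IRQ), and sched_ithd() is called with the IRQ number; the handlers
 * themselves run later in the interrupt thread, and the exit path goes
 * through doreti as noted above.
 */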
#define	INTR(irq_num, vec_name, maybe_extra_ipending)			\
	.text ;								\
	SUPERALIGN_TEXT ;						\
/* _XintrNN: entry point used by IDT/HWIs via _vec[]. */		\
IDTVEC(vec_name) ;							\
	PUSH_FRAME ;							\
	movl	$KDSEL, %eax ;	/* reload with kernel's data segment */	\
	mov	%ax, %ds ;						\
	mov	%ax, %es ;						\
	movl	$KPSEL, %eax ;						\
	mov	%ax, %fs ;						\
;									\
	maybe_extra_ipending ;						\
;									\
	MASK_LEVEL_IRQ(irq_num) ;					\
	EOI_IRQ(irq_num) ;						\
0: ;									\
	movl	PCPU(CURTHREAD),%ebx ;					\
	incl	TD_INTR_NESTING_LEVEL(%ebx) ;				\
;									\
	FAKE_MCOUNT(13*4(%esp)) ;		/* XXX avoid dbl cnt */ \
	pushl	$irq_num;			/* pass the IRQ */	\
	call	sched_ithd ;						\
	addl	$4, %esp ;		/* discard the parameter */	\
;									\
	decl	TD_INTR_NESTING_LEVEL(%ebx) ;				\
	MEXITCOUNT ;							\
	jmp	doreti

/*
 * Handle "spurious INTerrupts".
 * Notes:
 *  This is different from the "spurious INTerrupt" generated by an
 *   8259 PIC for missing INTs.  See the APIC documentation for details.
 *  This routine should NOT do an 'EOI' cycle.
 */
	.text
	SUPERALIGN_TEXT
	.globl Xspuriousint
Xspuriousint:

	/* No EOI cycle used here */

	iret

/*
 * Global address space TLB shootdown.
 */
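/*
 * Reloading %cr3 flushes all non-global TLB entries on this CPU.  After
 * the EOI, smp_tlb_wait is incremented atomically so the CPU that sent
 * the IPI can tell when every target has finished.
 */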
	.text
	SUPERALIGN_TEXT
	.globl	Xinvltlb
Xinvltlb:
	pushl	%eax
	pushl	%ds
	movl	$KDSEL, %eax		/* Kernel data selector */
	mov	%ax, %ds

#ifdef COUNT_XINVLTLB_HITS
	pushl	%fs
	movl	$KPSEL, %eax		/* Private space selector */
	mov	%ax, %fs
	movl	PCPU(CPUID), %eax
	popl	%fs
	incl	xhits_gbl(,%eax,4)
#endif /* COUNT_XINVLTLB_HITS */

	movl	%cr3, %eax		/* invalidate the TLB */
	movl	%eax, %cr3

	movl	$0, lapic+LA_EOI	/* End Of Interrupt to APIC */

	lock
	incl	smp_tlb_wait

	popl	%ds
	popl	%eax
	iret

/*
 * Single page TLB shootdown
 */
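/*
 * Invalidates only the TLB entry for the page named by smp_tlb_addr1,
 * using invlpg; the smp_tlb_wait acknowledgment is the same as for
 * Xinvltlb.
 */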
	.text
	SUPERALIGN_TEXT
	.globl	Xinvlpg
Xinvlpg:
	pushl	%eax
	pushl	%ds
	movl	$KDSEL, %eax		/* Kernel data selector */
	mov	%ax, %ds

#ifdef COUNT_XINVLTLB_HITS
	pushl	%fs
	movl	$KPSEL, %eax		/* Private space selector */
	mov	%ax, %fs
	movl	PCPU(CPUID), %eax
	popl	%fs
	ss
	incl	xhits_pg(,%eax,4)
#endif /* COUNT_XINVLTLB_HITS */

	movl	smp_tlb_addr1, %eax
	invlpg	(%eax)			/* invalidate single page */

	movl	$0, lapic+LA_EOI	/* End Of Interrupt to APIC */

	lock
	incl	smp_tlb_wait

	popl	%ds
	popl	%eax
	iret

/*
 * Page range TLB shootdown.
 */
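/*
 * Invalidates the range [smp_tlb_addr1, smp_tlb_addr2) one page at a
 * time: %edx steps by PAGE_SIZE and the loop repeats until it reaches
 * the end address held in %eax.
 */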
	.text
	SUPERALIGN_TEXT
	.globl	Xinvlrng
Xinvlrng:
	pushl	%eax
	pushl	%edx
	pushl	%ds
	movl	$KDSEL, %eax		/* Kernel data selector */
	mov	%ax, %ds

#ifdef COUNT_XINVLTLB_HITS
	pushl	%fs
	movl	$KPSEL, %eax		/* Private space selector */
	mov	%ax, %fs
	movl	PCPU(CPUID), %eax
	popl	%fs
	incl	xhits_rng(,%eax,4)
#endif /* COUNT_XINVLTLB_HITS */

	movl	smp_tlb_addr1, %edx
	movl	smp_tlb_addr2, %eax
1:	invlpg	(%edx)			/* invalidate single page */
	addl	$PAGE_SIZE, %edx
	cmpl	%eax, %edx		/* loop until %edx reaches the end */
	jb	1b

	movl	$0, lapic+LA_EOI	/* End Of Interrupt to APIC */

	lock
	incl	smp_tlb_wait

	popl	%ds
	popl	%edx
	popl	%eax
	iret

/*
 * Forward hardclock to another CPU.  Pushes a trapframe and calls
 * forwarded_hardclock().
 */
	.text
	SUPERALIGN_TEXT
	.globl Xhardclock
Xhardclock:
	PUSH_FRAME
	movl	$KDSEL, %eax	/* reload with kernel's data segment */
	mov	%ax, %ds
	mov	%ax, %es
	movl	$KPSEL, %eax
	mov	%ax, %fs

	movl	$0, lapic+LA_EOI	/* End Of Interrupt to APIC */

	movl	PCPU(CURTHREAD),%ebx
	incl	TD_INTR_NESTING_LEVEL(%ebx)
	call	forwarded_hardclock
	decl	TD_INTR_NESTING_LEVEL(%ebx)
	MEXITCOUNT
	jmp	doreti

/*
 * Forward statclock to another CPU.  Pushes a trapframe and calls
 * forwarded_statclock().
 */
	.text
	SUPERALIGN_TEXT
	.globl Xstatclock
Xstatclock:
	PUSH_FRAME
	movl	$KDSEL, %eax	/* reload with kernel's data segment */
	mov	%ax, %ds
	mov	%ax, %es
	movl	$KPSEL, %eax
	mov	%ax, %fs

	movl	$0, lapic+LA_EOI	/* End Of Interrupt to APIC */

	FAKE_MCOUNT(13*4(%esp))
	movl	PCPU(CURTHREAD),%ebx
	incl	TD_INTR_NESTING_LEVEL(%ebx)
	call	forwarded_statclock
	decl	TD_INTR_NESTING_LEVEL(%ebx)
	MEXITCOUNT
	jmp	doreti

/*
 * Executed by a CPU when it receives an Xcpuast IPI from another CPU.
 *
 * The other CPU has already executed aston() or need_resched() on our
 * current process, so we simply need to ack the interrupt and return
 * via doreti to run ast().
 */

	.text
	SUPERALIGN_TEXT
	.globl Xcpuast
Xcpuast:
	PUSH_FRAME
	movl	$KDSEL, %eax
	mov	%ax, %ds		/* use KERNEL data segment */
	mov	%ax, %es
	movl	$KPSEL, %eax
	mov	%ax, %fs

	movl	$0, lapic+LA_EOI	/* End Of Interrupt to APIC */

	FAKE_MCOUNT(13*4(%esp))

	MEXITCOUNT
	jmp	doreti

/*
 * Executed by a CPU when it receives an Xcpustop IPI from another CPU.
 *
 *  - Signals its receipt.
 *  - Waits for permission to restart.
 *  - Signals its restart.
 */
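/*
 * The handshake uses two CPU bitmasks: this CPU sets its bit in
 * stopped_cpus, spins until its bit appears in started_cpus, and then
 * clears both bits.  Only CPU 0 calls cpustop_restartfunc, and only once,
 * since the pointer is cleared before the indirect call.
 */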
	.text
	SUPERALIGN_TEXT
	.globl Xcpustop
Xcpustop:
	pushl	%ebp
	movl	%esp, %ebp
	pushl	%eax
	pushl	%ecx
	pushl	%edx
	pushl	%ds			/* save current data segment */
	pushl	%fs

	movl	$KDSEL, %eax
	mov	%ax, %ds		/* use KERNEL data segment */
	movl	$KPSEL, %eax
	mov	%ax, %fs

	movl	$0, lapic+LA_EOI	/* End Of Interrupt to APIC */

	movl	PCPU(CPUID), %eax
	imull	$PCB_SIZE, %eax
	leal	CNAME(stoppcbs)(%eax), %eax
	pushl	%eax
	call	CNAME(savectx)		/* Save process context */
	addl	$4, %esp

	movl	PCPU(CPUID), %eax

	lock
	btsl	%eax, CNAME(stopped_cpus) /* stopped_cpus |= (1<<id) */
1:
	btl	%eax, CNAME(started_cpus) /* while (!(started_cpus & (1<<id))) */
	jnc	1b

	lock
	btrl	%eax, CNAME(started_cpus) /* started_cpus &= ~(1<<id) */
	lock
	btrl	%eax, CNAME(stopped_cpus) /* stopped_cpus &= ~(1<<id) */

	test	%eax, %eax		/* only CPU 0 runs the restart function */
	jnz	2f

	movl	CNAME(cpustop_restartfunc), %eax
	test	%eax, %eax
	jz	2f
	movl	$0, CNAME(cpustop_restartfunc)	/* One-shot */

	call	*%eax
2:
	popl	%fs
	popl	%ds			/* restore previous data segment */
	popl	%edx
	popl	%ecx
	popl	%eax
	movl	%ebp, %esp
	popl	%ebp
	iret


MCOUNT_LABEL(bintr)
	FAST_INTR(0,fastintr0)
	FAST_INTR(1,fastintr1)
	FAST_INTR(2,fastintr2)
	FAST_INTR(3,fastintr3)
	FAST_INTR(4,fastintr4)
	FAST_INTR(5,fastintr5)
	FAST_INTR(6,fastintr6)
	FAST_INTR(7,fastintr7)
	FAST_INTR(8,fastintr8)
	FAST_INTR(9,fastintr9)
	FAST_INTR(10,fastintr10)
	FAST_INTR(11,fastintr11)
	FAST_INTR(12,fastintr12)
	FAST_INTR(13,fastintr13)
	FAST_INTR(14,fastintr14)
	FAST_INTR(15,fastintr15)
	FAST_INTR(16,fastintr16)
	FAST_INTR(17,fastintr17)
	FAST_INTR(18,fastintr18)
	FAST_INTR(19,fastintr19)
	FAST_INTR(20,fastintr20)
	FAST_INTR(21,fastintr21)
	FAST_INTR(22,fastintr22)
	FAST_INTR(23,fastintr23)
	FAST_INTR(24,fastintr24)
	FAST_INTR(25,fastintr25)
	FAST_INTR(26,fastintr26)
	FAST_INTR(27,fastintr27)
	FAST_INTR(28,fastintr28)
	FAST_INTR(29,fastintr29)
	FAST_INTR(30,fastintr30)
	FAST_INTR(31,fastintr31)
#define	CLKINTR_PENDING	movl $1,CNAME(clkintr_pending)
/* Threaded interrupts */
	INTR(0,intr0, CLKINTR_PENDING)
	INTR(1,intr1,)
	INTR(2,intr2,)
	INTR(3,intr3,)
	INTR(4,intr4,)
	INTR(5,intr5,)
	INTR(6,intr6,)
	INTR(7,intr7,)
	INTR(8,intr8,)
	INTR(9,intr9,)
	INTR(10,intr10,)
	INTR(11,intr11,)
	INTR(12,intr12,)
	INTR(13,intr13,)
	INTR(14,intr14,)
	INTR(15,intr15,)
	INTR(16,intr16,)
	INTR(17,intr17,)
	INTR(18,intr18,)
	INTR(19,intr19,)
	INTR(20,intr20,)
	INTR(21,intr21,)
	INTR(22,intr22,)
	INTR(23,intr23,)
	INTR(24,intr24,)
	INTR(25,intr25,)
	INTR(26,intr26,)
	INTR(27,intr27,)
	INTR(28,intr28,)
	INTR(29,intr29,)
	INTR(30,intr30,)
	INTR(31,intr31,)
MCOUNT_LABEL(eintr)

/*
 * Executed by a CPU when it receives a RENDEZVOUS IPI from another CPU.
 *
 * - Calls the generic rendezvous action function.
 */
	.text
	SUPERALIGN_TEXT
	.globl	Xrendezvous
Xrendezvous:
	PUSH_FRAME
	movl	$KDSEL, %eax
	mov	%ax, %ds		/* use KERNEL data segment */
	mov	%ax, %es
	movl	$KPSEL, %eax
	mov	%ax, %fs

	call	smp_rendezvous_action

	movl	$0, lapic+LA_EOI	/* End Of Interrupt to APIC */
	POP_FRAME
	iret


	.data

	.globl	apic_pin_trigger
apic_pin_trigger:
	.long	0

	.text
