/*
 *	from: vector.s, 386BSD 0.1 unknown origin
 * $FreeBSD: head/sys/i386/i386/apic_vector.s 112993 2003-04-02 23:53:30Z peter $
 */

#include "opt_swtch.h"

#include <machine/apic.h>
#include <machine/smp.h>

/* convert an absolute IRQ# into a bitmask */
#define IRQ_BIT(irq_num)	(1 << (irq_num))

/* make an index into the IO APIC from the IRQ# */
#define REDTBL_IDX(irq_num)	(0x10 + ((irq_num) * 2))

/*
 * Macros to push and pop an interrupt frame: a dummy error code and trap
 * type, the eight general registers (pushal), and %ds/%es/%fs.
 */
#define PUSH_FRAME							\
	pushl	$0 ;		/* dummy error code */			\
	pushl	$0 ;		/* dummy trap type */			\
	pushal ;		/* 8 ints */				\
	pushl	%ds ;		/* save data and extra segments ... */	\
	pushl	%es ;							\
	pushl	%fs

#define PUSH_DUMMY							\
	pushfl ;		/* eflags */				\
	pushl	%cs ;		/* cs */				\
	pushl	12(%esp) ;	/* original caller eip */		\
	pushl	$0 ;		/* dummy error code */			\
	pushl	$0 ;		/* dummy trap type */			\
	subl	$11*4,%esp ;

#define POP_FRAME							\
	popl	%fs ;							\
	popl	%es ;							\
	popl	%ds ;							\
	popal ;								\
	addl	$4+4,%esp

#define POP_DUMMY							\
	addl	$16*4,%esp

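/*
 * Frame layout sketch, counting up from %esp after PUSH_FRAME: %fs, %es,
 * %ds, the eight pushal registers, trap type, error code, then the
 * hardware-pushed %eip/%cs/%eflags.  The saved %eip therefore sits at
 * 13*4(%esp), which is the offset handed to FAKE_MCOUNT() below.
 * PUSH_DUMMY reserves the same 11 register/segment slots with
 * "subl $11*4,%esp", so POP_DUMMY discards 16 slots in total
 * (5 pushed + 11 reserved).
 */
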
#define IOAPICADDR(irq_num) CNAME(int_to_apicintpin) + 16 * (irq_num) + 8
#define REDIRIDX(irq_num) CNAME(int_to_apicintpin) + 16 * (irq_num) + 12

#define MASK_IRQ(irq_num)						\
	ICU_LOCK ;			/* enter critical region */	\
	testl	$IRQ_BIT(irq_num), apic_imen ;				\
	jne	7f ;			/* masked, don't mask */	\
	orl	$IRQ_BIT(irq_num), apic_imen ;	/* set the mask bit */	\
	movl	IOAPICADDR(irq_num), %ecx ;	/* ioapic addr */	\
	movl	REDIRIDX(irq_num), %eax ;	/* get the index */	\
	movl	%eax, (%ecx) ;			/* write the index */	\
	movl	IOAPIC_WINDOW(%ecx), %eax ;	/* current value */	\
	orl	$IOART_INTMASK, %eax ;		/* set the mask */	\
	movl	%eax, IOAPIC_WINDOW(%ecx) ;	/* new value */		\
7: ;						/* already masked */	\
	ICU_UNLOCK
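
/*
 * Note on the access pattern: the I/O APIC is programmed indirectly.
 * MASK_IRQ above (and UNMASK_IRQ below) store the redirection-entry
 * index into the chip's index register at the base address, then
 * read-modify-write that entry through the data window at
 * IOAPIC_WINDOW(base), toggling its IOART_INTMASK bit.
 */
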
/*
 * Test to see whether we are handling an edge- or level-triggered INT.
 *  Level-triggered INTs must still be masked, since we do not clear the
 *  interrupt source and the EOI cycle would cause redundant INTs to occur.
 */
#define MASK_LEVEL_IRQ(irq_num)						\
	testl	$IRQ_BIT(irq_num), apic_pin_trigger ;			\
	jz	9f ;				/* edge, don't mask */	\
	MASK_IRQ(irq_num) ;						\
9:


#ifdef APIC_INTR_REORDER
#define EOI_IRQ(irq_num)						\
	movl	apic_isrbit_location + 8 * (irq_num), %eax ;		\
	movl	(%eax), %eax ;						\
	testl	apic_isrbit_location + 4 + 8 * (irq_num), %eax ;	\
	jz	9f ;				/* not active */	\
	movl	$0, lapic+LA_EOI ;					\
9:

#else
#define EOI_IRQ(irq_num)						\
	testl	$IRQ_BIT(irq_num), lapic+LA_ISR1 ;			\
	jz	9f ;				/* not active */	\
	movl	$0, lapic+LA_EOI ;					\
9:
#endif


/*
 * Test to see whether the source is currently masked; if so, unmask it.
 */
#define UNMASK_IRQ(irq_num)						\
	ICU_LOCK ;			/* enter critical region */	\
	testl	$IRQ_BIT(irq_num), apic_imen ;				\
	je	7f ;			/* bit clear, not masked */	\
	andl	$~IRQ_BIT(irq_num), apic_imen ;/* clear mask bit */	\
	movl	IOAPICADDR(irq_num), %ecx ;	/* ioapic addr */	\
	movl	REDIRIDX(irq_num), %eax ;	/* get the index */	\
	movl	%eax, (%ecx) ;			/* write the index */	\
	movl	IOAPIC_WINDOW(%ecx), %eax ;	/* current value */	\
	andl	$~IOART_INTMASK, %eax ;		/* clear the mask */	\
	movl	%eax, IOAPIC_WINDOW(%ecx) ;	/* new value */		\
7: ;						/* already unmasked */	\
	ICU_UNLOCK

/*
 * Test to see whether we are handling an edge- or level-triggered INT.
 *  Level-triggered INTs have to be unmasked.
 */
#define UNMASK_LEVEL_IRQ(irq_num)					\
	testl	$IRQ_BIT(irq_num), apic_pin_trigger ;			\
	jz	9f ;			/* edge, don't unmask */	\
	UNMASK_IRQ(irq_num) ;						\
9:

/*
 * Macros for interrupt entry, call to handler, and exit.
 */

#define	FAST_INTR(irq_num, vec_name)					\
	.text ;								\
	SUPERALIGN_TEXT ;						\
IDTVEC(vec_name) ;							\
	PUSH_FRAME ;							\
	movl	$KDSEL,%eax ;						\
	mov	%ax,%ds ;						\
	mov	%ax,%es ;						\
	movl	$KPSEL,%eax ;						\
	mov	%ax,%fs ;						\
	FAKE_MCOUNT(13*4(%esp)) ;					\
	movl	PCPU(CURTHREAD),%ebx ;					\
	cmpl	$0,TD_CRITNEST(%ebx) ;					\
	je	1f ;							\
;									\
	movl	$1,PCPU(INT_PENDING) ;					\
	orl	$IRQ_BIT(irq_num),PCPU(FPENDING) ;			\
	MASK_LEVEL_IRQ(irq_num) ;					\
	movl	$0, lapic+LA_EOI ;					\
	jmp	10f ;							\
1: ;									\
	incl	TD_CRITNEST(%ebx) ;					\
	incl	TD_INTR_NESTING_LEVEL(%ebx) ;				\
	pushl	intr_unit + (irq_num) * 4 ;				\
	call	*intr_handler + (irq_num) * 4 ;	/* do the work ASAP */	\
	addl	$4, %esp ;						\
	movl	$0, lapic+LA_EOI ;					\
	lock ;								\
	incl	cnt+V_INTR ;	/* book-keeping can wait */		\
	movl	intr_countp + (irq_num) * 4, %eax ;			\
	lock ;								\
	incl	(%eax) ;						\
	decl	TD_CRITNEST(%ebx) ;					\
	cmpl	$0,PCPU(INT_PENDING) ;					\
	je	2f ;							\
;									\
	call	i386_unpend ;						\
2: ;									\
	decl	TD_INTR_NESTING_LEVEL(%ebx) ;				\
10: ;									\
	MEXITCOUNT ;							\
	jmp	doreti

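/*
 * For reference, the FAST_INTR body above follows roughly this C-like
 * sketch (explanatory only, names as in the assembly):
 *
 *	if (curthread->td_critnest != 0) {
 *		int_pending = 1;		// defer to i386_unpend()
 *		fpending |= 1 << irq_num;
 *		MASK_LEVEL_IRQ(irq_num);	// keep a level source quiet
 *		EOI();
 *	} else {
 *		td_critnest++; td_intr_nesting_level++;
 *		intr_handler[irq_num](intr_unit[irq_num]);
 *		EOI(); cnt.v_intr++; (*intr_countp[irq_num])++;
 *		td_critnest--;
 *		if (int_pending)
 *			i386_unpend();
 *		td_intr_nesting_level--;
 *	}
 *	doreti();
 */
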
/*
 * Restart a fast interrupt that was held up by a critical section.
 * This routine is called from unpend().  unpend() ensures we are
 * in a critical section and deals with the interrupt nesting level
 * for us.  If we previously masked the irq, we have to unmask it.
 *
 * We have a choice.  We can regenerate the irq using the 'int'
 * instruction or we can create a dummy frame and call the interrupt
 * handler directly.  I've chosen to use the dummy-frame method.
 */
#define	FAST_UNPEND(irq_num, vec_name)					\
	.text ;								\
	SUPERALIGN_TEXT ;						\
IDTVEC(vec_name) ;							\
;									\
	pushl	%ebp ;							\
	movl	%esp, %ebp ;						\
	PUSH_DUMMY ;							\
	pushl	intr_unit + (irq_num) * 4 ;				\
	call	*intr_handler + (irq_num) * 4 ;	/* do the work ASAP */	\
	addl	$4, %esp ;						\
	lock ;								\
	incl	cnt+V_INTR ;	/* book-keeping can wait */		\
	movl	intr_countp + (irq_num) * 4, %eax ;			\
	lock ;								\
	incl	(%eax) ;						\
	UNMASK_LEVEL_IRQ(irq_num) ;					\
	POP_DUMMY ;							\
	popl	%ebp ;							\
	ret ;								\


/*
 * Slow, threaded interrupts.
 *
 * XXX Most of the parameters here are obsolete.  Fix this when we're
 * done.
 * XXX we really shouldn't return via doreti if we just schedule the
 * interrupt handler and don't run anything.  We could just do an
 * iret.  FIXME.
 */
#define	INTR(irq_num, vec_name, maybe_extra_ipending)			\
	.text ;								\
	SUPERALIGN_TEXT ;						\
/* _XintrNN: entry point used by IDT/HWIs via _vec[]. */		\
IDTVEC(vec_name) ;							\
	PUSH_FRAME ;							\
	movl	$KDSEL, %eax ;	/* reload with kernel's data segment */	\
	mov	%ax, %ds ;						\
	mov	%ax, %es ;						\
	movl	$KPSEL, %eax ;						\
	mov	%ax, %fs ;						\
;									\
	maybe_extra_ipending ;						\
;									\
	MASK_LEVEL_IRQ(irq_num) ;					\
	EOI_IRQ(irq_num) ;						\
;									\
	movl	PCPU(CURTHREAD),%ebx ;					\
	cmpl	$0,TD_CRITNEST(%ebx) ;					\
	je	1f ;							\
	movl	$1,PCPU(INT_PENDING) ;					\
	orl	$IRQ_BIT(irq_num),PCPU(IPENDING) ;			\
	jmp	10f ;							\
1: ;									\
	incl	TD_INTR_NESTING_LEVEL(%ebx) ;				\
;									\
	FAKE_MCOUNT(13*4(%esp)) ;		/* XXX avoid dbl cnt */ \
	cmpl	$0,PCPU(INT_PENDING) ;					\
	je	9f ;							\
	call	i386_unpend ;						\
9: ;									\
	pushl	$irq_num ;			/* pass the IRQ */	\
	call	sched_ithd ;						\
	addl	$4, %esp ;		/* discard the parameter */	\
;									\
	decl	TD_INTR_NESTING_LEVEL(%ebx) ;				\
10: ;									\
	MEXITCOUNT ;							\
	jmp	doreti

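/*
 * In outline: INTR masks a level-triggered source and issues the EOI up
 * front, because the threaded handler runs later and the source must
 * stay quiet until then.  It then either records the IRQ in
 * PCPU(IPENDING) when the CPU is inside a critical section, or calls
 * sched_ithd() to schedule the interrupt thread, and exits via doreti.
 */
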
/*
 * Handle "spurious INTerrupts".
 * Notes:
 *  This is different from the "spurious INTerrupt" generated by an
 *   8259 PIC for missing INTs.  See the APIC documentation for details.
 *  This routine should NOT do an 'EOI' cycle.
 */
	.text
	SUPERALIGN_TEXT
	.globl Xspuriousint
Xspuriousint:

	/* No EOI cycle used here */

	iret

/*
 * Global address space TLB shootdown.
 */
	.text
	SUPERALIGN_TEXT
	.globl	Xinvltlb
Xinvltlb:
	pushl	%eax
	pushl	%ds
	movl	$KDSEL, %eax		/* Kernel data selector */
	mov	%ax, %ds

#ifdef COUNT_XINVLTLB_HITS
	pushl	%fs
	movl	$KPSEL, %eax		/* Private space selector */
	mov	%ax, %fs
	movl	PCPU(CPUID), %eax
	popl	%fs
	incl	xhits_gbl(,%eax,4)
#endif /* COUNT_XINVLTLB_HITS */

	movl	%cr3, %eax		/* invalidate the TLB */
	movl	%eax, %cr3

	movl	$0, lapic+LA_EOI	/* End Of Interrupt to APIC */

	lock
	incl	smp_tlb_wait

	popl	%ds
	popl	%eax
	iret

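/*
 * Note: rewriting %cr3 with its current value flushes the TLB (all
 * non-global entries).  Each shootdown handler acknowledges with an EOI
 * and then atomically bumps smp_tlb_wait so the initiating CPU can tell
 * when every target has completed.
 */
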
/*
 * Single page TLB shootdown.
 */
	.text
	SUPERALIGN_TEXT
	.globl	Xinvlpg
Xinvlpg:
	pushl	%eax
	pushl	%ds
	movl	$KDSEL, %eax		/* Kernel data selector */
	mov	%ax, %ds

#ifdef COUNT_XINVLTLB_HITS
	pushl	%fs
	movl	$KPSEL, %eax		/* Private space selector */
	mov	%ax, %fs
	movl	PCPU(CPUID), %eax
	popl	%fs
	incl	xhits_pg(,%eax,4)
#endif /* COUNT_XINVLTLB_HITS */

	movl	smp_tlb_addr1, %eax
	invlpg	(%eax)			/* invalidate single page */

	movl	$0, lapic+LA_EOI	/* End Of Interrupt to APIC */

	lock
	incl	smp_tlb_wait

	popl	%ds
	popl	%eax
	iret

/*
 * Page range TLB shootdown.
 */
	.text
	SUPERALIGN_TEXT
	.globl	Xinvlrng
Xinvlrng:
	pushl	%eax
	pushl	%edx
	pushl	%ds
	movl	$KDSEL, %eax		/* Kernel data selector */
	mov	%ax, %ds

#ifdef COUNT_XINVLTLB_HITS
	pushl	%fs
	movl	$KPSEL, %eax		/* Private space selector */
	mov	%ax, %fs
	movl	PCPU(CPUID), %eax
	popl	%fs
	incl	xhits_rng(,%eax,4)
#endif /* COUNT_XINVLTLB_HITS */

	movl	smp_tlb_addr1, %edx
	movl	smp_tlb_addr2, %eax
1:	invlpg	(%edx)			/* invalidate single page */
	addl	$PAGE_SIZE, %edx
	cmpl	%eax, %edx
	jb	1b

	movl	$0, lapic+LA_EOI	/* End Of Interrupt to APIC */

	lock
	incl	smp_tlb_wait

	popl	%ds
	popl	%edx
	popl	%eax
	iret

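/*
 * The loop above is, in C terms:
 *
 *	for (addr = smp_tlb_addr1; addr < smp_tlb_addr2; addr += PAGE_SIZE)
 *		invlpg(addr);
 *
 * i.e. smp_tlb_addr2 is an exclusive upper bound.
 */
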
/*
 * Forward hardclock to another CPU.  Pushes a clockframe and calls
 * forwarded_hardclock().
 */
	.text
	SUPERALIGN_TEXT
	.globl Xhardclock
Xhardclock:
	PUSH_FRAME
	movl	$KDSEL, %eax	/* reload with kernel's data segment */
	mov	%ax, %ds
	mov	%ax, %es
	movl	$KPSEL, %eax
	mov	%ax, %fs

	movl	$0, lapic+LA_EOI	/* End Of Interrupt to APIC */

	movl	PCPU(CURTHREAD),%ebx
	cmpl	$0,TD_CRITNEST(%ebx)
	je	1f
	movl	$1,PCPU(INT_PENDING)
	orl	$1,PCPU(SPENDING)
	jmp	10f
1:
	incl	TD_INTR_NESTING_LEVEL(%ebx)
	pushl	$0		/* XXX convert trapframe to clockframe */
	call	forwarded_hardclock
	addl	$4, %esp	/* XXX convert clockframe to trapframe */
	decl	TD_INTR_NESTING_LEVEL(%ebx)
10:
	MEXITCOUNT
	jmp	doreti

/*
 * Forward statclock to another CPU.  Pushes a clockframe and calls
 * forwarded_statclock().
 */
	.text
	SUPERALIGN_TEXT
	.globl Xstatclock
Xstatclock:
	PUSH_FRAME
	movl	$KDSEL, %eax	/* reload with kernel's data segment */
	mov	%ax, %ds
	mov	%ax, %es
	movl	$KPSEL, %eax
	mov	%ax, %fs

	movl	$0, lapic+LA_EOI	/* End Of Interrupt to APIC */

	FAKE_MCOUNT(13*4(%esp))

	movl	PCPU(CURTHREAD),%ebx
	cmpl	$0,TD_CRITNEST(%ebx)
	je	1f
	movl	$1,PCPU(INT_PENDING)
	orl	$2,PCPU(SPENDING)
	jmp	10f
1:
	incl	TD_INTR_NESTING_LEVEL(%ebx)
	pushl	$0		/* XXX convert trapframe to clockframe */
	call	forwarded_statclock
	addl	$4, %esp	/* XXX convert clockframe to trapframe */
	decl	TD_INTR_NESTING_LEVEL(%ebx)
10:
	MEXITCOUNT
	jmp	doreti

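/*
 * The two forwarded-clock handlers share PCPU(SPENDING): bit 0 records
 * a deferred hardclock and bit 1 a deferred statclock, matching the
 * "orl $1" and "orl $2" above.
 */
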
/*
 * Executed by a CPU when it receives an Xcpuast IPI from another CPU.
 *
 * The other CPU has already executed aston() or need_resched() on our
 * current process, so we simply need to ack the interrupt and return
 * via doreti to run ast().
 */

	.text
	SUPERALIGN_TEXT
	.globl Xcpuast
Xcpuast:
	PUSH_FRAME
	movl	$KDSEL, %eax
	mov	%ax, %ds		/* use KERNEL data segment */
	mov	%ax, %es
	movl	$KPSEL, %eax
	mov	%ax, %fs

	movl	$0, lapic+LA_EOI	/* End Of Interrupt to APIC */

	FAKE_MCOUNT(13*4(%esp))

	MEXITCOUNT
	jmp	doreti

/*
 * Executed by a CPU when it receives an Xcpustop IPI from another CPU.
 *
 *  - Signals its receipt.
 *  - Waits for permission to restart.
 *  - Signals its restart.
 */
	.text
	SUPERALIGN_TEXT
	.globl Xcpustop
Xcpustop:
	pushl	%ebp
	movl	%esp, %ebp
	pushl	%eax
	pushl	%ecx
	pushl	%edx
	pushl	%ds			/* save current data segment */
	pushl	%fs

	movl	$KDSEL, %eax
	mov	%ax, %ds		/* use KERNEL data segment */
	movl	$KPSEL, %eax
	mov	%ax, %fs

	movl	$0, lapic+LA_EOI	/* End Of Interrupt to APIC */

	movl	PCPU(CPUID), %eax
	imull	$PCB_SIZE, %eax
	leal	CNAME(stoppcbs)(%eax), %eax
	pushl	%eax
	call	CNAME(savectx)		/* Save process context */
	addl	$4, %esp

	movl	PCPU(CPUID), %eax

	lock
	btsl	%eax, CNAME(stopped_cpus) /* stopped_cpus |= (1<<id) */
1:
	btl	%eax, CNAME(started_cpus) /* while (!(started_cpus & (1<<id))) */
	jnc	1b

	lock
	btrl	%eax, CNAME(started_cpus) /* started_cpus &= ~(1<<id) */
	lock
	btrl	%eax, CNAME(stopped_cpus) /* stopped_cpus &= ~(1<<id) */

	test	%eax, %eax
	jnz	2f

	movl	CNAME(cpustop_restartfunc), %eax
	test	%eax, %eax
	jz	2f
	movl	$0, CNAME(cpustop_restartfunc)	/* One-shot */

	call	*%eax
2:
	popl	%fs
	popl	%ds			/* restore previous data segment */
	popl	%edx
	popl	%ecx
	popl	%eax
	movl	%ebp, %esp
	popl	%ebp
	iret


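/*
 * Protocol sketch: each stopped CPU saves its context into
 * stoppcbs[cpuid], sets its bit in stopped_cpus, and spins until the
 * initiator sets the matching bit in started_cpus; it then clears both
 * bits.  The "test %eax, %eax" above means only CPU 0 consumes the
 * one-shot cpustop_restartfunc hook before returning.
 */
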
MCOUNT_LABEL(bintr)
	FAST_INTR(0,fastintr0)
	FAST_INTR(1,fastintr1)
	FAST_INTR(2,fastintr2)
	FAST_INTR(3,fastintr3)
	FAST_INTR(4,fastintr4)
	FAST_INTR(5,fastintr5)
	FAST_INTR(6,fastintr6)
	FAST_INTR(7,fastintr7)
	FAST_INTR(8,fastintr8)
	FAST_INTR(9,fastintr9)
	FAST_INTR(10,fastintr10)
	FAST_INTR(11,fastintr11)
	FAST_INTR(12,fastintr12)
	FAST_INTR(13,fastintr13)
	FAST_INTR(14,fastintr14)
	FAST_INTR(15,fastintr15)
	FAST_INTR(16,fastintr16)
	FAST_INTR(17,fastintr17)
	FAST_INTR(18,fastintr18)
	FAST_INTR(19,fastintr19)
	FAST_INTR(20,fastintr20)
	FAST_INTR(21,fastintr21)
	FAST_INTR(22,fastintr22)
	FAST_INTR(23,fastintr23)
	FAST_INTR(24,fastintr24)
	FAST_INTR(25,fastintr25)
	FAST_INTR(26,fastintr26)
	FAST_INTR(27,fastintr27)
	FAST_INTR(28,fastintr28)
	FAST_INTR(29,fastintr29)
	FAST_INTR(30,fastintr30)
	FAST_INTR(31,fastintr31)
#define	CLKINTR_PENDING	movl $1,CNAME(clkintr_pending)
/* Threaded interrupts */
	INTR(0,intr0, CLKINTR_PENDING)
	INTR(1,intr1,)
	INTR(2,intr2,)
	INTR(3,intr3,)
	INTR(4,intr4,)
	INTR(5,intr5,)
	INTR(6,intr6,)
	INTR(7,intr7,)
	INTR(8,intr8,)
	INTR(9,intr9,)
	INTR(10,intr10,)
	INTR(11,intr11,)
	INTR(12,intr12,)
	INTR(13,intr13,)
	INTR(14,intr14,)
	INTR(15,intr15,)
	INTR(16,intr16,)
	INTR(17,intr17,)
	INTR(18,intr18,)
	INTR(19,intr19,)
	INTR(20,intr20,)
	INTR(21,intr21,)
	INTR(22,intr22,)
	INTR(23,intr23,)
	INTR(24,intr24,)
	INTR(25,intr25,)
	INTR(26,intr26,)
	INTR(27,intr27,)
	INTR(28,intr28,)
	INTR(29,intr29,)
	INTR(30,intr30,)
	INTR(31,intr31,)

	FAST_UNPEND(0,fastunpend0)
	FAST_UNPEND(1,fastunpend1)
	FAST_UNPEND(2,fastunpend2)
	FAST_UNPEND(3,fastunpend3)
	FAST_UNPEND(4,fastunpend4)
	FAST_UNPEND(5,fastunpend5)
	FAST_UNPEND(6,fastunpend6)
	FAST_UNPEND(7,fastunpend7)
	FAST_UNPEND(8,fastunpend8)
	FAST_UNPEND(9,fastunpend9)
	FAST_UNPEND(10,fastunpend10)
	FAST_UNPEND(11,fastunpend11)
	FAST_UNPEND(12,fastunpend12)
	FAST_UNPEND(13,fastunpend13)
	FAST_UNPEND(14,fastunpend14)
	FAST_UNPEND(15,fastunpend15)
	FAST_UNPEND(16,fastunpend16)
	FAST_UNPEND(17,fastunpend17)
	FAST_UNPEND(18,fastunpend18)
	FAST_UNPEND(19,fastunpend19)
	FAST_UNPEND(20,fastunpend20)
	FAST_UNPEND(21,fastunpend21)
	FAST_UNPEND(22,fastunpend22)
	FAST_UNPEND(23,fastunpend23)
	FAST_UNPEND(24,fastunpend24)
	FAST_UNPEND(25,fastunpend25)
	FAST_UNPEND(26,fastunpend26)
	FAST_UNPEND(27,fastunpend27)
	FAST_UNPEND(28,fastunpend28)
	FAST_UNPEND(29,fastunpend29)
	FAST_UNPEND(30,fastunpend30)
	FAST_UNPEND(31,fastunpend31)
MCOUNT_LABEL(eintr)

/*
 * Executed by a CPU when it receives a RENDEZVOUS IPI from another CPU.
 *
 * - Calls the generic rendezvous action function.
 */
	.text
	SUPERALIGN_TEXT
	.globl	Xrendezvous
Xrendezvous:
	PUSH_FRAME
	movl	$KDSEL, %eax
	mov	%ax, %ds		/* use KERNEL data segment */
	mov	%ax, %es
	movl	$KPSEL, %eax
	mov	%ax, %fs

	call	smp_rendezvous_action

	movl	$0, lapic+LA_EOI	/* End Of Interrupt to APIC */
	POP_FRAME
	iret

#ifdef LAZY_SWITCH
/*
 * Clean up when we lose out on the lazy context switch optimization,
 * i.e. when we are about to release a PTD but a CPU is still borrowing it.
 */
	SUPERALIGN_TEXT
	.globl	Xlazypmap
Xlazypmap:
	PUSH_FRAME
	movl	$KDSEL, %eax
	mov	%ax, %ds		/* use KERNEL data segment */
	mov	%ax, %es
	movl	$KPSEL, %eax
	mov	%ax, %fs

	call	pmap_lazyfix_action

	movl	$0, lapic+LA_EOI	/* End Of Interrupt to APIC */
	POP_FRAME
	iret
#endif

	.data

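/*
 * Bitmask of level-triggered interrupt pins; a set bit marks the
 * corresponding IRQ as level-triggered.  Consulted by MASK_LEVEL_IRQ
 * and UNMASK_LEVEL_IRQ above.
 */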
	.globl	apic_pin_trigger
apic_pin_trigger:
	.long	0

	.text