/*-
 * apic_vector.s, revision 117372
 *	from: vector.s, 386BSD 0.1 unknown origin
 * $FreeBSD: head/sys/i386/i386/apic_vector.s 117372 2003-07-10 01:02:59Z peter $
 */

#include <machine/apic.h>
#include <machine/smp.h>

/* convert an absolute IRQ# into a bitmask */
#define IRQ_BIT(irq_num)	(1 << (irq_num))

/* make an index into the IO APIC from the IRQ# */
/* (redirection table registers start at 0x10, two 32-bit regs per entry) */
#define REDTBL_IDX(irq_num)	(0x10 + ((irq_num) * 2))
14
/*
 * PUSH_FRAME -- build an interrupt trapframe on the stack: dummy error
 * code and trap type, the eight general registers (pushal), then the
 * %ds/%es/%fs segment selectors.  Unwound by POP_FRAME / doreti.
 */
#define PUSH_FRAME							\
	pushl	$0 ;		/* dummy error code */			\
	pushl	$0 ;		/* dummy trap type */			\
	pushal ;		/* 8 ints */				\
	pushl	%ds ;		/* save data and extra segments ... */	\
	pushl	%es ;							\
	pushl	%fs
25
/*
 * PUSH_DUMMY -- synthesize a trapframe so a fast-interrupt handler can
 * be called directly (from FAST_UNPEND) without a real trap: eflags,
 * %cs, the caller's eip (12(%esp): below the saved %ebp and return
 * address), dummy error code/trap type, then space for the 11 words
 * PUSH_FRAME would have pushed.  Discarded by POP_DUMMY.
 */
#define PUSH_DUMMY							\
	pushfl ;		/* eflags */				\
	pushl	%cs ;		/* cs */				\
	pushl	12(%esp) ;	/* original caller eip */		\
	pushl	$0 ;		/* dummy error code */			\
	pushl	$0 ;		/* dummy trap type */			\
	subl	$11*4,%esp ;
33
/*
 * POP_FRAME -- unwind a PUSH_FRAME: restore segment and general
 * registers, then drop the dummy trap type and error code words.
 */
#define POP_FRAME							\
	popl	%fs ;							\
	popl	%es ;							\
	popl	%ds ;							\
	popal ;								\
	addl	$4+4,%esp

/*
 * POP_DUMMY -- drop the 16 words built by PUSH_DUMMY
 * (5 pushed + 11 reserved).
 */
#define POP_DUMMY							\
	addl	$16*4,%esp
43
/*
 * Byte offsets of the I/O APIC address (+8) and redirection-entry
 * index (+12) fields inside int_to_apicintpin[irq_num]; entries are
 * 16 bytes each.
 */
#define IOAPICADDR(irq_num) CNAME(int_to_apicintpin) + 16 * (irq_num) + 8
#define REDIRIDX(irq_num) CNAME(int_to_apicintpin) + 16 * (irq_num) + 12
46
/*
 * MASK_IRQ -- mask the given IRQ: set its bit in apic_imen and the
 * INTMASK bit of its I/O APIC redirection entry (indirect access via
 * the index/window register pair).  No-op if already masked.  The
 * whole sequence runs under ICU_LOCK.  Clobbers %eax, %ecx, flags.
 */
#define MASK_IRQ(irq_num)						\
	ICU_LOCK ;				/* into critical reg */	\
	testl	$IRQ_BIT(irq_num), apic_imen ;				\
	jne	7f ;			/* masked, don't mask */	\
	orl	$IRQ_BIT(irq_num), apic_imen ;	/* set the mask bit */	\
	movl	IOAPICADDR(irq_num), %ecx ;	/* ioapic addr */	\
	movl	REDIRIDX(irq_num), %eax ;	/* get the index */	\
	movl	%eax, (%ecx) ;			/* write the index */	\
	movl	IOAPIC_WINDOW(%ecx), %eax ;	/* current value */	\
	orl	$IOART_INTMASK, %eax ;		/* set the mask */	\
	movl	%eax, IOAPIC_WINDOW(%ecx) ;	/* new value */		\
7: ;						/* already masked */	\
	ICU_UNLOCK
/*
 * Test to see whether we are handling an edge or level triggered INT.
 *  Level-triggered INTs must still be masked as we don't clear the source,
 *  and the EOI cycle would cause redundant INTs to occur.
 *  (apic_pin_trigger bit set => level-triggered.)
 */
#define MASK_LEVEL_IRQ(irq_num)						\
	testl	$IRQ_BIT(irq_num), apic_pin_trigger ;			\
	jz	9f ;				/* edge, don't mask */	\
	MASK_IRQ(irq_num) ;						\
9:
70
71
/*
 * EOI_IRQ -- issue an EOI to the local APIC, but only if this IRQ's
 * bit is actually set in the local APIC in-service register; skip the
 * EOI cycle otherwise.  With APIC_INTR_REORDER the ISR word pointer
 * and bit mask are looked up per-IRQ in apic_isrbit_location[]
 * (8 bytes per entry: pointer, then mask).  Clobbers %eax, flags.
 */
#ifdef APIC_INTR_REORDER
#define EOI_IRQ(irq_num)						\
	movl	apic_isrbit_location + 8 * (irq_num), %eax ;		\
	movl	(%eax), %eax ;						\
	testl	apic_isrbit_location + 4 + 8 * (irq_num), %eax ;	\
	jz	9f ;				/* not active */	\
	movl	$0, lapic+LA_EOI ;					\
9:

#else
#define EOI_IRQ(irq_num)						\
	testl	$IRQ_BIT(irq_num), lapic+LA_ISR1;			\
	jz	9f	;			/* not active */	\
	movl	$0, lapic+LA_EOI;					\
9:
#endif
88
89
/*
 * Test to see if the source is currently masked, clear if so.
 * Mirror image of MASK_IRQ: clear the apic_imen bit and the INTMASK
 * bit of the redirection entry, under ICU_LOCK.  No-op if the IRQ is
 * not masked.  Clobbers %eax, %ecx, flags.
 */
#define UNMASK_IRQ(irq_num)					\
	ICU_LOCK ;				/* into critical reg */	\
	testl	$IRQ_BIT(irq_num), apic_imen ;				\
	je	7f ;			/* bit clear, not masked */	\
	andl	$~IRQ_BIT(irq_num), apic_imen ;/* clear mask bit */	\
	movl	IOAPICADDR(irq_num), %ecx ;	/* ioapic addr */	\
	movl	REDIRIDX(irq_num), %eax ;	/* get the index */	\
	movl	%eax, (%ecx) ;			/* write the index */	\
	movl	IOAPIC_WINDOW(%ecx), %eax ;	/* current value */	\
	andl	$~IOART_INTMASK, %eax ;		/* clear the mask */	\
	movl	%eax, IOAPIC_WINDOW(%ecx) ;	/* new value */		\
7: ;						/* already unmasked */	\
	ICU_UNLOCK
106
/*
 * Test to see whether we are handling an edge or level triggered INT.
 *  Level-triggered INTs have to be unmasked (they were masked by
 *  MASK_LEVEL_IRQ at dispatch time); edge-triggered ones were never
 *  masked.
 */
#define UNMASK_LEVEL_IRQ(irq_num)					\
	testl	$IRQ_BIT(irq_num), apic_pin_trigger ;			\
	jz	9f ;			/* edge, don't unmask */	\
	UNMASK_IRQ(irq_num) ;						\
9:
116
/*
 * Macros for interrupt entry, call to handler, and exit.
 */

/*
 * FAST_INTR -- IDT entry point for a "fast" hardware interrupt.
 *
 * If curthread is in a critical section (TD_CRITNEST != 0) the
 * interrupt is deferred: mark PCPU INT_PENDING/FPENDING, mask the
 * source if level-triggered, EOI, and leave via doreti.  Otherwise
 * bump the critical-section and interrupt nesting counts, call the
 * handler immediately with its intr_unit argument, EOI, update the
 * bookkeeping counters, and run i386_unpend for anything that became
 * pending while the handler ran.  Both paths exit through doreti.
 */
#define	FAST_INTR(irq_num, vec_name)					\
	.text ;								\
	SUPERALIGN_TEXT ;						\
IDTVEC(vec_name) ;							\
	PUSH_FRAME ;							\
	movl	$KDSEL,%eax ;						\
	mov	%ax,%ds ;						\
	mov	%ax,%es ;						\
	movl	$KPSEL,%eax ;						\
	mov	%ax,%fs ;						\
	FAKE_MCOUNT(13*4(%esp)) ;					\
	movl	PCPU(CURTHREAD),%ebx ;					\
	cmpl	$0,TD_CRITNEST(%ebx) ;					\
	je	1f ;							\
;									\
	movl	$1,PCPU(INT_PENDING) ;					\
	orl	$IRQ_BIT(irq_num),PCPU(FPENDING) ;			\
	MASK_LEVEL_IRQ(irq_num) ;					\
	movl	$0, lapic+LA_EOI ;					\
	jmp	10f ;							\
1: ;									\
	incl	TD_CRITNEST(%ebx) ;					\
	incl	TD_INTR_NESTING_LEVEL(%ebx) ;				\
	pushl	intr_unit + (irq_num) * 4 ;				\
	call	*intr_handler + (irq_num) * 4 ;	/* do the work ASAP */	\
	addl	$4, %esp ;						\
	movl	$0, lapic+LA_EOI ;					\
	lock ; 								\
	incl	cnt+V_INTR ;	/* book-keeping can wait */		\
	movl	intr_countp + (irq_num) * 4, %eax ;			\
	lock ; 								\
	incl	(%eax) ;						\
	decl	TD_CRITNEST(%ebx) ;					\
	cmpl	$0,PCPU(INT_PENDING) ;					\
	je	2f ;							\
;									\
	call	i386_unpend ;						\
2: ;									\
	decl	TD_INTR_NESTING_LEVEL(%ebx) ;				\
10: ;									\
	MEXITCOUNT ;							\
	jmp	doreti
163
/*
 * Restart a fast interrupt that was held up by a critical section.
 * This routine is called from unpend().  unpend() ensures we are
 * in a critical section and deals with the interrupt nesting level
 * for us.  If we previously masked the irq, we have to unmask it.
 *
 * We have a choice.  We can regenerate the irq using the 'int'
 * instruction or we can create a dummy frame and call the interrupt
 * handler directly.  I've chosen to use the dummy-frame method.
 *
 * Builds a fake trapframe with PUSH_DUMMY, calls the handler with its
 * intr_unit argument, updates the interrupt counters, and unmasks the
 * source if it is level-triggered.  Clobbers %eax, %ecx, flags.
 */
#define	FAST_UNPEND(irq_num, vec_name)					\
	.text ;								\
	SUPERALIGN_TEXT ;						\
IDTVEC(vec_name) ;							\
;									\
	pushl	%ebp ;							\
	movl	%esp, %ebp ;						\
	PUSH_DUMMY ;							\
	pushl	intr_unit + (irq_num) * 4 ;				\
	call	*intr_handler + (irq_num) * 4 ;	/* do the work ASAP */	\
	addl	$4, %esp ;						\
	lock ; 								\
	incl	cnt+V_INTR ;	/* book-keeping can wait */		\
	movl	intr_countp + (irq_num) * 4, %eax ;			\
	lock ; 								\
	incl	(%eax) ;						\
	UNMASK_LEVEL_IRQ(irq_num) ;					\
	POP_DUMMY ;							\
	popl %ebp ;							\
	ret ;								\
194
195
/*
 * Slow, threaded interrupts.
 *
 * Mask the source (if level-triggered), EOI, then either defer by
 * setting PCPU INT_PENDING/IPENDING (if curthread is in a critical
 * section) or schedule the interrupt thread via sched_ithd().  Any
 * previously deferred interrupts are run first via i386_unpend.
 *
 * XXX Most of the parameters here are obsolete.  Fix this when we're
 * done.
 * XXX we really shouldn't return via doreti if we just schedule the
 * interrupt handler and don't run anything.  We could just do an
 * iret.  FIXME.
 */
#define	INTR(irq_num, vec_name, maybe_extra_ipending)			\
	.text ;								\
	SUPERALIGN_TEXT ;						\
/* _XintrNN: entry point used by IDT/HWIs via _vec[]. */		\
IDTVEC(vec_name) ;							\
	PUSH_FRAME ;							\
	movl	$KDSEL, %eax ;	/* reload with kernel's data segment */	\
	mov	%ax, %ds ;						\
	mov	%ax, %es ;						\
	movl	$KPSEL, %eax ;						\
	mov	%ax, %fs ;						\
;									\
	maybe_extra_ipending ;						\
;									\
	MASK_LEVEL_IRQ(irq_num) ;					\
	EOI_IRQ(irq_num) ;						\
;									\
	movl	PCPU(CURTHREAD),%ebx ;					\
	cmpl	$0,TD_CRITNEST(%ebx) ;					\
	je	1f ;							\
	movl	$1,PCPU(INT_PENDING) ;					\
	orl	$IRQ_BIT(irq_num),PCPU(IPENDING) ;			\
	jmp	10f ;							\
1: ;									\
	incl	TD_INTR_NESTING_LEVEL(%ebx) ;				\
;	 								\
	FAKE_MCOUNT(13*4(%esp)) ;		/* XXX avoid dbl cnt */ \
	cmpl	$0,PCPU(INT_PENDING) ;					\
	je	9f ;							\
	call	i386_unpend ;						\
9: ;									\
	pushl	$irq_num;			/* pass the IRQ */	\
	call	sched_ithd ;						\
	addl	$4, %esp ;		/* discard the parameter */	\
;									\
	decl	TD_INTR_NESTING_LEVEL(%ebx) ;				\
10: ;									\
	MEXITCOUNT ;							\
	jmp	doreti
244
/*
 * Handle "spurious INTerrupts".
 * Notes:
 *  This is different than the "spurious INTerrupt" generated by an
 *   8259 PIC for missing INTs.  See the APIC documentation for details.
 *  This routine should NOT do an 'EOI' cycle.
 */
	.text
	SUPERALIGN_TEXT
IDTVEC(spuriousint)

	/* No EOI cycle used here */

	iret
259
#ifdef SMP
/*
 * Global address space TLB shootdown.
 *
 * IPI handler: flush this CPU's TLB by reloading %cr3, EOI the local
 * APIC, and atomically bump smp_tlb_wait so the initiating CPU can
 * tell when every target CPU has completed the shootdown.
 */
	.text
	SUPERALIGN_TEXT
IDTVEC(invltlb)
	pushl	%eax
	pushl	%ds
	movl	$KDSEL, %eax		/* Kernel data selector */
	mov	%ax, %ds

#ifdef COUNT_XINVLTLB_HITS
	pushl	%fs
	movl	$KPSEL, %eax		/* Private space selector */
	mov	%ax, %fs
	movl	PCPU(CPUID), %eax
	popl	%fs
	incl	xhits_gbl(,%eax,4)
#endif /* COUNT_XINVLTLB_HITS */

	movl	%cr3, %eax		/* invalidate the TLB */
	movl	%eax, %cr3

	movl	$0, lapic+LA_EOI	/* End Of Interrupt to APIC */

	lock
	incl	smp_tlb_wait

	popl	%ds
	popl	%eax
	iret
292
/*
 * Single page TLB shootdown
 *
 * IPI handler: invalidate the single page at smp_tlb_addr1 with
 * invlpg, EOI, and bump smp_tlb_wait to signal completion.
 */
	.text
	SUPERALIGN_TEXT
IDTVEC(invlpg)
	pushl	%eax
	pushl	%ds
	movl	$KDSEL, %eax		/* Kernel data selector */
	mov	%ax, %ds

#ifdef COUNT_XINVLTLB_HITS
	pushl	%fs
	movl	$KPSEL, %eax		/* Private space selector */
	mov	%ax, %fs
	movl	PCPU(CPUID), %eax
	popl	%fs
	incl	xhits_pg(,%eax,4)
#endif /* COUNT_XINVLTLB_HITS */

	movl	smp_tlb_addr1, %eax
	invlpg	(%eax)			/* invalidate single page */

	movl	$0, lapic+LA_EOI	/* End Of Interrupt to APIC */

	lock
	incl	smp_tlb_wait

	popl	%ds
	popl	%eax
	iret
324
/*
 * Page range TLB shootdown.
 *
 * IPI handler: invalidate pages from smp_tlb_addr1 up to (but not
 * including) smp_tlb_addr2, one invlpg per PAGE_SIZE step, then EOI
 * and bump smp_tlb_wait to signal completion.
 */
	.text
	SUPERALIGN_TEXT
IDTVEC(invlrng)
	pushl	%eax
	pushl	%edx
	pushl	%ds
	movl	$KDSEL, %eax		/* Kernel data selector */
	mov	%ax, %ds

#ifdef COUNT_XINVLTLB_HITS
	pushl	%fs
	movl	$KPSEL, %eax		/* Private space selector */
	mov	%ax, %fs
	movl	PCPU(CPUID), %eax
	popl	%fs
	incl	xhits_rng(,%eax,4)
#endif /* COUNT_XINVLTLB_HITS */

	movl	smp_tlb_addr1, %edx
	movl	smp_tlb_addr2, %eax
1:	invlpg	(%edx)			/* invalidate single page */
	addl	$PAGE_SIZE, %edx
	cmpl	%eax, %edx
	jb	1b

	movl	$0, lapic+LA_EOI	/* End Of Interrupt to APIC */

	lock
	incl	smp_tlb_wait

	popl	%ds
	popl	%edx
	popl	%eax
	iret
362
/*
 * Forward hardclock to another CPU.  Pushes a clockframe and calls
 * forwarded_hardclock().
 *
 * If curthread is in a critical section, defer instead: set
 * INT_PENDING and bit 0 of PCPU(SPENDING), to be replayed later.
 */
	.text
	SUPERALIGN_TEXT
IDTVEC(hardclock)
	PUSH_FRAME
	movl	$KDSEL, %eax	/* reload with kernel's data segment */
	mov	%ax, %ds
	mov	%ax, %es
	movl	$KPSEL, %eax
	mov	%ax, %fs

	movl	$0, lapic+LA_EOI	/* End Of Interrupt to APIC */

	movl	PCPU(CURTHREAD),%ebx
	cmpl	$0,TD_CRITNEST(%ebx)
	je	1f
	movl	$1,PCPU(INT_PENDING)
	orl	$1,PCPU(SPENDING);
	jmp	10f
1:
	incl	TD_INTR_NESTING_LEVEL(%ebx)
	pushl	$0		/* XXX convert trapframe to clockframe */
	call	forwarded_hardclock
	addl	$4, %esp	/* XXX convert clockframe to trapframe */
	decl	TD_INTR_NESTING_LEVEL(%ebx)
10:
	MEXITCOUNT
	jmp	doreti
394
/*
 * Forward statclock to another CPU.  Pushes a clockframe and calls
 * forwarded_statclock().
 *
 * If curthread is in a critical section, defer instead: set
 * INT_PENDING and bit 1 of PCPU(SPENDING), to be replayed later.
 */
	.text
	SUPERALIGN_TEXT
IDTVEC(statclock)
	PUSH_FRAME
	movl	$KDSEL, %eax	/* reload with kernel's data segment */
	mov	%ax, %ds
	mov	%ax, %es
	movl	$KPSEL, %eax
	mov	%ax, %fs

	movl	$0, lapic+LA_EOI	/* End Of Interrupt to APIC */

	FAKE_MCOUNT(13*4(%esp))

	movl	PCPU(CURTHREAD),%ebx
	cmpl	$0,TD_CRITNEST(%ebx)
	je	1f
	movl	$1,PCPU(INT_PENDING)
	orl	$2,PCPU(SPENDING);
	jmp	10f
1:
	incl	TD_INTR_NESTING_LEVEL(%ebx)
	pushl	$0		/* XXX convert trapframe to clockframe */
	call	forwarded_statclock
	addl	$4, %esp	/* XXX convert clockframe to trapframe */
	decl	TD_INTR_NESTING_LEVEL(%ebx)
10:
	MEXITCOUNT
	jmp	doreti
428
/*
 * Executed by a CPU when it receives an Xcpuast IPI from another CPU,
 *
 * The other CPU has already executed aston() or need_resched() on our
 * current process, so we simply need to ack the interrupt and return
 * via doreti to run ast().
 */

	.text
	SUPERALIGN_TEXT
IDTVEC(cpuast)
	PUSH_FRAME
	movl	$KDSEL, %eax
	mov	%ax, %ds		/* use KERNEL data segment */
	mov	%ax, %es
	movl	$KPSEL, %eax
	mov	%ax, %fs

	movl	$0, lapic+LA_EOI	/* End Of Interrupt to APIC */

	FAKE_MCOUNT(13*4(%esp))

	MEXITCOUNT
	jmp	doreti
453
/*
 * Executed by a CPU when it receives an Xcpustop IPI from another CPU,
 *
 *  - Saves this CPU's context into stoppcbs[cpuid].
 *  - Signals its receipt (sets its bit in stopped_cpus).
 *  - Waits for permission to restart (its bit in started_cpus).
 *  - Signals its restart (clears both bits again).
 */
	.text
	SUPERALIGN_TEXT
IDTVEC(cpustop)
	pushl	%ebp
	movl	%esp, %ebp
	pushl	%eax
	pushl	%ecx
	pushl	%edx
	pushl	%ds			/* save current data segment */
	pushl	%fs

	movl	$KDSEL, %eax
	mov	%ax, %ds		/* use KERNEL data segment */
	movl	$KPSEL, %eax
	mov	%ax, %fs

	movl	$0, lapic+LA_EOI	/* End Of Interrupt to APIC */

	movl	PCPU(CPUID), %eax
	imull	$PCB_SIZE, %eax
	leal	CNAME(stoppcbs)(%eax), %eax
	pushl	%eax
	call	CNAME(savectx)		/* Save process context */
	addl	$4, %esp

	movl	PCPU(CPUID), %eax

	lock
	btsl	%eax, CNAME(stopped_cpus) /* stopped_cpus |= (1<<id) */
1:
	btl	%eax, CNAME(started_cpus) /* while (!(started_cpus & (1<<id))) */
	jnc	1b

	lock
	btrl	%eax, CNAME(started_cpus) /* started_cpus &= ~(1<<id) */
	lock
	btrl	%eax, CNAME(stopped_cpus) /* stopped_cpus &= ~(1<<id) */

	/* %eax still holds our CPUID: only CPU 0 runs the one-shot
	 * cpustop_restartfunc hook, if one was installed. */
	test	%eax, %eax
	jnz	2f

	movl	CNAME(cpustop_restartfunc), %eax
	test	%eax, %eax
	jz	2f
	movl	$0, CNAME(cpustop_restartfunc)	/* One-shot */

	call	*%eax
2:
	popl	%fs
	popl	%ds			/* restore previous data segment */
	popl	%edx
	popl	%ecx
	popl	%eax
	movl	%ebp, %esp
	popl	%ebp
	iret
517
518#endif /* SMP */
519
/*
 * Instantiate the entry points for IRQs 0-31.  The MCOUNT_LABELs
 * bracket the generated vectors for the profiler (see FAKE_MCOUNT /
 * MEXITCOUNT in the macros above).
 */
MCOUNT_LABEL(bintr)
	FAST_INTR(0,fastintr0)
	FAST_INTR(1,fastintr1)
	FAST_INTR(2,fastintr2)
	FAST_INTR(3,fastintr3)
	FAST_INTR(4,fastintr4)
	FAST_INTR(5,fastintr5)
	FAST_INTR(6,fastintr6)
	FAST_INTR(7,fastintr7)
	FAST_INTR(8,fastintr8)
	FAST_INTR(9,fastintr9)
	FAST_INTR(10,fastintr10)
	FAST_INTR(11,fastintr11)
	FAST_INTR(12,fastintr12)
	FAST_INTR(13,fastintr13)
	FAST_INTR(14,fastintr14)
	FAST_INTR(15,fastintr15)
	FAST_INTR(16,fastintr16)
	FAST_INTR(17,fastintr17)
	FAST_INTR(18,fastintr18)
	FAST_INTR(19,fastintr19)
	FAST_INTR(20,fastintr20)
	FAST_INTR(21,fastintr21)
	FAST_INTR(22,fastintr22)
	FAST_INTR(23,fastintr23)
	FAST_INTR(24,fastintr24)
	FAST_INTR(25,fastintr25)
	FAST_INTR(26,fastintr26)
	FAST_INTR(27,fastintr27)
	FAST_INTR(28,fastintr28)
	FAST_INTR(29,fastintr29)
	FAST_INTR(30,fastintr30)
	FAST_INTR(31,fastintr31)
/* Extra work for IRQ0 (the clock): note a pending clock interrupt. */
#define	CLKINTR_PENDING	movl $1,CNAME(clkintr_pending)
/* Threaded interrupts */
	INTR(0,intr0, CLKINTR_PENDING)
	INTR(1,intr1,)
	INTR(2,intr2,)
	INTR(3,intr3,)
	INTR(4,intr4,)
	INTR(5,intr5,)
	INTR(6,intr6,)
	INTR(7,intr7,)
	INTR(8,intr8,)
	INTR(9,intr9,)
	INTR(10,intr10,)
	INTR(11,intr11,)
	INTR(12,intr12,)
	INTR(13,intr13,)
	INTR(14,intr14,)
	INTR(15,intr15,)
	INTR(16,intr16,)
	INTR(17,intr17,)
	INTR(18,intr18,)
	INTR(19,intr19,)
	INTR(20,intr20,)
	INTR(21,intr21,)
	INTR(22,intr22,)
	INTR(23,intr23,)
	INTR(24,intr24,)
	INTR(25,intr25,)
	INTR(26,intr26,)
	INTR(27,intr27,)
	INTR(28,intr28,)
	INTR(29,intr29,)
	INTR(30,intr30,)
	INTR(31,intr31,)

	FAST_UNPEND(0,fastunpend0)
	FAST_UNPEND(1,fastunpend1)
	FAST_UNPEND(2,fastunpend2)
	FAST_UNPEND(3,fastunpend3)
	FAST_UNPEND(4,fastunpend4)
	FAST_UNPEND(5,fastunpend5)
	FAST_UNPEND(6,fastunpend6)
	FAST_UNPEND(7,fastunpend7)
	FAST_UNPEND(8,fastunpend8)
	FAST_UNPEND(9,fastunpend9)
	FAST_UNPEND(10,fastunpend10)
	FAST_UNPEND(11,fastunpend11)
	FAST_UNPEND(12,fastunpend12)
	FAST_UNPEND(13,fastunpend13)
	FAST_UNPEND(14,fastunpend14)
	FAST_UNPEND(15,fastunpend15)
	FAST_UNPEND(16,fastunpend16)
	FAST_UNPEND(17,fastunpend17)
	FAST_UNPEND(18,fastunpend18)
	FAST_UNPEND(19,fastunpend19)
	FAST_UNPEND(20,fastunpend20)
	FAST_UNPEND(21,fastunpend21)
	FAST_UNPEND(22,fastunpend22)
	FAST_UNPEND(23,fastunpend23)
	FAST_UNPEND(24,fastunpend24)
	FAST_UNPEND(25,fastunpend25)
	FAST_UNPEND(26,fastunpend26)
	FAST_UNPEND(27,fastunpend27)
	FAST_UNPEND(28,fastunpend28)
	FAST_UNPEND(29,fastunpend29)
	FAST_UNPEND(30,fastunpend30)
	FAST_UNPEND(31,fastunpend31)
MCOUNT_LABEL(eintr)
621
#ifdef SMP
/*
 * Executed by a CPU when it receives a RENDEZVOUS IPI from another CPU.
 *
 * - Calls the generic rendezvous action function.
 */
	.text
	SUPERALIGN_TEXT
IDTVEC(rendezvous)
	PUSH_FRAME
	movl	$KDSEL, %eax
	mov	%ax, %ds		/* use KERNEL data segment */
	mov	%ax, %es
	movl	$KPSEL, %eax
	mov	%ax, %fs

	call	smp_rendezvous_action

	movl	$0, lapic+LA_EOI	/* End Of Interrupt to APIC */
	POP_FRAME
	iret

/*
 * Clean up when we lose out on the lazy context switch optimization.
 * ie: when we are about to release a PTD but a cpu is still borrowing it.
 * Calls pmap_lazyfix_action(), then EOIs and returns.
 */
	SUPERALIGN_TEXT
IDTVEC(lazypmap)
	PUSH_FRAME
	movl	$KDSEL, %eax
	mov	%ax, %ds		/* use KERNEL data segment */
	mov	%ax, %es
	movl	$KPSEL, %eax
	mov	%ax, %fs

	call	pmap_lazyfix_action

	movl	$0, lapic+LA_EOI	/* End Of Interrupt to APIC */
	POP_FRAME
	iret
#endif /* SMP */
663
	.data

/*
 * Bitmask of IRQs, one bit per IRQ number: bit set means the pin is
 * level-triggered (tested by MASK_LEVEL_IRQ / UNMASK_LEVEL_IRQ above).
 */
	.globl	apic_pin_trigger
apic_pin_trigger:
	.long	0

	.text
671