/*
 *	from: vector.s, 386BSD 0.1 unknown origin
 * $FreeBSD: head/sys/i386/i386/apic_vector.s 110296 2003-02-03 17:53:15Z jake $
 */
5
6
#include <machine/apic.h>
#include <machine/smp.h>
9
/* convert an absolute IRQ# into a bitmask */
#define IRQ_BIT(irq_num)	(1 << (irq_num))

/* make an index into the IO APIC from the IRQ# */
#define REDTBL_IDX(irq_num)	(0x10 + ((irq_num) * 2))
15
/*
 * Build a full trap frame on the stack: dummy error code and trap
 * type, the eight general-purpose registers (pushal), and the
 * %ds/%es/%fs segment selectors.
 */
#define PUSH_FRAME							\
	pushl	$0 ;		/* dummy error code */			\
	pushl	$0 ;		/* dummy trap type */			\
	pushal ;		/* 8 ints */				\
	pushl	%ds ;		/* save data and extra segments ... */	\
	pushl	%es ;							\
	pushl	%fs
26
/*
 * Build a fake trap frame for FAST_UNPEND: fabricate eflags/%cs/eip
 * from the caller's state, push dummy error code and trap type, then
 * reserve (without saving) the 11 register slots that PUSH_FRAME
 * would have filled (8 GP registers + %ds/%es/%fs).
 */
#define PUSH_DUMMY							\
	pushfl ;		/* eflags */				\
	pushl	%cs ;		/* cs */				\
	pushl	12(%esp) ;	/* original caller eip */		\
	pushl	$0 ;		/* dummy error code */			\
	pushl	$0 ;		/* dummy trap type */			\
	subl	$11*4,%esp ;
34
/*
 * Undo PUSH_FRAME: restore segment and general registers, then
 * discard the dummy trap type and error code (4+4 bytes).
 */
#define POP_FRAME							\
	popl	%fs ;							\
	popl	%es ;							\
	popl	%ds ;							\
	popal ;								\
	addl	$4+4,%esp
41
/*
 * Undo PUSH_DUMMY: 16 dwords = 11 register slots + 2 dummies
 * + eip + %cs + eflags.
 */
#define POP_DUMMY							\
	addl	$16*4,%esp
44
/*
 * Per-IRQ fields of the int_to_apicintpin[] table (16-byte entries).
 * NOTE(review): offsets 8/12 presumably select the I/O APIC address
 * and redirection-table index fields -- confirm against the C struct.
 */
#define IOAPICADDR(irq_num) CNAME(int_to_apicintpin) + 16 * (irq_num) + 8
#define REDIRIDX(irq_num) CNAME(int_to_apicintpin) + 16 * (irq_num) + 12
47
/*
 * Under ICU_LOCK, set the mask bit for `irq_num' in the software mask
 * apic_imen and in the I/O APIC redirection entry (reached through the
 * I/O APIC's indirect index/window register pair), unless the source
 * is already masked.
 */
#define MASK_IRQ(irq_num)						\
	ICU_LOCK ;				/* into critical reg */	\
	testl	$IRQ_BIT(irq_num), apic_imen ;				\
	jne	7f ;			/* masked, don't mask */	\
	orl	$IRQ_BIT(irq_num), apic_imen ;	/* set the mask bit */	\
	movl	IOAPICADDR(irq_num), %ecx ;	/* ioapic addr */	\
	movl	REDIRIDX(irq_num), %eax ;	/* get the index */	\
	movl	%eax, (%ecx) ;			/* write the index */	\
	movl	IOAPIC_WINDOW(%ecx), %eax ;	/* current value */	\
	orl	$IOART_INTMASK, %eax ;		/* set the mask */	\
	movl	%eax, IOAPIC_WINDOW(%ecx) ;	/* new value */		\
7: ;						/* already masked */	\
	ICU_UNLOCK
/*
 * Test to see whether we are handling an edge or level triggered INT.
 *  Level-triggered INTs must still be masked as we don't clear the source,
 *  and the EOI cycle would cause redundant INTs to occur.
 */
#define MASK_LEVEL_IRQ(irq_num)						\
	testl	$IRQ_BIT(irq_num), apic_pin_trigger ;			\
	jz	9f ;				/* edge, don't mask */	\
	MASK_IRQ(irq_num) ;						\
9:
71
72
#ifdef APIC_INTR_REORDER
/*
 * EOI the local APIC only if this IRQ is actually in service.
 * apic_isrbit_location[] caches, per IRQ, a pointer to the relevant
 * ISR word (offset 0) and the bit mask to test (offset 4).
 */
#define EOI_IRQ(irq_num)						\
	movl	apic_isrbit_location + 8 * (irq_num), %eax ;		\
	movl	(%eax), %eax ;						\
	testl	apic_isrbit_location + 4 + 8 * (irq_num), %eax ;	\
	jz	9f ;				/* not active */	\
	movl	$0, lapic+LA_EOI ;					\
9:

#else
/* EOI the local APIC only if the IRQ's bit is set in ISR word 1. */
#define EOI_IRQ(irq_num)						\
	testl	$IRQ_BIT(irq_num), lapic+LA_ISR1;			\
	jz	9f	;			/* not active */	\
	movl	$0, lapic+LA_EOI;					\
9:
#endif
89
90
/*
 * Test to see if the source is currently masked, clear if so.
 * Mirror image of MASK_IRQ: under ICU_LOCK, clear the bit in
 * apic_imen and in the I/O APIC redirection entry.
 */
#define UNMASK_IRQ(irq_num)					\
	ICU_LOCK ;				/* into critical reg */	\
	testl	$IRQ_BIT(irq_num), apic_imen ;				\
	je	7f ;			/* bit clear, not masked */	\
	andl	$~IRQ_BIT(irq_num), apic_imen ;/* clear mask bit */	\
	movl	IOAPICADDR(irq_num), %ecx ;	/* ioapic addr */	\
	movl	REDIRIDX(irq_num), %eax ;	/* get the index */	\
	movl	%eax, (%ecx) ;			/* write the index */	\
	movl	IOAPIC_WINDOW(%ecx), %eax ;	/* current value */	\
	andl	$~IOART_INTMASK, %eax ;		/* clear the mask */	\
	movl	%eax, IOAPIC_WINDOW(%ecx) ;	/* new value */		\
7: ;						/* already unmasked */	\
	ICU_UNLOCK
107
/*
 * Test to see whether we are handling an edge or level triggered INT.
 *  Level-triggered INTs have to be unmasked.
 */
#define UNMASK_LEVEL_IRQ(irq_num)					\
	testl	$IRQ_BIT(irq_num), apic_pin_trigger ;			\
	jz	9f ;			/* edge, don't unmask */	\
	UNMASK_IRQ(irq_num) ;						\
9:
117
/*
 * Macros for interrupt entry, call to handler, and exit.
 *
 * FAST_INTR: entry for a "fast" interrupt.  If the current thread is
 * in a critical section the interrupt is deferred: it is recorded in
 * PCPU(FPENDING), level-triggered sources are masked, and we EOI and
 * leave via doreti.  Otherwise the handler is invoked immediately,
 * followed by the EOI and interrupt accounting; any interrupts that
 * became pending meanwhile are run via i386_unpend().
 */
#define	FAST_INTR(irq_num, vec_name)					\
	.text ;								\
	SUPERALIGN_TEXT ;						\
IDTVEC(vec_name) ;							\
	PUSH_FRAME ;							\
	movl	$KDSEL,%eax ;						\
	mov	%ax,%ds ;						\
	mov	%ax,%es ;						\
	movl	$KPSEL,%eax ;						\
	mov	%ax,%fs ;						\
	FAKE_MCOUNT(13*4(%esp)) ;					\
	movl	PCPU(CURTHREAD),%ebx ;					\
	cmpl	$0,TD_CRITNEST(%ebx) ;					\
	je	1f ;							\
;									\
	movl	$1,PCPU(INT_PENDING) ;					\
	orl	$IRQ_BIT(irq_num),PCPU(FPENDING) ;			\
	MASK_LEVEL_IRQ(irq_num) ;					\
	movl	$0, lapic+LA_EOI ;					\
	jmp	10f ;							\
1: ;									\
	incl	TD_CRITNEST(%ebx) ;					\
	incl	TD_INTR_NESTING_LEVEL(%ebx) ;				\
	pushl	intr_unit + (irq_num) * 4 ;				\
	call	*intr_handler + (irq_num) * 4 ;	/* do the work ASAP */	\
	addl	$4, %esp ;						\
	movl	$0, lapic+LA_EOI ;					\
	lock ; 								\
	incl	cnt+V_INTR ;	/* book-keeping can wait */		\
	movl	intr_countp + (irq_num) * 4, %eax ;			\
	lock ; 								\
	incl	(%eax) ;						\
	decl	TD_CRITNEST(%ebx) ;					\
	cmpl	$0,PCPU(INT_PENDING) ;					\
	je	2f ;							\
;									\
	call	i386_unpend ;						\
2: ;									\
	decl	TD_INTR_NESTING_LEVEL(%ebx) ;				\
10: ;									\
	MEXITCOUNT ;							\
	jmp	doreti
164
/*
 * Restart a fast interrupt that was held up by a critical section.
 * This routine is called from unpend().  unpend() ensures we are
 * in a critical section and deals with the interrupt nesting level
 * for us.  If we previously masked the irq, we have to unmask it.
 *
 * We have a choice.  We can regenerate the irq using the 'int'
 * instruction or we can create a dummy frame and call the interrupt
 * handler directly.  I've chosen to use the dummy-frame method.
 */
#define	FAST_UNPEND(irq_num, vec_name)					\
	.text ;								\
	SUPERALIGN_TEXT ;						\
IDTVEC(vec_name) ;							\
;									\
	pushl	%ebp ;							\
	movl	%esp, %ebp ;						\
	PUSH_DUMMY ;							\
	pushl	intr_unit + (irq_num) * 4 ;				\
	call	*intr_handler + (irq_num) * 4 ;	/* do the work ASAP */	\
	addl	$4, %esp ;						\
	lock ; 								\
	incl	cnt+V_INTR ;	/* book-keeping can wait */		\
	movl	intr_countp + (irq_num) * 4, %eax ;			\
	lock ; 								\
	incl	(%eax) ;						\
	UNMASK_LEVEL_IRQ(irq_num) ;					\
	POP_DUMMY ;							\
	popl %ebp ;							\
	ret ;								\

196
/*
 * Slow, threaded interrupts.  Mask (level) and EOI the source, then
 * either record the IRQ in PCPU(IPENDING) if we are in a critical
 * section, or hand it to sched_ithd() to run its ithread.
 *
 * XXX Most of the parameters here are obsolete.  Fix this when we're
 * done.
 * XXX we really shouldn't return via doreti if we just schedule the
 * interrupt handler and don't run anything.  We could just do an
 * iret.  FIXME.
 */
#define	INTR(irq_num, vec_name, maybe_extra_ipending)			\
	.text ;								\
	SUPERALIGN_TEXT ;						\
/* _XintrNN: entry point used by IDT/HWIs via _vec[]. */		\
IDTVEC(vec_name) ;							\
	PUSH_FRAME ;							\
	movl	$KDSEL, %eax ;	/* reload with kernel's data segment */	\
	mov	%ax, %ds ;						\
	mov	%ax, %es ;						\
	movl	$KPSEL, %eax ;						\
	mov	%ax, %fs ;						\
;									\
	maybe_extra_ipending ;						\
;									\
	MASK_LEVEL_IRQ(irq_num) ;					\
	EOI_IRQ(irq_num) ;						\
;									\
	movl	PCPU(CURTHREAD),%ebx ;					\
	cmpl	$0,TD_CRITNEST(%ebx) ;					\
	je	1f ;							\
	movl	$1,PCPU(INT_PENDING) ;					\
	orl	$IRQ_BIT(irq_num),PCPU(IPENDING) ;			\
	jmp	10f ;							\
1: ;									\
	incl	TD_INTR_NESTING_LEVEL(%ebx) ;				\
;	 								\
	FAKE_MCOUNT(13*4(%esp)) ;		/* XXX avoid dbl cnt */ \
	cmpl	$0,PCPU(INT_PENDING) ;					\
	je	9f ;							\
	call	i386_unpend ;						\
9: ;									\
	pushl	$irq_num;			/* pass the IRQ */	\
	call	sched_ithd ;						\
	addl	$4, %esp ;		/* discard the parameter */	\
;									\
	decl	TD_INTR_NESTING_LEVEL(%ebx) ;				\
10: ;									\
	MEXITCOUNT ;							\
	jmp	doreti
245
/*
 * Handle "spurious INTerrupts".
 * Notes:
 *  This is different than the "spurious INTerrupt" generated by an
 *   8259 PIC for missing INTs.  See the APIC documentation for details.
 *  This routine should NOT do an 'EOI' cycle.
 */
	.text
	SUPERALIGN_TEXT
	.globl Xspuriousint
Xspuriousint:

	/* No EOI cycle used here */

	iret
261
/*
 * Global address space TLB shootdown.  Reloading %cr3 flushes the
 * entire (non-global) TLB; smp_tlb_wait is bumped so the initiating
 * CPU can see that this CPU has completed the shootdown.
 */
	.text
	SUPERALIGN_TEXT
	.globl	Xinvltlb
Xinvltlb:
	pushl	%eax
	pushl	%ds
	movl	$KDSEL, %eax		/* Kernel data selector */
	mov	%ax, %ds

#ifdef COUNT_XINVLTLB_HITS
	pushl	%fs
	movl	$KPSEL, %eax		/* Private space selector */
	mov	%ax, %fs
	movl	PCPU(CPUID), %eax
	popl	%fs
	incl	xhits_gbl(,%eax,4)
#endif /* COUNT_XINVLTLB_HITS */

	movl	%cr3, %eax		/* invalidate the TLB */
	movl	%eax, %cr3

	movl	$0, lapic+LA_EOI	/* End Of Interrupt to APIC */

	lock
	incl	smp_tlb_wait		/* signal completion to initiator */

	popl	%ds
	popl	%eax
	iret
294
/*
 * Single page TLB shootdown: invalidate the one page at
 * smp_tlb_addr1, then EOI and signal completion.
 */
	.text
	SUPERALIGN_TEXT
	.globl	Xinvlpg
Xinvlpg:
	pushl	%eax
	pushl	%ds
	movl	$KDSEL, %eax		/* Kernel data selector */
	mov	%ax, %ds

#ifdef COUNT_XINVLTLB_HITS
	pushl	%fs
	movl	$KPSEL, %eax		/* Private space selector */
	mov	%ax, %fs
	movl	PCPU(CPUID), %eax
	popl	%fs
	incl	xhits_pg(,%eax,4)
#endif /* COUNT_XINVLTLB_HITS */

	movl	smp_tlb_addr1, %eax
	invlpg	(%eax)			/* invalidate single page */

	movl	$0, lapic+LA_EOI	/* End Of Interrupt to APIC */

	lock
	incl	smp_tlb_wait		/* signal completion to initiator */

	popl	%ds
	popl	%eax
	iret
327
/*
 * Page range TLB shootdown: invalidate each page in
 * [smp_tlb_addr1, smp_tlb_addr2), then EOI and signal completion.
 */
	.text
	SUPERALIGN_TEXT
	.globl	Xinvlrng
Xinvlrng:
	pushl	%eax
	pushl	%edx
	pushl	%ds
	movl	$KDSEL, %eax		/* Kernel data selector */
	mov	%ax, %ds

#ifdef COUNT_XINVLTLB_HITS
	pushl	%fs
	movl	$KPSEL, %eax		/* Private space selector */
	mov	%ax, %fs
	movl	PCPU(CPUID), %eax
	popl	%fs
	incl	xhits_rng(,%eax,4)
#endif /* COUNT_XINVLTLB_HITS */

	movl	smp_tlb_addr1, %edx
	movl	smp_tlb_addr2, %eax
1:	invlpg	(%edx)			/* invalidate single page */
	addl	$PAGE_SIZE, %edx
	cmpl	%eax, %edx
	jb	1b

	movl	$0, lapic+LA_EOI	/* End Of Interrupt to APIC */

	lock
	incl	smp_tlb_wait		/* signal completion to initiator */

	popl	%ds
	popl	%edx
	popl	%eax
	iret
366
/*
 * Forward hardclock to another CPU.  Pushes a clockframe and calls
 * forwarded_hardclock().  If we are in a critical section the event
 * is deferred by setting bit 0 of PCPU(SPENDING) instead.
 */
	.text
	SUPERALIGN_TEXT
	.globl Xhardclock
Xhardclock:
	PUSH_FRAME
	movl	$KDSEL, %eax	/* reload with kernel's data segment */
	mov	%ax, %ds
	mov	%ax, %es
	movl	$KPSEL, %eax
	mov	%ax, %fs

	movl	$0, lapic+LA_EOI	/* End Of Interrupt to APIC */

	movl	PCPU(CURTHREAD),%ebx
	cmpl	$0,TD_CRITNEST(%ebx)
	je	1f
	movl	$1,PCPU(INT_PENDING)
	orl	$1,PCPU(SPENDING);
	jmp	10f
1:
	incl	TD_INTR_NESTING_LEVEL(%ebx)
	pushl	$0		/* XXX convert trapframe to clockframe */
	call	forwarded_hardclock
	addl	$4, %esp	/* XXX convert clockframe to trapframe */
	decl	TD_INTR_NESTING_LEVEL(%ebx)
10:
	MEXITCOUNT
	jmp	doreti
399
/*
 * Forward statclock to another CPU.  Pushes a clockframe and calls
 * forwarded_statclock().  If we are in a critical section the event
 * is deferred by setting bit 1 of PCPU(SPENDING) instead.
 */
	.text
	SUPERALIGN_TEXT
	.globl Xstatclock
Xstatclock:
	PUSH_FRAME
	movl	$KDSEL, %eax	/* reload with kernel's data segment */
	mov	%ax, %ds
	mov	%ax, %es
	movl	$KPSEL, %eax
	mov	%ax, %fs

	movl	$0, lapic+LA_EOI	/* End Of Interrupt to APIC */

	FAKE_MCOUNT(13*4(%esp))

	movl	PCPU(CURTHREAD),%ebx
	cmpl	$0,TD_CRITNEST(%ebx)
	je	1f
	movl	$1,PCPU(INT_PENDING)
	orl	$2,PCPU(SPENDING);
	jmp	10f
1:
	incl	TD_INTR_NESTING_LEVEL(%ebx)
	pushl	$0		/* XXX convert trapframe to clockframe */
	call	forwarded_statclock
	addl	$4, %esp	/* XXX convert clockframe to trapframe */
	decl	TD_INTR_NESTING_LEVEL(%ebx)
10:
	MEXITCOUNT
	jmp	doreti
434
/*
 * Executed by a CPU when it receives an Xcpuast IPI from another CPU,
 *
 * The other CPU has already executed aston() or need_resched() on our
 * current process, so we simply need to ack the interrupt and return
 * via doreti to run ast().
 */

	.text
	SUPERALIGN_TEXT
	.globl Xcpuast
Xcpuast:
	PUSH_FRAME
	movl	$KDSEL, %eax
	mov	%ax, %ds		/* use KERNEL data segment */
	mov	%ax, %es
	movl	$KPSEL, %eax
	mov	%ax, %fs

	movl	$0, lapic+LA_EOI	/* End Of Interrupt to APIC */

	FAKE_MCOUNT(13*4(%esp))

	MEXITCOUNT
	jmp	doreti
460
/*
 * Executed by a CPU when it receives an Xcpustop IPI from another CPU,
 *
 *  - Saves this CPU's context in stoppcbs[cpuid].
 *  - Signals its receipt (sets its bit in stopped_cpus).
 *  - Waits for permission to restart (its bit in started_cpus).
 *  - Signals its restart (clears both bits again).
 *  - CPU 0 additionally runs cpustop_restartfunc, if set (one-shot).
 */
	.text
	SUPERALIGN_TEXT
	.globl Xcpustop
Xcpustop:
	pushl	%ebp
	movl	%esp, %ebp
	pushl	%eax
	pushl	%ecx
	pushl	%edx
	pushl	%ds			/* save current data segment */
	pushl	%fs

	movl	$KDSEL, %eax
	mov	%ax, %ds		/* use KERNEL data segment */
	movl	$KPSEL, %eax
	mov	%ax, %fs

	movl	$0, lapic+LA_EOI	/* End Of Interrupt to APIC */

	movl	PCPU(CPUID), %eax
	imull	$PCB_SIZE, %eax
	leal	CNAME(stoppcbs)(%eax), %eax
	pushl	%eax
	call	CNAME(savectx)		/* Save process context */
	addl	$4, %esp

	movl	PCPU(CPUID), %eax

	lock
	btsl	%eax, CNAME(stopped_cpus) /* stopped_cpus |= (1<<id) */
1:
	btl	%eax, CNAME(started_cpus) /* while (!(started_cpus & (1<<id))) */
	jnc	1b

	lock
	btrl	%eax, CNAME(started_cpus) /* started_cpus &= ~(1<<id) */
	lock
	btrl	%eax, CNAME(stopped_cpus) /* stopped_cpus &= ~(1<<id) */

	test	%eax, %eax		/* only CPU 0 runs restartfunc */
	jnz	2f

	movl	CNAME(cpustop_restartfunc), %eax
	test	%eax, %eax
	jz	2f
	movl	$0, CNAME(cpustop_restartfunc)	/* One-shot */

	call	*%eax
2:
	popl	%fs
	popl	%ds			/* restore previous data segment */
	popl	%edx
	popl	%ecx
	popl	%eax
	movl	%ebp, %esp
	popl	%ebp
	iret
525
526
/*
 * Instantiate the interrupt entry templates for IRQs 0-31.  The
 * MCOUNT_LABELs bracket the generated code for the profiler.
 */
MCOUNT_LABEL(bintr)
	FAST_INTR(0,fastintr0)
	FAST_INTR(1,fastintr1)
	FAST_INTR(2,fastintr2)
	FAST_INTR(3,fastintr3)
	FAST_INTR(4,fastintr4)
	FAST_INTR(5,fastintr5)
	FAST_INTR(6,fastintr6)
	FAST_INTR(7,fastintr7)
	FAST_INTR(8,fastintr8)
	FAST_INTR(9,fastintr9)
	FAST_INTR(10,fastintr10)
	FAST_INTR(11,fastintr11)
	FAST_INTR(12,fastintr12)
	FAST_INTR(13,fastintr13)
	FAST_INTR(14,fastintr14)
	FAST_INTR(15,fastintr15)
	FAST_INTR(16,fastintr16)
	FAST_INTR(17,fastintr17)
	FAST_INTR(18,fastintr18)
	FAST_INTR(19,fastintr19)
	FAST_INTR(20,fastintr20)
	FAST_INTR(21,fastintr21)
	FAST_INTR(22,fastintr22)
	FAST_INTR(23,fastintr23)
	FAST_INTR(24,fastintr24)
	FAST_INTR(25,fastintr25)
	FAST_INTR(26,fastintr26)
	FAST_INTR(27,fastintr27)
	FAST_INTR(28,fastintr28)
	FAST_INTR(29,fastintr29)
	FAST_INTR(30,fastintr30)
	FAST_INTR(31,fastintr31)
#define	CLKINTR_PENDING	movl $1,CNAME(clkintr_pending)
/* Threaded interrupts */
	INTR(0,intr0, CLKINTR_PENDING)
	INTR(1,intr1,)
	INTR(2,intr2,)
	INTR(3,intr3,)
	INTR(4,intr4,)
	INTR(5,intr5,)
	INTR(6,intr6,)
	INTR(7,intr7,)
	INTR(8,intr8,)
	INTR(9,intr9,)
	INTR(10,intr10,)
	INTR(11,intr11,)
	INTR(12,intr12,)
	INTR(13,intr13,)
	INTR(14,intr14,)
	INTR(15,intr15,)
	INTR(16,intr16,)
	INTR(17,intr17,)
	INTR(18,intr18,)
	INTR(19,intr19,)
	INTR(20,intr20,)
	INTR(21,intr21,)
	INTR(22,intr22,)
	INTR(23,intr23,)
	INTR(24,intr24,)
	INTR(25,intr25,)
	INTR(26,intr26,)
	INTR(27,intr27,)
	INTR(28,intr28,)
	INTR(29,intr29,)
	INTR(30,intr30,)
	INTR(31,intr31,)

	FAST_UNPEND(0,fastunpend0)
	FAST_UNPEND(1,fastunpend1)
	FAST_UNPEND(2,fastunpend2)
	FAST_UNPEND(3,fastunpend3)
	FAST_UNPEND(4,fastunpend4)
	FAST_UNPEND(5,fastunpend5)
	FAST_UNPEND(6,fastunpend6)
	FAST_UNPEND(7,fastunpend7)
	FAST_UNPEND(8,fastunpend8)
	FAST_UNPEND(9,fastunpend9)
	FAST_UNPEND(10,fastunpend10)
	FAST_UNPEND(11,fastunpend11)
	FAST_UNPEND(12,fastunpend12)
	FAST_UNPEND(13,fastunpend13)
	FAST_UNPEND(14,fastunpend14)
	FAST_UNPEND(15,fastunpend15)
	FAST_UNPEND(16,fastunpend16)
	FAST_UNPEND(17,fastunpend17)
	FAST_UNPEND(18,fastunpend18)
	FAST_UNPEND(19,fastunpend19)
	FAST_UNPEND(20,fastunpend20)
	FAST_UNPEND(21,fastunpend21)
	FAST_UNPEND(22,fastunpend22)
	FAST_UNPEND(23,fastunpend23)
	FAST_UNPEND(24,fastunpend24)
	FAST_UNPEND(25,fastunpend25)
	FAST_UNPEND(26,fastunpend26)
	FAST_UNPEND(27,fastunpend27)
	FAST_UNPEND(28,fastunpend28)
	FAST_UNPEND(29,fastunpend29)
	FAST_UNPEND(30,fastunpend30)
	FAST_UNPEND(31,fastunpend31)
MCOUNT_LABEL(eintr)
628
/*
 * Executed by a CPU when it receives a RENDEZVOUS IPI from another CPU.
 *
 * - Calls the generic rendezvous action function.
 */
	.text
	SUPERALIGN_TEXT
	.globl	Xrendezvous
Xrendezvous:
	PUSH_FRAME
	movl	$KDSEL, %eax
	mov	%ax, %ds		/* use KERNEL data segment */
	mov	%ax, %es
	movl	$KPSEL, %eax
	mov	%ax, %fs

	call	smp_rendezvous_action

	movl	$0, lapic+LA_EOI	/* End Of Interrupt to APIC */
	POP_FRAME
	iret
650
651
652	.data
653
654	.globl	apic_pin_trigger
655apic_pin_trigger:
656	.long	0
657
658	.text
659