apic_vector.s revision 66698
/*
 *	from: vector.s, 386BSD 0.1 unknown origin
 * $FreeBSD: head/sys/i386/i386/apic_vector.s 66698 2000-10-05 23:09:57Z jhb $
 */


#include <machine/apic.h>
#include <machine/smp.h>

#include "i386/isa/intr_machdep.h"

/* convert an absolute IRQ# into a bitmask */
#define IRQ_BIT(irq_num)	(1 << (irq_num))

/* make an index into the IO APIC from the IRQ# */
#define REDTBL_IDX(irq_num)	(0x10 + ((irq_num) * 2))
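
/*
 * Worked example: for IRQ 9, IRQ_BIT(9) == 0x00000200 and
 * REDTBL_IDX(9) == 0x10 + 18 == 0x22, the select value for the low
 * dword of I/O APIC redirection entry 9.
 */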


/*
 * Macros for interrupt entry, call to handler, and exit.
 */

#define	FAST_INTR(irq_num, vec_name)					\
	.text ;								\
	SUPERALIGN_TEXT ;						\
IDTVEC(vec_name) ;							\
	pushl	%eax ;		/* save only call-used registers */	\
	pushl	%ecx ;							\
	pushl	%edx ;							\
	pushl	%ds ;							\
	MAYBE_PUSHL_ES ;						\
	pushl	%fs ;							\
	movl	$KDSEL,%eax ;						\
	mov	%ax,%ds ;						\
	MAYBE_MOVW_AX_ES ;						\
	movl	$KPSEL,%eax ;						\
	mov	%ax,%fs ;						\
	FAKE_MCOUNT((5+ACTUALLY_PUSHED)*4(%esp)) ;			\
	pushl	_intr_unit + (irq_num) * 4 ;				\
	call	*_intr_handler + (irq_num) * 4 ; /* do the work ASAP */ \
	addl	$4, %esp ;						\
	movl	$0, lapic_eoi ;						\
	lock ;								\
	incl	_cnt+V_INTR ;	/* book-keeping can wait */		\
	movl	_intr_countp + (irq_num) * 4, %eax ;			\
	lock ;								\
	incl	(%eax) ;						\
	MEXITCOUNT ;							\
	popl	%fs ;							\
	MAYBE_POPL_ES ;							\
	popl	%ds ;							\
	popl	%edx ;							\
	popl	%ecx ;							\
	popl	%eax ;							\
	iret
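
/*
 * Rough C equivalent of the fast path above, for reference only; the
 * real entry must stay in assembly to control exactly which registers
 * are saved.  Names mirror the kernel symbols used above (sans the
 * leading underscore); the increments are locked incl's in the asm.
 */
#if 0
static void
fast_intr_sketch(int irq_num)
{
	(*intr_handler[irq_num])(intr_unit[irq_num]);	/* do the work ASAP */
	lapic_eoi = 0;					/* ack the local APIC */
	cnt.v_intr++;					/* book-keeping can wait */
	(*intr_countp[irq_num])++;
}
#endif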

/*
 * Push and pop a complete trap frame (with dummy error-code and
 * trap-type words) around the slow-path handlers below.
 */
#define PUSH_FRAME							\
	pushl	$0 ;		/* dummy error code */			\
	pushl	$0 ;		/* dummy trap type */			\
	pushal ;							\
	pushl	%ds ;		/* save data and extra segments ... */	\
	pushl	%es ;							\
	pushl	%fs

#define POP_FRAME							\
	popl	%fs ;							\
	popl	%es ;							\
	popl	%ds ;							\
	popal ;								\
	addl	$4+4,%esp
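
/*
 * Together with the eip/cs/eflags pushed by the hardware, PUSH_FRAME
 * lays down the layout of a struct trapframe: fs, es, ds, the eight
 * pushal registers, then the two dummy words for trapno and err.
 */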

#define IOAPICADDR(irq_num) CNAME(int_to_apicintpin) + 16 * (irq_num) + 8
#define REDIRIDX(irq_num) CNAME(int_to_apicintpin) + 16 * (irq_num) + 12

#define MASK_IRQ(irq_num)						\
	IMASK_LOCK ;				/* into critical reg */	\
	testl	$IRQ_BIT(irq_num), _apic_imen ;				\
	jne	7f ;			/* masked, don't mask */	\
	orl	$IRQ_BIT(irq_num), _apic_imen ;	/* set the mask bit */	\
	movl	IOAPICADDR(irq_num), %ecx ;	/* ioapic addr */	\
	movl	REDIRIDX(irq_num), %eax ;	/* get the index */	\
	movl	%eax, (%ecx) ;			/* write the index */	\
	movl	IOAPIC_WINDOW(%ecx), %eax ;	/* current value */	\
	orl	$IOART_INTMASK, %eax ;		/* set the mask */	\
	movl	%eax, IOAPIC_WINDOW(%ecx) ;	/* new value */		\
7: ;						/* already masked */	\
	IMASK_UNLOCK
/*
 * Test to see whether we are handling an edge- or level-triggered INT.
 *  Level-triggered INTs must still be masked as we don't clear the source,
 *  and the EOI cycle would cause redundant INTs to occur.
 */
#define MASK_LEVEL_IRQ(irq_num)						\
	testl	$IRQ_BIT(irq_num), _apic_pin_trigger ;			\
	jz	9f ;				/* edge, don't mask */	\
	MASK_IRQ(irq_num) ;						\
9:
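
/*
 * C sketch of MASK_LEVEL_IRQ/MASK_IRQ above, for reference; the
 * struct field names are illustrative stand-ins for the
 * int_to_apicintpin[] byte offsets used by IOAPICADDR/REDIRIDX.
 */
#if 0
static void
mask_level_irq_sketch(int irq_num)
{
	volatile u_int *ioapic;

	if ((apic_pin_trigger & IRQ_BIT(irq_num)) == 0)
		return;				/* edge, don't mask */
	if (apic_imen & IRQ_BIT(irq_num))
		return;				/* already masked */
	apic_imen |= IRQ_BIT(irq_num);		/* set the mask bit */
	ioapic = int_to_apicintpin[irq_num].apic_address;
	ioapic[0] = int_to_apicintpin[irq_num].redirindex;
	ioapic[IOAPIC_WINDOW / 4] |= IOART_INTMASK;
}
#endif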


#ifdef APIC_INTR_REORDER
#define EOI_IRQ(irq_num)						\
	movl	_apic_isrbit_location + 8 * (irq_num), %eax ;		\
	movl	(%eax), %eax ;						\
	testl	_apic_isrbit_location + 4 + 8 * (irq_num), %eax ;	\
	jz	9f ;				/* not active */	\
	movl	$0, lapic_eoi ;						\
	APIC_ITRACE(apic_itrace_eoi, irq_num, APIC_ITRACE_EOI) ;	\
9:

#else
#define EOI_IRQ(irq_num)						\
	testl	$IRQ_BIT(irq_num), lapic_isr1;				\
	jz	9f	;			/* not active */	\
	movl	$0, lapic_eoi;						\
	APIC_ITRACE(apic_itrace_eoi, irq_num, APIC_ITRACE_EOI) ;	\
9:
#endif
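
/*
 * The non-reorder case works because hardware interrupt vectors are
 * based at 32: IRQ N is vector 32 + N, so its in-service bit is bit N
 * of the second 32-bit ISR word, lapic_isr1.
 */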


/*
 * Test to see if the source is currently masked; if so, clear the mask.
 */
#define UNMASK_IRQ(irq_num)						\
	IMASK_LOCK ;				/* into critical reg */	\
	testl	$IRQ_BIT(irq_num), _apic_imen ;				\
	je	7f ;			/* bit clear, not masked */	\
	andl	$~IRQ_BIT(irq_num), _apic_imen ;/* clear mask bit */	\
	movl	IOAPICADDR(irq_num),%ecx ;	/* ioapic addr */	\
	movl	REDIRIDX(irq_num), %eax ;	/* get the index */	\
	movl	%eax,(%ecx) ;			/* write the index */	\
	movl	IOAPIC_WINDOW(%ecx),%eax ;	/* current value */	\
	andl	$~IOART_INTMASK,%eax ;		/* clear the mask */	\
	movl	%eax,IOAPIC_WINDOW(%ecx) ;	/* new value */		\
7: ;									\
	IMASK_UNLOCK

#ifdef APIC_INTR_DIAGNOSTIC
#ifdef APIC_INTR_DIAGNOSTIC_IRQ
log_intr_event:
	pushf
	cli
	pushl	$CNAME(apic_itrace_debuglock)
	call	CNAME(s_lock_np)
	addl	$4, %esp
	movl	CNAME(apic_itrace_debugbuffer_idx), %ecx
	andl	$32767, %ecx
	movl	_cpuid, %eax
	shll	$8,	%eax
	orl	8(%esp), %eax
	movw	%ax,	CNAME(apic_itrace_debugbuffer)(,%ecx,2)
	incl	%ecx
	andl	$32767, %ecx
	movl	%ecx,	CNAME(apic_itrace_debugbuffer_idx)
	pushl	$CNAME(apic_itrace_debuglock)
	call	CNAME(s_unlock_np)
	addl	$4, %esp
	popf
	ret
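
/*
 * Each 16-bit entry in the 32768-entry trace ring encodes the event
 * id (the stack argument) in the low byte and the cpu number in the
 * byte above it.
 */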


#define APIC_ITRACE(name, irq_num, id)					\
	lock ;					/* MP-safe */		\
	incl	CNAME(name) + (irq_num) * 4 ;				\
	pushl	%eax ;							\
	pushl	%ecx ;							\
	pushl	%edx ;							\
	movl	$(irq_num), %eax ;					\
	cmpl	$APIC_INTR_DIAGNOSTIC_IRQ, %eax ;			\
	jne	7f ;							\
	pushl	$id ;							\
	call	log_intr_event ;					\
	addl	$4, %esp ;						\
7: ;									\
	popl	%edx ;							\
	popl	%ecx ;							\
	popl	%eax
#else
#define APIC_ITRACE(name, irq_num, id)					\
	lock ;					/* MP-safe */		\
	incl	CNAME(name) + (irq_num) * 4
#endif

#define APIC_ITRACE_ENTER 1
#define APIC_ITRACE_EOI 2
#define APIC_ITRACE_TRYISRLOCK 3
#define APIC_ITRACE_GOTISRLOCK 4
#define APIC_ITRACE_ENTER2 5
#define APIC_ITRACE_LEAVE 6
#define APIC_ITRACE_UNMASK 7
#define APIC_ITRACE_ACTIVE 8
#define APIC_ITRACE_MASKED 9
#define APIC_ITRACE_NOISRLOCK 10
#define APIC_ITRACE_MASKED2 11
#define APIC_ITRACE_SPLZ 12
#define APIC_ITRACE_DORETI 13

#else
#define APIC_ITRACE(name, irq_num, id)
#endif

/*
 * Slow, threaded interrupts.
 *
 * XXX Most of the parameters here are obsolete.  Fix this when we're
 * done.
 * XXX we really shouldn't return via doreti if we just schedule the
 * interrupt handler and don't run anything.  We could just do an
 * iret.  FIXME.
 */
#define	INTR(irq_num, vec_name, maybe_extra_ipending)			\
	.text ;								\
	SUPERALIGN_TEXT ;						\
/* _XintrNN: entry point used by IDT/HWIs & splz_unpend via _vec[]. */	\
IDTVEC(vec_name) ;							\
	PUSH_FRAME ;							\
	movl	$KDSEL, %eax ;	/* reload with kernel's data segment */	\
	mov	%ax, %ds ;						\
	mov	%ax, %es ;						\
	movl	$KPSEL, %eax ;						\
	mov	%ax, %fs ;						\
;									\
	maybe_extra_ipending ;						\
;									\
	APIC_ITRACE(apic_itrace_enter, irq_num, APIC_ITRACE_ENTER) ;	\
;									\
	MASK_LEVEL_IRQ(irq_num) ;					\
	EOI_IRQ(irq_num) ;						\
0: ;									\
	incb	_intr_nesting_level ;					\
;									\
  /* entry point used by doreti_unpend for HWIs. */			\
__CONCAT(Xresume,irq_num): ;						\
	FAKE_MCOUNT(13*4(%esp)) ;		/* XXX avoid dbl cnt */ \
	pushl	$irq_num;			/* pass the IRQ */	\
	APIC_ITRACE(apic_itrace_enter2, irq_num, APIC_ITRACE_ENTER2) ;	\
	sti ;								\
	call	_sched_ithd ;						\
	addl	$4, %esp ;		/* discard the parameter */	\
	APIC_ITRACE(apic_itrace_leave, irq_num, APIC_ITRACE_LEAVE) ;	\
;									\
	MEXITCOUNT ;							\
	jmp	doreti_next
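
/*
 * C sketch of the threaded path above, for reference: mask a
 * level-triggered source, EOI the local APIC, then hand the IRQ to
 * the interrupt-thread scheduler; the handlers run later in the
 * ithread and the source is unmasked afterwards.  The sched_ithd
 * prototype shown is illustrative.
 */
#if 0
static void
intr_sketch(int irq_num)
{
	mask_level_irq_sketch(irq_num);	/* level: mask until handled */
	lapic_eoi = 0;			/* EOI if in service */
	intr_nesting_level++;
	enable_intr();			/* sti */
	sched_ithd((void *)irq_num);	/* schedule the ithread */
}
#endif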

/*
 * Handle "spurious INTerrupts".
 * Notes:
 *  This is different from the "spurious INTerrupt" generated by an
 *   8259 PIC for missing INTs.  See the APIC documentation for details.
 *  This routine should NOT do an 'EOI' cycle.
 */
	.text
	SUPERALIGN_TEXT
	.globl _Xspuriousint
_Xspuriousint:

	/* No EOI cycle used here */

	iret


/*
 * Handle TLB shootdowns.
 */
	.text
	SUPERALIGN_TEXT
	.globl	_Xinvltlb
_Xinvltlb:
	pushl	%eax

#ifdef COUNT_XINVLTLB_HITS
	pushl	%fs
	movl	$KPSEL, %eax
	mov	%ax, %fs
	movl	_cpuid, %eax
	popl	%fs
	ss
	incl	_xhits(,%eax,4)
#endif /* COUNT_XINVLTLB_HITS */

	movl	%cr3, %eax		/* invalidate the TLB */
	movl	%eax, %cr3

	ss				/* stack segment, avoid %ds load */
	movl	$0, lapic_eoi		/* End Of Interrupt to APIC */

	popl	%eax
	iret
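
/*
 * C sketch, for reference: writing %cr3 back to itself flushes all
 * non-global TLB entries, which is the entire job of this IPI.
 */
#if 0
static void
invltlb_sketch(void)
{
	load_cr3(rcr3());	/* reload %cr3: invalidate the TLB */
	lapic_eoi = 0;		/* End Of Interrupt to APIC */
}
#endif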


#ifdef BETTER_CLOCK

/*
 * Executed by a CPU when it receives an Xcpucheckstate IPI from another CPU:
 *
 *  - Stores current cpu state in checkstate_cpustate[cpuid]
 *      0 == user, 1 == sys, 2 == intr
 *  - Stores current process in checkstate_curproc[cpuid]
 *
 *  - Signals its receipt by setting bit cpuid in checkstate_probed_cpus.
 *
 * stack: 0->fs, 4->ds, 8->ebx, 12->eax, 16->eip, 20->cs, 24->eflags
 */

	.text
	SUPERALIGN_TEXT
	.globl _Xcpucheckstate
	.globl _checkstate_cpustate
	.globl _checkstate_curproc
	.globl _checkstate_pc
_Xcpucheckstate:
	pushl	%eax
	pushl	%ebx
	pushl	%ds			/* save current data segment */
	pushl	%fs

	movl	$KDSEL, %eax
	mov	%ax, %ds		/* use KERNEL data segment */
	movl	$KPSEL, %eax
	mov	%ax, %fs

	movl	$0, lapic_eoi		/* End Of Interrupt to APIC */

	movl	$0, %ebx
	movl	20(%esp), %eax
	andl	$3, %eax
	cmpl	$3, %eax
	je	1f
	testl	$PSL_VM, 24(%esp)
	jne	1f
	incl	%ebx			/* system or interrupt */
1:
	movl	_cpuid, %eax
	movl	%ebx, _checkstate_cpustate(,%eax,4)
	movl	_curproc, %ebx
	movl	%ebx, _checkstate_curproc(,%eax,4)
	movl	16(%esp), %ebx
	movl	%ebx, _checkstate_pc(,%eax,4)

	lock				/* checkstate_probed_cpus |= (1<<id) */
	btsl	%eax, _checkstate_probed_cpus

	popl	%fs
	popl	%ds			/* restore previous data segment */
	popl	%ebx
	popl	%eax
	iret
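
/*
 * C sketch of the classification above, for reference; "frame" stands
 * for the interrupted context, fields named as in struct trapframe.
 */
#if 0
	state = 0;					/* user */
	if ((frame.tf_cs & 3) != 3 && !(frame.tf_eflags & PSL_VM))
		state = 1;				/* system or interrupt */
	checkstate_cpustate[cpuid] = state;
	checkstate_curproc[cpuid] = curproc;
	checkstate_pc[cpuid] = frame.tf_eip;
	atomic_set_int(&checkstate_probed_cpus, 1 << cpuid);
#endif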

#endif /* BETTER_CLOCK */

/*
 * Executed by a CPU when it receives an Xcpuast IPI from another CPU:
 *
 *  - Signals its receipt by clearing bit cpuid in checkstate_need_ast.
 *
 *  - We need a better method of triggering asts on other cpus.
 */

	.text
	SUPERALIGN_TEXT
	.globl _Xcpuast
_Xcpuast:
	PUSH_FRAME
	movl	$KDSEL, %eax
	mov	%ax, %ds		/* use KERNEL data segment */
	mov	%ax, %es
	movl	$KPSEL, %eax
	mov	%ax, %fs

	movl	_cpuid, %eax
	lock				/* checkstate_need_ast &= ~(1<<id) */
	btrl	%eax, _checkstate_need_ast
	movl	$0, lapic_eoi		/* End Of Interrupt to APIC */

	lock
	btsl	%eax, _checkstate_pending_ast
	jc	1f

	FAKE_MCOUNT(13*4(%esp))

	orl	$AST_PENDING, _astpending	/* XXX */
	incb	_intr_nesting_level
	sti

	movl	_cpuid, %eax
	lock
	btrl	%eax, _checkstate_pending_ast
	lock
	btrl	%eax, CNAME(resched_cpus)
	jnc	2f
	orl	$AST_PENDING+AST_RESCHED,_astpending
	lock
	incl	CNAME(want_resched_cnt)
2:
	lock
	incl	CNAME(cpuast_cnt)
	MEXITCOUNT
	jmp	doreti_next
1:
	/* We are already in the process of delivering an ast for this CPU */
	POP_FRAME
	iret


/*
 * Executed by a CPU when it receives an XFORWARD_IRQ IPI.
 */

	.text
	SUPERALIGN_TEXT
	.globl _Xforward_irq
_Xforward_irq:
	PUSH_FRAME
	movl	$KDSEL, %eax
	mov	%ax, %ds		/* use KERNEL data segment */
	mov	%ax, %es
	movl	$KPSEL, %eax
	mov	%ax, %fs

	movl	$0, lapic_eoi		/* End Of Interrupt to APIC */

	FAKE_MCOUNT(13*4(%esp))

	lock
	incl	CNAME(forward_irq_hitcnt)
	cmpb	$4, _intr_nesting_level
	jae	1f

	incb	_intr_nesting_level
	sti

	MEXITCOUNT
	jmp	doreti_next		/* Handle forwarded interrupt */
1:
	lock
	incl	CNAME(forward_irq_toodeepcnt)
	MEXITCOUNT
	POP_FRAME
	iret

#if 0
/*
 * Forward an interrupt to the CPU holding the kernel lock
 * (currently disabled; see the XXX below about mp_lock).
 */
forward_irq:
	MCOUNT
	cmpl	$0,_invltlb_ok
	jz	4f

	cmpl	$0, CNAME(forward_irq_enabled)
	jz	4f

/* XXX - this is broken now, because mp_lock doesn't exist
	movl	_mp_lock,%eax
	cmpl	$FREE_LOCK,%eax
	jne	1f
 */
	movl	$0, %eax		/* pick CPU #0 if no one has the lock */
1:
	shrl	$24,%eax
	movl	_cpu_num_to_apic_id(,%eax,4),%ecx
	shll	$24,%ecx
	movl	lapic_icr_hi, %eax
	andl	$~APIC_ID_MASK, %eax
	orl	%ecx, %eax
	movl	%eax, lapic_icr_hi

2:
	movl	lapic_icr_lo, %eax
	andl	$APIC_DELSTAT_MASK,%eax
	jnz	2b
	movl	lapic_icr_lo, %eax
	andl	$APIC_RESV2_MASK, %eax
	orl	$(APIC_DEST_DESTFLD|APIC_DELMODE_FIXED|XFORWARD_IRQ_OFFSET), %eax
	movl	%eax, lapic_icr_lo
3:
	movl	lapic_icr_lo, %eax
	andl	$APIC_DELSTAT_MASK,%eax
	jnz	3b
4:
	ret
#endif

/*
 * Executed by a CPU when it receives an Xcpustop IPI from another CPU:
 *
 *  - Signals its receipt.
 *  - Waits for permission to restart.
 *  - Signals its restart.
 */

	.text
	SUPERALIGN_TEXT
	.globl _Xcpustop
_Xcpustop:
	pushl	%ebp
	movl	%esp, %ebp
	pushl	%eax
	pushl	%ecx
	pushl	%edx
	pushl	%ds			/* save current data segment */
	pushl	%fs

	movl	$KDSEL, %eax
	mov	%ax, %ds		/* use KERNEL data segment */
	movl	$KPSEL, %eax
	mov	%ax, %fs

	movl	$0, lapic_eoi		/* End Of Interrupt to APIC */

	movl	_cpuid, %eax
	imull	$PCB_SIZE, %eax
	leal	CNAME(stoppcbs)(%eax), %eax
	pushl	%eax
	call	CNAME(savectx)		/* Save process context */
	addl	$4, %esp

	movl	_cpuid, %eax

	lock
	btsl	%eax, _stopped_cpus	/* stopped_cpus |= (1<<id) */
1:
	btl	%eax, _started_cpus	/* while (!(started_cpus & (1<<id))) */
	jnc	1b

	lock
	btrl	%eax, _started_cpus	/* started_cpus &= ~(1<<id) */
	lock
	btrl	%eax, _stopped_cpus	/* stopped_cpus &= ~(1<<id) */

	test	%eax, %eax
	jnz	2f

	movl	CNAME(cpustop_restartfunc), %eax
	test	%eax, %eax
	jz	2f
	movl	$0, CNAME(cpustop_restartfunc)	/* One-shot */

	call	*%eax
2:
	popl	%fs
	popl	%ds			/* restore previous data segment */
	popl	%edx
	popl	%ecx
	popl	%eax
	movl	%ebp, %esp
	popl	%ebp
	iret
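
/*
 * C sketch of the stop/restart handshake above, as seen from this
 * CPU; the other side is stop_cpus()/restart_cpus().
 */
#if 0
static void
cpustop_sketch(void)
{
	int cpu = cpuid;

	savectx(&stoppcbs[cpu]);			/* save context */
	atomic_set_int(&stopped_cpus, 1 << cpu);	/* signal receipt */
	while ((started_cpus & (1 << cpu)) == 0)
		;					/* wait for restart */
	atomic_clear_int(&started_cpus, 1 << cpu);
	atomic_clear_int(&stopped_cpus, 1 << cpu);	/* signal restart */
	if (cpu == 0 && cpustop_restartfunc != NULL) {
		void (*func)(void) = cpustop_restartfunc;

		cpustop_restartfunc = NULL;		/* one-shot */
		(*func)();
	}
}
#endif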


MCOUNT_LABEL(bintr)
	FAST_INTR(0,fastintr0)
	FAST_INTR(1,fastintr1)
	FAST_INTR(2,fastintr2)
	FAST_INTR(3,fastintr3)
	FAST_INTR(4,fastintr4)
	FAST_INTR(5,fastintr5)
	FAST_INTR(6,fastintr6)
	FAST_INTR(7,fastintr7)
	FAST_INTR(8,fastintr8)
	FAST_INTR(9,fastintr9)
	FAST_INTR(10,fastintr10)
	FAST_INTR(11,fastintr11)
	FAST_INTR(12,fastintr12)
	FAST_INTR(13,fastintr13)
	FAST_INTR(14,fastintr14)
	FAST_INTR(15,fastintr15)
	FAST_INTR(16,fastintr16)
	FAST_INTR(17,fastintr17)
	FAST_INTR(18,fastintr18)
	FAST_INTR(19,fastintr19)
	FAST_INTR(20,fastintr20)
	FAST_INTR(21,fastintr21)
	FAST_INTR(22,fastintr22)
	FAST_INTR(23,fastintr23)
#define	CLKINTR_PENDING	movl $1,CNAME(clkintr_pending)
/* Threaded interrupts */
	INTR(0,intr0, CLKINTR_PENDING)
	INTR(1,intr1,)
	INTR(2,intr2,)
	INTR(3,intr3,)
	INTR(4,intr4,)
	INTR(5,intr5,)
	INTR(6,intr6,)
	INTR(7,intr7,)
	INTR(8,intr8,)
	INTR(9,intr9,)
	INTR(10,intr10,)
	INTR(11,intr11,)
	INTR(12,intr12,)
	INTR(13,intr13,)
	INTR(14,intr14,)
	INTR(15,intr15,)
	INTR(16,intr16,)
	INTR(17,intr17,)
	INTR(18,intr18,)
	INTR(19,intr19,)
	INTR(20,intr20,)
	INTR(21,intr21,)
	INTR(22,intr22,)
	INTR(23,intr23,)
MCOUNT_LABEL(eintr)

/*
 * Executed by a CPU when it receives a RENDEZVOUS IPI from another CPU.
 *
 * - Calls the generic rendezvous action function.
 */
	.text
	SUPERALIGN_TEXT
	.globl	_Xrendezvous
_Xrendezvous:
	PUSH_FRAME
	movl	$KDSEL, %eax
	mov	%ax, %ds		/* use KERNEL data segment */
	mov	%ax, %es
	movl	$KPSEL, %eax
	mov	%ax, %fs

	call	_smp_rendezvous_action

	movl	$0, lapic_eoi		/* End Of Interrupt to APIC */
	POP_FRAME
	iret
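
/*
 * Caller-side sketch: this IPI is the delivery vehicle for
 * smp_rendezvous(), which runs (setup, action, teardown) on every
 * CPU in lockstep, e.g.:
 *
 *	smp_rendezvous(NULL, some_action, NULL, arg);
 */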


	.data
#if 0
/* active flag for lazy masking */
iactive:
	.long	0
#endif

#ifdef COUNT_XINVLTLB_HITS
	.globl	_xhits
_xhits:
	.space	(NCPU * 4), 0
#endif /* COUNT_XINVLTLB_HITS */

/* variables used by stop_cpus()/restart_cpus()/Xcpustop */
	.globl _stopped_cpus, _started_cpus
_stopped_cpus:
	.long	0
_started_cpus:
	.long	0

#ifdef BETTER_CLOCK
	.globl _checkstate_probed_cpus
_checkstate_probed_cpus:
	.long	0
#endif /* BETTER_CLOCK */
	.globl _checkstate_need_ast
_checkstate_need_ast:
	.long	0
_checkstate_pending_ast:
	.long	0
	.globl CNAME(forward_irq_misscnt)
	.globl CNAME(forward_irq_toodeepcnt)
	.globl CNAME(forward_irq_hitcnt)
	.globl CNAME(resched_cpus)
	.globl CNAME(want_resched_cnt)
	.globl CNAME(cpuast_cnt)
	.globl CNAME(cpustop_restartfunc)
CNAME(forward_irq_misscnt):
	.long 0
CNAME(forward_irq_hitcnt):
	.long 0
CNAME(forward_irq_toodeepcnt):
	.long 0
CNAME(resched_cpus):
	.long 0
CNAME(want_resched_cnt):
	.long 0
CNAME(cpuast_cnt):
	.long 0
CNAME(cpustop_restartfunc):
	.long 0

	.globl	_apic_pin_trigger
_apic_pin_trigger:
	.long	0

	.text
689