/* apic_vector.s revision 70006 */
/*
 *	from: vector.s, 386BSD 0.1 unknown origin
 * $FreeBSD: head/sys/i386/i386/apic_vector.s 70006 2000-12-14 04:16:16Z jake $
 */


#include <machine/apic.h>
#include <machine/smp.h>

#include "i386/isa/intr_machdep.h"

/* convert an absolute IRQ# into a bitmask */
#define IRQ_BIT(irq_num)	(1 << (irq_num))

/* make an index into the IO APIC from the IRQ# */
#define REDTBL_IDX(irq_num)	(0x10 + ((irq_num) * 2))

/*
 * Trap-frame push/pop helpers.  PUSH_FRAME lays down dummy error-code
 * and trap-type words plus all general registers and the %ds/%es/%fs
 * segment registers; POP_FRAME undoes it and discards the two dummy
 * words.
 */
#define PUSH_FRAME							\
	pushl	$0 ;		/* dummy error code */			\
	pushl	$0 ;		/* dummy trap type */			\
	pushal ;							\
	pushl	%ds ;		/* save data and extra segments ... */	\
	pushl	%es ;							\
	pushl	%fs

#define POP_FRAME							\
	popl	%fs ;							\
	popl	%es ;							\
	popl	%ds ;							\
	popal ;								\
	addl	$4+4,%esp	/* dummy trap type and error code */

/*
 * Macros for interrupt entry, call to handler, and exit.
 */

/*
 * FAST_INTR(irq_num, vec_name):
 *  "Fast" interrupt entry point: load the kernel data/per-cpu segment
 *  registers, call the registered handler immediately with its unit
 *  argument, EOI the local APIC, then bump the statistics counters
 *  and leave via doreti.
 */
#define	FAST_INTR(irq_num, vec_name)					\
	.text ;								\
	SUPERALIGN_TEXT ;						\
IDTVEC(vec_name) ;							\
	PUSH_FRAME ;							\
	movl	$KDSEL,%eax ;						\
	mov	%ax,%ds ;						\
	mov	%ax,%es ;						\
	movl	$KPSEL,%eax ;						\
	mov	%ax,%fs ;						\
	FAKE_MCOUNT(13*4(%esp)) ;					\
	incb	PCPU(INTR_NESTING_LEVEL) ;				\
	pushl	_intr_unit + (irq_num) * 4 ;				\
	call	*_intr_handler + (irq_num) * 4 ; /* do the work ASAP */ \
	addl	$4, %esp ;						\
	movl	$0, _lapic+LA_EOI ;					\
	lock ; 								\
	incl	_cnt+V_INTR ;	/* book-keeping can wait */		\
	movl	_intr_countp + (irq_num) * 4, %eax ;			\
	lock ; 								\
	incl	(%eax) ;						\
	MEXITCOUNT ;							\
	jmp	_doreti

/*
 * int_to_apicintpin[] entries are 16 bytes each; offset 8 holds the
 * IO APIC register-select address, offset 12 the redirection index.
 */
#define IOAPICADDR(irq_num) CNAME(int_to_apicintpin) + 16 * (irq_num) + 8
#define REDIRIDX(irq_num) CNAME(int_to_apicintpin) + 16 * (irq_num) + 12

/*
 * MASK_IRQ(irq_num):
 *  Set the mask bit for irq_num in apic_imen and in its IO APIC
 *  redirection entry, unless already masked.  Clobbers %eax, %ecx
 *  and the flags; protected by the IMASK spinlock.
 */
#define MASK_IRQ(irq_num)						\
	IMASK_LOCK ;				/* into critical reg */	\
	testl	$IRQ_BIT(irq_num), _apic_imen ;				\
	jne	7f ;			/* masked, don't mask */	\
	orl	$IRQ_BIT(irq_num), _apic_imen ;	/* set the mask bit */	\
	movl	IOAPICADDR(irq_num), %ecx ;	/* ioapic addr */	\
	movl	REDIRIDX(irq_num), %eax ;	/* get the index */	\
	movl	%eax, (%ecx) ;			/* write the index */	\
	movl	IOAPIC_WINDOW(%ecx), %eax ;	/* current value */	\
	orl	$IOART_INTMASK, %eax ;		/* set the mask */	\
	movl	%eax, IOAPIC_WINDOW(%ecx) ;	/* new value */		\
7: ;						/* already masked */	\
	IMASK_UNLOCK
/*
 * Test to see whether we are handling an edge or level triggered INT.
 *  Level-triggered INTs must still be masked as we don't clear the source,
 *  and the EOI cycle would cause redundant INTs to occur.
 */
#define MASK_LEVEL_IRQ(irq_num)						\
	testl	$IRQ_BIT(irq_num), _apic_pin_trigger ;			\
	jz	9f ;				/* edge, don't mask */	\
	MASK_IRQ(irq_num) ;						\
9:


#ifdef APIC_INTR_REORDER
/*
 * EOI_IRQ(irq_num):
 *  Send an EOI to the local APIC iff the in-service bit for irq_num
 *  is set; the ISR word address and bit mask are cached per-irq in
 *  apic_isrbit_location[] (8 bytes per entry: pointer, then mask).
 */
#define EOI_IRQ(irq_num)						\
	movl	_apic_isrbit_location + 8 * (irq_num), %eax ;		\
	movl	(%eax), %eax ;						\
	testl	_apic_isrbit_location + 4 + 8 * (irq_num), %eax ;	\
	jz	9f ;				/* not active */	\
	movl	$0, _lapic+LA_EOI ;					\
	APIC_ITRACE(apic_itrace_eoi, irq_num, APIC_ITRACE_EOI) ;	\
9:

#else
/* Same, but test the in-service bit directly in the local APIC ISR. */
#define EOI_IRQ(irq_num)						\
	testl	$IRQ_BIT(irq_num), _lapic+LA_ISR1;			\
	jz	9f	;			/* not active */	\
	movl	$0, _lapic+LA_EOI;					\
	APIC_ITRACE(apic_itrace_eoi, irq_num, APIC_ITRACE_EOI) ;	\
9:
#endif


/*
 * Test to see if the source is currently masked, clear if so.
 * Clears the mask bit both in apic_imen and in the IO APIC
 * redirection entry.  Clobbers %eax, %ecx and the flags.
 */
#define UNMASK_IRQ(irq_num)					\
	IMASK_LOCK ;				/* into critical reg */	\
	testl	$IRQ_BIT(irq_num), _apic_imen ;				\
	je	7f ;			/* bit clear, not masked */	\
	andl	$~IRQ_BIT(irq_num), _apic_imen ;/* clear mask bit */	\
	movl	IOAPICADDR(irq_num), %ecx ;	/* ioapic addr */	\
	movl	REDIRIDX(irq_num), %eax ;	/* get the index */	\
	movl	%eax, (%ecx) ;			/* write the index */	\
	movl	IOAPIC_WINDOW(%ecx), %eax ;	/* current value */	\
	andl	$~IOART_INTMASK, %eax ;		/* clear the mask */	\
	movl	%eax, IOAPIC_WINDOW(%ecx) ;	/* new value */		\
7: ;						/* already unmasked */	\
	IMASK_UNLOCK

#ifdef APIC_INTR_DIAGNOSTIC
#ifdef APIC_INTR_DIAGNOSTIC_IRQ
/*
 * log_intr_event:
 *  Append an event word ((cpuid << 8) | event-id from 8(%esp)) to the
 *  circular 32768-entry apic_itrace_debugbuffer under its spinlock,
 *  with interrupts disabled across the update.
 */
log_intr_event:
	pushf
	cli
	pushl	$CNAME(apic_itrace_debuglock)
	call	CNAME(s_lock_np)
	addl	$4, %esp
	movl	CNAME(apic_itrace_debugbuffer_idx), %ecx
	andl	$32767, %ecx		/* wrap index to buffer size */
	movl	PCPU(CPUID), %eax
	shll	$8,	%eax
	orl	8(%esp), %eax		/* (cpuid << 8) | event id */
	movw	%ax,	CNAME(apic_itrace_debugbuffer)(,%ecx,2)
	incl	%ecx
	andl	$32767, %ecx
	movl	%ecx,	CNAME(apic_itrace_debugbuffer_idx)
	pushl	$CNAME(apic_itrace_debuglock)
	call	CNAME(s_unlock_np)
	addl	$4, %esp
	popf
	ret


/*
 * APIC_ITRACE(name, irq_num, id):
 *  Bump the per-irq counter 'name'; additionally, for the single irq
 *  selected by APIC_INTR_DIAGNOSTIC_IRQ, log the event id.
 */
#define APIC_ITRACE(name, irq_num, id)					\
	lock ;					/* MP-safe */		\
	incl	CNAME(name) + (irq_num) * 4 ;				\
	pushl	%eax ;							\
	pushl	%ecx ;							\
	pushl	%edx ;							\
	movl	$(irq_num), %eax ;					\
	cmpl	$APIC_INTR_DIAGNOSTIC_IRQ, %eax ;			\
	jne	7f ;							\
	pushl	$id ;							\
	call	log_intr_event ;					\
	addl	$4, %esp ;						\
7: ;									\
	popl	%edx ;							\
	popl	%ecx ;							\
	popl	%eax
#else
/* Diagnostics without per-irq logging: just bump the counter. */
#define APIC_ITRACE(name, irq_num, id)					\
	lock ;					/* MP-safe */		\
	incl	CNAME(name) + (irq_num) * 4
#endif

/* event ids recorded by APIC_ITRACE */
#define APIC_ITRACE_ENTER 1
#define APIC_ITRACE_EOI 2
#define APIC_ITRACE_TRYISRLOCK 3
#define APIC_ITRACE_GOTISRLOCK 4
#define APIC_ITRACE_ENTER2 5
#define APIC_ITRACE_LEAVE 6
#define APIC_ITRACE_UNMASK 7
#define APIC_ITRACE_ACTIVE 8
#define APIC_ITRACE_MASKED 9
#define APIC_ITRACE_NOISRLOCK 10
#define APIC_ITRACE_MASKED2 11
#define APIC_ITRACE_SPLZ 12
#define APIC_ITRACE_DORETI 13

#else
/* Diagnostics disabled: APIC_ITRACE expands to nothing. */
#define APIC_ITRACE(name, irq_num, id)
#endif

/*
 * Slow, threaded interrupts.
 *
 * Mask level-triggered sources, EOI the local APIC, then hand the irq
 * to sched_ithd() for the interrupt thread to run.
 *
 * XXX Most of the parameters here are obsolete.  Fix this when we're
 * done.
 * XXX we really shouldn't return via doreti if we just schedule the
 * interrupt handler and don't run anything.  We could just do an
 * iret.  FIXME.
 */
#define	INTR(irq_num, vec_name, maybe_extra_ipending)			\
	.text ;								\
	SUPERALIGN_TEXT ;						\
/* _XintrNN: entry point used by IDT/HWIs & splz_unpend via _vec[]. */	\
IDTVEC(vec_name) ;							\
	PUSH_FRAME ;							\
	movl	$KDSEL, %eax ;	/* reload with kernel's data segment */	\
	mov	%ax, %ds ;						\
	mov	%ax, %es ;						\
	movl	$KPSEL, %eax ;						\
	mov	%ax, %fs ;						\
;									\
	maybe_extra_ipending ;						\
;									\
	APIC_ITRACE(apic_itrace_enter, irq_num, APIC_ITRACE_ENTER) ;	\
;									\
	MASK_LEVEL_IRQ(irq_num) ;					\
	EOI_IRQ(irq_num) ;						\
0: ;									\
	incb	PCPU(INTR_NESTING_LEVEL) ;				\
;	 								\
  /* entry point used by doreti_unpend for HWIs. */			\
__CONCAT(Xresume,irq_num): ;						\
	FAKE_MCOUNT(13*4(%esp)) ;		/* XXX avoid dbl cnt */ \
	pushl	$irq_num;			/* pass the IRQ */	\
	APIC_ITRACE(apic_itrace_enter2, irq_num, APIC_ITRACE_ENTER2) ;	\
	sti ;								\
	call	_sched_ithd ;						\
	addl	$4, %esp ;		/* discard the parameter */	\
	APIC_ITRACE(apic_itrace_leave, irq_num, APIC_ITRACE_LEAVE) ;	\
;									\
	MEXITCOUNT ;							\
	jmp	_doreti

/*
 * Handle "spurious INTerrupts".
 * Notes:
 *  This is different than the "spurious INTerrupt" generated by an
 *   8259 PIC for missing INTs.  See the APIC documentation for details.
 *  This routine should NOT do an 'EOI' cycle.
 */
	.text
	SUPERALIGN_TEXT
	.globl _Xspuriousint
_Xspuriousint:

	/* No EOI cycle used here */

	iret


/*
 * Handle TLB shootdowns: reloading %cr3 invalidates the TLB, then EOI
 * the local APIC.  Only %eax is used, so no full frame is needed.
 */
	.text
	SUPERALIGN_TEXT
	.globl	_Xinvltlb
_Xinvltlb:
	pushl	%eax

#ifdef COUNT_XINVLTLB_HITS
	pushl	%fs
	movl	$KPSEL, %eax		/* need per-cpu segment for CPUID */
	mov	%ax, %fs
	movl	PCPU(CPUID), %eax
	popl	%fs
	ss
	incl	_xhits(,%eax,4)
#endif /* COUNT_XINVLTLB_HITS */

	movl	%cr3, %eax		/* invalidate the TLB */
	movl	%eax, %cr3

	ss				/* stack segment, avoid %ds load */
	movl	$0, _lapic+LA_EOI	/* End Of Interrupt to APIC */

	popl	%eax
	iret


#ifdef BETTER_CLOCK

/*
 * Executed by a CPU when it receives an Xcpucheckstate IPI from another CPU,
 *
 *  - Stores current cpu state in checkstate_cpustate[cpuid]
 *      0 == user, 1 == sys, 2 == intr
 *  - Stores current process in checkstate_curproc[cpuid]
 *
 *  - Signals its receipt by setting bit cpuid in checkstate_probed_cpus.
 *
 * stack: 0->ds, 4->fs, 8->ebx, 12->eax, 16->eip, 20->cs, 24->eflags
 */

	.text
	SUPERALIGN_TEXT
	.globl _Xcpucheckstate
	.globl _checkstate_cpustate
	.globl _checkstate_curproc
	.globl _checkstate_pc
_Xcpucheckstate:
	pushl	%eax
	pushl	%ebx
	pushl	%ds			/* save current data segment */
	pushl	%fs

	movl	$KDSEL, %eax
	mov	%ax, %ds		/* use KERNEL data segment */
	movl	$KPSEL, %eax
	mov	%ax, %fs

	movl	$0, _lapic+LA_EOI	/* End Of Interrupt to APIC */

	movl	$0, %ebx		/* assume user mode */
	movl	20(%esp), %eax		/* interrupted %cs */
	andl	$3, %eax		/* privilege level */
	cmpl	$3, %eax
	je	1f
	testl	$PSL_VM, 24(%esp)	/* vm86 counts as user mode */
	jne	1f
	incl	%ebx			/* system or interrupt */
1:
	movl	PCPU(CPUID), %eax
	movl	%ebx, _checkstate_cpustate(,%eax,4)
	movl	PCPU(CURPROC), %ebx
	movl	%ebx, _checkstate_curproc(,%eax,4)

	movl	16(%esp), %ebx		/* interrupted %eip */
	movl	%ebx, _checkstate_pc(,%eax,4)

	lock				/* checkstate_probed_cpus |= (1<<id) */
	btsl	%eax, _checkstate_probed_cpus

	popl	%fs
	popl	%ds			/* restore previous data segment */
	popl	%ebx
	popl	%eax
	iret

#endif /* BETTER_CLOCK */

/*
 * Executed by a CPU when it receives an Xcpuast IPI from another CPU,
 *
 *  - Signals its receipt by clearing bit cpuid in checkstate_need_ast.
 *
 *  - We need a better method of triggering asts on other cpus.
 */

	.text
	SUPERALIGN_TEXT
	.globl _Xcpuast
_Xcpuast:
	PUSH_FRAME
	movl	$KDSEL, %eax
	mov	%ax, %ds		/* use KERNEL data segment */
	mov	%ax, %es
	movl	$KPSEL, %eax
	mov	%ax, %fs

	movl	PCPU(CPUID), %eax
	lock				/* checkstate_need_ast &= ~(1<<id) */
	btrl	%eax, _checkstate_need_ast
	movl	$0, _lapic+LA_EOI	/* End Of Interrupt to APIC */

	lock
	btsl	%eax, _checkstate_pending_ast
	jc	1f			/* bail if one is already pending */

	FAKE_MCOUNT(13*4(%esp))

	orl	$AST_PENDING, PCPU(ASTPENDING)	/* XXX */
	incb	PCPU(INTR_NESTING_LEVEL)
	sti

	movl	PCPU(CPUID), %eax
	lock
	btrl	%eax, _checkstate_pending_ast
	lock
	btrl	%eax, CNAME(resched_cpus)	/* reschedule requested? */
	jnc	2f
	orl	$AST_PENDING+AST_RESCHED, PCPU(ASTPENDING)
	lock
	incl	CNAME(want_resched_cnt)
2:
	lock
	incl	CNAME(cpuast_cnt)
	MEXITCOUNT
	jmp	_doreti
1:
	/* We are already in the process of delivering an ast for this CPU */
	POP_FRAME
	iret


/*
 * Executed by a CPU when it receives an XFORWARD_IRQ IPI.
 * Runs pending interrupt processing via doreti_next unless the
 * interrupt nesting is already too deep.
 */

	.text
	SUPERALIGN_TEXT
	.globl _Xforward_irq
_Xforward_irq:
	PUSH_FRAME
	movl	$KDSEL, %eax
	mov	%ax, %ds		/* use KERNEL data segment */
	mov	%ax, %es
	movl	$KPSEL, %eax
	mov	%ax, %fs

	movl	$0, _lapic+LA_EOI	/* End Of Interrupt to APIC */

	FAKE_MCOUNT(13*4(%esp))

	lock
	incl	CNAME(forward_irq_hitcnt)
	cmpb	$4, PCPU(INTR_NESTING_LEVEL)	/* nesting limit */
	jae	1f

	incb	PCPU(INTR_NESTING_LEVEL)
	sti

	MEXITCOUNT
	jmp	doreti_next		/* Handle forwarded interrupt */
1:
	lock
	incl	CNAME(forward_irq_toodeepcnt)
	MEXITCOUNT
	POP_FRAME
	iret

#if 0
/*
 * forward_irq: disabled (compiled out) helper that sent an
 * XFORWARD_IRQ IPI to a chosen CPU via the local APIC ICR.
 */
forward_irq:
	MCOUNT
	cmpl	$0,_invltlb_ok
	jz	4f

	cmpl	$0, CNAME(forward_irq_enabled)
	jz	4f

/* XXX - this is broken now, because mp_lock doesn't exist
	movl	_mp_lock,%eax
	cmpl	$FREE_LOCK,%eax
	jne	1f
 */
	movl	$0, %eax		/* Pick CPU #0 if noone has lock */
1:
	shrl	$24,%eax
	movl	_cpu_num_to_apic_id(,%eax,4),%ecx
	shll	$24,%ecx		/* destination apic id field */
	movl	_lapic+LA_ICR_HI, %eax
	andl	$~APIC_ID_MASK, %eax
	orl	%ecx, %eax
	movl	%eax, _lapic+LA_ICR_HI

2:
	movl	_lapic+LA_ICR_LO, %eax	/* wait for idle delivery status */
	andl	$APIC_DELSTAT_MASK,%eax
	jnz	2b
	movl	_lapic+LA_ICR_LO, %eax
	andl	$APIC_RESV2_MASK, %eax
	orl	$(APIC_DEST_DESTFLD|APIC_DELMODE_FIXED|XFORWARD_IRQ_OFFSET), %eax
	movl	%eax, _lapic+LA_ICR_LO
3:
	movl	_lapic+LA_ICR_LO, %eax	/* wait for delivery to complete */
	andl	$APIC_DELSTAT_MASK,%eax
	jnz	3b
4:
	ret
#endif

/*
 * Executed by a CPU when it receives an Xcpustop IPI from another CPU,
 *
 *  - Signals its receipt.
 *  - Waits for permission to restart.
 *  - Signals its restart.
 */

	.text
	SUPERALIGN_TEXT
	.globl _Xcpustop
_Xcpustop:
	pushl	%ebp
	movl	%esp, %ebp
	pushl	%eax
	pushl	%ecx
	pushl	%edx
	pushl	%ds			/* save current data segment */
	pushl	%fs

	movl	$KDSEL, %eax
	mov	%ax, %ds		/* use KERNEL data segment */
	movl	$KPSEL, %eax
	mov	%ax, %fs

	movl	$0, _lapic+LA_EOI	/* End Of Interrupt to APIC */

	movl	PCPU(CPUID), %eax
	imull	$PCB_SIZE, %eax
	leal	CNAME(stoppcbs)(%eax), %eax
	pushl	%eax
	call	CNAME(savectx)		/* Save process context */
	addl	$4, %esp


	movl	PCPU(CPUID), %eax

	lock
	btsl	%eax, _stopped_cpus	/* stopped_cpus |= (1<<id) */
1:
	btl	%eax, _started_cpus	/* while (!(started_cpus & (1<<id))) */
	jnc	1b

	lock
	btrl	%eax, _started_cpus	/* started_cpus &= ~(1<<id) */
	lock
	btrl	%eax, _stopped_cpus	/* stopped_cpus &= ~(1<<id) */

	test	%eax, %eax		/* only cpu 0 runs the restart hook */
	jnz	2f

	movl	CNAME(cpustop_restartfunc), %eax
	test	%eax, %eax
	jz	2f
	movl	$0, CNAME(cpustop_restartfunc)	/* One-shot */

	call	*%eax
2:
	popl	%fs
	popl	%ds			/* restore previous data segment */
	popl	%edx
	popl	%ecx
	popl	%eax
	movl	%ebp, %esp
	popl	%ebp
	iret


/* Instantiate the fast and threaded interrupt vectors for IRQ 0-31. */
MCOUNT_LABEL(bintr)
	FAST_INTR(0,fastintr0)
	FAST_INTR(1,fastintr1)
	FAST_INTR(2,fastintr2)
	FAST_INTR(3,fastintr3)
	FAST_INTR(4,fastintr4)
	FAST_INTR(5,fastintr5)
	FAST_INTR(6,fastintr6)
	FAST_INTR(7,fastintr7)
	FAST_INTR(8,fastintr8)
	FAST_INTR(9,fastintr9)
	FAST_INTR(10,fastintr10)
	FAST_INTR(11,fastintr11)
	FAST_INTR(12,fastintr12)
	FAST_INTR(13,fastintr13)
	FAST_INTR(14,fastintr14)
	FAST_INTR(15,fastintr15)
	FAST_INTR(16,fastintr16)
	FAST_INTR(17,fastintr17)
	FAST_INTR(18,fastintr18)
	FAST_INTR(19,fastintr19)
	FAST_INTR(20,fastintr20)
	FAST_INTR(21,fastintr21)
	FAST_INTR(22,fastintr22)
	FAST_INTR(23,fastintr23)
	FAST_INTR(24,fastintr24)
	FAST_INTR(25,fastintr25)
	FAST_INTR(26,fastintr26)
	FAST_INTR(27,fastintr27)
	FAST_INTR(28,fastintr28)
	FAST_INTR(29,fastintr29)
	FAST_INTR(30,fastintr30)
	FAST_INTR(31,fastintr31)
#define	CLKINTR_PENDING	movl $1,CNAME(clkintr_pending)
/* Threaded interrupts */
	INTR(0,intr0, CLKINTR_PENDING)
	INTR(1,intr1,)
	INTR(2,intr2,)
	INTR(3,intr3,)
	INTR(4,intr4,)
	INTR(5,intr5,)
	INTR(6,intr6,)
	INTR(7,intr7,)
	INTR(8,intr8,)
	INTR(9,intr9,)
	INTR(10,intr10,)
	INTR(11,intr11,)
	INTR(12,intr12,)
	INTR(13,intr13,)
	INTR(14,intr14,)
	INTR(15,intr15,)
	INTR(16,intr16,)
	INTR(17,intr17,)
	INTR(18,intr18,)
	INTR(19,intr19,)
	INTR(20,intr20,)
	INTR(21,intr21,)
	INTR(22,intr22,)
	INTR(23,intr23,)
	INTR(24,intr24,)
	INTR(25,intr25,)
	INTR(26,intr26,)
	INTR(27,intr27,)
	INTR(28,intr28,)
	INTR(29,intr29,)
	INTR(30,intr30,)
	INTR(31,intr31,)
MCOUNT_LABEL(eintr)

/*
 * Executed by a CPU when it receives a RENDEZVOUS IPI from another CPU.
 *
 * - Calls the generic rendezvous action function.
 */
	.text
	SUPERALIGN_TEXT
	.globl	_Xrendezvous
_Xrendezvous:
	PUSH_FRAME
	movl	$KDSEL, %eax
	mov	%ax, %ds		/* use KERNEL data segment */
	mov	%ax, %es
	movl	$KPSEL, %eax
	mov	%ax, %fs

	call	_smp_rendezvous_action

	movl	$0, _lapic+LA_EOI	/* End Of Interrupt to APIC */
	POP_FRAME
	iret


636	.data
637#if 0
638/* active flag for lazy masking */
639iactive:
640	.long	0
641#endif
642
643#ifdef COUNT_XINVLTLB_HITS
644	.globl	_xhits
645_xhits:
646	.space	(NCPU * 4), 0
647#endif /* COUNT_XINVLTLB_HITS */
648
649/* variables used by stop_cpus()/restart_cpus()/Xcpustop */
650	.globl _stopped_cpus, _started_cpus
651_stopped_cpus:
652	.long	0
653_started_cpus:
654	.long	0
655
656#ifdef BETTER_CLOCK
657	.globl _checkstate_probed_cpus
658_checkstate_probed_cpus:
659	.long	0
660#endif /* BETTER_CLOCK */
661	.globl _checkstate_need_ast
662_checkstate_need_ast:
663	.long	0
664_checkstate_pending_ast:
665	.long	0
666	.globl CNAME(forward_irq_misscnt)
667	.globl CNAME(forward_irq_toodeepcnt)
668	.globl CNAME(forward_irq_hitcnt)
669	.globl CNAME(resched_cpus)
670	.globl CNAME(want_resched_cnt)
671	.globl CNAME(cpuast_cnt)
672	.globl CNAME(cpustop_restartfunc)
673CNAME(forward_irq_misscnt):
674	.long 0
675CNAME(forward_irq_hitcnt):
676	.long 0
677CNAME(forward_irq_toodeepcnt):
678	.long 0
679CNAME(resched_cpus):
680	.long 0
681CNAME(want_resched_cnt):
682	.long 0
683CNAME(cpuast_cnt):
684	.long 0
685CNAME(cpustop_restartfunc):
686	.long 0
687
688	.globl	_apic_pin_trigger
689_apic_pin_trigger:
690	.long	0
691
692	.text
693