/*
 *	from: vector.s, 386BSD 0.1 unknown origin
 *	$Id: apic_vector.s,v 1.24 1997/12/08 22:59:34 fsmp Exp $
 */


#include <machine/apic.h>
#include <machine/smp.h>

#include "i386/isa/intr_machdep.h"


#ifdef FAST_SIMPLELOCK

#define GET_FAST_INTR_LOCK						\
	pushl	$_fast_intr_lock ;		/* address of lock */	\
	call	_s_lock ;			/* MP-safe */		\
	addl	$4,%esp

#define REL_FAST_INTR_LOCK						\
	pushl	$_fast_intr_lock ;		/* address of lock */	\
	call	_s_unlock ;			/* MP-safe */		\
	addl	$4,%esp

#else /* FAST_SIMPLELOCK */

#define GET_FAST_INTR_LOCK						\
	call	_get_isrlock

#define REL_FAST_INTR_LOCK						\
	pushl	$_mp_lock ;	/* GIANT_LOCK */			\
	call	_MPrellock ;						\
	addl	$4, %esp

#endif /* FAST_SIMPLELOCK */

/* convert an absolute IRQ# into a bitmask */
#define IRQ_BIT(irq_num)	(1 << (irq_num))

/* make an index into the IO APIC from the IRQ# */
#define REDTBL_IDX(irq_num)	(0x10 + ((irq_num) * 2))
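
/*
 * Example (illustration only): for IRQ 9, IRQ_BIT(9) == (1 << 9) == 0x200
 * and REDTBL_IDX(9) == 0x10 + (9 * 2) == 0x22, the select index of the
 * low 32 bits of I/O APIC redirection table entry 9; each entry is a
 * pair of 32-bit registers starting at register index 0x10.
 */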


/*
 * Macros for interrupt entry, call to handler, and exit.
 */

#ifdef FAST_WITHOUTCPL

/*
 * Fast interrupt handlers: run the handler immediately with only the
 * call-used registers saved, EOI, and return via iret, with no cpl
 * bookkeeping and no trip through doreti.
 */
#define	FAST_INTR(irq_num, vec_name)					\
	.text ;								\
	SUPERALIGN_TEXT ;						\
IDTVEC(vec_name) ;							\
	pushl	%eax ;		/* save only call-used registers */	\
	pushl	%ecx ;							\
	pushl	%edx ;							\
	pushl	%ds ;							\
	MAYBE_PUSHL_ES ;						\
	movl	$KDSEL,%eax ;						\
	movl	%ax,%ds ;						\
	MAYBE_MOVW_AX_ES ;						\
	FAKE_MCOUNT((4+ACTUALLY_PUSHED)*4(%esp)) ;			\
	pushl	_intr_unit + (irq_num) * 4 ;				\
	GET_FAST_INTR_LOCK ;						\
	call	*_intr_handler + (irq_num) * 4 ; /* do the work ASAP */ \
	REL_FAST_INTR_LOCK ;						\
	addl	$4, %esp ;						\
	movl	$0, lapic_eoi ;						\
	lock ; 								\
	incl	_cnt+V_INTR ;	/* book-keeping can wait */		\
	movl	_intr_countp + (irq_num) * 4, %eax ;			\
	lock ; 								\
	incl	(%eax) ;						\
	MEXITCOUNT ;							\
	MAYBE_POPL_ES ;							\
	popl	%ds ;							\
	popl	%edx ;							\
	popl	%ecx ;							\
	popl	%eax ;							\
	iret

#else /* FAST_WITHOUTCPL */

#define	FAST_INTR(irq_num, vec_name)					\
	.text ;								\
	SUPERALIGN_TEXT ;						\
IDTVEC(vec_name) ;							\
	pushl	%eax ;		/* save only call-used registers */	\
	pushl	%ecx ;							\
	pushl	%edx ;							\
	pushl	%ds ;							\
	MAYBE_PUSHL_ES ;						\
	movl	$KDSEL, %eax ;						\
	movl	%ax, %ds ;						\
	MAYBE_MOVW_AX_ES ;						\
	FAKE_MCOUNT((4+ACTUALLY_PUSHED)*4(%esp)) ;			\
	GET_FAST_INTR_LOCK ;						\
	pushl	_intr_unit + (irq_num) * 4 ;				\
	call	*_intr_handler + (irq_num) * 4 ; /* do the work ASAP */ \
	addl	$4, %esp ;						\
	movl	$0, lapic_eoi ;						\
	lock ; 								\
	incl	_cnt+V_INTR ;	/* book-keeping can wait */		\
	movl	_intr_countp + (irq_num) * 4,%eax ;			\
	lock ; 								\
	incl	(%eax) ;						\
	movl	_cpl, %eax ;	/* unmasking pending HWIs or SWIs? */	\
	notl	%eax ;							\
	andl	_ipending, %eax ;					\
	jne	2f ; 		/* yes, maybe handle them */		\
1: ;									\
	MEXITCOUNT ;							\
	REL_FAST_INTR_LOCK ;						\
	MAYBE_POPL_ES ;							\
	popl	%ds ;							\
	popl	%edx ;							\
	popl	%ecx ;							\
	popl	%eax ;							\
	iret ;								\
;									\
	ALIGN_TEXT ;							\
2: ;									\
	cmpb	$3, _intr_nesting_level ;	/* enough stack? */	\
	jae	1b ;		/* no, return */			\
	movl	_cpl, %eax ;						\
	/* XXX next line is probably unnecessary now. */		\
	movl	$HWI_MASK|SWI_MASK, _cpl ;	/* limit nesting ... */	\
	lock ; 								\
	incb	_intr_nesting_level ;	/* ... really limit it ... */	\
	sti ;			/* to do this as early as possible */	\
	MAYBE_POPL_ES ;		/* discard most of thin frame ... */	\
	popl	%ecx ;		/* ... original %ds ... */		\
	popl	%edx ;							\
	xchgl	%eax, 4(%esp) ;	/* orig %eax; save cpl */		\
	pushal ;		/* build fat frame (grrr) ... */	\
	pushl	%ecx ;		/* ... actually %ds ... */		\
	pushl	%es ;							\
	movl	$KDSEL, %eax ;						\
	movl	%ax, %es ;						\
	movl	(2+8+0)*4(%esp), %ecx ;	/* %ecx from thin frame ... */	\
	movl	%ecx, (2+6)*4(%esp) ;	/* ... to fat frame ... */	\
	movl	(2+8+1)*4(%esp), %eax ;	/* ... cpl from thin frame */	\
	pushl	%eax ;							\
	subl	$4, %esp ;	/* junk for unit number */		\
	MEXITCOUNT ;							\
	jmp	_doreti

#endif /* FAST_WITHOUTCPL */


/*
 * Trap-frame push/pop used by the INTR macros and the Xcpuast handler below.
 */
#define PUSH_FRAME							\
	pushl	$0 ;		/* dummy error code */			\
	pushl	$0 ;		/* dummy trap type */			\
	pushal ;							\
	pushl	%ds ;		/* save data and extra segments ... */	\
	pushl	%es

#define POP_FRAME							\
	popl	%es ;							\
	popl	%ds ;							\
	popal ;								\
	addl	$4+4,%esp	/* dummy trap type and error code */
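
/*
 * Sketch of the frame PUSH_FRAME leaves on the stack (offsets in bytes
 * from %esp; hardware interrupts supply no error code, hence the two
 * dummy words):
 *
 *    0 es     16 ebp    32 ecx    48 eip
 *    4 ds     20 esp    36 eax    52 cs
 *    8 edi    24 ebx    40 trapno 56 eflags
 *   12 esi    28 edx    44 err
 *
 * The FAKE_MCOUNT(12*4(%esp)) in the INTR macros below reads the saved
 * eip at offset 48.
 */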

/*
 * Test to see whether we are handling an edge- or level-triggered INT.
 *  Level-triggered INTs must still be masked as we don't clear the source,
 *  and the EOI cycle would cause redundant INTs to occur.
 */
#define MASK_LEVEL_IRQ(irq_num)						\
	testl	$IRQ_BIT(irq_num), _apic_pin_trigger ;			\
	jz	8f ;				/* edge, don't mask */	\
	IMASK_LOCK ;				/* into critical reg */	\
	orl	$IRQ_BIT(irq_num), _apic_imen ;	/* set the mask bit */	\
	movl	_ioapic, %ecx ;			/* ioapic[0] addr */	\
	movl	$REDTBL_IDX(irq_num), (%ecx) ;	/* write the index */	\
	movl	IOAPIC_WINDOW(%ecx), %eax ;	/* current value */	\
	orl	$IOART_INTMASK, %eax ;		/* set the mask */	\
	movl	%eax, IOAPIC_WINDOW(%ecx) ;	/* new value */		\
	IMASK_UNLOCK ;							\
8:
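
/*
 * Example (illustration only): a level-triggered interrupt on pin 16
 * has its bit set in _apic_pin_trigger, so MASK_LEVEL_IRQ(16) sets
 * IRQ_BIT(16) in _apic_imen, writes REDTBL_IDX(16) == 0x30 into the
 * I/O APIC select register, and sets IOART_INTMASK in the window
 * register; an edge-triggered pin branches straight to 8: unmasked.
 */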

/*
 * Test to see if the source is currently masked, clear if so.
 */
#define UNMASK_IRQ(irq_num)					\
	IMASK_LOCK ;				/* into critical reg */	\
	testl	$IRQ_BIT(irq_num), _apic_imen ;				\
	jne	7f ;				/* bit set, masked */	\
	testl	$IRQ_BIT(irq_num), _apic_pin_trigger ;			\
	jz	9f ;				/* edge, don't EOI */	\
	movl	$0, lapic_eoi ;			/* should be safe */	\
	jmp	9f ;				/* skip unmasking */	\
7:									\
	andl	$~IRQ_BIT(irq_num), _apic_imen ;/* clear mask bit */	\
	movl	_ioapic,%ecx ;			/* ioapic[0] addr */	\
	movl	$REDTBL_IDX(irq_num),(%ecx) ;	/* write the index */	\
	movl	IOAPIC_WINDOW(%ecx),%eax ;	/* current value */	\
	andl	$~IOART_INTMASK,%eax ;		/* clear the mask */	\
	movl	%eax,IOAPIC_WINDOW(%ecx) ;	/* new value */		\
9: ;									\
	IMASK_UNLOCK

#ifdef INTR_SIMPLELOCK
#define ENLOCK
#define DELOCK
#define LATELOCK call	_get_isrlock
#else
#define ENLOCK \
	ISR_TRYLOCK ;		/* XXX this is going away... */		\
	testl	%eax, %eax ;			/* did we get it? */	\
	jz	1f
#define DELOCK	ISR_RELLOCK
#define LATELOCK
#endif

#ifdef CPL_AND_CML

#define	INTR(irq_num, vec_name)						\
	.text ;								\
	SUPERALIGN_TEXT ;						\
/* _XintrNN: entry point used by IDT/HWIs & splz_unpend via _vec[]. */	\
IDTVEC(vec_name) ;							\
	PUSH_FRAME ;							\
	movl	$KDSEL, %eax ;	/* reload with kernel's data segment */	\
	movl	%ax, %ds ;						\
	movl	%ax, %es ;						\
;									\
	lock ;					/* MP-safe */		\
	btsl	$(irq_num), iactive ;		/* lazy masking */	\
	jc	1f ;				/* already active */	\
;									\
	ENLOCK ;							\
;									\
	AVCPL_LOCK ;				/* MP-safe */		\
	testl	$IRQ_BIT(irq_num), _cpl ;				\
	jne	2f ;				/* this INT masked */	\
	testl	$IRQ_BIT(irq_num), _cml ;				\
	jne	2f ;				/* this INT masked */	\
	orl	$IRQ_BIT(irq_num), _cil ;				\
	AVCPL_UNLOCK ;							\
;									\
;;;	movl	$0, lapic_eoi ;			/* XXX too soon? */	\
	incb	_intr_nesting_level ;					\
;	 								\
  /* entry point used by doreti_unpend for HWIs. */			\
__CONCAT(Xresume,irq_num): ;						\
	FAKE_MCOUNT(12*4(%esp)) ;		/* XXX avoid dbl cnt */ \
	lock ;	incl	_cnt+V_INTR ;		/* tally interrupts */	\
	movl	_intr_countp + (irq_num) * 4, %eax ;			\
	lock ;	incl	(%eax) ;					\
;									\
	AVCPL_LOCK ;				/* MP-safe */		\
	movl	_cml, %eax ;						\
	pushl	%eax ;							\
	orl	_intr_mask + (irq_num) * 4, %eax ;			\
	movl	%eax, _cml ;						\
	AVCPL_UNLOCK ;							\
;									\
	pushl	_intr_unit + (irq_num) * 4 ;				\
	incl	_inside_intr ;						\
	sti ;								\
	call	*_intr_handler + (irq_num) * 4 ;			\
	cli ;								\
	decl	_inside_intr ;						\
;									\
	lock ;	andl $~IRQ_BIT(irq_num), iactive ;			\
	lock ;	andl $~IRQ_BIT(irq_num), _cil ;				\
	UNMASK_IRQ(irq_num) ;						\
	sti ;				/* doreti repeats cli/sti */	\
	MEXITCOUNT ;							\
	LATELOCK ;							\
	jmp	_doreti ;						\
;									\
	ALIGN_TEXT ;							\
1: ;						/* active or locked */	\
	MASK_LEVEL_IRQ(irq_num) ;					\
	movl	$0, lapic_eoi ;			/* do the EOI */	\
;									\
	AVCPL_LOCK ;				/* MP-safe */		\
	orl	$IRQ_BIT(irq_num), _ipending ;				\
	AVCPL_UNLOCK ;							\
;									\
	POP_FRAME ;							\
	iret ;								\
;									\
	ALIGN_TEXT ;							\
2: ;						/* masked by cpl|cml */	\
	AVCPL_UNLOCK ;							\
	DELOCK ;		/* XXX this is going away... */		\
	jmp	1b

#else /* CPL_AND_CML */

#define	INTR(irq_num, vec_name)						\
	.text ;								\
	SUPERALIGN_TEXT ;						\
/* _XintrNN: entry point used by IDT/HWIs & splz_unpend via _vec[]. */	\
IDTVEC(vec_name) ;							\
	PUSH_FRAME ;							\
	movl	$KDSEL, %eax ;	/* reload with kernel's data segment */	\
	movl	%ax, %ds ;						\
	movl	%ax, %es ;						\
;									\
	lock ;					/* MP-safe */		\
	btsl	$(irq_num), iactive ;		/* lazy masking */	\
	jc	1f ;				/* already active */	\
;									\
	ISR_TRYLOCK ;		/* XXX this is going away... */		\
	testl	%eax, %eax ;			/* did we get it? */	\
	jz	1f ;				/* no */		\
;									\
	AVCPL_LOCK ;				/* MP-safe */		\
	testl	$IRQ_BIT(irq_num), _cpl ;				\
	jne	2f ;				/* this INT masked */	\
	AVCPL_UNLOCK ;							\
;									\
;;;	movl	$0, lapic_eoi ;			/* XXX too soon? */	\
	incb	_intr_nesting_level ;					\
;	 								\
  /* entry point used by doreti_unpend for HWIs. */			\
__CONCAT(Xresume,irq_num): ;						\
	FAKE_MCOUNT(12*4(%esp)) ;		/* XXX avoid dbl cnt */ \
	lock ;	incl	_cnt+V_INTR ;		/* tally interrupts */	\
	movl	_intr_countp + (irq_num) * 4, %eax ;			\
	lock ;	incl	(%eax) ;					\
;									\
	AVCPL_LOCK ;				/* MP-safe */		\
	movl	_cpl, %eax ;						\
	pushl	%eax ;							\
	orl	_intr_mask + (irq_num) * 4, %eax ;			\
	movl	%eax, _cpl ;						\
	AVCPL_UNLOCK ;							\
;									\
	pushl	_intr_unit + (irq_num) * 4 ;				\
	sti ;								\
	call	*_intr_handler + (irq_num) * 4 ;			\
	cli ;								\
;									\
	lock ;	andl	$~IRQ_BIT(irq_num), iactive ;			\
	UNMASK_IRQ(irq_num) ;						\
	sti ;				/* doreti repeats cli/sti */	\
	MEXITCOUNT ;							\
	jmp	_doreti ;						\
;									\
	ALIGN_TEXT ;							\
1: ;						/* active or locked */	\
	MASK_LEVEL_IRQ(irq_num) ;					\
	movl	$0, lapic_eoi ;			/* do the EOI */	\
;									\
	AVCPL_LOCK ;				/* MP-safe */		\
	orl	$IRQ_BIT(irq_num), _ipending ;				\
	AVCPL_UNLOCK ;							\
;									\
	POP_FRAME ;							\
	iret ;								\
;									\
	ALIGN_TEXT ;							\
2: ;						/* masked by cpl */	\
	AVCPL_UNLOCK ;							\
	ISR_RELLOCK ;		/* XXX this is going away... */		\
	jmp	1b

#endif /* CPL_AND_CML */


/*
 * Handle "spurious INTerrupts".
 * Notes:
 *  This is different from the "spurious INTerrupt" generated by an
 *   8259 PIC for missing INTs.  See the APIC documentation for details.
 *  This routine should NOT do an 'EOI' cycle.
 */
	.text
	SUPERALIGN_TEXT
	.globl _Xspuriousint
_Xspuriousint:

	/* No EOI cycle used here */

	iret


/*
 * Handle TLB shootdowns.
 */
	.text
	SUPERALIGN_TEXT
	.globl	_Xinvltlb
_Xinvltlb:
	pushl	%eax

#ifdef COUNT_XINVLTLB_HITS
	ss
	movl	_cpuid, %eax
	ss
	incl	_xhits(,%eax,4)
#endif /* COUNT_XINVLTLB_HITS */

	movl	%cr3, %eax		/* invalidate the TLB */
	movl	%eax, %cr3
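	/*
	 * Note: reloading %cr3 with its current value flushes every
	 * non-global entry from this CPU's TLB; that flush is the
	 * whole effect this IPI needs to achieve.
	 */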

	ss				/* stack segment, avoid %ds load */
	movl	$0, lapic_eoi		/* End Of Interrupt to APIC */

	popl	%eax
	iret


#ifdef BETTER_CLOCK

/*
 * Executed by a CPU when it receives an Xcpucheckstate IPI from another CPU,
 *
 *  - Stores current cpu state in checkstate_cpustate[cpuid]
 *      0 == user, 1 == sys, 2 == intr
 *  - Stores current process in checkstate_curproc[cpuid]
 *
 *  - Signals its receipt by setting bit cpuid in checkstate_probed_cpus.
 *
 * stack: 0 -> ds, 4 -> ebx, 8 -> eax, 12 -> eip, 16 -> cs, 20 -> eflags
 */

	.text
	SUPERALIGN_TEXT
	.globl _Xcpucheckstate
	.globl _checkstate_cpustate
	.globl _checkstate_curproc
	.globl _checkstate_pc
_Xcpucheckstate:
	pushl	%eax
	pushl	%ebx
	pushl	%ds			/* save current data segment */

	movl	$KDSEL, %eax
	movl	%ax, %ds		/* use KERNEL data segment */

	movl	$0, lapic_eoi		/* End Of Interrupt to APIC */

	movl	$0, %ebx
	movl	16(%esp), %eax
	andl	$3, %eax
	cmpl	$3, %eax
	je	1f
#ifdef VM86
	testl	$PSL_VM, 20(%esp)
	jne	1f
#endif
	incl	%ebx			/* system or interrupt */
#ifdef CPL_AND_CML
	cmpl	$0, _inside_intr
	je	1f
	incl	%ebx			/* interrupt */
#endif
1:
	movl	_cpuid, %eax
	movl	%ebx, _checkstate_cpustate(,%eax,4)
	movl	_curproc, %ebx
	movl	%ebx, _checkstate_curproc(,%eax,4)
	movl	12(%esp), %ebx
	movl	%ebx, _checkstate_pc(,%eax,4)

	lock				/* checkstate_probed_cpus |= (1<<id) */
	btsl	%eax, _checkstate_probed_cpus

	popl	%ds			/* restore previous data segment */
	popl	%ebx
	popl	%eax
	iret

/*
 * Executed by a CPU when it receives an Xcpuast IPI from another CPU,
 *
 *  - Signals its receipt by clearing bit cpuid in checkstate_need_ast.
 *
 *  - We need a better method of triggering asts on other cpus.
 */

	.text
	SUPERALIGN_TEXT
	.globl _Xcpuast
_Xcpuast:
	PUSH_FRAME
	movl	$KDSEL, %eax
	movl	%ax, %ds		/* use KERNEL data segment */
	movl	%ax, %es

	movl	_cpuid, %eax
	lock				/* checkstate_need_ast &= ~(1<<id) */
	btrl	%eax, _checkstate_need_ast
	movl	$0, lapic_eoi		/* End Of Interrupt to APIC */

	lock
	btsl	%eax, _checkstate_pending_ast
	jc	1f

	FAKE_MCOUNT(12*4(%esp))

	/*
	 * Giant locks do not come cheap.
	 * A lot of cycles are going to be wasted here.
	 */
	call	_get_isrlock

	AVCPL_LOCK
#ifdef CPL_AND_CML
	movl	_cml, %eax
#else
	movl	_cpl, %eax
#endif
	pushl	%eax
	AVCPL_UNLOCK
	lock
	incb	_intr_nesting_level
	sti

	pushl	$0

	lock
	orl	$SWI_AST_PENDING, _ipending

	movl	_cpuid, %eax
	lock
	btrl	%eax, _checkstate_pending_ast

	MEXITCOUNT
	jmp	_doreti
1:
	/* We are already in the process of delivering an ast for this CPU */
	POP_FRAME
	iret

#endif /* BETTER_CLOCK */


/*
 * Executed by a CPU when it receives an Xcpustop IPI from another CPU,
 *
 *  - Signals its receipt.
 *  - Waits for permission to restart.
 *  - Signals its restart.
 */

	.text
	SUPERALIGN_TEXT
	.globl _Xcpustop
_Xcpustop:
	pushl	%eax
	pushl	%ds			/* save current data segment */

	movl	$KDSEL, %eax
	movl	%ax, %ds		/* use KERNEL data segment */

	movl	_cpuid, %eax

	lock
	btsl	%eax, _stopped_cpus	/* stopped_cpus |= (1<<id) */
1:
	btl	%eax, _started_cpus	/* while (!(started_cpus & (1<<id))) */
	jnc	1b

	lock
	btrl	%eax, _started_cpus	/* started_cpus &= ~(1<<id) */

	movl	$0, lapic_eoi		/* End Of Interrupt to APIC */

	popl	%ds			/* restore previous data segment */
	popl	%eax
	iret


MCOUNT_LABEL(bintr)
	FAST_INTR(0,fastintr0)
	FAST_INTR(1,fastintr1)
	FAST_INTR(2,fastintr2)
	FAST_INTR(3,fastintr3)
	FAST_INTR(4,fastintr4)
	FAST_INTR(5,fastintr5)
	FAST_INTR(6,fastintr6)
	FAST_INTR(7,fastintr7)
	FAST_INTR(8,fastintr8)
	FAST_INTR(9,fastintr9)
	FAST_INTR(10,fastintr10)
	FAST_INTR(11,fastintr11)
	FAST_INTR(12,fastintr12)
	FAST_INTR(13,fastintr13)
	FAST_INTR(14,fastintr14)
	FAST_INTR(15,fastintr15)
	FAST_INTR(16,fastintr16)
	FAST_INTR(17,fastintr17)
	FAST_INTR(18,fastintr18)
	FAST_INTR(19,fastintr19)
	FAST_INTR(20,fastintr20)
	FAST_INTR(21,fastintr21)
	FAST_INTR(22,fastintr22)
	FAST_INTR(23,fastintr23)
	INTR(0,intr0)
	INTR(1,intr1)
	INTR(2,intr2)
	INTR(3,intr3)
	INTR(4,intr4)
	INTR(5,intr5)
	INTR(6,intr6)
	INTR(7,intr7)
	INTR(8,intr8)
	INTR(9,intr9)
	INTR(10,intr10)
	INTR(11,intr11)
	INTR(12,intr12)
	INTR(13,intr13)
	INTR(14,intr14)
	INTR(15,intr15)
	INTR(16,intr16)
	INTR(17,intr17)
	INTR(18,intr18)
	INTR(19,intr19)
	INTR(20,intr20)
	INTR(21,intr21)
	INTR(22,intr22)
	INTR(23,intr23)
MCOUNT_LABEL(eintr)

	.data
/*
 * Addresses of interrupt handlers.
 *  XresumeNN: Resumption addresses for HWIs.
 */
	.globl _ihandlers
_ihandlers:
ihandlers:
/*
 * used by:
 *  ipl.s:	doreti_unpend
 */
	.long	Xresume0,  Xresume1,  Xresume2,  Xresume3
	.long	Xresume4,  Xresume5,  Xresume6,  Xresume7
	.long	Xresume8,  Xresume9,  Xresume10, Xresume11
	.long	Xresume12, Xresume13, Xresume14, Xresume15
	.long	Xresume16, Xresume17, Xresume18, Xresume19
	.long	Xresume20, Xresume21, Xresume22, Xresume23
/*
 * used by:
 *  ipl.s:	doreti_unpend
 *  apic_ipl.s:	splz_unpend
 */
	.long	swi_tty, swi_net
	.long	dummycamisr, dummycamisr
	.long	_swi_vm, 0
	.long	_softclock, swi_ast

imasks:				/* masks for interrupt handlers */
	.space	NHWI*4		/* padding; HWI masks are elsewhere */

	.long	SWI_TTY_MASK, SWI_NET_MASK
	.long	SWI_CAMNET_MASK, SWI_CAMBIO_MASK
	.long	SWI_VM_MASK, 0
	.long	SWI_CLOCK_MASK, SWI_AST_MASK

/*
 * IDT vector entry points for the HWIs.
 *
 * used by:
 *   i386/isa/clock.c:		setup Xintr8254
 */
	.globl _ivectors
_ivectors:
	.long	_Xintr0,  _Xintr1,  _Xintr2,  _Xintr3
	.long	_Xintr4,  _Xintr5,  _Xintr6,  _Xintr7
	.long	_Xintr8,  _Xintr9,  _Xintr10, _Xintr11
	.long	_Xintr12, _Xintr13, _Xintr14, _Xintr15
	.long	_Xintr16, _Xintr17, _Xintr18, _Xintr19
	.long	_Xintr20, _Xintr21, _Xintr22, _Xintr23

/* active flag for lazy masking */
iactive:
	.long	0

#ifdef COUNT_XINVLTLB_HITS
	.globl	_xhits
_xhits:
	.space	(NCPU * 4), 0
#endif /* COUNT_XINVLTLB_HITS */

/* variables used by stop_cpus()/restart_cpus()/Xcpustop */
	.globl _stopped_cpus, _started_cpus
_stopped_cpus:
	.long	0
_started_cpus:
	.long	0

#ifdef BETTER_CLOCK
	.globl _checkstate_probed_cpus
	.globl _checkstate_need_ast
_checkstate_probed_cpus:
	.long	0
_checkstate_need_ast:
	.long	0
_checkstate_pending_ast:
	.long	0
#endif

	.globl	_apic_pin_trigger
_apic_pin_trigger:
	.space	(NAPIC * 4), 0


/*
 * Interrupt counters and names.  The format of these and the label names
 * must agree with what vmstat expects.  The tables are indexed by device
 * ids so that we don't have to move the names around as devices are
 * attached.
 */
#include "vector.h"
	.globl	_intrcnt, _eintrcnt
_intrcnt:
	.space	(NR_DEVICES + ICU_LEN) * 4
_eintrcnt:

	.globl	_intrnames, _eintrnames
_intrnames:
	.ascii	DEVICE_NAMES
	.asciz	"stray irq0"
	.asciz	"stray irq1"
	.asciz	"stray irq2"
	.asciz	"stray irq3"
	.asciz	"stray irq4"
	.asciz	"stray irq5"
	.asciz	"stray irq6"
	.asciz	"stray irq7"
	.asciz	"stray irq8"
	.asciz	"stray irq9"
	.asciz	"stray irq10"
	.asciz	"stray irq11"
	.asciz	"stray irq12"
	.asciz	"stray irq13"
	.asciz	"stray irq14"
	.asciz	"stray irq15"
	.asciz	"stray irq16"
	.asciz	"stray irq17"
	.asciz	"stray irq18"
	.asciz	"stray irq19"
	.asciz	"stray irq20"
	.asciz	"stray irq21"
	.asciz	"stray irq22"
	.asciz	"stray irq23"
_eintrnames:

	.text