/*
 * apic_vector.s
 *	from: vector.s, 386BSD 0.1 unknown origin
 *	$Id: apic_vector.s,v 1.34 1998/09/06 22:41:41 tegge Exp $
 */
5139749Simp
667468Snon
#include <machine/apic.h>
#include <machine/smp.h>

#include "i386/isa/intr_machdep.h"
1167468Snon
1267468Snon
/*
 * Lock acquire/release wrappers used by the FAST_INTR entry points.
 * With FAST_SIMPLELOCK a dedicated spin lock protects fast interrupts;
 * otherwise the giant ISR lock (_mp_lock) is taken.
 */
#ifdef FAST_SIMPLELOCK

#define GET_FAST_INTR_LOCK						\
	pushl	$_fast_intr_lock ;		/* address of lock */	\
	call	_s_lock ;			/* MP-safe */		\
	addl	$4,%esp

#define REL_FAST_INTR_LOCK						\
	pushl	$_fast_intr_lock ;		/* address of lock */	\
	call	_s_unlock ;			/* MP-safe */		\
	addl	$4,%esp

#else /* FAST_SIMPLELOCK */

#define GET_FAST_INTR_LOCK						\
	call	_get_isrlock

#define REL_FAST_INTR_LOCK						\
	pushl	$_mp_lock ;	/* GIANT_LOCK */			\
	call	_MPrellock ;						\
	add	$4, %esp

#endif /* FAST_SIMPLELOCK */
3667468Snon
/* convert an absolute IRQ# into a bitmask */
#define IRQ_BIT(irq_num)	(1 << (irq_num))

/* make an index into the IO APIC from the IRQ# (redirection table entries are 64 bits, i.e. 2 registers each) */
#define REDTBL_IDX(irq_num)	(0x10 + ((irq_num) * 2))
4267468Snon
4367468Snon
/*
 * Macros for interrupt entry, call to handler, and exit.
 */
4767468Snon
#ifdef FAST_WITHOUTCPL

/*
 * Fast interrupt entry: save only the call-used registers, call the
 * handler with interrupts disabled, EOI the local APIC, bump the
 * statistics counters and return directly with iret.  No cpl handling.
 */
#define	FAST_INTR(irq_num, vec_name)					\
	.text ;								\
	SUPERALIGN_TEXT ;						\
IDTVEC(vec_name) ;							\
	pushl	%eax ;		/* save only call-used registers */	\
	pushl	%ecx ;							\
	pushl	%edx ;							\
	pushl	%ds ;							\
	MAYBE_PUSHL_ES ;						\
	movl	$KDSEL,%eax ;						\
	movl	%ax,%ds ;						\
	MAYBE_MOVW_AX_ES ;						\
	FAKE_MCOUNT((4+ACTUALLY_PUSHED)*4(%esp)) ;			\
	pushl	_intr_unit + (irq_num) * 4 ;				\
	GET_FAST_INTR_LOCK ;						\
	call	*_intr_handler + (irq_num) * 4 ; /* do the work ASAP */ \
	REL_FAST_INTR_LOCK ;						\
	addl	$4, %esp ;						\
	movl	$0, lapic_eoi ;						\
	lock ; 								\
	incl	_cnt+V_INTR ;	/* book-keeping can wait */		\
	movl	_intr_countp + (irq_num) * 4, %eax ;			\
	lock ; 								\
	incl	(%eax) ;						\
	MEXITCOUNT ;							\
	MAYBE_POPL_ES ;							\
	popl	%ds ;							\
	popl	%edx ;							\
	popl	%ecx ;							\
	popl	%eax ;							\
	iret

#else /* FAST_WITHOUTCPL */

/*
 * Fast interrupt entry with cpl handling: after running the handler,
 * check whether any pending HWIs/SWIs were unmasked; if so (and the
 * nesting level permits) convert the thin frame into a full trap frame
 * and dispatch through _doreti.
 */
#define	FAST_INTR(irq_num, vec_name)					\
	.text ;								\
	SUPERALIGN_TEXT ;						\
IDTVEC(vec_name) ;							\
	pushl	%eax ;		/* save only call-used registers */	\
	pushl	%ecx ;							\
	pushl	%edx ;							\
	pushl	%ds ;							\
	MAYBE_PUSHL_ES ;						\
	movl	$KDSEL, %eax ;						\
	movl	%ax, %ds ;						\
	MAYBE_MOVW_AX_ES ;						\
	FAKE_MCOUNT((4+ACTUALLY_PUSHED)*4(%esp)) ;			\
	GET_FAST_INTR_LOCK ;						\
	pushl	_intr_unit + (irq_num) * 4 ;				\
	call	*_intr_handler + (irq_num) * 4 ; /* do the work ASAP */ \
	addl	$4, %esp ;						\
	movl	$0, lapic_eoi ;						\
	lock ; 								\
	incl	_cnt+V_INTR ;	/* book-keeping can wait */		\
	movl	_intr_countp + (irq_num) * 4,%eax ;			\
	lock ; 								\
	incl	(%eax) ;						\
	movl	_cpl, %eax ;	/* unmasking pending HWIs or SWIs? */	\
	notl	%eax ;							\
	andl	_ipending, %eax ;					\
	jne	2f ; 		/* yes, maybe handle them */		\
1: ;									\
	MEXITCOUNT ;							\
	REL_FAST_INTR_LOCK ;						\
	MAYBE_POPL_ES ;							\
	popl	%ds ;							\
	popl	%edx ;							\
	popl	%ecx ;							\
	popl	%eax ;							\
	iret ;								\
;									\
	ALIGN_TEXT ;							\
2: ;									\
	cmpb	$3, _intr_nesting_level ;	/* enough stack? */	\
	jae	1b ;		/* no, return */			\
	movl	_cpl, %eax ;						\
	/* XXX next line is probably unnecessary now. */		\
	movl	$HWI_MASK|SWI_MASK, _cpl ;	/* limit nesting ... */	\
	lock ; 								\
	incb	_intr_nesting_level ;	/* ... really limit it ... */	\
	sti ;			/* to do this as early as possible */	\
	MAYBE_POPL_ES ;		/* discard most of thin frame ... */	\
	popl	%ecx ;		/* ... original %ds ... */		\
	popl	%edx ;							\
	xchgl	%eax, 4(%esp) ;	/* orig %eax; save cpl */		\
	pushal ;		/* build fat frame (grrr) ... */	\
	pushl	%ecx ;		/* ... actually %ds ... */		\
	pushl	%es ;							\
	movl	$KDSEL, %eax ;						\
	movl	%ax, %es ;						\
	movl	(2+8+0)*4(%esp), %ecx ;	/* %ecx from thin frame ... */	\
	movl	%ecx, (2+6)*4(%esp) ;	/* ... to fat frame ... */	\
	movl	(2+8+1)*4(%esp), %eax ;	/* ... cpl from thin frame */	\
	pushl	%eax ;							\
	subl	$4, %esp ;	/* junk for unit number */		\
	MEXITCOUNT ;							\
	jmp	_doreti

#endif /** FAST_WITHOUTCPL */
15167468Snon
15267468Snon
/*
 * Trap-frame construction/teardown helpers and IO APIC register
 * accessors (int_to_apicintpin entries are 16 bytes; offset 8 holds the
 * ioapic register address, offset 12 the redirection-table index).
 */
#define PUSH_FRAME							\
	pushl	$0 ;		/* dummy error code */			\
	pushl	$0 ;		/* dummy trap type */			\
	pushal ;							\
	pushl	%ds ;		/* save data and extra segments ... */	\
	pushl	%es

#define POP_FRAME							\
	popl	%es ;							\
	popl	%ds ;							\
	popal ;								\
	addl	$4+4,%esp	/* discard dummy trap type + error code */

#define IOAPICADDR(irq_num) CNAME(int_to_apicintpin) + 16 * (irq_num) + 8
#define REDIRIDX(irq_num) CNAME(int_to_apicintpin) + 16 * (irq_num) + 12
17167468Snon
/* Mask an IRQ at the IO APIC if it is not already masked. */
#define MASK_IRQ(irq_num)						\
	IMASK_LOCK ;				/* into critical reg */	\
	testl	$IRQ_BIT(irq_num), _apic_imen ;				\
	jne	7f ;			/* masked, don't mask */	\
	orl	$IRQ_BIT(irq_num), _apic_imen ;	/* set the mask bit */	\
	movl	IOAPICADDR(irq_num), %ecx ;	/* ioapic addr */	\
	movl	REDIRIDX(irq_num), %eax ;	/* get the index */	\
	movl	%eax, (%ecx) ;			/* write the index */	\
	movl	IOAPIC_WINDOW(%ecx), %eax ;	/* current value */	\
	orl	$IOART_INTMASK, %eax ;		/* set the mask */	\
	movl	%eax, IOAPIC_WINDOW(%ecx) ;	/* new value */		\
7: ;						/* already masked */	\
	IMASK_UNLOCK
/*
 * Test to see whether we are handling an edge or level triggered INT.
 *  Level-triggered INTs must still be masked as we don't clear the source,
 *  and the EOI cycle would cause redundant INTs to occur.
 */
#define MASK_LEVEL_IRQ(irq_num)						\
	testl	$IRQ_BIT(irq_num), _apic_pin_trigger ;			\
	jz	9f ;				/* edge, don't mask */	\
	MASK_IRQ(irq_num) ;						\
9:


/* Issue an EOI only if this IRQ is currently in service at the local APIC. */
#ifdef APIC_INTR_REORDER
#define EOI_IRQ(irq_num)						\
	movl	_apic_isrbit_location + 8 * (irq_num), %eax ;		\
	movl	(%eax), %eax ;						\
	testl	_apic_isrbit_location + 4 + 8 * (irq_num), %eax ;	\
	jz	9f ;				/* not active */	\
	movl	$0, lapic_eoi ;						\
	APIC_ITRACE(apic_itrace_eoi, irq_num, APIC_ITRACE_EOI) ;	\
9:

#else
#define EOI_IRQ(irq_num)						\
	testl	$IRQ_BIT(irq_num), lapic_isr1;				\
	jz	9f	;			/* not active */	\
	movl	$0, lapic_eoi;						\
	APIC_ITRACE(apic_itrace_eoi, irq_num, APIC_ITRACE_EOI) ;	\
9:
#endif


/*
 * Test to see if the source is currently masked, clear if so.
 */
#define UNMASK_IRQ(irq_num)					\
	IMASK_LOCK ;				/* into critical reg */	\
	testl	$IRQ_BIT(irq_num), _apic_imen ;				\
	je	7f ;			/* bit clear, not masked */	\
	andl	$~IRQ_BIT(irq_num), _apic_imen ;/* clear mask bit */	\
	movl	IOAPICADDR(irq_num),%ecx ;	/* ioapic addr */	\
	movl	REDIRIDX(irq_num), %eax ;	/* get the index */	\
	movl	%eax,(%ecx) ;			/* write the index */	\
	movl	IOAPIC_WINDOW(%ecx),%eax ;	/* current value */	\
	andl	$~IOART_INTMASK,%eax ;		/* clear the mask */	\
	movl	%eax,IOAPIC_WINDOW(%ecx) ;	/* new value */		\
7: ;									\
	IMASK_UNLOCK
233
/*
 * ISR lock entry/exit hooks used by the INTR macro.  With
 * INTR_SIMPLELOCK the lock is taken late (just before _doreti).
 */
#ifdef INTR_SIMPLELOCK
#define ENLOCK
#define DELOCK
#define LATELOCK call	_get_isrlock
#else
#define ENLOCK \
	ISR_TRYLOCK ;		/* XXX this is going away... */		\
	testl	%eax, %eax ;			/* did we get it? */	\
	jz	3f
#define DELOCK	ISR_RELLOCK
#define LATELOCK
#endif
246
#ifdef APIC_INTR_DIAGNOSTIC
#ifdef APIC_INTR_DIAGNOSTIC_IRQ
/*
 * log_intr_event: append a (cpuid << 8 | event-id) word to the 32K-entry
 * circular debug buffer, under apic_itrace_debuglock with interrupts off.
 * Caller pushes the event id; caller pops it afterwards.
 */
log_intr_event:
	pushf
	cli
	pushl	$CNAME(apic_itrace_debuglock)
	call	_s_lock_np
	addl	$4, %esp
	movl	CNAME(apic_itrace_debugbuffer_idx), %ecx
	andl	$32767, %ecx			/* wrap index to buffer size */
	movl	_cpuid, %eax
	shll	$8,	%eax
	orl	8(%esp), %eax			/* combine cpuid and event id */
	movw	%ax,	CNAME(apic_itrace_debugbuffer)(,%ecx,2)
	incl	%ecx
	andl	$32767, %ecx
	movl	%ecx,	CNAME(apic_itrace_debugbuffer_idx)
	pushl	$CNAME(apic_itrace_debuglock)
	call	_s_unlock_np
	addl	$4, %esp
	popf
	ret


#define APIC_ITRACE(name, irq_num, id)					\
	lock ;					/* MP-safe */		\
	incl	CNAME(name) + (irq_num) * 4 ;				\
	pushl	%eax ;							\
	pushl	%ecx ;							\
	pushl	%edx ;							\
	movl	$(irq_num), %eax ;					\
	cmpl	$APIC_INTR_DIAGNOSTIC_IRQ, %eax ;			\
	jne	7f ;							\
	pushl	$id ;							\
	call	log_intr_event ;					\
	addl	$4, %esp ;						\
7: ;									\
	popl	%edx ;							\
	popl	%ecx ;							\
	popl	%eax
#else
#define APIC_ITRACE(name, irq_num, id)					\
	lock ;					/* MP-safe */		\
	incl	CNAME(name) + (irq_num) * 4
#endif

#define APIC_ITRACE_ENTER 1
#define APIC_ITRACE_EOI 2
#define APIC_ITRACE_TRYISRLOCK 3
#define APIC_ITRACE_GOTISRLOCK 4
#define APIC_ITRACE_ENTER2 5
#define APIC_ITRACE_LEAVE 6
#define APIC_ITRACE_UNMASK 7
#define APIC_ITRACE_ACTIVE 8
#define APIC_ITRACE_MASKED 9
#define APIC_ITRACE_NOISRLOCK 10
#define APIC_ITRACE_MASKED2 11
#define APIC_ITRACE_SPLZ 12
#define APIC_ITRACE_DORETI 13

#else
#define APIC_ITRACE(name, irq_num, id)
#endif
310
#ifdef CPL_AND_CML

#define	INTR(irq_num, vec_name)						\
	.text ;								\
	SUPERALIGN_TEXT ;						\
/* _XintrNN: entry point used by IDT/HWIs & splz_unpend via _vec[]. */	\
IDTVEC(vec_name) ;							\
	PUSH_FRAME ;							\
	movl	$KDSEL, %eax ;	/* reload with kernel's data segment */	\
	movl	%ax, %ds ;						\
	movl	%ax, %es ;						\
;									\
	APIC_ITRACE(apic_itrace_enter, irq_num, APIC_ITRACE_ENTER) ;	\
	lock ;					/* MP-safe */		\
	btsl	$(irq_num), iactive ;		/* lazy masking */	\
	jc	1f ;				/* already active */	\
;									\
	MASK_LEVEL_IRQ(irq_num) ;					\
	EOI_IRQ(irq_num) ;						\
0: ;									\
	APIC_ITRACE(apic_itrace_tryisrlock, irq_num, APIC_ITRACE_TRYISRLOCK) ;\
	ENLOCK ;							\
;									\
	APIC_ITRACE(apic_itrace_gotisrlock, irq_num, APIC_ITRACE_GOTISRLOCK) ;\
	AVCPL_LOCK ;				/* MP-safe */		\
	testl	$IRQ_BIT(irq_num), _cpl ;				\
	jne	2f ;				/* this INT masked */	\
	testl	$IRQ_BIT(irq_num), _cml ;				\
	jne	2f ;				/* this INT masked */	\
	orl	$IRQ_BIT(irq_num), _cil ;				\
	AVCPL_UNLOCK ;							\
;									\
	incb	_intr_nesting_level ;					\
;	 								\
  /* entry point used by doreti_unpend for HWIs. */			\
__CONCAT(Xresume,irq_num): ;						\
	FAKE_MCOUNT(12*4(%esp)) ;		/* XXX avoid dbl cnt */ \
	lock ;	incl	_cnt+V_INTR ;		/* tally interrupts */	\
	movl	_intr_countp + (irq_num) * 4, %eax ;			\
	lock ;	incl	(%eax) ;					\
;									\
	AVCPL_LOCK ;				/* MP-safe */		\
	movl	_cml, %eax ;						\
	pushl	%eax ;							\
	orl	_intr_mask + (irq_num) * 4, %eax ;			\
	movl	%eax, _cml ;						\
	AVCPL_UNLOCK ;							\
;									\
	pushl	_intr_unit + (irq_num) * 4 ;				\
	incl	_inside_intr ;						\
	APIC_ITRACE(apic_itrace_enter2, irq_num, APIC_ITRACE_ENTER2) ;	\
	sti ;								\
	call	*_intr_handler + (irq_num) * 4 ;			\
	cli ;								\
	APIC_ITRACE(apic_itrace_leave, irq_num, APIC_ITRACE_LEAVE) ;	\
	decl	_inside_intr ;						\
;									\
	lock ;	andl $~IRQ_BIT(irq_num), iactive ;			\
	lock ;	andl $~IRQ_BIT(irq_num), _cil ;				\
	UNMASK_IRQ(irq_num) ;						\
	APIC_ITRACE(apic_itrace_unmask, irq_num, APIC_ITRACE_UNMASK) ;	\
	sti ;				/* doreti repeats cli/sti */	\
	MEXITCOUNT ;							\
	LATELOCK ;							\
	jmp	_doreti ;						\
;									\
	ALIGN_TEXT ;							\
1: ;						/* active */		\
	APIC_ITRACE(apic_itrace_active, irq_num, APIC_ITRACE_ACTIVE) ;	\
	MASK_IRQ(irq_num) ;						\
	EOI_IRQ(irq_num) ;						\
	AVCPL_LOCK ;				/* MP-safe */		\
	orl	$IRQ_BIT(irq_num), _ipending ;				\
	AVCPL_UNLOCK ;							\
	lock ;								\
	btsl	$(irq_num), iactive ;		/* still active */	\
	jnc	0b ;				/* retry */		\
	POP_FRAME ;							\
	iret ;								\
;									\
	ALIGN_TEXT ;							\
2: ;						/* masked by cpl|cml */	\
	APIC_ITRACE(apic_itrace_masked, irq_num, APIC_ITRACE_MASKED) ;	\
	orl	$IRQ_BIT(irq_num), _ipending ;				\
	AVCPL_UNLOCK ;							\
	DELOCK ;		/* XXX this is going away... */		\
	POP_FRAME ;							\
	iret ;								\
	ALIGN_TEXT ;							\
3: ; 			/* other cpu has isr lock */			\
	APIC_ITRACE(apic_itrace_noisrlock, irq_num, APIC_ITRACE_NOISRLOCK) ;\
	AVCPL_LOCK ;				/* MP-safe */		\
	orl	$IRQ_BIT(irq_num), _ipending ;				\
	testl	$IRQ_BIT(irq_num), _cpl ;				\
	jne	4f ;				/* this INT masked */	\
	testl	$IRQ_BIT(irq_num), _cml ;				\
	jne	4f ;				/* this INT masked */	\
	orl	$IRQ_BIT(irq_num), _cil ;				\
	AVCPL_UNLOCK ;							\
	call	forward_irq ;	/* forward irq to lock holder */	\
	POP_FRAME ;	 			/* and return */	\
	iret ;								\
	ALIGN_TEXT ;							\
4: ;	 					/* blocked */		\
	APIC_ITRACE(apic_itrace_masked2, irq_num, APIC_ITRACE_MASKED2) ;\
	AVCPL_UNLOCK ;							\
	POP_FRAME ;	 			/* and return */	\
	iret

#else /* CPL_AND_CML */


#define	INTR(irq_num, vec_name)						\
	.text ;								\
	SUPERALIGN_TEXT ;						\
/* _XintrNN: entry point used by IDT/HWIs & splz_unpend via _vec[]. */	\
IDTVEC(vec_name) ;							\
	PUSH_FRAME ;							\
	movl	$KDSEL, %eax ;	/* reload with kernel's data segment */	\
	movl	%ax, %ds ;						\
	movl	%ax, %es ;						\
;									\
	APIC_ITRACE(apic_itrace_enter, irq_num, APIC_ITRACE_ENTER) ;	\
	lock ;					/* MP-safe */		\
	btsl	$(irq_num), iactive ;		/* lazy masking */	\
	jc	1f ;				/* already active */	\
;									\
	MASK_LEVEL_IRQ(irq_num) ;					\
	EOI_IRQ(irq_num) ;						\
0: ;									\
	APIC_ITRACE(apic_itrace_tryisrlock, irq_num, APIC_ITRACE_TRYISRLOCK) ;\
	ISR_TRYLOCK ;		/* XXX this is going away... */		\
	testl	%eax, %eax ;			/* did we get it? */	\
	jz	3f ;				/* no */		\
;									\
	APIC_ITRACE(apic_itrace_gotisrlock, irq_num, APIC_ITRACE_GOTISRLOCK) ;\
	AVCPL_LOCK ;				/* MP-safe */		\
	testl	$IRQ_BIT(irq_num), _cpl ;				\
	jne	2f ;				/* this INT masked */	\
	AVCPL_UNLOCK ;							\
;									\
	incb	_intr_nesting_level ;					\
;	 								\
  /* entry point used by doreti_unpend for HWIs. */			\
__CONCAT(Xresume,irq_num): ;						\
	FAKE_MCOUNT(12*4(%esp)) ;		/* XXX avoid dbl cnt */ \
	lock ;	incl	_cnt+V_INTR ;		/* tally interrupts */	\
	movl	_intr_countp + (irq_num) * 4, %eax ;			\
	lock ;	incl	(%eax) ;					\
;									\
	AVCPL_LOCK ;				/* MP-safe */		\
	movl	_cpl, %eax ;						\
	pushl	%eax ;							\
	orl	_intr_mask + (irq_num) * 4, %eax ;			\
	movl	%eax, _cpl ;						\
	andl	$~IRQ_BIT(irq_num), _ipending ;				\
	AVCPL_UNLOCK ;							\
;									\
	pushl	_intr_unit + (irq_num) * 4 ;				\
	APIC_ITRACE(apic_itrace_enter2, irq_num, APIC_ITRACE_ENTER2) ;	\
	sti ;								\
	call	*_intr_handler + (irq_num) * 4 ;			\
	cli ;								\
	APIC_ITRACE(apic_itrace_leave, irq_num, APIC_ITRACE_LEAVE) ;	\
;									\
	lock ;	andl	$~IRQ_BIT(irq_num), iactive ;			\
	UNMASK_IRQ(irq_num) ;						\
	APIC_ITRACE(apic_itrace_unmask, irq_num, APIC_ITRACE_UNMASK) ;	\
	sti ;				/* doreti repeats cli/sti */	\
	MEXITCOUNT ;							\
	jmp	_doreti ;						\
;									\
	ALIGN_TEXT ;							\
1: ;						/* active  */		\
	APIC_ITRACE(apic_itrace_active, irq_num, APIC_ITRACE_ACTIVE) ;	\
	MASK_IRQ(irq_num) ;						\
	EOI_IRQ(irq_num) ;						\
	AVCPL_LOCK ;				/* MP-safe */		\
	orl	$IRQ_BIT(irq_num), _ipending ;				\
	AVCPL_UNLOCK ;							\
	lock ;								\
	btsl	$(irq_num), iactive ;		/* still active */	\
	jnc	0b ;				/* retry */		\
	POP_FRAME ;							\
	iret ;		/* XXX:	 iactive bit might be 0 now */		\
	ALIGN_TEXT ;							\
2: ;				/* masked by cpl, leave iactive set */	\
	APIC_ITRACE(apic_itrace_masked, irq_num, APIC_ITRACE_MASKED) ;	\
	orl	$IRQ_BIT(irq_num), _ipending ;				\
	AVCPL_UNLOCK ;							\
	ISR_RELLOCK ;		/* XXX this is going away... */		\
	POP_FRAME ;							\
	iret ;								\
	ALIGN_TEXT ;							\
3: ; 			/* other cpu has isr lock */			\
	APIC_ITRACE(apic_itrace_noisrlock, irq_num, APIC_ITRACE_NOISRLOCK) ;\
	AVCPL_LOCK ;				/* MP-safe */		\
	orl	$IRQ_BIT(irq_num), _ipending ;				\
	testl	$IRQ_BIT(irq_num), _cpl ;				\
	jne	4f ;				/* this INT masked */	\
	AVCPL_UNLOCK ;							\
	call	forward_irq ;	 /* forward irq to lock holder */	\
	POP_FRAME ;	 			/* and return */	\
	iret ;								\
	ALIGN_TEXT ;							\
4: ;	 					/* blocked */		\
	APIC_ITRACE(apic_itrace_masked2, irq_num, APIC_ITRACE_MASKED2) ;\
	AVCPL_UNLOCK ;							\
	POP_FRAME ;	 			/* and return */	\
	iret

#endif /* CPL_AND_CML */
523
524
/*
 * Handle "spurious INTerrupts".
 * Notes:
 *  This is different than the "spurious INTerrupt" generated by an
 *   8259 PIC for missing INTs.  See the APIC documentation for details.
 *  This routine should NOT do an 'EOI' cycle.
 */
	.text
	SUPERALIGN_TEXT
	.globl _Xspuriousint
_Xspuriousint:

	/* No EOI cycle used here */

	iret
540
541
/*
 * Handle TLB shootdowns: reload %cr3 to flush this CPU's TLB, then EOI.
 * Only %eax is saved; %ss overrides avoid reloading %ds.
 */
	.text
	SUPERALIGN_TEXT
	.globl	_Xinvltlb
_Xinvltlb:
	pushl	%eax

#ifdef COUNT_XINVLTLB_HITS
	ss
	movl	_cpuid, %eax
	ss
	incl	_xhits(,%eax,4)
#endif /* COUNT_XINVLTLB_HITS */

	movl	%cr3, %eax		/* invalidate the TLB */
	movl	%eax, %cr3

	ss				/* stack segment, avoid %ds load */
	movl	$0, lapic_eoi		/* End Of Interrupt to APIC */

	popl	%eax
	iret
566
567
#ifdef BETTER_CLOCK

/*
 * Executed by a CPU when it receives an Xcpucheckstate IPI from another CPU,
 *
 *  - Stores current cpu state in checkstate_cpustate[cpuid]
 *      0 == user, 1 == sys, 2 == intr
 *  - Stores current process in checkstate_curproc[cpuid]
 *
 *  - Signals its receipt by setting bit cpuid in checkstate_probed_cpus.
 *
 * stack: 0 -> ds, 4 -> ebx, 8 -> eax, 12 -> eip, 16 -> cs, 20 -> eflags
 */

	.text
	SUPERALIGN_TEXT
	.globl _Xcpucheckstate
	.globl _checkstate_cpustate
	.globl _checkstate_curproc
	.globl _checkstate_pc
_Xcpucheckstate:
	pushl	%eax
	pushl	%ebx
	pushl	%ds			/* save current data segment */

	movl	$KDSEL, %eax
	movl	%ax, %ds		/* use KERNEL data segment */

	movl	$0, lapic_eoi		/* End Of Interrupt to APIC */

	movl	$0, %ebx
	movl	16(%esp), %eax		/* interrupted %cs: RPL 3 == user */
	andl	$3, %eax
	cmpl	$3, %eax
	je	1f
#ifdef VM86
	testl	$PSL_VM, 20(%esp)	/* vm86 mode counts as user too */
	jne	1f
#endif
	incl	%ebx			/* system or interrupt */
#ifdef CPL_AND_CML
	cmpl	$0, _inside_intr
	je	1f
	incl	%ebx			/* interrupt */
#endif
1:
	movl	_cpuid, %eax
	movl	%ebx, _checkstate_cpustate(,%eax,4)
	movl	_curproc, %ebx
	movl	%ebx, _checkstate_curproc(,%eax,4)
	movl	12(%esp), %ebx		/* interrupted %eip */
	movl	%ebx, _checkstate_pc(,%eax,4)

	lock				/* checkstate_probed_cpus |= (1<<id) */
	btsl	%eax, _checkstate_probed_cpus

	popl	%ds			/* restore previous data segment */
	popl	%ebx
	popl	%eax
	iret

#endif /* BETTER_CLOCK */
630
/*
 * Executed by a CPU when it receives an Xcpuast IPI from another CPU,
 *
 *  - Signals its receipt by clearing bit cpuid in checkstate_need_ast.
 *
 *  - We need a better method of triggering asts on other cpus.
 */

	.text
	SUPERALIGN_TEXT
	.globl _Xcpuast
_Xcpuast:
	PUSH_FRAME
	movl	$KDSEL, %eax
	movl	%ax, %ds		/* use KERNEL data segment */
	movl	%ax, %es

	movl	_cpuid, %eax
	lock				/* checkstate_need_ast &= ~(1<<id) */
	btrl	%eax, _checkstate_need_ast
	movl	$0, lapic_eoi		/* End Of Interrupt to APIC */

	lock
	btsl	%eax, _checkstate_pending_ast
	jc	1f			/* ast delivery already in progress */

	FAKE_MCOUNT(12*4(%esp))

	/*
	 * Giant locks do not come cheap.
	 * A lot of cycles are going to be wasted here.
	 */
	call	_get_isrlock

	AVCPL_LOCK
#ifdef CPL_AND_CML
	movl	_cml, %eax
#else
	movl	_cpl, %eax
#endif
	pushl	%eax			/* cpl for doreti */
	orl	$SWI_AST_PENDING, _ipending
	AVCPL_UNLOCK
	lock
	incb	_intr_nesting_level
	sti

	pushl	$0			/* dummy unit number */

	movl	_cpuid, %eax
	lock
	btrl	%eax, _checkstate_pending_ast
	lock
	btrl	%eax, CNAME(resched_cpus)
	jnc	2f
	movl	$1, CNAME(want_resched)
	lock
	incl	CNAME(want_resched_cnt)
2:
	lock
	incl	CNAME(cpuast_cnt)
	MEXITCOUNT
	jmp	_doreti
1:
	/* We are already in the process of delivering an ast for this CPU */
	POP_FRAME
	iret
698
699
/*
 *	 Executed by a CPU when it receives an XFORWARD_IRQ IPI.
 */

	.text
	SUPERALIGN_TEXT
	.globl _Xforward_irq
_Xforward_irq:
	PUSH_FRAME
	movl	$KDSEL, %eax
	movl	%ax, %ds		/* use KERNEL data segment */
	movl	%ax, %es

	movl	$0, lapic_eoi		/* End Of Interrupt to APIC */

	FAKE_MCOUNT(12*4(%esp))

	ISR_TRYLOCK
	testl	%eax,%eax		/* Did we get the lock ? */
	jz  1f				/* No */

	lock
	incl	CNAME(forward_irq_hitcnt)
	cmpb	$4, _intr_nesting_level
	jae	2f			/* too deep, drop the forward */

	AVCPL_LOCK
#ifdef CPL_AND_CML
	movl	_cml, %eax
#else
	movl	_cpl, %eax
#endif
	pushl	%eax			/* cpl for doreti */
	AVCPL_UNLOCK
	lock
	incb	_intr_nesting_level
	sti

	pushl	$0			/* dummy unit number */

	MEXITCOUNT
	jmp	_doreti			/* Handle forwarded interrupt */
1:
	lock
	incl	CNAME(forward_irq_misscnt)
	call	forward_irq	/* Oops, we've lost the isr lock */
	MEXITCOUNT
	POP_FRAME
	iret
2:
	lock
	incl	CNAME(forward_irq_toodeepcnt)
3:
	ISR_RELLOCK
	MEXITCOUNT
	POP_FRAME
	iret
757
/*
 * forward_irq: send an XFORWARD_IRQ IPI to the CPU that holds the MP
 * lock (CPU #0 if the lock is free).  No-op unless IPIs are usable
 * (_invltlb_ok) and forwarding is enabled.  Clobbers %eax, %ecx.
 */
forward_irq:
	MCOUNT
	cmpl	$0,_invltlb_ok
	jz	4f

	cmpl	$0, CNAME(forward_irq_enabled)
	jz	4f

	movl	_mp_lock,%eax		/* top byte is holder's cpu id */
	cmpl	$FREE_LOCK,%eax
	jne	1f
	movl	$0, %eax		/* Pick CPU #0 if noone has lock */
1:
	shrl	$24,%eax
	movl	_cpu_num_to_apic_id(,%eax,4),%ecx
	shll	$24,%ecx
	movl	lapic_icr_hi, %eax	/* set destination APIC id */
	andl	$~APIC_ID_MASK, %eax
	orl	%ecx, %eax
	movl	%eax, lapic_icr_hi

2:
	movl	lapic_icr_lo, %eax	/* wait for idle delivery status */
	andl	$APIC_DELSTAT_MASK,%eax
	jnz	2b
	movl	lapic_icr_lo, %eax
	andl	$APIC_RESV2_MASK, %eax
	orl	$(APIC_DEST_DESTFLD|APIC_DELMODE_FIXED|XFORWARD_IRQ_OFFSET), %eax
	movl	%eax, lapic_icr_lo	/* send the IPI */
3:
	movl	lapic_icr_lo, %eax	/* wait for delivery to complete */
	andl	$APIC_DELSTAT_MASK,%eax
	jnz	3b
4:
	ret
796
/*
 * Executed by a CPU when it receives an Xcpustop IPI from another CPU,
 *
 *  - Signals its receipt.
 *  - Waits for permission to restart.
 *  - Signals its restart.
 */

	.text
	SUPERALIGN_TEXT
	.globl _Xcpustop
_Xcpustop:
	pushl	%ebp
	movl	%esp, %ebp
	pushl	%eax
	pushl	%ecx
	pushl	%edx
	pushl	%ds			/* save current data segment */
	pushl	%es

	movl	$KDSEL, %eax
	movl	%ax, %ds		/* use KERNEL data segment */

	movl	$0, lapic_eoi		/* End Of Interrupt to APIC */

	movl	_cpuid, %eax
	imull	$PCB_SIZE, %eax
	leal	CNAME(stoppcbs)(%eax), %eax
	pushl	%eax
	call	CNAME(savectx)		/* Save process context */
	addl	$4, %esp


	movl	_cpuid, %eax

	lock
	btsl	%eax, _stopped_cpus	/* stopped_cpus |= (1<<id) */
1:
	btl	%eax, _started_cpus	/* while (!(started_cpus & (1<<id))) */
	jnc	1b

	lock
	btrl	%eax, _started_cpus	/* started_cpus &= ~(1<<id) */
	lock
	btrl	%eax, _stopped_cpus	/* stopped_cpus &= ~(1<<id) */

	test	%eax, %eax		/* only CPU 0 runs the restart hook */
	jnz	2f

	movl	CNAME(cpustop_restartfunc), %eax
	test	%eax, %eax
	jz	2f
	movl	$0, CNAME(cpustop_restartfunc)	/* One-shot */

	call	*%eax			/* indirect call needs '*' in AT&T syntax */
2:
	popl	%es
	popl	%ds			/* restore previous data segment */
	popl	%edx
	popl	%ecx
	popl	%eax
	movl	%ebp, %esp
	popl	%ebp
	iret
861
862
/* Instantiate the 24 fast and 24 normal interrupt entry points. */
MCOUNT_LABEL(bintr)
	FAST_INTR(0,fastintr0)
	FAST_INTR(1,fastintr1)
	FAST_INTR(2,fastintr2)
	FAST_INTR(3,fastintr3)
	FAST_INTR(4,fastintr4)
	FAST_INTR(5,fastintr5)
	FAST_INTR(6,fastintr6)
	FAST_INTR(7,fastintr7)
	FAST_INTR(8,fastintr8)
	FAST_INTR(9,fastintr9)
	FAST_INTR(10,fastintr10)
	FAST_INTR(11,fastintr11)
	FAST_INTR(12,fastintr12)
	FAST_INTR(13,fastintr13)
	FAST_INTR(14,fastintr14)
	FAST_INTR(15,fastintr15)
	FAST_INTR(16,fastintr16)
	FAST_INTR(17,fastintr17)
	FAST_INTR(18,fastintr18)
	FAST_INTR(19,fastintr19)
	FAST_INTR(20,fastintr20)
	FAST_INTR(21,fastintr21)
	FAST_INTR(22,fastintr22)
	FAST_INTR(23,fastintr23)
	INTR(0,intr0)
	INTR(1,intr1)
	INTR(2,intr2)
	INTR(3,intr3)
	INTR(4,intr4)
	INTR(5,intr5)
	INTR(6,intr6)
	INTR(7,intr7)
	INTR(8,intr8)
	INTR(9,intr9)
	INTR(10,intr10)
	INTR(11,intr11)
	INTR(12,intr12)
	INTR(13,intr13)
	INTR(14,intr14)
	INTR(15,intr15)
	INTR(16,intr16)
	INTR(17,intr17)
	INTR(18,intr18)
	INTR(19,intr19)
	INTR(20,intr20)
	INTR(21,intr21)
	INTR(22,intr22)
	INTR(23,intr23)
MCOUNT_LABEL(eintr)
913
914	.data
915/*
916 * Addresses of interrupt handlers.
917 *  XresumeNN: Resumption addresses for HWIs.
918 */
919	.globl _ihandlers
920_ihandlers:
921/*
922 * used by:
923 *  ipl.s:	doreti_unpend
924 */
925	.long	Xresume0,  Xresume1,  Xresume2,  Xresume3
926	.long	Xresume4,  Xresume5,  Xresume6,  Xresume7
927	.long	Xresume8,  Xresume9,  Xresume10, Xresume11
928	.long	Xresume12, Xresume13, Xresume14, Xresume15
929	.long	Xresume16, Xresume17, Xresume18, Xresume19
930	.long	Xresume20, Xresume21, Xresume22, Xresume23
931/*
932 * used by:
933 *  ipl.s:	doreti_unpend
934 *  apic_ipl.s:	splz_unpend
935 */
936	.long	_swi_null, swi_net, _swi_null, _swi_null
937	.long	_swi_vm, _swi_null, _softclock, swi_ast
938
939imasks:				/* masks for interrupt handlers */
940	.space	NHWI*4		/* padding; HWI masks are elsewhere */
941
942	.long	SWI_TTY_MASK, SWI_NET_MASK, SWI_CAMNET_MASK, SWI_CAMBIO_MASK
943	.long	SWI_VM_MASK, 0, SWI_CLOCK_MASK, SWI_AST_MASK
944
945/* active flag for lazy masking */
946iactive:
947	.long	0
948
949#ifdef COUNT_XINVLTLB_HITS
950	.globl	_xhits
951_xhits:
952	.space	(NCPU * 4), 0
953#endif /* COUNT_XINVLTLB_HITS */
954
955/* variables used by stop_cpus()/restart_cpus()/Xcpustop */
956	.globl _stopped_cpus, _started_cpus
957_stopped_cpus:
958	.long	0
959_started_cpus:
960	.long	0
961
962#ifdef BETTER_CLOCK
963	.globl _checkstate_probed_cpus
964_checkstate_probed_cpus:
965	.long	0
966#endif /* BETTER_CLOCK */
967	.globl _checkstate_need_ast
968_checkstate_need_ast:
969	.long	0
970_checkstate_pending_ast:
971	.long	0
972	.globl CNAME(forward_irq_misscnt)
973	.globl CNAME(forward_irq_toodeepcnt)
974	.globl CNAME(forward_irq_hitcnt)
975	.globl CNAME(resched_cpus)
976	.globl CNAME(want_resched_cnt)
977	.globl CNAME(cpuast_cnt)
978	.globl CNAME(cpustop_restartfunc)
979CNAME(forward_irq_misscnt):
980	.long 0
981CNAME(forward_irq_hitcnt):
982	.long 0
983CNAME(forward_irq_toodeepcnt):
984	.long 0
985CNAME(resched_cpus):
986	.long 0
987CNAME(want_resched_cnt):
988	.long 0
989CNAME(cpuast_cnt):
990	.long 0
991CNAME(cpustop_restartfunc):
992	.long 0
993
994
995
996	.globl	_apic_pin_trigger
997_apic_pin_trigger:
998	.long	0
999
1000
/*
 * Interrupt counters and names.  The format of these and the label names
 * must agree with what vmstat expects.  The tables are indexed by device
 * ids so that we don't have to move the names around as devices are
 * attached.
 */
#include "vector.h"
	.globl	_intrcnt, _eintrcnt
_intrcnt:
	.space	(NR_DEVICES + ICU_LEN) * 4
_eintrcnt:

	.globl	_intrnames, _eintrnames
_intrnames:
	.ascii	DEVICE_NAMES
	.asciz	"stray irq0"
	.asciz	"stray irq1"
	.asciz	"stray irq2"
	.asciz	"stray irq3"
	.asciz	"stray irq4"
	.asciz	"stray irq5"
	.asciz	"stray irq6"
	.asciz	"stray irq7"
	.asciz	"stray irq8"
	.asciz	"stray irq9"
	.asciz	"stray irq10"
	.asciz	"stray irq11"
	.asciz	"stray irq12"
	.asciz	"stray irq13"
	.asciz	"stray irq14"
	.asciz	"stray irq15"
	.asciz	"stray irq16"
	.asciz	"stray irq17"
	.asciz	"stray irq18"
	.asciz	"stray irq19"
	.asciz	"stray irq20"
	.asciz	"stray irq21"
	.asciz	"stray irq22"
	.asciz	"stray irq23"
_eintrnames:

	.text
1043