apic_vector.s revision 36135
/*
 *	from: vector.s, 386BSD 0.1 unknown origin
 *	$Id: apic_vector.s,v 1.29 1998/04/22 22:49:27 tegge Exp $
 */


#include <machine/apic.h>
#include <machine/smp.h>

#include "i386/isa/intr_machdep.h"


#ifdef FAST_SIMPLELOCK

#define GET_FAST_INTR_LOCK						\
	pushl	$_fast_intr_lock ;		/* address of lock */	\
	call	_s_lock ;			/* MP-safe */		\
	addl	$4,%esp

#define REL_FAST_INTR_LOCK						\
	pushl	$_fast_intr_lock ;		/* address of lock */	\
	call	_s_unlock ;			/* MP-safe */		\
	addl	$4,%esp

#else /* FAST_SIMPLELOCK */

#define GET_FAST_INTR_LOCK						\
	call	_get_isrlock

#define REL_FAST_INTR_LOCK						\
	pushl	$_mp_lock ;	/* GIANT_LOCK */			\
	call	_MPrellock ;						\
	addl	$4, %esp

#endif /* FAST_SIMPLELOCK */
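/*
 * With FAST_SIMPLELOCK, fast interrupts serialize on the dedicated
 * fast_intr_lock spin lock; otherwise they take the giant _mp_lock
 * via _get_isrlock and release it via _MPrellock, like everything else.
 */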

/* convert an absolute IRQ# into a bitmask */
#define IRQ_BIT(irq_num)	(1 << (irq_num))

/* make an index into the IO APIC from the IRQ# */
#define REDTBL_IDX(irq_num)	(0x10 + ((irq_num) * 2))
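/*
 * Worked example: each redirection table entry is two 32-bit registers
 * starting at IO APIC register 0x10, so for IRQ 8:
 *
 *	IRQ_BIT(8)    == 0x00000100
 *	REDTBL_IDX(8) == 0x10 + 8 * 2 == 0x20	(low dword of entry 8)
 */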


/*
 * Macros for interrupt entry, call to handler, and exit.
 */

#ifdef FAST_WITHOUTCPL

/*
 * Fast interrupt entry: save only the call-used registers, call the
 * handler with the fast interrupt lock held, EOI, and tally the
 * interrupt.  No cpl processing is done in this variant.
 */
#define	FAST_INTR(irq_num, vec_name)					\
	.text ;								\
	SUPERALIGN_TEXT ;						\
IDTVEC(vec_name) ;							\
	pushl	%eax ;		/* save only call-used registers */	\
	pushl	%ecx ;							\
	pushl	%edx ;							\
	pushl	%ds ;							\
	MAYBE_PUSHL_ES ;						\
	movl	$KDSEL,%eax ;						\
	movl	%ax,%ds ;						\
	MAYBE_MOVW_AX_ES ;						\
	FAKE_MCOUNT((4+ACTUALLY_PUSHED)*4(%esp)) ;			\
	pushl	_intr_unit + (irq_num) * 4 ;				\
	GET_FAST_INTR_LOCK ;						\
	call	*_intr_handler + (irq_num) * 4 ; /* do the work ASAP */ \
	REL_FAST_INTR_LOCK ;						\
	addl	$4, %esp ;						\
	movl	$0, lapic_eoi ;						\
	lock ;								\
	incl	_cnt+V_INTR ;	/* book-keeping can wait */		\
	movl	_intr_countp + (irq_num) * 4, %eax ;			\
	lock ;								\
	incl	(%eax) ;						\
	MEXITCOUNT ;							\
	MAYBE_POPL_ES ;							\
	popl	%ds ;							\
	popl	%edx ;							\
	popl	%ecx ;							\
	popl	%eax ;							\
	iret

#else /* FAST_WITHOUTCPL */

#define	FAST_INTR(irq_num, vec_name)					\
	.text ;								\
	SUPERALIGN_TEXT ;						\
IDTVEC(vec_name) ;							\
	pushl	%eax ;		/* save only call-used registers */	\
	pushl	%ecx ;							\
	pushl	%edx ;							\
	pushl	%ds ;							\
	MAYBE_PUSHL_ES ;						\
	movl	$KDSEL, %eax ;						\
	movl	%ax, %ds ;						\
	MAYBE_MOVW_AX_ES ;						\
	FAKE_MCOUNT((4+ACTUALLY_PUSHED)*4(%esp)) ;			\
	GET_FAST_INTR_LOCK ;						\
	pushl	_intr_unit + (irq_num) * 4 ;				\
	call	*_intr_handler + (irq_num) * 4 ; /* do the work ASAP */ \
	addl	$4, %esp ;						\
	movl	$0, lapic_eoi ;						\
	lock ;								\
	incl	_cnt+V_INTR ;	/* book-keeping can wait */		\
	movl	_intr_countp + (irq_num) * 4, %eax ;			\
	lock ;								\
	incl	(%eax) ;						\
	movl	_cpl, %eax ;	/* unmasking pending HWIs or SWIs? */	\
	notl	%eax ;							\
	andl	_ipending, %eax ;					\
	jne	2f ;		/* yes, maybe handle them */		\
1: ;									\
	MEXITCOUNT ;							\
	REL_FAST_INTR_LOCK ;						\
	MAYBE_POPL_ES ;							\
	popl	%ds ;							\
	popl	%edx ;							\
	popl	%ecx ;							\
	popl	%eax ;							\
	iret ;								\
;									\
	ALIGN_TEXT ;							\
2: ;									\
	cmpb	$3, _intr_nesting_level ;	/* enough stack? */	\
	jae	1b ;		/* no, return */			\
	movl	_cpl, %eax ;						\
	/* XXX next line is probably unnecessary now. */		\
	movl	$HWI_MASK|SWI_MASK, _cpl ;	/* limit nesting ... */	\
	lock ;								\
	incb	_intr_nesting_level ;	/* ... really limit it ... */	\
	sti ;			/* to do this as early as possible */	\
	MAYBE_POPL_ES ;		/* discard most of thin frame ... */	\
	popl	%ecx ;		/* ... original %ds ... */		\
	popl	%edx ;							\
	xchgl	%eax, 4(%esp) ;	/* orig %eax; save cpl */		\
	pushal ;		/* build fat frame (grrr) ... */	\
	pushl	%ecx ;		/* ... actually %ds ... */		\
	pushl	%es ;							\
	movl	$KDSEL, %eax ;						\
	movl	%ax, %es ;						\
	movl	(2+8+0)*4(%esp), %ecx ;	/* %ecx from thin frame ... */	\
	movl	%ecx, (2+6)*4(%esp) ;	/* ... to fat frame ... */	\
	movl	(2+8+1)*4(%esp), %eax ;	/* ... cpl from thin frame */	\
	pushl	%eax ;							\
	subl	$4, %esp ;	/* junk for unit number */		\
	MEXITCOUNT ;							\
	jmp	_doreti

#endif /* FAST_WITHOUTCPL */
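/*
 * Note on the 2: path above: the "thin" frame (%eax/%ecx/%edx/%ds and
 * possibly %es) is unwound and rebuilt as the fat trap frame that
 * _doreti expects (pushal + %ds + %es + saved cpl + dummy unit), so
 * pending interrupts can be dispatched through the normal doreti path.
 */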


/*
 * Trap-style frame push/pop for the slow (cpl-managed) interrupt
 * vectors below.
 */
#define PUSH_FRAME							\
	pushl	$0 ;		/* dummy error code */			\
	pushl	$0 ;		/* dummy trap type */			\
	pushal ;							\
	pushl	%ds ;		/* save data and extra segments ... */	\
	pushl	%es

#define POP_FRAME							\
	popl	%es ;							\
	popl	%ds ;							\
	popal ;								\
	addl	$4+4,%esp	/* dummy trap type and error code */
#define MASK_IRQ(irq_num)						\
	IMASK_LOCK ;				/* enter critical region */ \
	testl	$IRQ_BIT(irq_num), _apic_imen ;				\
	jne	7f ;			/* already masked, don't mask */ \
	orl	$IRQ_BIT(irq_num), _apic_imen ;	/* set the mask bit */	\
	movl	_ioapic, %ecx ;			/* ioapic[0] addr */	\
	movl	$REDTBL_IDX(irq_num), (%ecx) ;	/* write the index */	\
	movl	IOAPIC_WINDOW(%ecx), %eax ;	/* current value */	\
	orl	$IOART_INTMASK, %eax ;		/* set the mask */	\
	movl	%eax, IOAPIC_WINDOW(%ecx) ;	/* new value */		\
7: ;						/* already masked */	\
	IMASK_UNLOCK
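/*
 * Roughly, in C (illustrative sketch only; "index" and "window" stand
 * for the memory-mapped IO APIC select register at offset 0 and the
 * data register at offset IOAPIC_WINDOW of the window loaded from
 * _ioapic):
 *
 *	if ((apic_imen & IRQ_BIT(n)) == 0) {
 *		apic_imen |= IRQ_BIT(n);
 *		ioapic->index = REDTBL_IDX(n);	   select entry n, lo dword
 *		ioapic->window |= IOART_INTMASK;   set the pin's mask bit
 *	}
 */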
/*
 * Test to see whether we are handling an edge or level triggered INT.
 *  Level-triggered INTs must still be masked as we don't clear the source,
 *  and the EOI cycle would cause redundant INTs to occur.
 */
#define MASK_LEVEL_IRQ(irq_num)						\
	testl	$IRQ_BIT(irq_num), _apic_pin_trigger ;			\
	jz	9f ;				/* edge, don't mask */	\
	MASK_IRQ(irq_num) ;						\
9:


#ifdef APIC_INTR_REORDER
#define EOI_IRQ(irq_num)						\
	movl	_apic_isrbit_location + 8 * (irq_num), %eax ;		\
	movl	(%eax), %eax ;						\
	testl	_apic_isrbit_location + 4 + 8 * (irq_num), %eax ;	\
	jz	9f ;				/* not active */	\
	movl	$0, lapic_eoi ;						\
	APIC_ITRACE(apic_itrace_eoi, irq_num, APIC_ITRACE_EOI) ;	\
9:

#else
#define EOI_IRQ(irq_num)						\
	testl	$IRQ_BIT(irq_num), lapic_isr1 ;				\
	jz	9f ;				/* not active */	\
	movl	$0, lapic_eoi ;						\
	APIC_ITRACE(apic_itrace_eoi, irq_num, APIC_ITRACE_EOI) ;	\
9:
#endif
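/*
 * EOI_IRQ only issues the EOI if this IRQ's bit is still set in the
 * local APIC's in-service register.  The default variant assumes
 * hardware IRQ n enters through vector 32 + n, so its in-service bit
 * is bit n of lapic_isr1, the ISR word covering vectors 32-63; the
 * APIC_INTR_REORDER variant instead looks the word and bit up in the
 * precomputed apic_isrbit_location table.
 */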


/*
 * Test to see if the source is currently masked, clear if so.
 */
#define UNMASK_IRQ(irq_num)						\
	IMASK_LOCK ;				/* enter critical region */ \
	testl	$IRQ_BIT(irq_num), _apic_imen ;				\
	je	7f ;			/* bit clear, not masked */	\
	andl	$~IRQ_BIT(irq_num), _apic_imen ;/* clear mask bit */	\
	movl	_ioapic, %ecx ;			/* ioapic[0] addr */	\
	movl	$REDTBL_IDX(irq_num), (%ecx) ;	/* write the index */	\
	movl	IOAPIC_WINDOW(%ecx), %eax ;	/* current value */	\
	andl	$~IOART_INTMASK, %eax ;		/* clear the mask */	\
	movl	%eax, IOAPIC_WINDOW(%ecx) ;	/* new value */		\
7: ;									\
	IMASK_UNLOCK

#ifdef INTR_SIMPLELOCK
#define ENLOCK
#define DELOCK
#define LATELOCK	call	_get_isrlock
#else
#define ENLOCK \
	ISR_TRYLOCK ;		/* XXX this is going away... */		\
	testl	%eax, %eax ;			/* did we get it? */	\
	jz	3f
#define DELOCK	ISR_RELLOCK
#define LATELOCK
#endif

#ifdef APIC_INTR_DIAGNOSTIC
#ifdef APIC_INTR_DIAGNOSTIC_IRQ
log_intr_event:
	pushf
	cli
	pushl	$CNAME(apic_itrace_debuglock)
	call	_s_lock_np
	addl	$4, %esp
	movl	CNAME(apic_itrace_debugbuffer_idx), %ecx
	andl	$32767, %ecx
	movl	_cpuid, %eax
	shll	$8,	%eax
	orl	8(%esp), %eax
	movw	%ax,	CNAME(apic_itrace_debugbuffer)(,%ecx,2)
	incl	%ecx
	andl	$32767, %ecx
	movl	%ecx,	CNAME(apic_itrace_debugbuffer_idx)
	pushl	$CNAME(apic_itrace_debuglock)
	call	_s_unlock_np
	addl	$4, %esp
	popf
	ret
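/*
 * Each trace record is 16 bits, (cpuid << 8) | event id, stored in the
 * 32768-entry apic_itrace_debugbuffer ring indexed (mod 32768) by
 * apic_itrace_debugbuffer_idx.
 */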


#define APIC_ITRACE(name, irq_num, id)					\
	lock ;					/* MP-safe */		\
	incl	CNAME(name) + (irq_num) * 4 ;				\
	pushl	%eax ;							\
	pushl	%ecx ;							\
	pushl	%edx ;							\
	movl	$(irq_num), %eax ;					\
	cmpl	$APIC_INTR_DIAGNOSTIC_IRQ, %eax ;			\
	jne	7f ;							\
	pushl	$id ;							\
	call	log_intr_event ;					\
	addl	$4, %esp ;						\
7: ;									\
	popl	%edx ;							\
	popl	%ecx ;							\
	popl	%eax
#else
#define APIC_ITRACE(name, irq_num, id)					\
	lock ;					/* MP-safe */		\
	incl	CNAME(name) + (irq_num) * 4
#endif

#define APIC_ITRACE_ENTER 1
#define APIC_ITRACE_EOI 2
#define APIC_ITRACE_TRYISRLOCK 3
#define APIC_ITRACE_GOTISRLOCK 4
#define APIC_ITRACE_ENTER2 5
#define APIC_ITRACE_LEAVE 6
#define APIC_ITRACE_UNMASK 7
#define APIC_ITRACE_ACTIVE 8
#define APIC_ITRACE_MASKED 9
#define APIC_ITRACE_NOISRLOCK 10
#define APIC_ITRACE_MASKED2 11
#define APIC_ITRACE_SPLZ 12
#define APIC_ITRACE_DORETI 13

#else
#define APIC_ITRACE(name, irq_num, id)
#endif

#ifdef CPL_AND_CML

#define	INTR(irq_num, vec_name)						\
	.text ;								\
	SUPERALIGN_TEXT ;						\
/* _XintrNN: entry point used by IDT/HWIs & splz_unpend via _vec[]. */	\
IDTVEC(vec_name) ;							\
	PUSH_FRAME ;							\
	movl	$KDSEL, %eax ;	/* reload with kernel's data segment */	\
	movl	%ax, %ds ;						\
	movl	%ax, %es ;						\
;									\
	APIC_ITRACE(apic_itrace_enter, irq_num, APIC_ITRACE_ENTER) ;	\
	lock ;					/* MP-safe */		\
	btsl	$(irq_num), iactive ;		/* lazy masking */	\
	jc	1f ;				/* already active */	\
;									\
	MASK_LEVEL_IRQ(irq_num) ;					\
	EOI_IRQ(irq_num) ;						\
0: ;									\
	APIC_ITRACE(apic_itrace_tryisrlock, irq_num, APIC_ITRACE_TRYISRLOCK) ;\
	ENLOCK ;							\
;									\
	APIC_ITRACE(apic_itrace_gotisrlock, irq_num, APIC_ITRACE_GOTISRLOCK) ;\
	AVCPL_LOCK ;				/* MP-safe */		\
	testl	$IRQ_BIT(irq_num), _cpl ;				\
	jne	2f ;				/* this INT masked */	\
	testl	$IRQ_BIT(irq_num), _cml ;				\
	jne	2f ;				/* this INT masked */	\
	orl	$IRQ_BIT(irq_num), _cil ;				\
	AVCPL_UNLOCK ;							\
;									\
	incb	_intr_nesting_level ;					\
;									\
  /* entry point used by doreti_unpend for HWIs. */			\
__CONCAT(Xresume,irq_num): ;						\
	FAKE_MCOUNT(12*4(%esp)) ;		/* XXX avoid dbl cnt */ \
	lock ;	incl	_cnt+V_INTR ;		/* tally interrupts */	\
	movl	_intr_countp + (irq_num) * 4, %eax ;			\
	lock ;	incl	(%eax) ;					\
;									\
	AVCPL_LOCK ;				/* MP-safe */		\
	movl	_cml, %eax ;						\
	pushl	%eax ;							\
	orl	_intr_mask + (irq_num) * 4, %eax ;			\
	movl	%eax, _cml ;						\
	AVCPL_UNLOCK ;							\
;									\
	pushl	_intr_unit + (irq_num) * 4 ;				\
	incl	_inside_intr ;						\
	APIC_ITRACE(apic_itrace_enter2, irq_num, APIC_ITRACE_ENTER2) ;	\
	sti ;								\
	call	*_intr_handler + (irq_num) * 4 ;			\
	cli ;								\
	APIC_ITRACE(apic_itrace_leave, irq_num, APIC_ITRACE_LEAVE) ;	\
	decl	_inside_intr ;						\
;									\
	lock ;	andl	$~IRQ_BIT(irq_num), iactive ;			\
	lock ;	andl	$~IRQ_BIT(irq_num), _cil ;			\
	UNMASK_IRQ(irq_num) ;						\
	APIC_ITRACE(apic_itrace_unmask, irq_num, APIC_ITRACE_UNMASK) ;	\
	sti ;				/* doreti repeats cli/sti */	\
	MEXITCOUNT ;							\
	LATELOCK ;							\
	jmp	_doreti ;						\
;									\
	ALIGN_TEXT ;							\
1: ;						/* active */		\
	APIC_ITRACE(apic_itrace_active, irq_num, APIC_ITRACE_ACTIVE) ;	\
	MASK_IRQ(irq_num) ;						\
	EOI_IRQ(irq_num) ;						\
	AVCPL_LOCK ;				/* MP-safe */		\
	orl	$IRQ_BIT(irq_num), _ipending ;				\
	AVCPL_UNLOCK ;							\
	lock ;								\
	btsl	$(irq_num), iactive ;		/* still active */	\
	jnc	0b ;				/* retry */		\
	POP_FRAME ;							\
	iret ;								\
;									\
	ALIGN_TEXT ;							\
2: ;						/* masked by cpl|cml */	\
	APIC_ITRACE(apic_itrace_masked, irq_num, APIC_ITRACE_MASKED) ;	\
	orl	$IRQ_BIT(irq_num), _ipending ;				\
	AVCPL_UNLOCK ;							\
	DELOCK ;		/* XXX this is going away... */		\
	POP_FRAME ;							\
	iret ;								\
	ALIGN_TEXT ;							\
3: ;				/* other cpu has isr lock */		\
	APIC_ITRACE(apic_itrace_noisrlock, irq_num, APIC_ITRACE_NOISRLOCK) ;\
	AVCPL_LOCK ;				/* MP-safe */		\
	orl	$IRQ_BIT(irq_num), _ipending ;				\
	testl	$IRQ_BIT(irq_num), _cpl ;				\
	jne	4f ;				/* this INT masked */	\
	testl	$IRQ_BIT(irq_num), _cml ;				\
	jne	4f ;				/* this INT masked */	\
	orl	$IRQ_BIT(irq_num), _cil ;				\
	AVCPL_UNLOCK ;							\
	call	forward_irq ;	/* forward irq to lock holder */	\
	POP_FRAME ;				/* and return */	\
	iret ;								\
	ALIGN_TEXT ;							\
4: ;						/* blocked */		\
	APIC_ITRACE(apic_itrace_masked2, irq_num, APIC_ITRACE_MASKED2) ;\
	AVCPL_UNLOCK ;							\
	POP_FRAME ;				/* and return */	\
	iret

#else /* CPL_AND_CML */

#define	INTR(irq_num, vec_name)						\
	.text ;								\
	SUPERALIGN_TEXT ;						\
/* _XintrNN: entry point used by IDT/HWIs & splz_unpend via _vec[]. */	\
IDTVEC(vec_name) ;							\
	PUSH_FRAME ;							\
	movl	$KDSEL, %eax ;	/* reload with kernel's data segment */	\
	movl	%ax, %ds ;						\
	movl	%ax, %es ;						\
;									\
	APIC_ITRACE(apic_itrace_enter, irq_num, APIC_ITRACE_ENTER) ;	\
	lock ;					/* MP-safe */		\
	btsl	$(irq_num), iactive ;		/* lazy masking */	\
	jc	1f ;				/* already active */	\
;									\
	MASK_LEVEL_IRQ(irq_num) ;					\
	EOI_IRQ(irq_num) ;						\
0: ;									\
	APIC_ITRACE(apic_itrace_tryisrlock, irq_num, APIC_ITRACE_TRYISRLOCK) ;\
	ISR_TRYLOCK ;		/* XXX this is going away... */		\
	testl	%eax, %eax ;			/* did we get it? */	\
	jz	3f ;				/* no */		\
;									\
	APIC_ITRACE(apic_itrace_gotisrlock, irq_num, APIC_ITRACE_GOTISRLOCK) ;\
	AVCPL_LOCK ;				/* MP-safe */		\
	testl	$IRQ_BIT(irq_num), _cpl ;				\
	jne	2f ;				/* this INT masked */	\
	AVCPL_UNLOCK ;							\
;									\
	incb	_intr_nesting_level ;					\
;									\
  /* entry point used by doreti_unpend for HWIs. */			\
__CONCAT(Xresume,irq_num): ;						\
	FAKE_MCOUNT(12*4(%esp)) ;		/* XXX avoid dbl cnt */ \
	lock ;	incl	_cnt+V_INTR ;		/* tally interrupts */	\
	movl	_intr_countp + (irq_num) * 4, %eax ;			\
	lock ;	incl	(%eax) ;					\
;									\
	AVCPL_LOCK ;				/* MP-safe */		\
	movl	_cpl, %eax ;						\
	pushl	%eax ;							\
	orl	_intr_mask + (irq_num) * 4, %eax ;			\
	movl	%eax, _cpl ;						\
	andl	$~IRQ_BIT(irq_num), _ipending ;				\
	AVCPL_UNLOCK ;							\
;									\
	pushl	_intr_unit + (irq_num) * 4 ;				\
	APIC_ITRACE(apic_itrace_enter2, irq_num, APIC_ITRACE_ENTER2) ;	\
	sti ;								\
	call	*_intr_handler + (irq_num) * 4 ;			\
	cli ;								\
	APIC_ITRACE(apic_itrace_leave, irq_num, APIC_ITRACE_LEAVE) ;	\
;									\
	lock ;	andl	$~IRQ_BIT(irq_num), iactive ;			\
	UNMASK_IRQ(irq_num) ;						\
	APIC_ITRACE(apic_itrace_unmask, irq_num, APIC_ITRACE_UNMASK) ;	\
	sti ;				/* doreti repeats cli/sti */	\
	MEXITCOUNT ;							\
	jmp	_doreti ;						\
;									\
	ALIGN_TEXT ;							\
1: ;						/* active */		\
	APIC_ITRACE(apic_itrace_active, irq_num, APIC_ITRACE_ACTIVE) ;	\
	MASK_IRQ(irq_num) ;						\
	EOI_IRQ(irq_num) ;						\
	AVCPL_LOCK ;				/* MP-safe */		\
	orl	$IRQ_BIT(irq_num), _ipending ;				\
	AVCPL_UNLOCK ;							\
	lock ;								\
	btsl	$(irq_num), iactive ;		/* still active */	\
	jnc	0b ;				/* retry */		\
	POP_FRAME ;							\
	iret ;		/* XXX: iactive bit might be 0 now */		\
	ALIGN_TEXT ;							\
2: ;				/* masked by cpl, leave iactive set */	\
	APIC_ITRACE(apic_itrace_masked, irq_num, APIC_ITRACE_MASKED) ;	\
	orl	$IRQ_BIT(irq_num), _ipending ;				\
	AVCPL_UNLOCK ;							\
	ISR_RELLOCK ;		/* XXX this is going away... */		\
	POP_FRAME ;							\
	iret ;								\
	ALIGN_TEXT ;							\
3: ;				/* other cpu has isr lock */		\
	APIC_ITRACE(apic_itrace_noisrlock, irq_num, APIC_ITRACE_NOISRLOCK) ;\
	AVCPL_LOCK ;				/* MP-safe */		\
	orl	$IRQ_BIT(irq_num), _ipending ;				\
	testl	$IRQ_BIT(irq_num), _cpl ;				\
	jne	4f ;				/* this INT masked */	\
	AVCPL_UNLOCK ;							\
	call	forward_irq ;	/* forward irq to lock holder */	\
	POP_FRAME ;				/* and return */	\
	iret ;								\
	ALIGN_TEXT ;							\
4: ;						/* blocked */		\
	APIC_ITRACE(apic_itrace_masked2, irq_num, APIC_ITRACE_MASKED2) ;\
	AVCPL_UNLOCK ;							\
	POP_FRAME ;				/* and return */	\
	iret

#endif /* CPL_AND_CML */
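/*
 * INTR() control flow, in outline:
 *	entry:	atomically set this IRQ's iactive bit ("lazy masking");
 *		if it was already set, go to 1:
 *	0:	try for the ISR lock; if another CPU holds it, go to 3:;
 *		if the IRQ is masked by cpl (and cml), go to 2:
 *	Xresume<n>: tally, raise the mask, call the handler, clear
 *		iactive, unmask, and finish via _doreti
 *	1:	(re-entered while active) mask + EOI, mark ipending,
 *		retry at 0: if iactive has meanwhile cleared, else iret
 *	2:	(masked) mark ipending, release the lock, iret
 *	3:	(lock busy) mark ipending and, unless masked (4:),
 *		forward the IRQ to the lock holder, then iret
 */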


/*
 * Handle "spurious INTerrupts".
 * Notes:
 *  This is different from the "spurious INTerrupt" generated by an
 *   8259 PIC for missing INTs.  See the APIC documentation for details.
 *  This routine should NOT do an 'EOI' cycle.
 */
	.text
	SUPERALIGN_TEXT
	.globl _Xspuriousint
_Xspuriousint:

	/* No EOI cycle used here */

	iret


/*
 * Handle TLB shootdowns.
 */
	.text
	SUPERALIGN_TEXT
	.globl	_Xinvltlb
_Xinvltlb:
	pushl	%eax

#ifdef COUNT_XINVLTLB_HITS
	ss
	movl	_cpuid, %eax
	ss
	incl	_xhits(,%eax,4)
#endif /* COUNT_XINVLTLB_HITS */

	movl	%cr3, %eax		/* invalidate the TLB */
	movl	%eax, %cr3

	ss				/* stack segment, avoid %ds load */
	movl	$0, lapic_eoi		/* End Of Interrupt to APIC */

	popl	%eax
	iret

#ifdef BETTER_CLOCK

/*
 * Executed by a CPU when it receives an Xcpucheckstate IPI from another CPU.
 *
 *  - Stores current cpu state in checkstate_cpustate[cpuid]
 *      0 == user, 1 == sys, 2 == intr
 *  - Stores current process in checkstate_curproc[cpuid]
 *
 *  - Signals its receipt by setting bit cpuid in checkstate_probed_cpus.
 *
 * stack: 0 -> ds, 4 -> ebx, 8 -> eax, 12 -> eip, 16 -> cs, 20 -> eflags
 */

	.text
	SUPERALIGN_TEXT
	.globl _Xcpucheckstate
	.globl _checkstate_cpustate
	.globl _checkstate_curproc
	.globl _checkstate_pc
_Xcpucheckstate:
	pushl	%eax
	pushl	%ebx
	pushl	%ds			/* save current data segment */

	movl	$KDSEL, %eax
	movl	%ax, %ds		/* use KERNEL data segment */

	movl	$0, lapic_eoi		/* End Of Interrupt to APIC */

	movl	$0, %ebx
	movl	16(%esp), %eax
	andl	$3, %eax
	cmpl	$3, %eax
	je	1f
#ifdef VM86
	testl	$PSL_VM, 20(%esp)
	jne	1f
#endif
	incl	%ebx			/* system or interrupt */
#ifdef CPL_AND_CML
	cmpl	$0, _inside_intr
	je	1f
	incl	%ebx			/* interrupt */
#endif
1:
	movl	_cpuid, %eax
	movl	%ebx, _checkstate_cpustate(,%eax,4)
	movl	_curproc, %ebx
	movl	%ebx, _checkstate_curproc(,%eax,4)
	movl	12(%esp), %ebx
	movl	%ebx, _checkstate_pc(,%eax,4)

	lock				/* checkstate_probed_cpus |= (1<<id) */
	btsl	%eax, _checkstate_probed_cpus

	popl	%ds			/* restore previous data segment */
	popl	%ebx
	popl	%eax
	iret
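/*
 * State classification above, in outline (illustrative):
 *
 *	if ((frame.cs & 3) == 3 || (frame.eflags & PSL_VM))
 *		state = 0;			user (or vm86)
 *	else
 *		state = inside_intr ? 2 : 1;	intr : sys
 *			(intr only distinguished with CPL_AND_CML)
 */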

#endif /* BETTER_CLOCK */

/*
 * Executed by a CPU when it receives an Xcpuast IPI from another CPU.
 *
 *  - Signals its receipt by clearing bit cpuid in checkstate_need_ast.
 *
 *  - We need a better method of triggering asts on other cpus.
 */

	.text
	SUPERALIGN_TEXT
	.globl _Xcpuast
_Xcpuast:
	PUSH_FRAME
	movl	$KDSEL, %eax
	movl	%ax, %ds		/* use KERNEL data segment */
	movl	%ax, %es

	movl	_cpuid, %eax
	lock				/* checkstate_need_ast &= ~(1<<id) */
	btrl	%eax, _checkstate_need_ast
	movl	$0, lapic_eoi		/* End Of Interrupt to APIC */

	lock
	btsl	%eax, _checkstate_pending_ast
	jc	1f

	FAKE_MCOUNT(12*4(%esp))

	/*
	 * Giant locks do not come cheap.
	 * A lot of cycles are going to be wasted here.
	 */
	call	_get_isrlock

	AVCPL_LOCK
#ifdef CPL_AND_CML
	movl	_cml, %eax
#else
	movl	_cpl, %eax
#endif
	pushl	%eax
	orl	$SWI_AST_PENDING, _ipending
	AVCPL_UNLOCK
	lock
	incb	_intr_nesting_level
	sti

	pushl	$0

	movl	_cpuid, %eax
	lock
	btrl	%eax, _checkstate_pending_ast
	lock
	btrl	%eax, CNAME(resched_cpus)
	jz	2f
	movl	$1, CNAME(want_resched)
	lock
	incl	CNAME(want_resched_cnt)
2:
	lock
	incl	CNAME(cpuast_cnt)
	MEXITCOUNT
	jmp	_doreti
1:
	/* We are already in the process of delivering an ast for this CPU */
	POP_FRAME
	iret
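/*
 * In outline: _Xcpuast acknowledges the request by clearing its bit in
 * checkstate_need_ast, posts SWI_AST_PENDING in _ipending, and enters
 * _doreti with a normal interrupt frame so the AST is delivered through
 * the usual software interrupt path; checkstate_pending_ast keeps a
 * second Xcpuast from being processed while one is already in progress.
 */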


/*
 * Executed by a CPU when it receives an XFORWARD_IRQ IPI.
 */

	.text
	SUPERALIGN_TEXT
	.globl _Xforward_irq
_Xforward_irq:
	PUSH_FRAME
	movl	$KDSEL, %eax
	movl	%ax, %ds		/* use KERNEL data segment */
	movl	%ax, %es

	movl	$0, lapic_eoi		/* End Of Interrupt to APIC */

	FAKE_MCOUNT(12*4(%esp))

	ISR_TRYLOCK
	testl	%eax,%eax		/* did we get the lock? */
	jz	1f			/* no */

	lock
	incl	CNAME(forward_irq_hitcnt)
	cmpb	$4, _intr_nesting_level
	jae	2f

	jmp	3f			/* XXX skips the doreti path below */

	AVCPL_LOCK
#ifdef CPL_AND_CML
	movl	_cml, %eax
#else
	movl	_cpl, %eax
#endif
	pushl	%eax
	AVCPL_UNLOCK
	lock
	incb	_intr_nesting_level
	sti

	pushl	$0

	MEXITCOUNT
	jmp	_doreti			/* Handle forwarded interrupt */
4:
	lock
	decb	_intr_nesting_level
	ISR_RELLOCK
	MEXITCOUNT
	addl	$8, %esp
	POP_FRAME
	iret
1:
	lock
	incl	CNAME(forward_irq_misscnt)
	call	forward_irq	/* Oops, we've lost the isr lock */
	MEXITCOUNT
	POP_FRAME
	iret
2:
	lock
	incl	CNAME(forward_irq_toodeepcnt)
3:
	ISR_RELLOCK
	MEXITCOUNT
	POP_FRAME
	iret

/*
 * forward_irq: send an XFORWARD_IRQ IPI to the CPU holding the giant
 * lock (CPU #0 if the lock is free), so the pending interrupt is
 * serviced there.
 */
forward_irq:
	MCOUNT
	cmpl	$0,_invltlb_ok
	jz	4f

	cmpl	$0, CNAME(forward_irq_enabled)
	jz	4f

	movl	_mp_lock,%eax
	cmpl	$FREE_LOCK,%eax
	jne	1f
	movl	$0, %eax		/* pick CPU #0 if no one has lock */
1:
	shrl	$24,%eax
	movl	_cpu_num_to_apic_id(,%eax,4),%ecx
	shll	$24,%ecx
	movl	lapic_icr_hi, %eax
	andl	$~APIC_ID_MASK, %eax
	orl	%ecx, %eax
	movl	%eax, lapic_icr_hi

2:
	movl	lapic_icr_lo, %eax
	andl	$APIC_DELSTAT_MASK,%eax
	jnz	2b
	movl	lapic_icr_lo, %eax
	andl	$APIC_RESV2_MASK, %eax
	orl	$(APIC_DEST_DESTFLD|APIC_DELMODE_FIXED|XFORWARD_IRQ_OFFSET), %eax
	movl	%eax, lapic_icr_lo
3:
	movl	lapic_icr_lo, %eax
	andl	$APIC_DELSTAT_MASK,%eax
	jnz	3b
4:
	ret
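/*
 * The IPI send sequence above, in outline (illustrative only):
 *
 *	holder = mp_lock >> 24;			   owner, or 0 if free
 *	icr_hi = (icr_hi & ~APIC_ID_MASK) |
 *	    (cpu_num_to_apic_id[holder] << 24);	   select destination
 *	while (icr_lo & APIC_DELSTAT_MASK)	   wait until ICR idle
 *		;
 *	icr_lo = (icr_lo & APIC_RESV2_MASK) | APIC_DEST_DESTFLD |
 *	    APIC_DELMODE_FIXED | XFORWARD_IRQ_OFFSET;
 *	while (icr_lo & APIC_DELSTAT_MASK)	   wait for delivery
 *		;
 */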

/*
 * Executed by a CPU when it receives an Xcpustop IPI from another CPU.
 *
 *  - Signals its receipt.
 *  - Waits for permission to restart.
 *  - Signals its restart.
 */

	.text
	SUPERALIGN_TEXT
	.globl _Xcpustop
_Xcpustop:
	pushl	%ebp
	movl	%esp, %ebp
	pushl	%eax
	pushl	%ecx
	pushl	%edx
	pushl	%ds			/* save current data segment */
	pushl	%es

	movl	$KDSEL, %eax
	movl	%ax, %ds		/* use KERNEL data segment */

	movl	$0, lapic_eoi		/* End Of Interrupt to APIC */

	movl	_cpuid, %eax
	imull	$PCB_SIZE, %eax
	leal	CNAME(stoppcbs)(%eax), %eax
	pushl	%eax
	call	CNAME(savectx)		/* Save process context */
	addl	$4, %esp

	movl	_cpuid, %eax

	lock
	btsl	%eax, _stopped_cpus	/* stopped_cpus |= (1<<id) */
1:
	btl	%eax, _started_cpus	/* while (!(started_cpus & (1<<id))) */
	jnc	1b

	lock
	btrl	%eax, _started_cpus	/* started_cpus &= ~(1<<id) */
	lock
	btrl	%eax, _stopped_cpus	/* stopped_cpus &= ~(1<<id) */

	test	%eax, %eax
	jnz	2f

	movl	CNAME(cpustop_restartfunc), %eax
	test	%eax, %eax
	jz	2f
	movl	$0, CNAME(cpustop_restartfunc)	/* One-shot */

	call	*%eax
2:
	popl	%es
	popl	%ds			/* restore previous data segment */
	popl	%edx
	popl	%ecx
	popl	%eax
	movl	%ebp, %esp
	popl	%ebp
	iret
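/*
 * In outline (illustrative C, atomics abbreviated):
 *
 *	savectx(&stoppcbs[cpuid]);		   save register state
 *	stopped_cpus |= 1 << cpuid;		   signal receipt
 *	while ((started_cpus & (1 << cpuid)) == 0)
 *		;				   wait for restart
 *	started_cpus &= ~(1 << cpuid);
 *	stopped_cpus &= ~(1 << cpuid);		   signal restart
 *	if (cpuid == 0 && cpustop_restartfunc)
 *		(*cpustop_restartfunc)();	   one-shot callback
 */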


MCOUNT_LABEL(bintr)
	FAST_INTR(0,fastintr0)
	FAST_INTR(1,fastintr1)
	FAST_INTR(2,fastintr2)
	FAST_INTR(3,fastintr3)
	FAST_INTR(4,fastintr4)
	FAST_INTR(5,fastintr5)
	FAST_INTR(6,fastintr6)
	FAST_INTR(7,fastintr7)
	FAST_INTR(8,fastintr8)
	FAST_INTR(9,fastintr9)
	FAST_INTR(10,fastintr10)
	FAST_INTR(11,fastintr11)
	FAST_INTR(12,fastintr12)
	FAST_INTR(13,fastintr13)
	FAST_INTR(14,fastintr14)
	FAST_INTR(15,fastintr15)
	FAST_INTR(16,fastintr16)
	FAST_INTR(17,fastintr17)
	FAST_INTR(18,fastintr18)
	FAST_INTR(19,fastintr19)
	FAST_INTR(20,fastintr20)
	FAST_INTR(21,fastintr21)
	FAST_INTR(22,fastintr22)
	FAST_INTR(23,fastintr23)
	INTR(0,intr0)
	INTR(1,intr1)
	INTR(2,intr2)
	INTR(3,intr3)
	INTR(4,intr4)
	INTR(5,intr5)
	INTR(6,intr6)
	INTR(7,intr7)
	INTR(8,intr8)
	INTR(9,intr9)
	INTR(10,intr10)
	INTR(11,intr11)
	INTR(12,intr12)
	INTR(13,intr13)
	INTR(14,intr14)
	INTR(15,intr15)
	INTR(16,intr16)
	INTR(17,intr17)
	INTR(18,intr18)
	INTR(19,intr19)
	INTR(20,intr20)
	INTR(21,intr21)
	INTR(22,intr22)
	INTR(23,intr23)
MCOUNT_LABEL(eintr)

	.data
/*
 * Addresses of interrupt handlers.
 *  XresumeNN: Resumption addresses for HWIs.
 */
	.globl _ihandlers
_ihandlers:
ihandlers:
/*
 * used by:
 *  ipl.s:	doreti_unpend
 */
	.long	Xresume0,  Xresume1,  Xresume2,  Xresume3
	.long	Xresume4,  Xresume5,  Xresume6,  Xresume7
	.long	Xresume8,  Xresume9,  Xresume10, Xresume11
	.long	Xresume12, Xresume13, Xresume14, Xresume15
	.long	Xresume16, Xresume17, Xresume18, Xresume19
	.long	Xresume20, Xresume21, Xresume22, Xresume23
/*
 * used by:
 *  ipl.s:	doreti_unpend
 *  apic_ipl.s:	splz_unpend
 */
	.long	swi_tty, swi_net
	.long	dummycamisr, dummycamisr
	.long	_swi_vm, 0
	.long	_softclock, swi_ast

imasks:				/* masks for interrupt handlers */
	.space	NHWI*4		/* padding; HWI masks are elsewhere */

	.long	SWI_TTY_MASK, SWI_NET_MASK
	.long	SWI_CAMNET_MASK, SWI_CAMBIO_MASK
	.long	SWI_VM_MASK, 0
	.long	SWI_CLOCK_MASK, SWI_AST_MASK

/* active flag for lazy masking */
iactive:
	.long	0

#ifdef COUNT_XINVLTLB_HITS
	.globl	_xhits
_xhits:
	.space	(NCPU * 4), 0
#endif /* COUNT_XINVLTLB_HITS */

/* variables used by stop_cpus()/restart_cpus()/Xcpustop */
	.globl _stopped_cpus, _started_cpus
_stopped_cpus:
	.long	0
_started_cpus:
	.long	0

#ifdef BETTER_CLOCK
	.globl _checkstate_probed_cpus
_checkstate_probed_cpus:
	.long	0
#endif /* BETTER_CLOCK */
	.globl _checkstate_need_ast
_checkstate_need_ast:
	.long	0
_checkstate_pending_ast:
	.long	0
	.globl CNAME(forward_irq_misscnt)
	.globl CNAME(forward_irq_toodeepcnt)
	.globl CNAME(forward_irq_hitcnt)
	.globl CNAME(resched_cpus)
	.globl CNAME(want_resched_cnt)
	.globl CNAME(cpuast_cnt)
	.globl CNAME(cpustop_restartfunc)
CNAME(forward_irq_misscnt):
	.long 0
CNAME(forward_irq_hitcnt):
	.long 0
CNAME(forward_irq_toodeepcnt):
	.long 0
CNAME(resched_cpus):
	.long 0
CNAME(want_resched_cnt):
	.long 0
CNAME(cpuast_cnt):
	.long 0
CNAME(cpustop_restartfunc):
	.long 0

	.globl	_apic_pin_trigger
_apic_pin_trigger:
	.space	(NAPIC * 4), 0

/*
 * Interrupt counters and names.  The format of these and the label names
 * must agree with what vmstat expects.  The tables are indexed by device
 * ids so that we don't have to move the names around as devices are
 * attached.
 */
#include "vector.h"
	.globl	_intrcnt, _eintrcnt
_intrcnt:
	.space	(NR_DEVICES + ICU_LEN) * 4
_eintrcnt:

	.globl	_intrnames, _eintrnames
_intrnames:
	.ascii	DEVICE_NAMES
	.asciz	"stray irq0"
	.asciz	"stray irq1"
	.asciz	"stray irq2"
	.asciz	"stray irq3"
	.asciz	"stray irq4"
	.asciz	"stray irq5"
	.asciz	"stray irq6"
	.asciz	"stray irq7"
	.asciz	"stray irq8"
	.asciz	"stray irq9"
	.asciz	"stray irq10"
	.asciz	"stray irq11"
	.asciz	"stray irq12"
	.asciz	"stray irq13"
	.asciz	"stray irq14"
	.asciz	"stray irq15"
	.asciz	"stray irq16"
	.asciz	"stray irq17"
	.asciz	"stray irq18"
	.asciz	"stray irq19"
	.asciz	"stray irq20"
	.asciz	"stray irq21"
	.asciz	"stray irq22"
	.asciz	"stray irq23"
_eintrnames:

	.text