apic_vector.s revision 38824
/*
 *	from: vector.s, 386BSD 0.1 unknown origin
 *	$Id: apic_vector.s,v 1.32 1998/08/11 17:01:32 bde Exp $
 */


#include <machine/apic.h>
#include <machine/smp.h>

#include "i386/isa/intr_machdep.h"


#ifdef FAST_SIMPLELOCK

#define GET_FAST_INTR_LOCK						\
	pushl	$_fast_intr_lock ;		/* address of lock */	\
	call	_s_lock ;			/* MP-safe */		\
	addl	$4,%esp

#define REL_FAST_INTR_LOCK						\
	pushl	$_fast_intr_lock ;		/* address of lock */	\
	call	_s_unlock ;			/* MP-safe */		\
	addl	$4,%esp

#else /* FAST_SIMPLELOCK */

#define GET_FAST_INTR_LOCK						\
	call	_get_isrlock

#define REL_FAST_INTR_LOCK						\
	pushl	$_mp_lock ;	/* GIANT_LOCK */			\
	call	_MPrellock ;						\
	add	$4, %esp

#endif /* FAST_SIMPLELOCK */

/* convert an absolute IRQ# into a bitmask */
#define IRQ_BIT(irq_num)	(1 << (irq_num))

/* make an index into the IO APIC from the IRQ# */
#define REDTBL_IDX(irq_num)	(0x10 + ((irq_num) * 2))
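
/*
 * Example: for IRQ 9, IRQ_BIT(9) == 0x00000200 and REDTBL_IDX(9) == 0x22,
 * the register-select value for the low half of redirection table entry 9
 * (each entry is a pair of 32-bit registers starting at select 0x10).
 */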


/*
 * Macros for interrupt entry, call to handler, and exit.
 */

#ifdef FAST_WITHOUTCPL

/*
 * Fast interrupt entry: the handler is called as soon as possible, with
 * the EOI and the bookkeeping deferred until after it returns.
 */
#define	FAST_INTR(irq_num, vec_name)					\
	.text ;								\
	SUPERALIGN_TEXT ;						\
IDTVEC(vec_name) ;							\
	pushl	%eax ;		/* save only call-used registers */	\
	pushl	%ecx ;							\
	pushl	%edx ;							\
	pushl	%ds ;							\
	MAYBE_PUSHL_ES ;						\
	movl	$KDSEL,%eax ;						\
	movl	%ax,%ds ;						\
	MAYBE_MOVW_AX_ES ;						\
	FAKE_MCOUNT((4+ACTUALLY_PUSHED)*4(%esp)) ;			\
	pushl	_intr_unit + (irq_num) * 4 ;				\
	GET_FAST_INTR_LOCK ;						\
	call	*_intr_handler + (irq_num) * 4 ; /* do the work ASAP */ \
	REL_FAST_INTR_LOCK ;						\
	addl	$4, %esp ;						\
	movl	$0, lapic_eoi ;						\
	lock ;								\
	incl	_cnt+V_INTR ;	/* book-keeping can wait */		\
	movl	_intr_countp + (irq_num) * 4, %eax ;			\
	lock ;								\
	incl	(%eax) ;						\
	MEXITCOUNT ;							\
	MAYBE_POPL_ES ;							\
	popl	%ds ;							\
	popl	%edx ;							\
	popl	%ecx ;							\
	popl	%eax ;							\
	iret

#else /* FAST_WITHOUTCPL */

#define	FAST_INTR(irq_num, vec_name)					\
	.text ;								\
	SUPERALIGN_TEXT ;						\
IDTVEC(vec_name) ;							\
	pushl	%eax ;		/* save only call-used registers */	\
	pushl	%ecx ;							\
	pushl	%edx ;							\
	pushl	%ds ;							\
	MAYBE_PUSHL_ES ;						\
	movl	$KDSEL, %eax ;						\
	movl	%ax, %ds ;						\
	MAYBE_MOVW_AX_ES ;						\
	FAKE_MCOUNT((4+ACTUALLY_PUSHED)*4(%esp)) ;			\
	GET_FAST_INTR_LOCK ;						\
	pushl	_intr_unit + (irq_num) * 4 ;				\
	call	*_intr_handler + (irq_num) * 4 ; /* do the work ASAP */ \
	addl	$4, %esp ;						\
	movl	$0, lapic_eoi ;						\
	lock ;								\
	incl	_cnt+V_INTR ;	/* book-keeping can wait */		\
	movl	_intr_countp + (irq_num) * 4,%eax ;			\
	lock ;								\
	incl	(%eax) ;						\
	movl	_cpl, %eax ;	/* unmasking pending HWIs or SWIs? */	\
	notl	%eax ;							\
	andl	_ipending, %eax ;					\
	jne	2f ; 		/* yes, maybe handle them */		\
1: ;									\
	MEXITCOUNT ;							\
	REL_FAST_INTR_LOCK ;						\
	MAYBE_POPL_ES ;							\
	popl	%ds ;							\
	popl	%edx ;							\
	popl	%ecx ;							\
	popl	%eax ;							\
	iret ;								\
;									\
	ALIGN_TEXT ;							\
2: ;									\
	cmpb	$3, _intr_nesting_level ;	/* enough stack? */	\
	jae	1b ;		/* no, return */			\
	movl	_cpl, %eax ;						\
	/* XXX next line is probably unnecessary now. */		\
	movl	$HWI_MASK|SWI_MASK, _cpl ;	/* limit nesting ... */	\
	lock ;								\
	incb	_intr_nesting_level ;	/* ... really limit it ... */	\
	sti ;			/* to do this as early as possible */	\
	MAYBE_POPL_ES ;		/* discard most of thin frame ... */	\
	popl	%ecx ;		/* ... original %ds ... */		\
	popl	%edx ;							\
	xchgl	%eax, 4(%esp) ;	/* orig %eax; save cpl */		\
	pushal ;		/* build fat frame (grrr) ... */	\
	pushl	%ecx ;		/* ... actually %ds ... */		\
	pushl	%es ;							\
	movl	$KDSEL, %eax ;						\
	movl	%ax, %es ;						\
	movl	(2+8+0)*4(%esp), %ecx ;	/* %ecx from thin frame ... */	\
	movl	%ecx, (2+6)*4(%esp) ;	/* ... to fat frame ... */	\
	movl	(2+8+1)*4(%esp), %eax ;	/* ... cpl from thin frame */	\
	pushl	%eax ;							\
	subl	$4, %esp ;	/* junk for unit number */		\
	MEXITCOUNT ;							\
	jmp	_doreti
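
/*
 * The 2: path above cannot simply iret once it finds unmasked bits set in
 * _ipending, so it discards the thin frame, rebuilds the fat frame that
 * _doreti expects, and jumps there to run the pending handlers.
 */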

#endif /* FAST_WITHOUTCPL */


/*
 * Macros for building and tearing down the interrupt stack frame.
 */
#define PUSH_FRAME							\
	pushl	$0 ;		/* dummy error code */			\
	pushl	$0 ;		/* dummy trap type */			\
	pushal ;							\
	pushl	%ds ;		/* save data and extra segments ... */	\
	pushl	%es

#define POP_FRAME							\
	popl	%es ;							\
	popl	%ds ;							\
	popal ;								\
	addl	$4+4,%esp
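
/*
 * After PUSH_FRAME the stack holds, from %esp up: %es, %ds, the eight
 * pushal registers, the dummy trap type and error code, and finally the
 * %eip/%cs/%eflags saved by the interrupt itself; this is the trapframe
 * layout that POP_FRAME and _doreti expect.
 */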

#define MASK_IRQ(irq_num)						\
	IMASK_LOCK ;				/* into critical reg */	\
	testl	$IRQ_BIT(irq_num), _apic_imen ;				\
	jne	7f ;			/* masked, don't mask */	\
	orl	$IRQ_BIT(irq_num), _apic_imen ;	/* set the mask bit */	\
	movl	_ioapic, %ecx ;			/* ioapic[0] addr */	\
	movl	$REDTBL_IDX(irq_num), (%ecx) ;	/* write the index */	\
	movl	IOAPIC_WINDOW(%ecx), %eax ;	/* current value */	\
	orl	$IOART_INTMASK, %eax ;		/* set the mask */	\
	movl	%eax, IOAPIC_WINDOW(%ecx) ;	/* new value */		\
7: ;						/* already masked */	\
	IMASK_UNLOCK
/*
 * Test to see whether we are handling an edge or level triggered INT.
 *  Level-triggered INTs must still be masked as we don't clear the source,
 *  and the EOI cycle would cause redundant INTs to occur.
 */
#define MASK_LEVEL_IRQ(irq_num)						\
	testl	$IRQ_BIT(irq_num), _apic_pin_trigger ;			\
	jz	9f ;				/* edge, don't mask */	\
	MASK_IRQ(irq_num) ;						\
9:


#ifdef APIC_INTR_REORDER
#define EOI_IRQ(irq_num)						\
	movl	_apic_isrbit_location + 8 * (irq_num), %eax ;		\
	movl	(%eax), %eax ;						\
	testl	_apic_isrbit_location + 4 + 8 * (irq_num), %eax ;	\
	jz	9f ;				/* not active */	\
	movl	$0, lapic_eoi ;						\
	APIC_ITRACE(apic_itrace_eoi, irq_num, APIC_ITRACE_EOI) ;	\
9:

#else
#define EOI_IRQ(irq_num)						\
	testl	$IRQ_BIT(irq_num), lapic_isr1;				\
	jz	9f	;			/* not active */	\
	movl	$0, lapic_eoi;						\
	APIC_ITRACE(apic_itrace_eoi, irq_num, APIC_ITRACE_EOI) ;	\
9:
#endif


/*
 * Test to see if the source is currently masked, and clear the mask if so.
 */
#define UNMASK_IRQ(irq_num)						\
	IMASK_LOCK ;				/* into critical reg */	\
	testl	$IRQ_BIT(irq_num), _apic_imen ;				\
	je	7f ;			/* bit clear, not masked */	\
	andl	$~IRQ_BIT(irq_num), _apic_imen ;/* clear mask bit */	\
	movl	_ioapic,%ecx ;			/* ioapic[0] addr */	\
	movl	$REDTBL_IDX(irq_num),(%ecx) ;	/* write the index */	\
	movl	IOAPIC_WINDOW(%ecx),%eax ;	/* current value */	\
	andl	$~IOART_INTMASK,%eax ;		/* clear the mask */	\
	movl	%eax,IOAPIC_WINDOW(%ecx) ;	/* new value */		\
7: ;									\
	IMASK_UNLOCK
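
/*
 * The I/O APIC is addressed indirectly: a register-select write at the
 * base address, then a data access through the IOAPIC_WINDOW offset.
 * The select/window pair is not atomic, which is why MASK_IRQ and
 * UNMASK_IRQ both run under IMASK_LOCK.
 */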

#ifdef INTR_SIMPLELOCK
#define ENLOCK
#define DELOCK
#define LATELOCK call	_get_isrlock
#else
#define ENLOCK \
	ISR_TRYLOCK ;		/* XXX this is going away... */		\
	testl	%eax, %eax ;			/* did we get it? */	\
	jz	3f
#define DELOCK	ISR_RELLOCK
#define LATELOCK
#endif
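
/*
 * With INTR_SIMPLELOCK the ISR lock is taken late (LATELOCK, just before
 * _doreti); otherwise ENLOCK try-locks it on entry and a failure branches
 * to 3:, which forwards the interrupt to the lock holder.
 */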

#ifdef APIC_INTR_DIAGNOSTIC
#ifdef APIC_INTR_DIAGNOSTIC_IRQ
log_intr_event:
	pushf
	cli
	pushl	$CNAME(apic_itrace_debuglock)
	call	_s_lock_np
	addl	$4, %esp
	movl	CNAME(apic_itrace_debugbuffer_idx), %ecx
	andl	$32767, %ecx
	movl	_cpuid, %eax
	shll	$8,	%eax
	orl	8(%esp), %eax
	movw	%ax,	CNAME(apic_itrace_debugbuffer)(,%ecx,2)
	incl	%ecx
	andl	$32767, %ecx
	movl	%ecx,	CNAME(apic_itrace_debugbuffer_idx)
	pushl	$CNAME(apic_itrace_debuglock)
	call	_s_unlock_np
	addl	$4, %esp
	popf
	ret


#define APIC_ITRACE(name, irq_num, id)					\
	lock ;					/* MP-safe */		\
	incl	CNAME(name) + (irq_num) * 4 ;				\
	pushl	%eax ;							\
	pushl	%ecx ;							\
	pushl	%edx ;							\
	movl	$(irq_num), %eax ;					\
	cmpl	$APIC_INTR_DIAGNOSTIC_IRQ, %eax ;			\
	jne	7f ;							\
	pushl	$id ;							\
	call	log_intr_event ;					\
	addl	$4, %esp ;						\
7: ;									\
	popl	%edx ;							\
	popl	%ecx ;							\
	popl	%eax
#else
#define APIC_ITRACE(name, irq_num, id)					\
	lock ;					/* MP-safe */		\
	incl	CNAME(name) + (irq_num) * 4
#endif

#define APIC_ITRACE_ENTER 1
#define APIC_ITRACE_EOI 2
#define APIC_ITRACE_TRYISRLOCK 3
#define APIC_ITRACE_GOTISRLOCK 4
#define APIC_ITRACE_ENTER2 5
#define APIC_ITRACE_LEAVE 6
#define APIC_ITRACE_UNMASK 7
#define APIC_ITRACE_ACTIVE 8
#define APIC_ITRACE_MASKED 9
#define APIC_ITRACE_NOISRLOCK 10
#define APIC_ITRACE_MASKED2 11
#define APIC_ITRACE_SPLZ 12
#define APIC_ITRACE_DORETI 13

#else
#define APIC_ITRACE(name, irq_num, id)
#endif
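
/*
 * Each log_intr_event record is a 16-bit word, (cpuid << 8) | event id,
 * stored in the 32768-entry apic_itrace_debugbuffer ring.
 */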

#ifdef CPL_AND_CML

#define	INTR(irq_num, vec_name)						\
	.text ;								\
	SUPERALIGN_TEXT ;						\
/* _XintrNN: entry point used by IDT/HWIs & splz_unpend via _vec[]. */	\
IDTVEC(vec_name) ;							\
	PUSH_FRAME ;							\
	movl	$KDSEL, %eax ;	/* reload with kernel's data segment */	\
	movl	%ax, %ds ;						\
	movl	%ax, %es ;						\
;									\
	APIC_ITRACE(apic_itrace_enter, irq_num, APIC_ITRACE_ENTER) ;	\
	lock ;					/* MP-safe */		\
	btsl	$(irq_num), iactive ;		/* lazy masking */	\
	jc	1f ;				/* already active */	\
;									\
	MASK_LEVEL_IRQ(irq_num) ;					\
	EOI_IRQ(irq_num) ;						\
0: ;									\
	APIC_ITRACE(apic_itrace_tryisrlock, irq_num, APIC_ITRACE_TRYISRLOCK) ;\
	ENLOCK ;							\
;									\
	APIC_ITRACE(apic_itrace_gotisrlock, irq_num, APIC_ITRACE_GOTISRLOCK) ;\
	AVCPL_LOCK ;				/* MP-safe */		\
	testl	$IRQ_BIT(irq_num), _cpl ;				\
	jne	2f ;				/* this INT masked */	\
	testl	$IRQ_BIT(irq_num), _cml ;				\
	jne	2f ;				/* this INT masked */	\
	orl	$IRQ_BIT(irq_num), _cil ;				\
	AVCPL_UNLOCK ;							\
;									\
	incb	_intr_nesting_level ;					\
;									\
  /* entry point used by doreti_unpend for HWIs. */			\
__CONCAT(Xresume,irq_num): ;						\
	FAKE_MCOUNT(12*4(%esp)) ;		/* XXX avoid dbl cnt */ \
	lock ;	incl	_cnt+V_INTR ;		/* tally interrupts */	\
	movl	_intr_countp + (irq_num) * 4, %eax ;			\
	lock ;	incl	(%eax) ;					\
;									\
	AVCPL_LOCK ;				/* MP-safe */		\
	movl	_cml, %eax ;						\
	pushl	%eax ;							\
	orl	_intr_mask + (irq_num) * 4, %eax ;			\
	movl	%eax, _cml ;						\
	AVCPL_UNLOCK ;							\
;									\
	pushl	_intr_unit + (irq_num) * 4 ;				\
	incl	_inside_intr ;						\
	APIC_ITRACE(apic_itrace_enter2, irq_num, APIC_ITRACE_ENTER2) ;	\
	sti ;								\
	call	*_intr_handler + (irq_num) * 4 ;			\
	cli ;								\
	APIC_ITRACE(apic_itrace_leave, irq_num, APIC_ITRACE_LEAVE) ;	\
	decl	_inside_intr ;						\
;									\
	lock ;	andl $~IRQ_BIT(irq_num), iactive ;			\
	lock ;	andl $~IRQ_BIT(irq_num), _cil ;				\
	UNMASK_IRQ(irq_num) ;						\
	APIC_ITRACE(apic_itrace_unmask, irq_num, APIC_ITRACE_UNMASK) ;	\
	sti ;				/* doreti repeats cli/sti */	\
	MEXITCOUNT ;							\
	LATELOCK ;							\
	jmp	_doreti ;						\
;									\
	ALIGN_TEXT ;							\
1: ;						/* active */		\
	APIC_ITRACE(apic_itrace_active, irq_num, APIC_ITRACE_ACTIVE) ;	\
	MASK_IRQ(irq_num) ;						\
	EOI_IRQ(irq_num) ;						\
	AVCPL_LOCK ;				/* MP-safe */		\
	orl	$IRQ_BIT(irq_num), _ipending ;				\
	AVCPL_UNLOCK ;							\
	lock ;								\
	btsl	$(irq_num), iactive ;		/* still active */	\
	jnc	0b ;				/* retry */		\
	POP_FRAME ;							\
	iret ;								\
;									\
	ALIGN_TEXT ;							\
2: ;						/* masked by cpl|cml */	\
	APIC_ITRACE(apic_itrace_masked, irq_num, APIC_ITRACE_MASKED) ;	\
	orl	$IRQ_BIT(irq_num), _ipending ;				\
	AVCPL_UNLOCK ;							\
	DELOCK ;		/* XXX this is going away... */		\
	POP_FRAME ;							\
	iret ;								\
	ALIGN_TEXT ;							\
3: ; 			/* other cpu has isr lock */			\
	APIC_ITRACE(apic_itrace_noisrlock, irq_num, APIC_ITRACE_NOISRLOCK) ;\
	AVCPL_LOCK ;				/* MP-safe */		\
	orl	$IRQ_BIT(irq_num), _ipending ;				\
	testl	$IRQ_BIT(irq_num), _cpl ;				\
	jne	4f ;				/* this INT masked */	\
	testl	$IRQ_BIT(irq_num), _cml ;				\
	jne	4f ;				/* this INT masked */	\
	orl	$IRQ_BIT(irq_num), _cil ;				\
	AVCPL_UNLOCK ;							\
	call	forward_irq ;	/* forward irq to lock holder */	\
	POP_FRAME ;	 			/* and return */	\
	iret ;								\
	ALIGN_TEXT ;							\
4: ;	 					/* blocked */		\
	APIC_ITRACE(apic_itrace_masked2, irq_num, APIC_ITRACE_MASKED2) ;\
	AVCPL_UNLOCK ;							\
	POP_FRAME ;	 			/* and return */	\
	iret

#else /* CPL_AND_CML */


#define	INTR(irq_num, vec_name)						\
	.text ;								\
	SUPERALIGN_TEXT ;						\
/* _XintrNN: entry point used by IDT/HWIs & splz_unpend via _vec[]. */	\
IDTVEC(vec_name) ;							\
	PUSH_FRAME ;							\
	movl	$KDSEL, %eax ;	/* reload with kernel's data segment */	\
	movl	%ax, %ds ;						\
	movl	%ax, %es ;						\
;									\
	APIC_ITRACE(apic_itrace_enter, irq_num, APIC_ITRACE_ENTER) ;	\
	lock ;					/* MP-safe */		\
	btsl	$(irq_num), iactive ;		/* lazy masking */	\
	jc	1f ;				/* already active */	\
;									\
	MASK_LEVEL_IRQ(irq_num) ;					\
	EOI_IRQ(irq_num) ;						\
0: ;									\
	APIC_ITRACE(apic_itrace_tryisrlock, irq_num, APIC_ITRACE_TRYISRLOCK) ;\
	ISR_TRYLOCK ;		/* XXX this is going away... */		\
	testl	%eax, %eax ;			/* did we get it? */	\
	jz	3f ;				/* no */		\
;									\
	APIC_ITRACE(apic_itrace_gotisrlock, irq_num, APIC_ITRACE_GOTISRLOCK) ;\
	AVCPL_LOCK ;				/* MP-safe */		\
	testl	$IRQ_BIT(irq_num), _cpl ;				\
	jne	2f ;				/* this INT masked */	\
	AVCPL_UNLOCK ;							\
;									\
	incb	_intr_nesting_level ;					\
;									\
  /* entry point used by doreti_unpend for HWIs. */			\
__CONCAT(Xresume,irq_num): ;						\
	FAKE_MCOUNT(12*4(%esp)) ;		/* XXX avoid dbl cnt */ \
	lock ;	incl	_cnt+V_INTR ;		/* tally interrupts */	\
	movl	_intr_countp + (irq_num) * 4, %eax ;			\
	lock ;	incl	(%eax) ;					\
;									\
	AVCPL_LOCK ;				/* MP-safe */		\
	movl	_cpl, %eax ;						\
	pushl	%eax ;							\
	orl	_intr_mask + (irq_num) * 4, %eax ;			\
	movl	%eax, _cpl ;						\
	andl	$~IRQ_BIT(irq_num), _ipending ;				\
	AVCPL_UNLOCK ;							\
;									\
	pushl	_intr_unit + (irq_num) * 4 ;				\
	APIC_ITRACE(apic_itrace_enter2, irq_num, APIC_ITRACE_ENTER2) ;	\
	sti ;								\
	call	*_intr_handler + (irq_num) * 4 ;			\
	cli ;								\
	APIC_ITRACE(apic_itrace_leave, irq_num, APIC_ITRACE_LEAVE) ;	\
;									\
	lock ;	andl	$~IRQ_BIT(irq_num), iactive ;			\
	UNMASK_IRQ(irq_num) ;						\
	APIC_ITRACE(apic_itrace_unmask, irq_num, APIC_ITRACE_UNMASK) ;	\
	sti ;				/* doreti repeats cli/sti */	\
	MEXITCOUNT ;							\
	jmp	_doreti ;						\
;									\
	ALIGN_TEXT ;							\
1: ;						/* active */		\
	APIC_ITRACE(apic_itrace_active, irq_num, APIC_ITRACE_ACTIVE) ;	\
	MASK_IRQ(irq_num) ;						\
	EOI_IRQ(irq_num) ;						\
	AVCPL_LOCK ;				/* MP-safe */		\
	orl	$IRQ_BIT(irq_num), _ipending ;				\
	AVCPL_UNLOCK ;							\
	lock ;								\
	btsl	$(irq_num), iactive ;		/* still active */	\
	jnc	0b ;				/* retry */		\
	POP_FRAME ;							\
	iret ;		/* XXX:	 iactive bit might be 0 now */		\
	ALIGN_TEXT ;							\
2: ;				/* masked by cpl, leave iactive set */	\
	APIC_ITRACE(apic_itrace_masked, irq_num, APIC_ITRACE_MASKED) ;	\
	orl	$IRQ_BIT(irq_num), _ipending ;				\
	AVCPL_UNLOCK ;							\
	ISR_RELLOCK ;		/* XXX this is going away... */		\
	POP_FRAME ;							\
	iret ;								\
	ALIGN_TEXT ;							\
3: ; 			/* other cpu has isr lock */			\
	APIC_ITRACE(apic_itrace_noisrlock, irq_num, APIC_ITRACE_NOISRLOCK) ;\
	AVCPL_LOCK ;				/* MP-safe */		\
	orl	$IRQ_BIT(irq_num), _ipending ;				\
	testl	$IRQ_BIT(irq_num), _cpl ;				\
	jne	4f ;				/* this INT masked */	\
	AVCPL_UNLOCK ;							\
	call	forward_irq ;	 /* forward irq to lock holder */	\
	POP_FRAME ;	 			/* and return */	\
	iret ;								\
	ALIGN_TEXT ;							\
4: ;	 					/* blocked */		\
	APIC_ITRACE(apic_itrace_masked2, irq_num, APIC_ITRACE_MASKED2) ;\
	AVCPL_UNLOCK ;							\
	POP_FRAME ;	 			/* and return */	\
	iret

#endif /* CPL_AND_CML */
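
/*
 * Both INTR flavors share the same lazy-masking protocol: the iactive bit
 * is set on entry, and if it was already set the pin is masked and EOIed
 * and the IRQ is posted in _ipending for doreti/splz to replay later.
 */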


/*
 * Handle "spurious INTerrupts".
 * Notes:
 *  This is different from the "spurious INTerrupt" generated by an
 *   8259 PIC for missing INTs.  See the APIC documentation for details.
 *  This routine should NOT do an 'EOI' cycle.
 */
	.text
	SUPERALIGN_TEXT
	.globl _Xspuriousint
_Xspuriousint:

	/* No EOI cycle used here */

	iret


/*
 * Handle TLB shootdowns.
 */
	.text
	SUPERALIGN_TEXT
	.globl	_Xinvltlb
_Xinvltlb:
	pushl	%eax

#ifdef COUNT_XINVLTLB_HITS
	ss
	movl	_cpuid, %eax
	ss
	incl	_xhits(,%eax,4)
#endif /* COUNT_XINVLTLB_HITS */

	movl	%cr3, %eax		/* invalidate the TLB */
	movl	%eax, %cr3

	ss				/* stack segment, avoid %ds load */
	movl	$0, lapic_eoi		/* End Of Interrupt to APIC */

	popl	%eax
	iret
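
/*
 * _Xinvltlb never loads %ds; the ss: overrides above let it touch kernel
 * data through the known-good kernel stack segment, keeping the shootdown
 * path as short as possible.
 */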


#ifdef BETTER_CLOCK

/*
 * Executed by a CPU when it receives an Xcpucheckstate IPI from another CPU.
 *
 *  - Stores current cpu state in checkstate_cpustate[cpuid]
 *      0 == user, 1 == sys, 2 == intr
 *  - Stores current process in checkstate_curproc[cpuid]
 *
 *  - Signals its receipt by setting bit cpuid in checkstate_probed_cpus.
 *
 * stack: 0 -> ds, 4 -> ebx, 8 -> eax, 12 -> eip, 16 -> cs, 20 -> eflags
 */

	.text
	SUPERALIGN_TEXT
	.globl _Xcpucheckstate
	.globl _checkstate_cpustate
	.globl _checkstate_curproc
	.globl _checkstate_pc
_Xcpucheckstate:
	pushl	%eax
	pushl	%ebx
	pushl	%ds			/* save current data segment */

	movl	$KDSEL, %eax
	movl	%ax, %ds		/* use KERNEL data segment */

	movl	$0, lapic_eoi		/* End Of Interrupt to APIC */

	movl	$0, %ebx
	movl	16(%esp), %eax
	andl	$3, %eax
	cmpl	$3, %eax
	je	1f
#ifdef VM86
	testl	$PSL_VM, 20(%esp)
	jne	1f
#endif
	incl	%ebx			/* system or interrupt */
#ifdef CPL_AND_CML
	cmpl	$0, _inside_intr
	je	1f
	incl	%ebx			/* interrupt */
#endif
1:
	movl	_cpuid, %eax
	movl	%ebx, _checkstate_cpustate(,%eax,4)
	movl	_curproc, %ebx
	movl	%ebx, _checkstate_curproc(,%eax,4)
	movl	12(%esp), %ebx
	movl	%ebx, _checkstate_pc(,%eax,4)

	lock				/* checkstate_probed_cpus |= (1<<id) */
	btsl	%eax, _checkstate_probed_cpus

	popl	%ds			/* restore previous data segment */
	popl	%ebx
	popl	%eax
	iret

#endif /* BETTER_CLOCK */

/*
 * Executed by a CPU when it receives an Xcpuast IPI from another CPU.
 *
 *  - Signals its receipt by clearing bit cpuid in checkstate_need_ast.
 *
 *  - We need a better method of triggering asts on other cpus.
 */

	.text
	SUPERALIGN_TEXT
	.globl _Xcpuast
_Xcpuast:
	PUSH_FRAME
	movl	$KDSEL, %eax
	movl	%ax, %ds		/* use KERNEL data segment */
	movl	%ax, %es

	movl	_cpuid, %eax
	lock				/* checkstate_need_ast &= ~(1<<id) */
	btrl	%eax, _checkstate_need_ast
	movl	$0, lapic_eoi		/* End Of Interrupt to APIC */

	lock
	btsl	%eax, _checkstate_pending_ast
	jc	1f

	FAKE_MCOUNT(12*4(%esp))

	/*
	 * Giant locks do not come cheap.
	 * A lot of cycles are going to be wasted here.
	 */
	call	_get_isrlock

	AVCPL_LOCK
#ifdef CPL_AND_CML
	movl	_cml, %eax
#else
	movl	_cpl, %eax
#endif
	pushl	%eax
	orl	$SWI_AST_PENDING, _ipending
	AVCPL_UNLOCK
	lock
	incb	_intr_nesting_level
	sti

	pushl	$0

	movl	_cpuid, %eax
	lock
	btrl	%eax, _checkstate_pending_ast
	lock
	btrl	%eax, CNAME(resched_cpus)
	jz	2f
	movl	$1, CNAME(want_resched)
	lock
	incl	CNAME(want_resched_cnt)
2:
	lock
	incl	CNAME(cpuast_cnt)
	MEXITCOUNT
	jmp	_doreti
1:
	/* We are already in the process of delivering an ast for this CPU */
	POP_FRAME
	iret
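
/*
 * The AST itself is delivered by posting SWI_AST_PENDING in _ipending and
 * leaving through _doreti, the same path a local interrupt would take.
 */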


/*
 * Executed by a CPU when it receives an XFORWARD_IRQ IPI.
 */

	.text
	SUPERALIGN_TEXT
	.globl _Xforward_irq
_Xforward_irq:
	PUSH_FRAME
	movl	$KDSEL, %eax
	movl	%ax, %ds		/* use KERNEL data segment */
	movl	%ax, %es

	movl	$0, lapic_eoi		/* End Of Interrupt to APIC */

	FAKE_MCOUNT(12*4(%esp))

	ISR_TRYLOCK
	testl	%eax,%eax		/* Did we get the lock? */
	jz	1f			/* No */

	lock
	incl	CNAME(forward_irq_hitcnt)
	cmpb	$4, _intr_nesting_level
	jae	2f

	AVCPL_LOCK
#ifdef CPL_AND_CML
	movl	_cml, %eax
#else
	movl	_cpl, %eax
#endif
	pushl	%eax
	AVCPL_UNLOCK
	lock
	incb	_intr_nesting_level
	sti

	pushl	$0

	MEXITCOUNT
	jmp	_doreti			/* Handle forwarded interrupt */
1:
	lock
	incl	CNAME(forward_irq_misscnt)
	call	forward_irq	/* Oops, we've lost the isr lock */
	MEXITCOUNT
	POP_FRAME
	iret
2:
	lock
	incl	CNAME(forward_irq_toodeepcnt)
3:
	ISR_RELLOCK
	MEXITCOUNT
	POP_FRAME
	iret

/*
 * Forward a pending interrupt to whichever CPU holds the giant lock
 * (CPU #0 if the lock is free), using a fixed-mode APIC IPI.
 */
forward_irq:
	MCOUNT
	cmpl	$0,_invltlb_ok
	jz	4f

	cmpl	$0, CNAME(forward_irq_enabled)
	jz	4f

	movl	_mp_lock,%eax
	cmpl	$FREE_LOCK,%eax
	jne	1f
	movl	$0, %eax		/* Pick CPU #0 if no one has the lock */
1:
	shrl	$24,%eax
	movl	_cpu_num_to_apic_id(,%eax,4),%ecx
	shll	$24,%ecx
	movl	lapic_icr_hi, %eax
	andl	$~APIC_ID_MASK, %eax
	orl	%ecx, %eax
	movl	%eax, lapic_icr_hi

2:
	movl	lapic_icr_lo, %eax
	andl	$APIC_DELSTAT_MASK,%eax
	jnz	2b
	movl	lapic_icr_lo, %eax
	andl	$APIC_RESV2_MASK, %eax
	orl	$(APIC_DEST_DESTFLD|APIC_DELMODE_FIXED|XFORWARD_IRQ_OFFSET), %eax
	movl	%eax, lapic_icr_lo
3:
	movl	lapic_icr_lo, %eax
	andl	$APIC_DELSTAT_MASK,%eax
	jnz	3b
4:
	ret
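
/*
 * The IPI send above follows the local APIC ICR protocol: write the
 * destination APIC id into ICR_HI, spin until the delivery-status bit in
 * ICR_LO clears, write the fixed-mode vector, then wait for the delivery
 * to be accepted.
 */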

/*
 * Executed by a CPU when it receives an Xcpustop IPI from another CPU.
 *
 *  - Signals its receipt.
 *  - Waits for permission to restart.
 *  - Signals its restart.
 */

	.text
	SUPERALIGN_TEXT
	.globl _Xcpustop
_Xcpustop:
	pushl	%ebp
	movl	%esp, %ebp
	pushl	%eax
	pushl	%ecx
	pushl	%edx
	pushl	%ds			/* save current data segment */
	pushl	%es

	movl	$KDSEL, %eax
	movl	%ax, %ds		/* use KERNEL data segment */

	movl	$0, lapic_eoi		/* End Of Interrupt to APIC */

	movl	_cpuid, %eax
	imull	$PCB_SIZE, %eax
	leal	CNAME(stoppcbs)(%eax), %eax
	pushl	%eax
	call	CNAME(savectx)		/* Save process context */
	addl	$4, %esp


	movl	_cpuid, %eax

	lock
	btsl	%eax, _stopped_cpus	/* stopped_cpus |= (1<<id) */
1:
	btl	%eax, _started_cpus	/* while (!(started_cpus & (1<<id))) */
	jnc	1b

	lock
	btrl	%eax, _started_cpus	/* started_cpus &= ~(1<<id) */
	lock
	btrl	%eax, _stopped_cpus	/* stopped_cpus &= ~(1<<id) */

	test	%eax, %eax
	jnz	2f

	movl	CNAME(cpustop_restartfunc), %eax
	test	%eax, %eax
	jz	2f
	movl	$0, CNAME(cpustop_restartfunc)	/* One-shot */

	call	*%eax
2:
	popl	%es
	popl	%ds			/* restore previous data segment */
	popl	%edx
	popl	%ecx
	popl	%eax
	movl	%ebp, %esp
	popl	%ebp
	iret
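
/*
 * Note that only CPU #0 reaches the cpustop_restartfunc call above, and
 * the pointer is cleared before the call so the function runs exactly
 * once per restart.
 */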


MCOUNT_LABEL(bintr)
	FAST_INTR(0,fastintr0)
	FAST_INTR(1,fastintr1)
	FAST_INTR(2,fastintr2)
	FAST_INTR(3,fastintr3)
	FAST_INTR(4,fastintr4)
	FAST_INTR(5,fastintr5)
	FAST_INTR(6,fastintr6)
	FAST_INTR(7,fastintr7)
	FAST_INTR(8,fastintr8)
	FAST_INTR(9,fastintr9)
	FAST_INTR(10,fastintr10)
	FAST_INTR(11,fastintr11)
	FAST_INTR(12,fastintr12)
	FAST_INTR(13,fastintr13)
	FAST_INTR(14,fastintr14)
	FAST_INTR(15,fastintr15)
	FAST_INTR(16,fastintr16)
	FAST_INTR(17,fastintr17)
	FAST_INTR(18,fastintr18)
	FAST_INTR(19,fastintr19)
	FAST_INTR(20,fastintr20)
	FAST_INTR(21,fastintr21)
	FAST_INTR(22,fastintr22)
	FAST_INTR(23,fastintr23)
	INTR(0,intr0)
	INTR(1,intr1)
	INTR(2,intr2)
	INTR(3,intr3)
	INTR(4,intr4)
	INTR(5,intr5)
	INTR(6,intr6)
	INTR(7,intr7)
	INTR(8,intr8)
	INTR(9,intr9)
	INTR(10,intr10)
	INTR(11,intr11)
	INTR(12,intr12)
	INTR(13,intr13)
	INTR(14,intr14)
	INTR(15,intr15)
	INTR(16,intr16)
	INTR(17,intr17)
	INTR(18,intr18)
	INTR(19,intr19)
	INTR(20,intr20)
	INTR(21,intr21)
	INTR(22,intr22)
	INTR(23,intr23)
MCOUNT_LABEL(eintr)

	.data
/*
 * Addresses of interrupt handlers.
 *  XresumeNN: Resumption addresses for HWIs.
 */
	.globl _ihandlers
_ihandlers:
/*
 * used by:
 *  ipl.s:	doreti_unpend
 */
	.long	Xresume0,  Xresume1,  Xresume2,  Xresume3
	.long	Xresume4,  Xresume5,  Xresume6,  Xresume7
	.long	Xresume8,  Xresume9,  Xresume10, Xresume11
	.long	Xresume12, Xresume13, Xresume14, Xresume15
	.long	Xresume16, Xresume17, Xresume18, Xresume19
	.long	Xresume20, Xresume21, Xresume22, Xresume23
/*
 * used by:
 *  ipl.s:	doreti_unpend
 *  apic_ipl.s:	splz_unpend
 */
	.long	_swi_null, swi_net, _swi_null, _swi_null
	.long	_swi_vm, _swi_null, _softclock, swi_ast

imasks:				/* masks for interrupt handlers */
	.space	NHWI*4		/* padding; HWI masks are elsewhere */

	.long	SWI_TTY_MASK, SWI_NET_MASK, SWI_CAMNET_MASK, SWI_CAMBIO_MASK
	.long	SWI_VM_MASK, 0, SWI_CLOCK_MASK, SWI_AST_MASK

/* active flag for lazy masking */
iactive:
	.long	0

#ifdef COUNT_XINVLTLB_HITS
	.globl	_xhits
_xhits:
	.space	(NCPU * 4), 0
#endif /* COUNT_XINVLTLB_HITS */

/* variables used by stop_cpus()/restart_cpus()/Xcpustop */
	.globl _stopped_cpus, _started_cpus
_stopped_cpus:
	.long	0
_started_cpus:
	.long	0

#ifdef BETTER_CLOCK
	.globl _checkstate_probed_cpus
_checkstate_probed_cpus:
	.long	0
#endif /* BETTER_CLOCK */
	.globl _checkstate_need_ast
_checkstate_need_ast:
	.long	0
_checkstate_pending_ast:
	.long	0
	.globl CNAME(forward_irq_misscnt)
	.globl CNAME(forward_irq_toodeepcnt)
	.globl CNAME(forward_irq_hitcnt)
	.globl CNAME(resched_cpus)
	.globl CNAME(want_resched_cnt)
	.globl CNAME(cpuast_cnt)
	.globl CNAME(cpustop_restartfunc)
CNAME(forward_irq_misscnt):
	.long 0
CNAME(forward_irq_hitcnt):
	.long 0
CNAME(forward_irq_toodeepcnt):
	.long 0
CNAME(resched_cpus):
	.long 0
CNAME(want_resched_cnt):
	.long 0
CNAME(cpuast_cnt):
	.long 0
CNAME(cpustop_restartfunc):
	.long 0



	.globl	_apic_pin_trigger
_apic_pin_trigger:
	.space	(NAPIC * 4), 0


/*
 * Interrupt counters and names.  The format of these and the label names
 * must agree with what vmstat expects.  The tables are indexed by device
 * ids so that we don't have to move the names around as devices are
 * attached.
 */
#include "vector.h"
	.globl	_intrcnt, _eintrcnt
_intrcnt:
	.space	(NR_DEVICES + ICU_LEN) * 4
_eintrcnt:

	.globl	_intrnames, _eintrnames
_intrnames:
	.ascii	DEVICE_NAMES
	.asciz	"stray irq0"
	.asciz	"stray irq1"
	.asciz	"stray irq2"
	.asciz	"stray irq3"
	.asciz	"stray irq4"
	.asciz	"stray irq5"
	.asciz	"stray irq6"
	.asciz	"stray irq7"
	.asciz	"stray irq8"
	.asciz	"stray irq9"
	.asciz	"stray irq10"
	.asciz	"stray irq11"
	.asciz	"stray irq12"
	.asciz	"stray irq13"
	.asciz	"stray irq14"
	.asciz	"stray irq15"
	.asciz	"stray irq16"
	.asciz	"stray irq17"
	.asciz	"stray irq18"
	.asciz	"stray irq19"
	.asciz	"stray irq20"
	.asciz	"stray irq21"
	.asciz	"stray irq22"
	.asciz	"stray irq23"
_eintrnames:

	.text