/*
 *	from: vector.s, 386BSD 0.1 unknown origin
 * $FreeBSD: head/sys/i386/i386/apic_vector.s 75393 2001-04-10 21:34:13Z jhb $
 */


#include <machine/apic.h>
#include <machine/smp.h>

#include "i386/isa/intr_machdep.h"

/* convert an absolute IRQ# into a bitmask */
#define IRQ_BIT(irq_num)	(1 << (irq_num))
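/* For instance, IRQ_BIT(9) expands to (1 << 9), i.e. 0x00000200. */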

/* make an index into the IO APIC from the IRQ# */
#define REDTBL_IDX(irq_num)	(0x10 + ((irq_num) * 2))
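/*
 * Each 64-bit redirection table entry occupies two 32-bit I/O APIC
 * registers starting at index 0x10, so e.g. REDTBL_IDX(2) == 0x14.
 */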

/*
 * Macros to build and tear down the common interrupt trap frame.
 */
#define PUSH_FRAME							\
	pushl	$0 ;		/* dummy error code */			\
	pushl	$0 ;		/* dummy trap type */			\
	pushal ;							\
	pushl	%ds ;		/* save data and extra segments ... */	\
	pushl	%es ;							\
	pushl	%fs

#define POP_FRAME							\
	popl	%fs ;							\
	popl	%es ;							\
	popl	%ds ;							\
	popal ;								\
	addl	$4+4,%esp
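
/*
 * After PUSH_FRAME the stack holds, from 0(%esp) upward: %fs, %es, %ds,
 * the eight pushal registers, the two dummy words, and then the hardware
 * iret frame, so 13*4(%esp) in the handlers below is the saved %eip.
 * POP_FRAME unwinds the same thirteen words.
 */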

/*
 * Macros for interrupt entry, call to handler, and exit.
 */

#define	FAST_INTR(irq_num, vec_name)					\
	.text ;								\
	SUPERALIGN_TEXT ;						\
IDTVEC(vec_name) ;							\
	PUSH_FRAME ;							\
	movl	$KDSEL,%eax ;						\
	mov	%ax,%ds ;						\
	mov	%ax,%es ;						\
	movl	$KPSEL,%eax ;						\
	mov	%ax,%fs ;						\
	FAKE_MCOUNT(13*4(%esp)) ;					\
	movl	PCPU(CURPROC),%ebx ;					\
	incl	P_INTR_NESTING_LEVEL(%ebx) ;				\
	pushl	intr_unit + (irq_num) * 4 ;				\
	call	*intr_handler + (irq_num) * 4 ; /* do the work ASAP */ \
	addl	$4, %esp ;						\
	movl	$0, lapic+LA_EOI ;					\
	lock ; 								\
	incl	cnt+V_INTR ;	/* book-keeping can wait */		\
	movl	intr_countp + (irq_num) * 4, %eax ;			\
	lock ; 								\
	incl	(%eax) ;						\
	decl	P_INTR_NESTING_LEVEL(%ebx) ;				\
	MEXITCOUNT ;							\
	jmp	doreti
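
/*
 * In rough C, FAST_INTR(n) amounts to the following sketch (illustrative
 * only; the array and counter names are the ones used above):
 *
 *	curproc->p_intr_nesting_level++;
 *	intr_handler[n](intr_unit[n]);
 *	lapic.eoi = 0;
 *	cnt.v_intr++;
 *	(*intr_countp[n])++;
 *	curproc->p_intr_nesting_level--;
 *	... exit via doreti ...
 */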

#define IOAPICADDR(irq_num) CNAME(int_to_apicintpin) + 16 * (irq_num) + 8
#define REDIRIDX(irq_num) CNAME(int_to_apicintpin) + 16 * (irq_num) + 12

#define MASK_IRQ(irq_num)						\
	IMASK_LOCK ;				/* into critical reg */	\
	testl	$IRQ_BIT(irq_num), apic_imen ;				\
	jne	7f ;			/* masked, don't mask */	\
	orl	$IRQ_BIT(irq_num), apic_imen ;	/* set the mask bit */	\
	movl	IOAPICADDR(irq_num), %ecx ;	/* ioapic addr */	\
	movl	REDIRIDX(irq_num), %eax ;	/* get the index */	\
	movl	%eax, (%ecx) ;			/* write the index */	\
	movl	IOAPIC_WINDOW(%ecx), %eax ;	/* current value */	\
	orl	$IOART_INTMASK, %eax ;		/* set the mask */	\
	movl	%eax, IOAPIC_WINDOW(%ecx) ;	/* new value */		\
7: ;						/* already masked */	\
	IMASK_UNLOCK
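/*
 * In rough C (illustrative only; "index" and "window" stand for the
 * indirect I/O APIC register pair used above):
 *
 *	if ((apic_imen & IRQ_BIT(n)) == 0) {
 *		apic_imen |= IRQ_BIT(n);
 *		ioapic->index = REDIRIDX(n);
 *		ioapic->window |= IOART_INTMASK;
 *	}
 *
 * UNMASK_IRQ further below is the exact inverse.
 */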
/*
 * Test to see whether we are handling an edge or level triggered INT.
 *  Level-triggered INTs must still be masked as we don't clear the source,
 *  and the EOI cycle would cause redundant INTs to occur.
 */
#define MASK_LEVEL_IRQ(irq_num)						\
	testl	$IRQ_BIT(irq_num), apic_pin_trigger ;			\
	jz	9f ;				/* edge, don't mask */	\
	MASK_IRQ(irq_num) ;						\
9:


#ifdef APIC_INTR_REORDER
#define EOI_IRQ(irq_num)						\
	movl	apic_isrbit_location + 8 * (irq_num), %eax ;		\
	movl	(%eax), %eax ;						\
	testl	apic_isrbit_location + 4 + 8 * (irq_num), %eax ;	\
	jz	9f ;				/* not active */	\
	movl	$0, lapic+LA_EOI ;					\
9:

#else
#define EOI_IRQ(irq_num)						\
	testl	$IRQ_BIT(irq_num), lapic+LA_ISR1;			\
	jz	9f	;			/* not active */	\
	movl	$0, lapic+LA_EOI;					\
9:
#endif
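
/*
 * Both variants send the EOI only if the source's bit is still set in
 * the local APIC in-service register; the APIC_INTR_REORDER variant
 * reads that bit through a precomputed location/mask pair instead of
 * indexing the ISR directly.
 */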


/*
 * Test to see if the source is currently masked, clear if so.
 */
#define UNMASK_IRQ(irq_num)					\
	IMASK_LOCK ;				/* into critical reg */	\
	testl	$IRQ_BIT(irq_num), apic_imen ;				\
	je	7f ;			/* bit clear, not masked */	\
	andl	$~IRQ_BIT(irq_num), apic_imen ;	/* clear mask bit */	\
	movl	IOAPICADDR(irq_num), %ecx ;	/* ioapic addr */	\
	movl	REDIRIDX(irq_num), %eax ;	/* get the index */	\
	movl	%eax, (%ecx) ;			/* write the index */	\
	movl	IOAPIC_WINDOW(%ecx), %eax ;	/* current value */	\
	andl	$~IOART_INTMASK, %eax ;		/* clear the mask */	\
	movl	%eax, IOAPIC_WINDOW(%ecx) ;	/* new value */		\
7: ;						/* already unmasked */	\
	IMASK_UNLOCK

/*
 * Slow, threaded interrupts.
 *
 * XXX Most of the parameters here are obsolete.  Fix this when we're
 * done.
 * XXX we really shouldn't return via doreti if we just schedule the
 * interrupt handler and don't run anything.  We could just do an
 * iret.  FIXME.
 */
#define	INTR(irq_num, vec_name, maybe_extra_ipending)			\
	.text ;								\
	SUPERALIGN_TEXT ;						\
/* _XintrNN: entry point used by IDT/HWIs via _vec[]. */		\
IDTVEC(vec_name) ;							\
	PUSH_FRAME ;							\
	movl	$KDSEL, %eax ;	/* reload with kernel's data segment */	\
	mov	%ax, %ds ;						\
	mov	%ax, %es ;						\
	movl	$KPSEL, %eax ;						\
	mov	%ax, %fs ;						\
;									\
	maybe_extra_ipending ;						\
;									\
	MASK_LEVEL_IRQ(irq_num) ;					\
	EOI_IRQ(irq_num) ;						\
0: ;									\
	movl	PCPU(CURPROC),%ebx ;					\
	incl	P_INTR_NESTING_LEVEL(%ebx) ;				\
;	 								\
  /* entry point used by doreti_unpend for HWIs. */			\
__CONCAT(Xresume,irq_num): ;						\
	FAKE_MCOUNT(13*4(%esp)) ;		/* XXX avoid dbl cnt */ \
	pushl	$irq_num;			/* pass the IRQ */	\
	call	sched_ithd ;						\
	addl	$4, %esp ;		/* discard the parameter */	\
;									\
	decl	P_INTR_NESTING_LEVEL(%ebx) ;				\
	MEXITCOUNT ;							\
	jmp	doreti
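
/*
 * Unlike FAST_INTR, the vector itself never runs the handler: in rough
 * C (illustrative only; the names are the macros used above) it is
 *
 *	if (apic_pin_trigger & IRQ_BIT(n))
 *		MASK_IRQ(n);
 *	EOI_IRQ(n);
 *	curproc->p_intr_nesting_level++;
 *	sched_ithd(n);
 *	curproc->p_intr_nesting_level--;
 *	... exit via doreti ...
 */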

/*
 * Handle "spurious INTerrupts".
 * Notes:
 *  This is different from the "spurious INTerrupt" generated by an
 *   8259 PIC for missing INTs.  See the APIC documentation for details.
 *  This routine should NOT do an 'EOI' cycle.
 */
	.text
	SUPERALIGN_TEXT
	.globl Xspuriousint
Xspuriousint:

	/* No EOI cycle used here */

	iret


/*
 * Handle TLB shootdowns.
 */
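/*
 * Reloading %cr3 with its current value below flushes every non-global
 * TLB entry on this CPU, which is all the shootdown needs to do.
 */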
	.text
	SUPERALIGN_TEXT
	.globl	Xinvltlb
Xinvltlb:
	pushl	%eax

#ifdef COUNT_XINVLTLB_HITS
	pushl	%fs
	movl	$KPSEL, %eax
	mov	%ax, %fs
	movl	PCPU(CPUID), %eax
	popl	%fs
	ss
	incl	_xhits(,%eax,4)
#endif /* COUNT_XINVLTLB_HITS */

	movl	%cr3, %eax		/* invalidate the TLB */
	movl	%eax, %cr3

	ss				/* stack segment, avoid %ds load */
	movl	$0, lapic+LA_EOI	/* End Of Interrupt to APIC */

	popl	%eax
	iret


/*
 * Executed by a CPU when it receives an Xcpucheckstate IPI from another CPU,
 *
 *  - Stores current cpu state in checkstate_cpustate[cpuid]
 *      0 == user, 1 == sys, 2 == intr
 *  - Stores current process in checkstate_curproc[cpuid]
 *
 *  - Signals its receipt by setting bit cpuid in checkstate_probed_cpus.
 *
 * stack: 0->fs, 4->ds, 8->ebx, 12->eax, 16->eip, 20->cs, 24->eflags
 */

	.text
	SUPERALIGN_TEXT
	.globl Xcpucheckstate
	.globl checkstate_cpustate
	.globl checkstate_curproc
	.globl checkstate_pc
Xcpucheckstate:
	pushl	%eax
	pushl	%ebx
	pushl	%ds			/* save current data segment */
	pushl	%fs

	movl	$KDSEL, %eax
	mov	%ax, %ds		/* use KERNEL data segment */
	movl	$KPSEL, %eax
	mov	%ax, %fs

	movl	$0, lapic+LA_EOI	/* End Of Interrupt to APIC */

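	/*
	 * Classify the interrupted context: the saved %cs is at 20(%esp)
	 * and the saved %eflags at 24(%esp); RPL 3 or a set PSL_VM bit
	 * means user mode, anything else counts as system or interrupt.
	 */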
	movl	$0, %ebx
	movl	20(%esp), %eax
	andl	$3, %eax
	cmpl	$3, %eax
	je	1f
	testl	$PSL_VM, 24(%esp)
	jne	1f
	incl	%ebx			/* system or interrupt */
1:
	movl	PCPU(CPUID), %eax
	movl	%ebx, checkstate_cpustate(,%eax,4)
	movl	PCPU(CURPROC), %ebx
	movl	%ebx, checkstate_curproc(,%eax,4)

	movl	16(%esp), %ebx
	movl	%ebx, checkstate_pc(,%eax,4)

	lock				/* checkstate_probed_cpus |= (1<<id) */
	btsl	%eax, checkstate_probed_cpus

	popl	%fs
	popl	%ds			/* restore previous data segment */
	popl	%ebx
	popl	%eax
	iret


/*
 * Executed by a CPU when it receives an Xcpuast IPI from another CPU,
 *
 *  - Signals its receipt by clearing bit cpuid in checkstate_need_ast.
 *
 *  - We need a better method of triggering asts on other cpus.
 */

	.text
	SUPERALIGN_TEXT
	.globl Xcpuast
Xcpuast:
	PUSH_FRAME
	movl	$KDSEL, %eax
	mov	%ax, %ds		/* use KERNEL data segment */
	mov	%ax, %es
	movl	$KPSEL, %eax
	mov	%ax, %fs

	movl	PCPU(CPUID), %eax
	lock				/* checkstate_need_ast &= ~(1<<id) */
	btrl	%eax, checkstate_need_ast
	movl	$0, lapic+LA_EOI	/* End Of Interrupt to APIC */

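	/*
	 * btsl sets our bit in checkstate_pending_ast and jumps if it was
	 * already set, i.e. an AST for this CPU is already being delivered;
	 * in that case just pop the frame and iret at 1: below.
	 */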
	lock
	btsl	%eax, checkstate_pending_ast
	jc	1f

	FAKE_MCOUNT(13*4(%esp))

	MTX_LOCK_SPIN(sched_lock, 0)
	movl	PCPU(CURPROC),%ebx
	orl	$PS_ASTPENDING, P_SFLAG(%ebx)

	movl	PCPU(CPUID), %eax
	lock
	btrl	%eax, checkstate_pending_ast
	lock
	btrl	%eax, CNAME(resched_cpus)
	jnc	2f
	orl	$PS_NEEDRESCHED, P_SFLAG(%ebx)
	lock
	incl	CNAME(want_resched_cnt)
2:
	MTX_UNLOCK_SPIN(sched_lock)
	lock
	incl	CNAME(cpuast_cnt)
	MEXITCOUNT
	jmp	doreti
1:
	/* We are already in the process of delivering an ast for this CPU */
	POP_FRAME
	iret

/*
 * Executed by a CPU when it receives an Xcpustop IPI from another CPU,
 *
 *  - Signals its receipt.
 *  - Waits for permission to restart.
 *  - Signals its restart.
 */

	.text
	SUPERALIGN_TEXT
	.globl Xcpustop
Xcpustop:
	pushl	%ebp
	movl	%esp, %ebp
	pushl	%eax
	pushl	%ecx
	pushl	%edx
	pushl	%ds			/* save current data segment */
	pushl	%fs

	movl	$KDSEL, %eax
	mov	%ax, %ds		/* use KERNEL data segment */
	movl	$KPSEL, %eax
	mov	%ax, %fs

	movl	$0, lapic+LA_EOI	/* End Of Interrupt to APIC */

	movl	PCPU(CPUID), %eax
	imull	$PCB_SIZE, %eax
	leal	CNAME(stoppcbs)(%eax), %eax
	pushl	%eax
	call	CNAME(savectx)		/* Save process context */
	addl	$4, %esp


	movl	PCPU(CPUID), %eax

	lock
	btsl	%eax, stopped_cpus	/* stopped_cpus |= (1<<id) */
1:
	btl	%eax, started_cpus	/* while (!(started_cpus & (1<<id))) */
	jnc	1b

	lock
	btrl	%eax, started_cpus	/* started_cpus &= ~(1<<id) */
	lock
	btrl	%eax, stopped_cpus	/* stopped_cpus &= ~(1<<id) */

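	/*
	 * %eax still holds the cpuid: only CPU 0 may run the one-shot
	 * restart function below.
	 */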
	test	%eax, %eax
	jnz	2f

	movl	CNAME(cpustop_restartfunc), %eax
	test	%eax, %eax
	jz	2f
	movl	$0, CNAME(cpustop_restartfunc)	/* One-shot */

	call	*%eax
2:
	popl	%fs
	popl	%ds			/* restore previous data segment */
	popl	%edx
	popl	%ecx
	popl	%eax
	movl	%ebp, %esp
	popl	%ebp
	iret


MCOUNT_LABEL(bintr)
	FAST_INTR(0,fastintr0)
	FAST_INTR(1,fastintr1)
	FAST_INTR(2,fastintr2)
	FAST_INTR(3,fastintr3)
	FAST_INTR(4,fastintr4)
	FAST_INTR(5,fastintr5)
	FAST_INTR(6,fastintr6)
	FAST_INTR(7,fastintr7)
	FAST_INTR(8,fastintr8)
	FAST_INTR(9,fastintr9)
	FAST_INTR(10,fastintr10)
	FAST_INTR(11,fastintr11)
	FAST_INTR(12,fastintr12)
	FAST_INTR(13,fastintr13)
	FAST_INTR(14,fastintr14)
	FAST_INTR(15,fastintr15)
	FAST_INTR(16,fastintr16)
	FAST_INTR(17,fastintr17)
	FAST_INTR(18,fastintr18)
	FAST_INTR(19,fastintr19)
	FAST_INTR(20,fastintr20)
	FAST_INTR(21,fastintr21)
	FAST_INTR(22,fastintr22)
	FAST_INTR(23,fastintr23)
	FAST_INTR(24,fastintr24)
	FAST_INTR(25,fastintr25)
	FAST_INTR(26,fastintr26)
	FAST_INTR(27,fastintr27)
	FAST_INTR(28,fastintr28)
	FAST_INTR(29,fastintr29)
	FAST_INTR(30,fastintr30)
	FAST_INTR(31,fastintr31)
#define	CLKINTR_PENDING	movl $1,CNAME(clkintr_pending)
/* Threaded interrupts */
	INTR(0,intr0, CLKINTR_PENDING)
	INTR(1,intr1,)
	INTR(2,intr2,)
	INTR(3,intr3,)
	INTR(4,intr4,)
	INTR(5,intr5,)
	INTR(6,intr6,)
	INTR(7,intr7,)
	INTR(8,intr8,)
	INTR(9,intr9,)
	INTR(10,intr10,)
	INTR(11,intr11,)
	INTR(12,intr12,)
	INTR(13,intr13,)
	INTR(14,intr14,)
	INTR(15,intr15,)
	INTR(16,intr16,)
	INTR(17,intr17,)
	INTR(18,intr18,)
	INTR(19,intr19,)
	INTR(20,intr20,)
	INTR(21,intr21,)
	INTR(22,intr22,)
	INTR(23,intr23,)
	INTR(24,intr24,)
	INTR(25,intr25,)
	INTR(26,intr26,)
	INTR(27,intr27,)
	INTR(28,intr28,)
	INTR(29,intr29,)
	INTR(30,intr30,)
	INTR(31,intr31,)
MCOUNT_LABEL(eintr)

/*
 * Executed by a CPU when it receives a RENDEZVOUS IPI from another CPU.
 *
 * - Calls the generic rendezvous action function.
 */
	.text
	SUPERALIGN_TEXT
	.globl	Xrendezvous
Xrendezvous:
	PUSH_FRAME
	movl	$KDSEL, %eax
	mov	%ax, %ds		/* use KERNEL data segment */
	mov	%ax, %es
	movl	$KPSEL, %eax
	mov	%ax, %fs

	call	smp_rendezvous_action

	movl	$0, lapic+LA_EOI	/* End Of Interrupt to APIC */
	POP_FRAME
	iret


	.data

#ifdef COUNT_XINVLTLB_HITS
	.globl	_xhits
_xhits:
	.space	(NCPU * 4), 0
#endif /* COUNT_XINVLTLB_HITS */

/* variables used by stop_cpus()/restart_cpus()/Xcpustop */
	.globl stopped_cpus, started_cpus
stopped_cpus:
	.long	0
started_cpus:
	.long	0

	.globl checkstate_probed_cpus
checkstate_probed_cpus:
	.long	0
	.globl checkstate_need_ast
checkstate_need_ast:
	.long	0
checkstate_pending_ast:
	.long	0
	.globl CNAME(resched_cpus)
	.globl CNAME(want_resched_cnt)
	.globl CNAME(cpuast_cnt)
	.globl CNAME(cpustop_restartfunc)
CNAME(resched_cpus):
	.long 0
CNAME(want_resched_cnt):
	.long 0
CNAME(cpuast_cnt):
	.long 0
CNAME(cpustop_restartfunc):
	.long 0

	.globl	apic_pin_trigger
apic_pin_trigger:
	.long	0

	.text
