apic_vector.s revision 73586
1/*
2 *	from: vector.s, 386BSD 0.1 unknown origin
3 * $FreeBSD: head/sys/i386/i386/apic_vector.s 73586 2001-03-05 04:37:54Z jhb $
4 */
5
6
7#include <machine/apic.h>
8#include <machine/smp.h>
9
10#include "i386/isa/intr_machdep.h"
11
/* convert an absolute IRQ# into a bitmask */
#define IRQ_BIT(irq_num)	(1 << (irq_num))

/* make an index into the IO APIC from the IRQ# */
/* (each redirection-table entry is a pair of 32-bit registers at 0x10) */
#define REDTBL_IDX(irq_num)	(0x10 + ((irq_num) * 2))
17
/*
 * Trap-frame push/pop helpers shared by the interrupt and IPI entry
 * points below.
 */
/*
 * Build a trap frame: dummy error code and trap type, all general
 * registers (pushal), then the %ds/%es/%fs segment registers.
 * Undone by POP_FRAME (or by doreti on the exit path).
 */
#define PUSH_FRAME							\
	pushl	$0 ;		/* dummy error code */			\
	pushl	$0 ;		/* dummy trap type */			\
	pushal ;							\
	pushl	%ds ;		/* save data and extra segments ... */	\
	pushl	%es ;							\
	pushl	%fs
28
/*
 * Undo PUSH_FRAME: restore the segment and general registers, then
 * discard the dummy trap type and error code words.
 */
#define POP_FRAME							\
	popl	%fs ;							\
	popl	%es ;							\
	popl	%ds ;							\
	popal ;								\
	addl	$4+4,%esp	/* dummy trap type + error code */
35
/*
 * Macros for interrupt entry, call to handler, and exit.
 */

/*
 * FAST_INTR(irq_num, vec_name):
 *	"Fast" interrupt entry.  Load the kernel data/per-cpu segments,
 *	call the unit's handler immediately at interrupt level, EOI the
 *	local APIC, then update the statistics counters and return via
 *	doreti.
 */
#define	FAST_INTR(irq_num, vec_name)					\
	.text ;								\
	SUPERALIGN_TEXT ;						\
IDTVEC(vec_name) ;							\
	PUSH_FRAME ;							\
	movl	$KDSEL,%eax ;						\
	mov	%ax,%ds ;						\
	mov	%ax,%es ;						\
	movl	$KPSEL,%eax ;						\
	mov	%ax,%fs ;						\
	FAKE_MCOUNT(13*4(%esp)) ;					\
	movl	PCPU(CURPROC),%ebx ;					\
	incl	P_INTR_NESTING_LEVEL(%ebx) ;				\
	pushl	intr_unit + (irq_num) * 4 ;	/* handler argument */	\
	call	*intr_handler + (irq_num) * 4 ; /* do the work ASAP */ \
	addl	$4, %esp ;						\
	movl	$0, lapic+LA_EOI ;					\
	lock ; 								\
	incl	cnt+V_INTR ;	/* book-keeping can wait */		\
	movl	intr_countp + (irq_num) * 4, %eax ;			\
	lock ; 								\
	incl	(%eax) ;	/* per-IRQ counter */			\
	decl	P_INTR_NESTING_LEVEL(%ebx) ;				\
	MEXITCOUNT ;							\
	jmp	doreti
65
/* IO APIC base address and redirection-entry index for an IRQ, from int_to_apicintpin[] */
#define IOAPICADDR(irq_num) CNAME(int_to_apicintpin) + 16 * (irq_num) + 8
#define REDIRIDX(irq_num) CNAME(int_to_apicintpin) + 16 * (irq_num) + 12
68
/*
 * MASK_IRQ(irq_num): mask the source at the IO APIC unless its bit is
 * already set in apic_imen.  The redirection entry is reached by
 * writing its index to the IO APIC index register (%ecx) and then
 * updating the data window.
 */
#define MASK_IRQ(irq_num)						\
	IMASK_LOCK ;				/* into critical reg */	\
	testl	$IRQ_BIT(irq_num), apic_imen ;				\
	jne	7f ;			/* masked, don't mask */	\
	orl	$IRQ_BIT(irq_num), apic_imen ;	/* set the mask bit */	\
	movl	IOAPICADDR(irq_num), %ecx ;	/* ioapic addr */	\
	movl	REDIRIDX(irq_num), %eax ;	/* get the index */	\
	movl	%eax, (%ecx) ;			/* write the index */	\
	movl	IOAPIC_WINDOW(%ecx), %eax ;	/* current value */	\
	orl	$IOART_INTMASK, %eax ;		/* set the mask */	\
	movl	%eax, IOAPIC_WINDOW(%ecx) ;	/* new value */		\
7: ;						/* already masked */	\
	IMASK_UNLOCK
/*
 * Test to see whether we are handling an edge or level triggered INT.
 *  Level-triggered INTs must still be masked as we don't clear the source,
 *  and the EOI cycle would cause redundant INTs to occur.
 *  (apic_pin_trigger has a bit set for each level-triggered IRQ.)
 */
#define MASK_LEVEL_IRQ(irq_num)						\
	testl	$IRQ_BIT(irq_num), apic_pin_trigger ;			\
	jz	9f ;				/* edge, don't mask */	\
	MASK_IRQ(irq_num) ;						\
9:
92
93
/*
 * EOI_IRQ(irq_num): send an EOI to the local APIC only if this IRQ's
 * bit is set in the in-service register.  With APIC_INTR_REORDER the
 * address of the relevant ISR word (at +0) and the bit mask (at +4)
 * are precomputed in apic_isrbit_location[]; otherwise test the bit
 * directly in the local APIC's ISR1 register.
 */
#ifdef APIC_INTR_REORDER
#define EOI_IRQ(irq_num)						\
	movl	apic_isrbit_location + 8 * (irq_num), %eax ;		\
	movl	(%eax), %eax ;						\
	testl	apic_isrbit_location + 4 + 8 * (irq_num), %eax ;	\
	jz	9f ;				/* not active */	\
	movl	$0, lapic+LA_EOI ;					\
9:

#else
#define EOI_IRQ(irq_num)						\
	testl	$IRQ_BIT(irq_num), lapic+LA_ISR1;			\
	jz	9f	;			/* not active */	\
	movl	$0, lapic+LA_EOI;					\
9:
#endif
110
111
/*
 * Test to see if the source is currently masked, clear if so.
 * Inverse of MASK_IRQ: clear the IRQ's bit in apic_imen and clear the
 * mask bit in its IO APIC redirection entry, skipping the IO APIC
 * access entirely when the source is not masked.
 */
#define UNMASK_IRQ(irq_num)					\
	IMASK_LOCK ;				/* into critical reg */	\
	testl	$IRQ_BIT(irq_num), apic_imen ;				\
	je	7f ;			/* bit clear, not masked */	\
	andl	$~IRQ_BIT(irq_num), apic_imen ;/* clear mask bit */	\
	movl	IOAPICADDR(irq_num), %ecx ;	/* ioapic addr */	\
	movl	REDIRIDX(irq_num), %eax ;	/* get the index */	\
	movl	%eax, (%ecx) ;			/* write the index */	\
	movl	IOAPIC_WINDOW(%ecx), %eax ;	/* current value */	\
	andl	$~IOART_INTMASK, %eax ;		/* clear the mask */	\
	movl	%eax, IOAPIC_WINDOW(%ecx) ;	/* new value */		\
7: ;						/* already unmasked */	\
	IMASK_UNLOCK
128
/*
 * Slow, threaded interrupts.
 *
 * Mask (if level-triggered) and EOI the source, then hand the IRQ to
 * sched_ithd() to run the handler in its interrupt thread.
 *
 * XXX Most of the parameters here are obsolete.  Fix this when we're
 * done.
 * XXX we really shouldn't return via doreti if we just schedule the
 * interrupt handler and don't run anything.  We could just do an
 * iret.  FIXME.
 */
#define	INTR(irq_num, vec_name, maybe_extra_ipending)			\
	.text ;								\
	SUPERALIGN_TEXT ;						\
/* XintrNN: entry point used by IDT/HWIs via vec[]. */			\
IDTVEC(vec_name) ;							\
	PUSH_FRAME ;							\
	movl	$KDSEL, %eax ;	/* reload with kernel's data segment */	\
	mov	%ax, %ds ;						\
	mov	%ax, %es ;						\
	movl	$KPSEL, %eax ;						\
	mov	%ax, %fs ;						\
;									\
	maybe_extra_ipending ;						\
;									\
	MASK_LEVEL_IRQ(irq_num) ;					\
	EOI_IRQ(irq_num) ;						\
0: ;									\
	movl	PCPU(CURPROC),%ebx ;					\
	incl	P_INTR_NESTING_LEVEL(%ebx) ;				\
;	 								\
  /* entry point used by doreti_unpend for HWIs. */			\
__CONCAT(Xresume,irq_num): ;						\
	FAKE_MCOUNT(13*4(%esp)) ;		/* XXX avoid dbl cnt */ \
	pushl	$irq_num;			/* pass the IRQ */	\
	call	sched_ithd ;						\
	addl	$4, %esp ;		/* discard the parameter */	\
;									\
	decl	P_INTR_NESTING_LEVEL(%ebx) ;				\
	MEXITCOUNT ;							\
	jmp	doreti
168
169/*
170 * Handle "spurious INTerrupts".
171 * Notes:
172 *  This is different than the "spurious INTerrupt" generated by an
173 *   8259 PIC for missing INTs.  See the APIC documentation for details.
174 *  This routine should NOT do an 'EOI' cycle.
175 */
/* Xspuriousint: nothing to save or restore, and deliberately no EOI. */
	.text
	SUPERALIGN_TEXT
	.globl Xspuriousint
Xspuriousint:

	/* No EOI cycle used here */

	iret
184
185
186/*
187 * Handle TLB shootdowns.
188 */
/*
 * Xinvltlb: flush this CPU's entire TLB by reloading %cr3.  Only %eax
 * is touched, so no full trap frame is built; %ss overrides are used
 * so kernel data can be reached without loading %ds.
 */
	.text
	SUPERALIGN_TEXT
	.globl	Xinvltlb
Xinvltlb:
	pushl	%eax

#ifdef COUNT_XINVLTLB_HITS
	pushl	%fs
	movl	$KPSEL, %eax		/* per-cpu segment for CPUID */
	mov	%ax, %fs
	movl	PCPU(CPUID), %eax
	popl	%fs
	ss
	incl	_xhits(,%eax,4)
#endif /* COUNT_XINVLTLB_HITS */

	movl	%cr3, %eax		/* invalidate the TLB */
	movl	%eax, %cr3

	ss				/* stack segment, avoid %ds load */
	movl	$0, lapic+LA_EOI	/* End Of Interrupt to APIC */

	popl	%eax
	iret
213
214
215#ifdef BETTER_CLOCK
216
217/*
218 * Executed by a CPU when it receives an Xcpucheckstate IPI from another CPU,
219 *
220 *  - Stores current cpu state in checkstate_cpustate[cpuid]
221 *      0 == user, 1 == sys, 2 == intr
222 *  - Stores current process in checkstate_curproc[cpuid]
223 *
224 *  - Signals its receipt by setting bit cpuid in checkstate_probed_cpus.
225 *
226 * stack: 0->ds, 4->fs, 8->ebx, 12->eax, 16->eip, 20->cs, 24->eflags
227 */
228
/* Xcpucheckstate: record this CPU's state for the BETTER_CLOCK probe. */
	.text
	SUPERALIGN_TEXT
	.globl Xcpucheckstate
	.globl checkstate_cpustate
	.globl checkstate_curproc
	.globl checkstate_pc
Xcpucheckstate:
	pushl	%eax
	pushl	%ebx
	pushl	%ds			/* save current data segment */
	pushl	%fs

	movl	$KDSEL, %eax
	mov	%ax, %ds		/* use KERNEL data segment */
	movl	$KPSEL, %eax
	mov	%ax, %fs

	movl	$0, lapic+LA_EOI	/* End Of Interrupt to APIC */

	movl	$0, %ebx		/* assume user mode (0) */
	movl	20(%esp), %eax		/* saved %cs (see layout above) */
	andl	$3, %eax		/* privilege level */
	cmpl	$3, %eax
	je	1f			/* ring 3 -> user */
	testl	$PSL_VM, 24(%esp)	/* vm86 counts as user too */
	jne	1f
	incl	%ebx			/* system or interrupt */
1:
	movl	PCPU(CPUID), %eax
	movl	%ebx, checkstate_cpustate(,%eax,4)
	movl	PCPU(CURPROC), %ebx
	movl	%ebx, checkstate_curproc(,%eax,4)

	movl	16(%esp), %ebx		/* saved %eip */
	movl	%ebx, checkstate_pc(,%eax,4)

	lock				/* checkstate_probed_cpus |= (1<<id) */
	btsl	%eax, checkstate_probed_cpus

	popl	%fs
	popl	%ds			/* restore previous data segment */
	popl	%ebx
	popl	%eax
	iret
273
274#endif /* BETTER_CLOCK */
275
276/*
277 * Executed by a CPU when it receives an Xcpuast IPI from another CPU,
278 *
279 *  - Signals its receipt by clearing bit cpuid in checkstate_need_ast.
280 *
281 *  - We need a better method of triggering asts on other cpus.
282 */
283
/* Xcpuast: deliver an AST requested by another CPU via IPI. */
	.text
	SUPERALIGN_TEXT
	.globl Xcpuast
Xcpuast:
	PUSH_FRAME
	movl	$KDSEL, %eax
	mov	%ax, %ds		/* use KERNEL data segment */
	mov	%ax, %es
	movl	$KPSEL, %eax
	mov	%ax, %fs

	movl	PCPU(CPUID), %eax
	lock				/* checkstate_need_ast &= ~(1<<id) */
	btrl	%eax, checkstate_need_ast
	movl	$0, lapic+LA_EOI	/* End Of Interrupt to APIC */

	lock
	btsl	%eax, checkstate_pending_ast
	jc	1f			/* bit already set: delivery in progress */

	FAKE_MCOUNT(13*4(%esp))

	/* sched_lock protects the proc flags we set below */
	MTX_LOCK_SPIN(sched_lock, 0)
	movl	PCPU(CURPROC),%ebx
	orl	$PS_ASTPENDING, P_SFLAG(%ebx)

	movl	PCPU(CPUID), %eax
	lock
	btrl	%eax, checkstate_pending_ast
	lock
	btrl	%eax, CNAME(resched_cpus)
	jnc	2f			/* no resched requested for this CPU */
	orl	$PS_NEEDRESCHED, P_SFLAG(%ebx)
	lock
	incl	CNAME(want_resched_cnt)
2:
	MTX_UNLOCK_SPIN(sched_lock)
	lock
	incl	CNAME(cpuast_cnt)
	MEXITCOUNT
	jmp	doreti
1:
	/* We are already in the process of delivering an ast for this CPU */
	POP_FRAME
	iret
329
330/*
331 * Executed by a CPU when it receives an Xcpustop IPI from another CPU,
332 *
333 *  - Signals its receipt.
334 *  - Waits for permission to restart.
335 *  - Signals its restart.
336 */
337
/* Xcpustop: save context, signal "stopped", spin until restarted. */
	.text
	SUPERALIGN_TEXT
	.globl Xcpustop
Xcpustop:
	pushl	%ebp
	movl	%esp, %ebp
	pushl	%eax
	pushl	%ecx
	pushl	%edx
	pushl	%ds			/* save current data segment */
	pushl	%fs

	movl	$KDSEL, %eax
	mov	%ax, %ds		/* use KERNEL data segment */
	movl	$KPSEL, %eax
	mov	%ax, %fs

	movl	$0, lapic+LA_EOI	/* End Of Interrupt to APIC */

	movl	PCPU(CPUID), %eax
	imull	$PCB_SIZE, %eax
	leal	CNAME(stoppcbs)(%eax), %eax	/* &stoppcbs[cpuid] */
	pushl	%eax
	call	CNAME(savectx)		/* Save process context */
	addl	$4, %esp


	movl	PCPU(CPUID), %eax

	lock
	btsl	%eax, stopped_cpus	/* stopped_cpus |= (1<<id) */
1:
	btl	%eax, started_cpus	/* while (!(started_cpus & (1<<id))) */
	jnc	1b

	lock
	btrl	%eax, started_cpus	/* started_cpus &= ~(1<<id) */
	lock
	btrl	%eax, stopped_cpus	/* stopped_cpus &= ~(1<<id) */

	test	%eax, %eax
	jnz	2f			/* only CPU 0 runs the restart hook */

	movl	CNAME(cpustop_restartfunc), %eax
	test	%eax, %eax
	jz	2f			/* no hook installed */
	movl	$0, CNAME(cpustop_restartfunc)	/* One-shot */

	call	*%eax
2:
	popl	%fs
	popl	%ds			/* restore previous data segment */
	popl	%edx
	popl	%ecx
	popl	%eax
	movl	%ebp, %esp
	popl	%ebp
	iret
396
397
/*
 * Instantiate the 32 fast and 32 threaded interrupt entry points.
 * The bintr/eintr labels bracket them for the profiler.
 */
MCOUNT_LABEL(bintr)
	FAST_INTR(0,fastintr0)
	FAST_INTR(1,fastintr1)
	FAST_INTR(2,fastintr2)
	FAST_INTR(3,fastintr3)
	FAST_INTR(4,fastintr4)
	FAST_INTR(5,fastintr5)
	FAST_INTR(6,fastintr6)
	FAST_INTR(7,fastintr7)
	FAST_INTR(8,fastintr8)
	FAST_INTR(9,fastintr9)
	FAST_INTR(10,fastintr10)
	FAST_INTR(11,fastintr11)
	FAST_INTR(12,fastintr12)
	FAST_INTR(13,fastintr13)
	FAST_INTR(14,fastintr14)
	FAST_INTR(15,fastintr15)
	FAST_INTR(16,fastintr16)
	FAST_INTR(17,fastintr17)
	FAST_INTR(18,fastintr18)
	FAST_INTR(19,fastintr19)
	FAST_INTR(20,fastintr20)
	FAST_INTR(21,fastintr21)
	FAST_INTR(22,fastintr22)
	FAST_INTR(23,fastintr23)
	FAST_INTR(24,fastintr24)
	FAST_INTR(25,fastintr25)
	FAST_INTR(26,fastintr26)
	FAST_INTR(27,fastintr27)
	FAST_INTR(28,fastintr28)
	FAST_INTR(29,fastintr29)
	FAST_INTR(30,fastintr30)
	FAST_INTR(31,fastintr31)
/* IRQ 0 is the clock: record a pending clock interrupt for its driver. */
#define	CLKINTR_PENDING	movl $1,CNAME(clkintr_pending)
/* Threaded interrupts */
	INTR(0,intr0, CLKINTR_PENDING)
	INTR(1,intr1,)
	INTR(2,intr2,)
	INTR(3,intr3,)
	INTR(4,intr4,)
	INTR(5,intr5,)
	INTR(6,intr6,)
	INTR(7,intr7,)
	INTR(8,intr8,)
	INTR(9,intr9,)
	INTR(10,intr10,)
	INTR(11,intr11,)
	INTR(12,intr12,)
	INTR(13,intr13,)
	INTR(14,intr14,)
	INTR(15,intr15,)
	INTR(16,intr16,)
	INTR(17,intr17,)
	INTR(18,intr18,)
	INTR(19,intr19,)
	INTR(20,intr20,)
	INTR(21,intr21,)
	INTR(22,intr22,)
	INTR(23,intr23,)
	INTR(24,intr24,)
	INTR(25,intr25,)
	INTR(26,intr26,)
	INTR(27,intr27,)
	INTR(28,intr28,)
	INTR(29,intr29,)
	INTR(30,intr30,)
	INTR(31,intr31,)
MCOUNT_LABEL(eintr)
466
467/*
468 * Executed by a CPU when it receives a RENDEZVOUS IPI from another CPU.
469 *
470 * - Calls the generic rendezvous action function.
471 */
/* Xrendezvous: run the generic rendezvous action on this CPU. */
	.text
	SUPERALIGN_TEXT
	.globl	Xrendezvous
Xrendezvous:
	PUSH_FRAME
	movl	$KDSEL, %eax
	mov	%ax, %ds		/* use KERNEL data segment */
	mov	%ax, %es
	movl	$KPSEL, %eax
	mov	%ax, %fs

	call	smp_rendezvous_action	/* C code does the real work */

	movl	$0, lapic+LA_EOI	/* End Of Interrupt to APIC */
	POP_FRAME
	iret
488
489
/* Data used by the interrupt and IPI handlers above. */
	.data

#ifdef COUNT_XINVLTLB_HITS
/* per-CPU count of Xinvltlb IPIs handled */
	.globl	_xhits
_xhits:
	.space	(NCPU * 4), 0
#endif /* COUNT_XINVLTLB_HITS */

/* variables used by stop_cpus()/restart_cpus()/Xcpustop */
	.globl stopped_cpus, started_cpus
stopped_cpus:
	.long	0			/* bitmap: CPUs that have stopped */
started_cpus:
	.long	0			/* bitmap: CPUs released to restart */

#ifdef BETTER_CLOCK
	.globl checkstate_probed_cpus
checkstate_probed_cpus:
	.long	0			/* bitmap: CPUs that answered the probe */
#endif /* BETTER_CLOCK */
	.globl checkstate_need_ast
checkstate_need_ast:
	.long	0			/* bitmap: CPUs with an AST requested */
checkstate_pending_ast:
	.long	0			/* bitmap: CPUs mid-AST-delivery */
	.globl CNAME(resched_cpus)
	.globl CNAME(want_resched_cnt)
	.globl CNAME(cpuast_cnt)
	.globl CNAME(cpustop_restartfunc)
CNAME(resched_cpus):
	.long 0				/* bitmap: CPUs needing a resched */
CNAME(want_resched_cnt):
	.long 0				/* count of resched requests via IPI */
CNAME(cpuast_cnt):
	.long 0				/* count of ASTs delivered via IPI */
CNAME(cpustop_restartfunc):
	.long 0				/* one-shot hook run by CPU 0 on restart */

/* bitmap of level-triggered IRQs (set bit => mask before EOI) */
	.globl	apic_pin_trigger
apic_pin_trigger:
	.long	0
531
532	.text
533