apic_vector.s revision 72276
/*
 *	from: vector.s, 386BSD 0.1 unknown origin
 * $FreeBSD: head/sys/i386/i386/apic_vector.s 72276 2001-02-10 02:20:34Z jhb $
 */


#include <machine/apic.h>
#include <machine/smp.h>

#include "i386/isa/intr_machdep.h"

/* convert an absolute IRQ# into a bitmask */
#define IRQ_BIT(irq_num)	(1 << (irq_num))

/* make an index into the IO APIC from the IRQ# */
#define REDTBL_IDX(irq_num)	(0x10 + ((irq_num) * 2))
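
/*
 * For example, IRQ_BIT(9) == 0x00000200 and REDTBL_IDX(9) == 0x22:
 * redirection table entries start at IO APIC register 0x10, and each
 * entry occupies two 32-bit registers, hence the "* 2".
 */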

/*
 * Macros to push and pop a trap-style frame around interrupt handling.
 */
#define PUSH_FRAME							\
	pushl	$0 ;		/* dummy error code */			\
	pushl	$0 ;		/* dummy trap type */			\
	pushal ;							\
	pushl	%ds ;		/* save data and extra segments ... */	\
	pushl	%es ;							\
	pushl	%fs

#define POP_FRAME							\
	popl	%fs ;							\
	popl	%es ;							\
	popl	%ds ;							\
	popal ;								\
	addl	$4+4,%esp	/* dummy trap type and error code */

/*
 * Macros for interrupt entry, call to handler, and exit.
 */
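
/*
 * FAST_INTR: call the handler straight from the trap frame, then EOI
 * the local APIC and do the statistics afterwards ("the work ASAP,
 * book-keeping can wait"), finally returning through doreti.
 */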

#define	FAST_INTR(irq_num, vec_name)					\
	.text ;								\
	SUPERALIGN_TEXT ;						\
IDTVEC(vec_name) ;							\
	PUSH_FRAME ;							\
	movl	$KDSEL,%eax ;						\
	mov	%ax,%ds ;						\
	mov	%ax,%es ;						\
	movl	$KPSEL,%eax ;						\
	mov	%ax,%fs ;						\
	FAKE_MCOUNT(13*4(%esp)) ;					\
	movl	PCPU(CURPROC),%ebx ;					\
	incl	P_INTR_NESTING_LEVEL(%ebx) ;				\
	pushl	_intr_unit + (irq_num) * 4 ;				\
	call	*_intr_handler + (irq_num) * 4 ; /* do the work ASAP */ \
	addl	$4, %esp ;						\
	movl	$0, _lapic+LA_EOI ;					\
	lock ;								\
	incl	_cnt+V_INTR ;	/* book-keeping can wait */		\
	movl	_intr_countp + (irq_num) * 4, %eax ;			\
	lock ;								\
	incl	(%eax) ;						\
	decl	P_INTR_NESTING_LEVEL(%ebx) ;				\
	MEXITCOUNT ;							\
	jmp	_doreti

#define IOAPICADDR(irq_num) CNAME(int_to_apicintpin) + 16 * (irq_num) + 8
#define REDIRIDX(irq_num) CNAME(int_to_apicintpin) + 16 * (irq_num) + 12
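
/*
 * The IO APIC is programmed indirectly: the register index is first
 * written at the ioapic base address, and the register data is then
 * read or written through the window at offset IOAPIC_WINDOW.
 */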

#define MASK_IRQ(irq_num)						\
	IMASK_LOCK ;				/* into critical reg */	\
	testl	$IRQ_BIT(irq_num), _apic_imen ;				\
	jne	7f ;			/* masked, don't mask */	\
	orl	$IRQ_BIT(irq_num), _apic_imen ;	/* set the mask bit */	\
	movl	IOAPICADDR(irq_num), %ecx ;	/* ioapic addr */	\
	movl	REDIRIDX(irq_num), %eax ;	/* get the index */	\
	movl	%eax, (%ecx) ;			/* write the index */	\
	movl	IOAPIC_WINDOW(%ecx), %eax ;	/* current value */	\
	orl	$IOART_INTMASK, %eax ;		/* set the mask */	\
	movl	%eax, IOAPIC_WINDOW(%ecx) ;	/* new value */		\
7: ;						/* already masked */	\
	IMASK_UNLOCK

/*
 * Test to see whether we are handling an edge or level triggered INT.
 *  Level-triggered INTs must still be masked as we don't clear the source,
 *  and the EOI cycle would cause redundant INTs to occur.
 */
#define MASK_LEVEL_IRQ(irq_num)						\
	testl	$IRQ_BIT(irq_num), _apic_pin_trigger ;			\
	jz	9f ;				/* edge, don't mask */	\
	MASK_IRQ(irq_num) ;						\
9:

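/*
 * EOI_IRQ: only issue the local APIC EOI if this IRQ is actually
 * marked in-service.  With APIC_INTR_REORDER the check is table
 * driven: apic_isrbit_location[] holds, per IRQ, a pointer to the
 * relevant ISR word and the bit mask to test against it.
 */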
#ifdef APIC_INTR_REORDER
#define EOI_IRQ(irq_num)						\
	movl	_apic_isrbit_location + 8 * (irq_num), %eax ;		\
	movl	(%eax), %eax ;						\
	testl	_apic_isrbit_location + 4 + 8 * (irq_num), %eax ;	\
	jz	9f ;				/* not active */	\
	movl	$0, _lapic+LA_EOI ;					\
9:

#else
#define EOI_IRQ(irq_num)						\
	testl	$IRQ_BIT(irq_num), _lapic+LA_ISR1 ;			\
	jz	9f ;				/* not active */	\
	movl	$0, _lapic+LA_EOI ;					\
9:
#endif


/*
 * Test to see if the source is currently masked, clear if so.
 */
#define UNMASK_IRQ(irq_num)						\
	IMASK_LOCK ;				/* into critical reg */	\
	testl	$IRQ_BIT(irq_num), _apic_imen ;				\
	je	7f ;			/* bit clear, not masked */	\
	andl	$~IRQ_BIT(irq_num), _apic_imen ;/* clear mask bit */	\
	movl	IOAPICADDR(irq_num), %ecx ;	/* ioapic addr */	\
	movl	REDIRIDX(irq_num), %eax ;	/* get the index */	\
	movl	%eax, (%ecx) ;			/* write the index */	\
	movl	IOAPIC_WINDOW(%ecx), %eax ;	/* current value */	\
	andl	$~IOART_INTMASK, %eax ;		/* clear the mask */	\
	movl	%eax, IOAPIC_WINDOW(%ecx) ;	/* new value */		\
7: ;						/* already unmasked */	\
	IMASK_UNLOCK

/*
 * Slow, threaded interrupts.
 *
 * XXX Most of the parameters here are obsolete.  Fix this when we're
 * done.
 * XXX we really shouldn't return via doreti if we just schedule the
 * interrupt handler and don't run anything.  We could just do an
 * iret.  FIXME.
 */
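/*
 * Flow: mask the pin if it is level triggered, EOI the local APIC,
 * then pass the IRQ to sched_ithd to schedule its interrupt thread.
 * (Presumably the ithread code uses UNMASK_IRQ above to re-enable a
 * masked source once the handlers have run.)
 */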
#define	INTR(irq_num, vec_name, maybe_extra_ipending)			\
	.text ;								\
	SUPERALIGN_TEXT ;						\
/* _XintrNN: entry point used by IDT/HWIs via _vec[]. */		\
IDTVEC(vec_name) ;							\
	PUSH_FRAME ;							\
	movl	$KDSEL, %eax ;	/* reload with kernel's data segment */	\
	mov	%ax, %ds ;						\
	mov	%ax, %es ;						\
	movl	$KPSEL, %eax ;						\
	mov	%ax, %fs ;						\
;									\
	maybe_extra_ipending ;						\
;									\
	MASK_LEVEL_IRQ(irq_num) ;					\
	EOI_IRQ(irq_num) ;						\
0: ;									\
	movl	PCPU(CURPROC),%ebx ;					\
	incl	P_INTR_NESTING_LEVEL(%ebx) ;				\
;									\
  /* entry point used by doreti_unpend for HWIs. */			\
__CONCAT(Xresume,irq_num): ;						\
	FAKE_MCOUNT(13*4(%esp)) ;		/* XXX avoid dbl cnt */ \
	pushl	$irq_num ;			/* pass the IRQ */	\
	sti ;								\
	call	_sched_ithd ;						\
	addl	$4, %esp ;		/* discard the parameter */	\
;									\
	decl	P_INTR_NESTING_LEVEL(%ebx) ;				\
	MEXITCOUNT ;							\
	jmp	_doreti

/*
 * Handle "spurious INTerrupts".
 * Notes:
 *  This is different from the "spurious INTerrupt" generated by an
 *   8259 PIC for missing INTs.  See the APIC documentation for details.
 *  This routine should NOT do an 'EOI' cycle.
 */
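/*
 * (The local APIC sets no in-service bit when delivering the spurious
 *  vector, so an EOI here could instead acknowledge some unrelated
 *  in-service interrupt.)
 */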
	.text
	SUPERALIGN_TEXT
	.globl _Xspuriousint
_Xspuriousint:

	/* No EOI cycle used here */

	iret


/*
 * Handle TLB shootdowns.
 */
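/*
 * Reloading %cr3 with its current value flushes this CPU's (non-global)
 * TLB entries; the IPI lets one CPU force the others to do the same.
 */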
	.text
	SUPERALIGN_TEXT
	.globl	_Xinvltlb
_Xinvltlb:
	pushl	%eax

#ifdef COUNT_XINVLTLB_HITS
	pushl	%fs
	movl	$KPSEL, %eax
	mov	%ax, %fs
	movl	PCPU(CPUID), %eax
	popl	%fs
	ss
	incl	_xhits(,%eax,4)
#endif /* COUNT_XINVLTLB_HITS */

	movl	%cr3, %eax		/* invalidate the TLB */
	movl	%eax, %cr3

	ss				/* stack segment, avoid %ds load */
	movl	$0, _lapic+LA_EOI	/* End Of Interrupt to APIC */

	popl	%eax
	iret


#ifdef BETTER_CLOCK

/*
 * Executed by a CPU when it receives an Xcpucheckstate IPI from another CPU,
 *
 *  - Stores current cpu state in checkstate_cpustate[cpuid]
 *      0 == user, 1 == sys, 2 == intr
 *  - Stores current process in checkstate_curproc[cpuid]
 *
 *  - Signals its receipt by setting bit cpuid in checkstate_probed_cpus.
 *
 * stack: 0->fs, 4->ds, 8->ebx, 12->eax, 16->eip, 20->cs, 24->eflags
 */
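/*
 * The mode test below: 20(%esp) is the saved %cs and 24(%esp) the
 * saved %eflags, so RPL 3 or PSL_VM set means we interrupted user
 * mode; anything else is counted as system/interrupt state.
 */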

	.text
	SUPERALIGN_TEXT
	.globl _Xcpucheckstate
	.globl _checkstate_cpustate
	.globl _checkstate_curproc
	.globl _checkstate_pc
_Xcpucheckstate:
	pushl	%eax
	pushl	%ebx
	pushl	%ds			/* save current data segment */
	pushl	%fs

	movl	$KDSEL, %eax
	mov	%ax, %ds		/* use KERNEL data segment */
	movl	$KPSEL, %eax
	mov	%ax, %fs

	movl	$0, _lapic+LA_EOI	/* End Of Interrupt to APIC */

	movl	$0, %ebx
	movl	20(%esp), %eax
	andl	$3, %eax
	cmpl	$3, %eax
	je	1f
	testl	$PSL_VM, 24(%esp)
	jne	1f
	incl	%ebx			/* system or interrupt */
1:
	movl	PCPU(CPUID), %eax
	movl	%ebx, _checkstate_cpustate(,%eax,4)
	movl	PCPU(CURPROC), %ebx
	movl	%ebx, _checkstate_curproc(,%eax,4)

	movl	16(%esp), %ebx
	movl	%ebx, _checkstate_pc(,%eax,4)

	lock				/* checkstate_probed_cpus |= (1<<id) */
	btsl	%eax, _checkstate_probed_cpus

	popl	%fs
	popl	%ds			/* restore previous data segment */
	popl	%ebx
	popl	%eax
	iret

#endif /* BETTER_CLOCK */

/*
 * Executed by a CPU when it receives an Xcpuast IPI from another CPU,
 *
 *  - Signals its receipt by clearing bit cpuid in checkstate_need_ast.
 *
 *  - We need a better method of triggering asts on other cpus.
 */
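/*
 * checkstate_pending_ast permits only one AST delivery in flight per
 * CPU: if our bit is already set, the IPI has been EOIed and is
 * otherwise dropped via the 1: exit path below.
 */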

	.text
	SUPERALIGN_TEXT
	.globl _Xcpuast
_Xcpuast:
	PUSH_FRAME
	movl	$KDSEL, %eax
	mov	%ax, %ds		/* use KERNEL data segment */
	mov	%ax, %es
	movl	$KPSEL, %eax
	mov	%ax, %fs

	movl	PCPU(CPUID), %eax
	lock				/* checkstate_need_ast &= ~(1<<id) */
	btrl	%eax, _checkstate_need_ast
	movl	$0, _lapic+LA_EOI	/* End Of Interrupt to APIC */

	lock
	btsl	%eax, _checkstate_pending_ast
	jc	1f

	FAKE_MCOUNT(13*4(%esp))

	MTX_LOCK_SPIN(sched_lock, 0)
	movl	PCPU(CURPROC),%ebx
	orl	$PS_ASTPENDING, P_SFLAG(%ebx)

	movl	PCPU(CPUID), %eax
	lock
	btrl	%eax, _checkstate_pending_ast
	lock
	btrl	%eax, CNAME(resched_cpus)
	jnc	2f
	orl	$PS_NEEDRESCHED, P_SFLAG(%ebx)
	lock
	incl	CNAME(want_resched_cnt)
2:
	MTX_UNLOCK_SPIN(sched_lock)
	lock
	incl	CNAME(cpuast_cnt)
	MEXITCOUNT
	jmp	_doreti
1:
	/* We are already in the process of delivering an ast for this CPU */
	POP_FRAME
	iret

/*
 * Executed by a CPU when it receives an Xcpustop IPI from another CPU,
 *
 *  - Signals its receipt.
 *  - Waits for permission to restart.
 *  - Signals its restart.
 */
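/*
 * Protocol: save our context into stoppcbs[cpuid], set our bit in
 * stopped_cpus, spin until the requester sets our bit in started_cpus,
 * then clear both bits.  Only the BSP (cpuid 0) runs the one-shot
 * cpustop_restartfunc on the way out.
 */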

	.text
	SUPERALIGN_TEXT
	.globl _Xcpustop
_Xcpustop:
	pushl	%ebp
	movl	%esp, %ebp
	pushl	%eax
	pushl	%ecx
	pushl	%edx
	pushl	%ds			/* save current data segment */
	pushl	%fs

	movl	$KDSEL, %eax
	mov	%ax, %ds		/* use KERNEL data segment */
	movl	$KPSEL, %eax
	mov	%ax, %fs

	movl	$0, _lapic+LA_EOI	/* End Of Interrupt to APIC */

	movl	PCPU(CPUID), %eax
	imull	$PCB_SIZE, %eax
	leal	CNAME(stoppcbs)(%eax), %eax
	pushl	%eax
	call	CNAME(savectx)		/* Save process context */
	addl	$4, %esp

	movl	PCPU(CPUID), %eax

	lock
	btsl	%eax, _stopped_cpus	/* stopped_cpus |= (1<<id) */
1:
	btl	%eax, _started_cpus	/* while (!(started_cpus & (1<<id))) */
	jnc	1b

	lock
	btrl	%eax, _started_cpus	/* started_cpus &= ~(1<<id) */
	lock
	btrl	%eax, _stopped_cpus	/* stopped_cpus &= ~(1<<id) */

	test	%eax, %eax
	jnz	2f

	movl	CNAME(cpustop_restartfunc), %eax
	test	%eax, %eax
	jz	2f
	movl	$0, CNAME(cpustop_restartfunc)	/* One-shot */

	call	*%eax
2:
	popl	%fs
	popl	%ds			/* restore previous data segment */
	popl	%edx
	popl	%ecx
	popl	%eax
	movl	%ebp, %esp
	popl	%ebp
	iret


MCOUNT_LABEL(bintr)
	FAST_INTR(0,fastintr0)
	FAST_INTR(1,fastintr1)
	FAST_INTR(2,fastintr2)
	FAST_INTR(3,fastintr3)
	FAST_INTR(4,fastintr4)
	FAST_INTR(5,fastintr5)
	FAST_INTR(6,fastintr6)
	FAST_INTR(7,fastintr7)
	FAST_INTR(8,fastintr8)
	FAST_INTR(9,fastintr9)
	FAST_INTR(10,fastintr10)
	FAST_INTR(11,fastintr11)
	FAST_INTR(12,fastintr12)
	FAST_INTR(13,fastintr13)
	FAST_INTR(14,fastintr14)
	FAST_INTR(15,fastintr15)
	FAST_INTR(16,fastintr16)
	FAST_INTR(17,fastintr17)
	FAST_INTR(18,fastintr18)
	FAST_INTR(19,fastintr19)
	FAST_INTR(20,fastintr20)
	FAST_INTR(21,fastintr21)
	FAST_INTR(22,fastintr22)
	FAST_INTR(23,fastintr23)
	FAST_INTR(24,fastintr24)
	FAST_INTR(25,fastintr25)
	FAST_INTR(26,fastintr26)
	FAST_INTR(27,fastintr27)
	FAST_INTR(28,fastintr28)
	FAST_INTR(29,fastintr29)
	FAST_INTR(30,fastintr30)
	FAST_INTR(31,fastintr31)
#define	CLKINTR_PENDING	movl $1,CNAME(clkintr_pending)
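/*
 * CLKINTR_PENDING (above) is passed as IRQ 0's maybe_extra_ipending
 * hook so an incoming clock tick is flagged in clkintr_pending; the
 * interpretation of the flag lives with the clock code elsewhere.
 */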
/* Threaded interrupts */
	INTR(0,intr0, CLKINTR_PENDING)
	INTR(1,intr1,)
	INTR(2,intr2,)
	INTR(3,intr3,)
	INTR(4,intr4,)
	INTR(5,intr5,)
	INTR(6,intr6,)
	INTR(7,intr7,)
	INTR(8,intr8,)
	INTR(9,intr9,)
	INTR(10,intr10,)
	INTR(11,intr11,)
	INTR(12,intr12,)
	INTR(13,intr13,)
	INTR(14,intr14,)
	INTR(15,intr15,)
	INTR(16,intr16,)
	INTR(17,intr17,)
	INTR(18,intr18,)
	INTR(19,intr19,)
	INTR(20,intr20,)
	INTR(21,intr21,)
	INTR(22,intr22,)
	INTR(23,intr23,)
	INTR(24,intr24,)
	INTR(25,intr25,)
	INTR(26,intr26,)
	INTR(27,intr27,)
	INTR(28,intr28,)
	INTR(29,intr29,)
	INTR(30,intr30,)
	INTR(31,intr31,)
MCOUNT_LABEL(eintr)

/*
 * Executed by a CPU when it receives a RENDEZVOUS IPI from another CPU.
 *
 * - Calls the generic rendezvous action function.
 */
	.text
	SUPERALIGN_TEXT
	.globl	_Xrendezvous
_Xrendezvous:
	PUSH_FRAME
	movl	$KDSEL, %eax
	mov	%ax, %ds		/* use KERNEL data segment */
	mov	%ax, %es
	movl	$KPSEL, %eax
	mov	%ax, %fs

	call	_smp_rendezvous_action

	movl	$0, _lapic+LA_EOI	/* End Of Interrupt to APIC */
	POP_FRAME
	iret


	.data

#ifdef COUNT_XINVLTLB_HITS
	.globl	_xhits
_xhits:
	.space	(NCPU * 4), 0
#endif /* COUNT_XINVLTLB_HITS */

/* variables used by stop_cpus()/restart_cpus()/Xcpustop */
	.globl _stopped_cpus, _started_cpus
_stopped_cpus:
	.long	0
_started_cpus:
	.long	0

#ifdef BETTER_CLOCK
	.globl _checkstate_probed_cpus
_checkstate_probed_cpus:
	.long	0
#endif /* BETTER_CLOCK */
	.globl _checkstate_need_ast
_checkstate_need_ast:
	.long	0
_checkstate_pending_ast:
	.long	0
	.globl CNAME(resched_cpus)
	.globl CNAME(want_resched_cnt)
	.globl CNAME(cpuast_cnt)
	.globl CNAME(cpustop_restartfunc)
CNAME(resched_cpus):
	.long 0
CNAME(want_resched_cnt):
	.long 0
CNAME(cpuast_cnt):
	.long 0
CNAME(cpustop_restartfunc):
	.long 0

	.globl	_apic_pin_trigger
_apic_pin_trigger:
	.long	0

	.text
534