/*
 *	from: vector.s, 386BSD 0.1 unknown origin
 * $FreeBSD: head/sys/i386/i386/apic_vector.s 71337 2001-01-21 19:25:07Z jake $
 */


#include <machine/apic.h>
#include <machine/smp.h>

#include "i386/isa/intr_machdep.h"

/* convert an absolute IRQ# into a bitmask */
#define IRQ_BIT(irq_num)	(1 << (irq_num))

/* make an index into the IO APIC from the IRQ# */
#define REDTBL_IDX(irq_num)	(0x10 + ((irq_num) * 2))
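/*
 * Example: for IRQ 5, IRQ_BIT(5) == 0x20 and REDTBL_IDX(5) == 0x1a, the
 * select index of the low 32 bits of I/O APIC redirection table entry 5
 * (the 64-bit entries are register pairs starting at index 0x10).
 */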

/*
 * Push/pop an interrupt trap frame: a dummy error code and trap type,
 * the general registers, and the %ds/%es/%fs segment registers, built
 * on top of the frame pushed by the hardware.
 */
#define PUSH_FRAME							\
	pushl	$0 ;		/* dummy error code */			\
	pushl	$0 ;		/* dummy trap type */			\
	pushal ;							\
	pushl	%ds ;		/* save data and extra segments ... */	\
	pushl	%es ;							\
	pushl	%fs

#define POP_FRAME							\
	popl	%fs ;							\
	popl	%es ;							\
	popl	%ds ;							\
	popal ;								\
	addl	$4+4,%esp
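
/*
 * The frame built by PUSH_FRAME is 13 words deep: the two dummies, the
 * 8 pushal registers, and %ds/%es/%fs.  The hardware-saved %eip thus
 * sits at 13*4(%esp), the offset handed to FAKE_MCOUNT() below.
 */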

/*
 * Macros for interrupt entry, call to handler, and exit.
 */

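/*
 * FAST_INTR() stubs call the handler immediately, with interrupts still
 * disabled; the local APIC EOI is issued as soon as the handler returns,
 * and only then is the interrupt accounting done ("book-keeping can
 * wait").
 */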
#define	FAST_INTR(irq_num, vec_name)					\
	.text ;								\
	SUPERALIGN_TEXT ;						\
IDTVEC(vec_name) ;							\
	PUSH_FRAME ;							\
	movl	$KDSEL,%eax ;						\
	mov	%ax,%ds ;						\
	mov	%ax,%es ;						\
	movl	$KPSEL,%eax ;						\
	mov	%ax,%fs ;						\
	FAKE_MCOUNT(13*4(%esp)) ;					\
	movl	PCPU(CURPROC),%ebx ;					\
	incl	P_INTR_NESTING_LEVEL(%ebx) ;				\
	pushl	_intr_unit + (irq_num) * 4 ;				\
	call	*_intr_handler + (irq_num) * 4 ; /* do the work ASAP */ \
	addl	$4, %esp ;						\
	movl	$0, _lapic+LA_EOI ;					\
	lock ;								\
	incl	_cnt+V_INTR ;	/* book-keeping can wait */		\
	movl	_intr_countp + (irq_num) * 4, %eax ;			\
	lock ;								\
	incl	(%eax) ;						\
	decl	P_INTR_NESTING_LEVEL(%ebx) ;				\
	MEXITCOUNT ;							\
	jmp	_doreti

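/*
 * Each int_to_apicintpin[] entry is 16 bytes; IOAPICADDR() and
 * REDIRIDX() pull out the I/O APIC base address (offset 8) and the
 * redirection register index (offset 12) for a given IRQ.
 */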
#define IOAPICADDR(irq_num) CNAME(int_to_apicintpin) + 16 * (irq_num) + 8
#define REDIRIDX(irq_num) CNAME(int_to_apicintpin) + 16 * (irq_num) + 12

#define MASK_IRQ(irq_num)						\
	IMASK_LOCK ;				/* into critical reg */	\
	testl	$IRQ_BIT(irq_num), _apic_imen ;				\
	jne	7f ;			/* masked, don't mask */	\
	orl	$IRQ_BIT(irq_num), _apic_imen ;	/* set the mask bit */	\
	movl	IOAPICADDR(irq_num), %ecx ;	/* ioapic addr */	\
	movl	REDIRIDX(irq_num), %eax ;	/* get the index */	\
	movl	%eax, (%ecx) ;			/* write the index */	\
	movl	IOAPIC_WINDOW(%ecx), %eax ;	/* current value */	\
	orl	$IOART_INTMASK, %eax ;		/* set the mask */	\
	movl	%eax, IOAPIC_WINDOW(%ecx) ;	/* new value */		\
7: ;						/* already masked */	\
	IMASK_UNLOCK

/*
 * Test to see whether we are handling an edge or level triggered INT.
 *  Level-triggered INTs must still be masked as we don't clear the source,
 *  and the EOI cycle would cause redundant INTs to occur.
 */
#define MASK_LEVEL_IRQ(irq_num)						\
	testl	$IRQ_BIT(irq_num), _apic_pin_trigger ;			\
	jz	9f ;				/* edge, don't mask */	\
	MASK_IRQ(irq_num) ;						\
9:


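/*
 * EOI the local APIC only if this source's in-service (ISR) bit is
 * actually set.  With APIC_INTR_REORDER the bit's address and mask come
 * from the apic_isrbit_location[] table (8 bytes per IRQ) instead of
 * being computed from the IRQ number.
 */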
#ifdef APIC_INTR_REORDER
#define EOI_IRQ(irq_num)						\
	movl	_apic_isrbit_location + 8 * (irq_num), %eax ;		\
	movl	(%eax), %eax ;						\
	testl	_apic_isrbit_location + 4 + 8 * (irq_num), %eax ;	\
	jz	9f ;				/* not active */	\
	movl	$0, _lapic+LA_EOI ;					\
9:

#else
#define EOI_IRQ(irq_num)						\
	testl	$IRQ_BIT(irq_num), _lapic+LA_ISR1 ;			\
	jz	9f ;				/* not active */	\
	movl	$0, _lapic+LA_EOI ;					\
9:
#endif


/*
 * Test to see if the source is currently masked, clear if so.
 */
#define UNMASK_IRQ(irq_num)						\
	IMASK_LOCK ;				/* into critical reg */	\
	testl	$IRQ_BIT(irq_num), _apic_imen ;				\
	je	7f ;			/* bit clear, not masked */	\
	andl	$~IRQ_BIT(irq_num), _apic_imen ;/* clear mask bit */	\
	movl	IOAPICADDR(irq_num), %ecx ;	/* ioapic addr */	\
	movl	REDIRIDX(irq_num), %eax ;	/* get the index */	\
	movl	%eax, (%ecx) ;			/* write the index */	\
	movl	IOAPIC_WINDOW(%ecx), %eax ;	/* current value */	\
	andl	$~IOART_INTMASK, %eax ;		/* clear the mask */	\
	movl	%eax, IOAPIC_WINDOW(%ecx) ;	/* new value */		\
7: ;						/* already unmasked */	\
	IMASK_UNLOCK

/*
 * Slow, threaded interrupts.
 *
 * XXX Most of the parameters here are obsolete.  Fix this when we're
 * done.
 * XXX we really shouldn't return via doreti if we just schedule the
 * interrupt handler and don't run anything.  We could just do an
 * iret.  FIXME.
 */
#define	INTR(irq_num, vec_name, maybe_extra_ipending)			\
	.text ;								\
	SUPERALIGN_TEXT ;						\
/* _XintrNN: entry point used by IDT/HWIs via _vec[]. */		\
IDTVEC(vec_name) ;							\
	PUSH_FRAME ;							\
	movl	$KDSEL, %eax ;	/* reload with kernel's data segment */	\
	mov	%ax, %ds ;						\
	mov	%ax, %es ;						\
	movl	$KPSEL, %eax ;						\
	mov	%ax, %fs ;						\
;									\
	maybe_extra_ipending ;						\
;									\
	MASK_LEVEL_IRQ(irq_num) ;					\
	EOI_IRQ(irq_num) ;						\
0: ;									\
	movl	PCPU(CURPROC),%ebx ;					\
	incl	P_INTR_NESTING_LEVEL(%ebx) ;				\
;									\
  /* entry point used by doreti_unpend for HWIs. */			\
__CONCAT(Xresume,irq_num): ;						\
	FAKE_MCOUNT(13*4(%esp)) ;		/* XXX avoid dbl cnt */ \
	pushl	$irq_num;			/* pass the IRQ */	\
	sti ;								\
	call	_sched_ithd ;						\
	addl	$4, %esp ;		/* discard the parameter */	\
;									\
	decl	P_INTR_NESTING_LEVEL(%ebx) ;				\
	MEXITCOUNT ;							\
	jmp	_doreti

/*
 * Handle "spurious INTerrupts".
 * Notes:
 *  This is different from the "spurious INTerrupt" generated by an
 *   8259 PIC for missing INTs.  See the APIC documentation for details.
 *  This routine should NOT do an 'EOI' cycle.
 */
	.text
	SUPERALIGN_TEXT
	.globl _Xspuriousint
_Xspuriousint:

	/* No EOI cycle used here */

	iret


/*
 * Handle TLB shootdowns.
 */
	.text
	SUPERALIGN_TEXT
	.globl	_Xinvltlb
_Xinvltlb:
	pushl	%eax

#ifdef COUNT_XINVLTLB_HITS
	pushl	%fs
	movl	$KPSEL, %eax
	mov	%ax, %fs
	movl	PCPU(CPUID), %eax
	popl	%fs
	ss
	incl	_xhits(,%eax,4)
#endif /* COUNT_XINVLTLB_HITS */

	movl	%cr3, %eax		/* invalidate the TLB */
	movl	%eax, %cr3

	ss				/* stack segment, avoid %ds load */
	movl	$0, _lapic+LA_EOI	/* End Of Interrupt to APIC */

	popl	%eax
	iret


#ifdef BETTER_CLOCK

/*
 * Executed by a CPU when it receives an Xcpucheckstate IPI from another CPU:
 *
 *  - Stores current cpu state in checkstate_cpustate[cpuid]
 *      0 == user, 1 == sys, 2 == intr
 *  - Stores current process in checkstate_curproc[cpuid]
 *
 *  - Signals its receipt by setting bit cpuid in checkstate_probed_cpus.
 *
 * stack: 0->ds, 4->fs, 8->ebx, 12->eax, 16->eip, 20->cs, 24->eflags
 */

	.text
	SUPERALIGN_TEXT
	.globl _Xcpucheckstate
	.globl _checkstate_cpustate
	.globl _checkstate_curproc
	.globl _checkstate_pc
_Xcpucheckstate:
	pushl	%eax
	pushl	%ebx
	pushl	%ds			/* save current data segment */
	pushl	%fs

	movl	$KDSEL, %eax
	mov	%ax, %ds		/* use KERNEL data segment */
	movl	$KPSEL, %eax
	mov	%ax, %fs

	movl	$0, _lapic+LA_EOI	/* End Of Interrupt to APIC */

	movl	$0, %ebx
	movl	20(%esp), %eax
	andl	$3, %eax
	cmpl	$3, %eax
	je	1f
	testl	$PSL_VM, 24(%esp)
	jne	1f
	incl	%ebx			/* system or interrupt */
1:
	movl	PCPU(CPUID), %eax
	movl	%ebx, _checkstate_cpustate(,%eax,4)
	movl	PCPU(CURPROC), %ebx
	movl	%ebx, _checkstate_curproc(,%eax,4)

	movl	16(%esp), %ebx
	movl	%ebx, _checkstate_pc(,%eax,4)

	lock				/* checkstate_probed_cpus |= (1<<id) */
	btsl	%eax, _checkstate_probed_cpus

	popl	%fs
	popl	%ds			/* restore previous data segment */
	popl	%ebx
	popl	%eax
	iret

#endif /* BETTER_CLOCK */

/*
 * Executed by a CPU when it receives an Xcpuast IPI from another CPU:
 *
 *  - Signals its receipt by clearing bit cpuid in checkstate_need_ast.
 *
 *  - We need a better method of triggering asts on other cpus.
 */

	.text
	SUPERALIGN_TEXT
	.globl _Xcpuast
_Xcpuast:
	PUSH_FRAME
	movl	$KDSEL, %eax
	mov	%ax, %ds		/* use KERNEL data segment */
	mov	%ax, %es
	movl	$KPSEL, %eax
	mov	%ax, %fs

	movl	PCPU(CPUID), %eax
	lock				/* checkstate_need_ast &= ~(1<<id) */
	btrl	%eax, _checkstate_need_ast
	movl	$0, _lapic+LA_EOI	/* End Of Interrupt to APIC */

	lock
	btsl	%eax, _checkstate_pending_ast
	jc	1f

	FAKE_MCOUNT(13*4(%esp))

	orl	$AST_PENDING, PCPU(ASTPENDING)	/* XXX */
	movl	PCPU(CURPROC),%ebx
	incl	P_INTR_NESTING_LEVEL(%ebx)
	sti

	movl	PCPU(CPUID), %eax
	lock
	btrl	%eax, _checkstate_pending_ast
	lock
	btrl	%eax, CNAME(resched_cpus)
	jnc	2f
	orl	$AST_PENDING+AST_RESCHED, PCPU(ASTPENDING)
	lock
	incl	CNAME(want_resched_cnt)
2:
	lock
	incl	CNAME(cpuast_cnt)
	decl	P_INTR_NESTING_LEVEL(%ebx)
	MEXITCOUNT
	jmp	_doreti
1:
	/* We are already in the process of delivering an ast for this CPU */
	POP_FRAME
	iret

#if 0

/*
 * Executed by a CPU when it receives an XFORWARD_IRQ IPI.
 */

	.text
	SUPERALIGN_TEXT
	.globl _Xforward_irq
_Xforward_irq:
	PUSH_FRAME
	movl	$KDSEL, %eax
	mov	%ax, %ds		/* use KERNEL data segment */
	mov	%ax, %es
	movl	$KPSEL, %eax
	mov	%ax, %fs

	movl	$0, _lapic+LA_EOI	/* End Of Interrupt to APIC */

	FAKE_MCOUNT(13*4(%esp))

	lock
	incl	CNAME(forward_irq_hitcnt)
	cmpb	$4, PCPU(INTR_NESTING_LEVEL)
	jae	1f

	incb	PCPU(INTR_NESTING_LEVEL)
	sti

	MEXITCOUNT
	jmp	doreti_next		/* Handle forwarded interrupt */
1:
	lock
	incl	CNAME(forward_irq_toodeepcnt)
	MEXITCOUNT
	POP_FRAME
	iret

/*
 * forward_irq: send an XFORWARD_IRQ IPI to the CPU that should field
 * the interrupt (the mp_lock holder; with that check broken, CPU #0).
 */
forward_irq:
	MCOUNT
	cmpl	$0,_invltlb_ok
	jz	4f

	cmpl	$0, CNAME(forward_irq_enabled)
	jz	4f

/* XXX - this is broken now, because mp_lock doesn't exist
	movl	_mp_lock,%eax
	cmpl	$FREE_LOCK,%eax
	jne	1f
 */
	movl	$0, %eax		/* Pick CPU #0 if no one has lock */
1:
	shrl	$24,%eax
	movl	_cpu_num_to_apic_id(,%eax,4),%ecx
	shll	$24,%ecx
	movl	_lapic+LA_ICR_HI, %eax
	andl	$~APIC_ID_MASK, %eax
	orl	%ecx, %eax
	movl	%eax, _lapic+LA_ICR_HI

2:
	movl	_lapic+LA_ICR_LO, %eax
	andl	$APIC_DELSTAT_MASK,%eax
	jnz	2b
	movl	_lapic+LA_ICR_LO, %eax
	andl	$APIC_RESV2_MASK, %eax
	orl	$(APIC_DEST_DESTFLD|APIC_DELMODE_FIXED|XFORWARD_IRQ_OFFSET), %eax
	movl	%eax, _lapic+LA_ICR_LO
3:
	movl	_lapic+LA_ICR_LO, %eax
	andl	$APIC_DELSTAT_MASK,%eax
	jnz	3b
4:
	ret
#endif

/*
 * Executed by a CPU when it receives an Xcpustop IPI from another CPU:
 *
 *  - Signals its receipt.
 *  - Waits for permission to restart.
 *  - Signals its restart.
 */

	.text
	SUPERALIGN_TEXT
	.globl _Xcpustop
_Xcpustop:
	pushl	%ebp
	movl	%esp, %ebp
	pushl	%eax
	pushl	%ecx
	pushl	%edx
	pushl	%ds			/* save current data segment */
	pushl	%fs

	movl	$KDSEL, %eax
	mov	%ax, %ds		/* use KERNEL data segment */
	movl	$KPSEL, %eax
	mov	%ax, %fs

	movl	$0, _lapic+LA_EOI	/* End Of Interrupt to APIC */

	movl	PCPU(CPUID), %eax
	imull	$PCB_SIZE, %eax
	leal	CNAME(stoppcbs)(%eax), %eax
	pushl	%eax
	call	CNAME(savectx)		/* Save process context */
	addl	$4, %esp

	movl	PCPU(CPUID), %eax

	lock
	btsl	%eax, _stopped_cpus	/* stopped_cpus |= (1<<id) */
1:
	btl	%eax, _started_cpus	/* while (!(started_cpus & (1<<id))) */
	jnc	1b

	lock
	btrl	%eax, _started_cpus	/* started_cpus &= ~(1<<id) */
	lock
	btrl	%eax, _stopped_cpus	/* stopped_cpus &= ~(1<<id) */

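	/*
	 * Only CPU #0 (%eax == 0) checks for a one-shot restart
	 * function; the other CPUs just resume.
	 */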
	test	%eax, %eax
	jnz	2f

	movl	CNAME(cpustop_restartfunc), %eax
	test	%eax, %eax
	jz	2f
	movl	$0, CNAME(cpustop_restartfunc)	/* One-shot */

	call	*%eax
2:
	popl	%fs
	popl	%ds			/* restore previous data segment */
	popl	%edx
	popl	%ecx
	popl	%eax
	movl	%ebp, %esp
	popl	%ebp
	iret


MCOUNT_LABEL(bintr)
	FAST_INTR(0,fastintr0)
	FAST_INTR(1,fastintr1)
	FAST_INTR(2,fastintr2)
	FAST_INTR(3,fastintr3)
	FAST_INTR(4,fastintr4)
	FAST_INTR(5,fastintr5)
	FAST_INTR(6,fastintr6)
	FAST_INTR(7,fastintr7)
	FAST_INTR(8,fastintr8)
	FAST_INTR(9,fastintr9)
	FAST_INTR(10,fastintr10)
	FAST_INTR(11,fastintr11)
	FAST_INTR(12,fastintr12)
	FAST_INTR(13,fastintr13)
	FAST_INTR(14,fastintr14)
	FAST_INTR(15,fastintr15)
	FAST_INTR(16,fastintr16)
	FAST_INTR(17,fastintr17)
	FAST_INTR(18,fastintr18)
	FAST_INTR(19,fastintr19)
	FAST_INTR(20,fastintr20)
	FAST_INTR(21,fastintr21)
	FAST_INTR(22,fastintr22)
	FAST_INTR(23,fastintr23)
	FAST_INTR(24,fastintr24)
	FAST_INTR(25,fastintr25)
	FAST_INTR(26,fastintr26)
	FAST_INTR(27,fastintr27)
	FAST_INTR(28,fastintr28)
	FAST_INTR(29,fastintr29)
	FAST_INTR(30,fastintr30)
	FAST_INTR(31,fastintr31)
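
/*
 * IRQ 0 (the clock) passes CLKINTR_PENDING as the maybe_extra_ipending
 * hook, recording a pending clock tick in clkintr_pending at interrupt
 * entry, before the ithread gets to run.
 */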
#define	CLKINTR_PENDING	movl $1,CNAME(clkintr_pending)
/* Threaded interrupts */
	INTR(0,intr0, CLKINTR_PENDING)
	INTR(1,intr1,)
	INTR(2,intr2,)
	INTR(3,intr3,)
	INTR(4,intr4,)
	INTR(5,intr5,)
	INTR(6,intr6,)
	INTR(7,intr7,)
	INTR(8,intr8,)
	INTR(9,intr9,)
	INTR(10,intr10,)
	INTR(11,intr11,)
	INTR(12,intr12,)
	INTR(13,intr13,)
	INTR(14,intr14,)
	INTR(15,intr15,)
	INTR(16,intr16,)
	INTR(17,intr17,)
	INTR(18,intr18,)
	INTR(19,intr19,)
	INTR(20,intr20,)
	INTR(21,intr21,)
	INTR(22,intr22,)
	INTR(23,intr23,)
	INTR(24,intr24,)
	INTR(25,intr25,)
	INTR(26,intr26,)
	INTR(27,intr27,)
	INTR(28,intr28,)
	INTR(29,intr29,)
	INTR(30,intr30,)
	INTR(31,intr31,)
MCOUNT_LABEL(eintr)

/*
 * Executed by a CPU when it receives a RENDEZVOUS IPI from another CPU.
 *
 * - Calls the generic rendezvous action function.
 */
	.text
	SUPERALIGN_TEXT
	.globl	_Xrendezvous
_Xrendezvous:
	PUSH_FRAME
	movl	$KDSEL, %eax
	mov	%ax, %ds		/* use KERNEL data segment */
	mov	%ax, %es
	movl	$KPSEL, %eax
	mov	%ax, %fs

	call	_smp_rendezvous_action

	movl	$0, _lapic+LA_EOI	/* End Of Interrupt to APIC */
	POP_FRAME
	iret


	.data
#if 0
/* active flag for lazy masking */
iactive:
	.long	0
#endif

#ifdef COUNT_XINVLTLB_HITS
	.globl	_xhits
_xhits:
	.space	(NCPU * 4), 0
#endif /* COUNT_XINVLTLB_HITS */

/* variables used by stop_cpus()/restart_cpus()/Xcpustop */
	.globl _stopped_cpus, _started_cpus
_stopped_cpus:
	.long	0
_started_cpus:
	.long	0

#ifdef BETTER_CLOCK
	.globl _checkstate_probed_cpus
_checkstate_probed_cpus:
	.long	0
#endif /* BETTER_CLOCK */
	.globl _checkstate_need_ast
_checkstate_need_ast:
	.long	0
_checkstate_pending_ast:
	.long	0
	.globl CNAME(forward_irq_misscnt)
	.globl CNAME(forward_irq_toodeepcnt)
	.globl CNAME(forward_irq_hitcnt)
	.globl CNAME(resched_cpus)
	.globl CNAME(want_resched_cnt)
	.globl CNAME(cpuast_cnt)
	.globl CNAME(cpustop_restartfunc)
CNAME(forward_irq_misscnt):
	.long 0
CNAME(forward_irq_hitcnt):
	.long 0
CNAME(forward_irq_toodeepcnt):
	.long 0
CNAME(resched_cpus):
	.long 0
CNAME(want_resched_cnt):
	.long 0
CNAME(cpuast_cnt):
	.long 0
CNAME(cpustop_restartfunc):
	.long 0

	.globl	_apic_pin_trigger
_apic_pin_trigger:
	.long	0

	.text