/*
 *	from: vector.s, 386BSD 0.1 unknown origin
 *	$Id: apic_vector.s,v 1.13 1997/07/31 05:42:05 fsmp Exp $
 */


#include <machine/apic.h>		/* IOAPIC_WINDOW, IOART_INTMASK */
#include <machine/smptests.h>		/** PEND_INTS, various counters */

#include "i386/isa/intr_machdep.h"

/* convert an absolute IRQ# into a bitmask */
#define IRQ_BIT(irq_num)	(1 << (irq_num))

/* make an index into the IO APIC from the IRQ# */
#define REDTBL_IDX(irq_num)	(0x10 + ((irq_num) * 2))

/*
 * 'lazy masking' code suggested by Bruce Evans
 */

#ifdef PEND_INTS

/*
 * The 1st version fails because masked edge-triggered INTs are lost
 * by the IO APIC.  This version tests whether we are handling an edge
 * or a level triggered INT.  Level-triggered INTs must still be masked
 * as we don't clear the source, and the EOI cycle would allow recursive
 * INTs to occur.
 */
#define MAYBE_MASK_IRQ(irq_num)						\
	lock ;					/* MP-safe */		\
	btsl	$(irq_num),iactive ;		/* lazy masking */	\
	jc	6f ;				/* already active */	\
	TRY_ISRLOCK(irq_num) ;			/* try to get lock */	\
	testl	%eax, %eax ;			/* did we get it? */	\
	jnz	8f ;				/* yes, enter kernel */	\
6: ;						/* active or locked */	\
	IMASK_LOCK ;				/* into critical reg */	\
	testl	$IRQ_BIT(irq_num),_apic_pin_trigger ;			\
	jz	7f ;				/* edge, don't mask */	\
	orl	$IRQ_BIT(irq_num),_apic_imen ;	/* set the mask bit */	\
	movl	_ioapic,%ecx ;			/* ioapic[0] addr */	\
	movl	$REDTBL_IDX(irq_num),(%ecx) ;	/* write the index */	\
	movl	IOAPIC_WINDOW(%ecx),%eax ;	/* current value */	\
	orl	$IOART_INTMASK,%eax ;		/* set the mask */	\
	movl	%eax,IOAPIC_WINDOW(%ecx) ;	/* new value */		\
7: ;									\
	lock ;					/* MP-safe */		\
	orl	$IRQ_BIT(irq_num), _ipending ;	/* set _ipending bit */	\
	IMASK_UNLOCK ;				/* exit critical reg */	\
	movl	$0, lapic_eoi ;			/* do the EOI */	\
	popl	%es ;							\
	popl	%ds ;							\
	popal ;								\
	addl	$4+4,%esp ;						\
	iret ;								\
;									\
	ALIGN_TEXT ;							\
8:

#else /* PEND_INTS */

#define MAYBE_MASK_IRQ(irq_num)						\
	lock ;					/* MP-safe */		\
	btsl	$(irq_num),iactive ;		/* lazy masking */	\
	jnc	1f ;				/* NOT active */	\
	IMASK_LOCK ;				/* enter critical reg */\
	orl	$IRQ_BIT(irq_num),_apic_imen ;	/* set the mask bit */	\
	movl	_ioapic,%ecx ;			/* ioapic[0] addr */	\
	movl	$REDTBL_IDX(irq_num),(%ecx) ;	/* write the index */	\
	movl	IOAPIC_WINDOW(%ecx),%eax ;	/* current value */	\
	orl	$IOART_INTMASK,%eax ;		/* set the mask */	\
	movl	%eax,IOAPIC_WINDOW(%ecx) ;	/* new value */		\
	lock ;					/* MP-safe */		\
	orl	$IRQ_BIT(irq_num), _ipending ;	/* set _ipending bit */	\
	movl	$0, lapic_eoi ;			/* do the EOI */	\
	IMASK_UNLOCK ;				/* exit critical reg */	\
	popl	%es ;							\
	popl	%ds ;							\
	popal ;								\
	addl	$4+4,%esp ;						\
	iret ;								\
;									\
	ALIGN_TEXT ;							\
1: ;									\
	GET_MPLOCK				/* SMP Spin lock */

#endif /* PEND_INTS */

#define MAYBE_UNMASK_IRQ(irq_num)					\
	cli ;	/* must unmask _apic_imen and IO APIC atomically */	\
	lock ;					/* MP-safe */		\
	andl	$~IRQ_BIT(irq_num),iactive ;				\
	IMASK_LOCK ;				/* enter critical reg */\
	testl	$IRQ_BIT(irq_num),_apic_imen ;				\
	je	9f ;							\
	andl	$~IRQ_BIT(irq_num),_apic_imen ;	/* clear mask bit */	\
	movl	_ioapic,%ecx ;			/* ioapic[0] addr */	\
	movl	$REDTBL_IDX(irq_num),(%ecx) ;	/* write the index */	\
	movl	IOAPIC_WINDOW(%ecx),%eax ;	/* current value */	\
	andl	$~IOART_INTMASK,%eax ;		/* clear the mask */	\
	movl	%eax,IOAPIC_WINDOW(%ecx) ;	/* new value */		\
9: ;									\
	IMASK_UNLOCK ;				/* exit critical reg */	\
	sti	/* XXX _doreti repeats the cli/sti */
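/*
 * The lazy-masking pair above, restated as C-like pseudocode.  This is
 * an illustrative sketch only: ioapic_read()/ioapic_write() are
 * hypothetical helpers standing in for the index/window register pair
 * reached through _ioapic, and the atomicity provided by the lock
 * prefixes and IMASK_LOCK/IMASK_UNLOCK is elided.
 *
 *	mask (PEND_INTS variant; IRQ already active or ISR lock busy):
 *		if (apic_pin_trigger & IRQ_BIT(irq)) {	// level-triggered
 *			apic_imen |= IRQ_BIT(irq);
 *			ioapic_write(REDTBL_IDX(irq),
 *			    ioapic_read(REDTBL_IDX(irq)) | IOART_INTMASK);
 *		}
 *		ipending |= IRQ_BIT(irq);	// replay via _doreti later
 *
 *	unmask (after the handler returns):
 *		iactive &= ~IRQ_BIT(irq);
 *		if (apic_imen & IRQ_BIT(irq)) {
 *			apic_imen &= ~IRQ_BIT(irq);
 *			ioapic_write(REDTBL_IDX(irq),
 *			    ioapic_read(REDTBL_IDX(irq)) & ~IOART_INTMASK);
 *		}
 */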
/*
 * Macros for interrupt entry, call to handler, and exit.
 */

#define	FAST_INTR(irq_num, vec_name)					\
	.text ;								\
	SUPERALIGN_TEXT ;						\
IDTVEC(vec_name) ;							\
	pushl	%eax ;		/* save only call-used registers */	\
	pushl	%ecx ;							\
	pushl	%edx ;							\
	pushl	%ds ;							\
	MAYBE_PUSHL_ES ;						\
	movl	$KDSEL,%eax ;						\
	movl	%ax,%ds ;						\
	MAYBE_MOVW_AX_ES ;						\
	FAKE_MCOUNT((4+ACTUALLY_PUSHED)*4(%esp)) ;			\
	GET_ISRLOCK(irq_num) ;						\
	pushl	_intr_unit + (irq_num) * 4 ;				\
	call	*_intr_handler + (irq_num) * 4 ; /* do the work ASAP */ \
	movl	$0, lapic_eoi ;						\
	addl	$4,%esp ;						\
	incl	_cnt+V_INTR ;		/* book-keeping can wait */	\
	movl	_intr_countp + (irq_num) * 4,%eax ;			\
	incl	(%eax) ;						\
	movl	_cpl,%eax ;	/* unmasking pending HWIs or SWIs? */	\
	notl	%eax ;							\
	andl	_ipending,%eax ;					\
	jne	2f ;			/* yes, maybe handle them */	\
1: ;									\
	MEXITCOUNT ;							\
	REL_ISRLOCK(irq_num) ;						\
	MAYBE_POPL_ES ;							\
	popl	%ds ;							\
	popl	%edx ;							\
	popl	%ecx ;							\
	popl	%eax ;							\
	iret ;								\
;									\
	ALIGN_TEXT ;							\
2: ;									\
	cmpb	$3,_intr_nesting_level ;	/* enough stack? */	\
	jae	1b ;			/* no, return */		\
	movl	_cpl,%eax ;						\
	/* XXX next line is probably unnecessary now. */		\
	movl	$HWI_MASK|SWI_MASK,_cpl ;	/* limit nesting ... */	\
	incb	_intr_nesting_level ;	/* ... really limit it ... */	\
	sti ;			/* to do this as early as possible */	\
	MAYBE_POPL_ES ;		/* discard most of thin frame ... */	\
	popl	%ecx ;		/* ... original %ds ... */		\
	popl	%edx ;							\
	xchgl	%eax,4(%esp) ;	/* orig %eax; save cpl */		\
	pushal ;		/* build fat frame (grrr) ... */	\
	pushl	%ecx ;		/* ... actually %ds ... */		\
	pushl	%es ;							\
	movl	$KDSEL,%eax ;						\
	movl	%ax,%es ;						\
	movl	(2+8+0)*4(%esp),%ecx ;	/* %ecx from thin frame ... */	\
	movl	%ecx,(2+6)*4(%esp) ;	/* ... to fat frame ... */	\
	movl	(2+8+1)*4(%esp),%eax ;	/* ... cpl from thin frame */	\
	pushl	%eax ;							\
	subl	$4,%esp ;	/* junk for unit number */		\
	MEXITCOUNT ;							\
	jmp	_doreti

#define	INTR(irq_num, vec_name)						\
	.text ;								\
	SUPERALIGN_TEXT ;						\
IDTVEC(vec_name) ;							\
	pushl	$0 ;			/* dummy error code */		\
	pushl	$0 ;			/* dummy trap type */		\
	pushal ;							\
	pushl	%ds ;		/* save data and extra segments ... */	\
	pushl	%es ;							\
	movl	$KDSEL,%eax ;	/* ... and reload with kernel's ... */	\
	movl	%ax,%ds ;	/* ... early for obsolete reasons */	\
	movl	%ax,%es ;						\
	MAYBE_MASK_IRQ(irq_num) ;					\
	movl	$0, lapic_eoi ;						\
	movl	_cpl,%eax ;						\
	testl	$IRQ_BIT(irq_num), %eax ;				\
	jne	3f ;							\
	incb	_intr_nesting_level ;					\
__CONCAT(Xresume,irq_num): ;						\
	FAKE_MCOUNT(12*4(%esp)) ;	/* XXX late to avoid dbl cnt */	\
	incl	_cnt+V_INTR ;		/* tally interrupts */		\
	movl	_intr_countp + (irq_num) * 4,%eax ;			\
	incl	(%eax) ;						\
	movl	_cpl,%eax ;						\
	pushl	%eax ;							\
	pushl	_intr_unit + (irq_num) * 4 ;				\
	orl	_intr_mask + (irq_num) * 4,%eax ;			\
	movl	%eax,_cpl ;						\
	sti ;								\
	call	*_intr_handler + (irq_num) * 4 ;			\
	MAYBE_UNMASK_IRQ(irq_num) ;					\
	MEXITCOUNT ;							\
	jmp	_doreti ;						\
;									\
	ALIGN_TEXT ;							\
3: ;									\
	/* XXX skip mcounting here to avoid double count */		\
	lock ;					/* MP-safe */		\
	orl	$IRQ_BIT(irq_num), _ipending ;				\
	REL_ISRLOCK(irq_num) ;						\
	popl	%es ;							\
	popl	%ds ;							\
	popal ;								\
	addl	$4+4,%esp ;						\
	iret
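/*
 * Both macros defer interrupts through _cpl and _ipending.  As C-like
 * pseudocode (illustrative sketch only; the trap-frame manipulation
 * is elided):
 *
 *	FAST_INTR tail:
 *		if ((~cpl & ipending) && intr_nesting_level < 3) {
 *			cpl = HWI_MASK | SWI_MASK;	// limit nesting
 *			intr_nesting_level++;
 *			// rebuild the fat trap frame, then jmp _doreti
 *		}
 *		// otherwise just iret
 *
 *	INTR entry (label 3):
 *		if (cpl & IRQ_BIT(irq)) {	// IRQ currently masked
 *			ipending |= IRQ_BIT(irq);
 *			// release the ISR lock and iret
 *		}
 */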
/*
 * Handle "spurious INTerrupts".
 * Notes:
 *  This is different from the "spurious INTerrupt" generated by an
 *  8259 PIC for missing INTs.  See the APIC documentation for details.
 *  This routine should NOT do an 'EOI' cycle.
 */
	.text
	SUPERALIGN_TEXT
	.globl	_Xspuriousint
_Xspuriousint:

#ifdef COUNT_SPURIOUS_INTS
	ss
	incl	_sihits
#endif

	/* No EOI cycle used here */

	iret


/*
 * Handle TLB shootdowns.
 */
	.text
	SUPERALIGN_TEXT
	.globl	_Xinvltlb
_Xinvltlb:
	pushl	%eax

#ifdef COUNT_XINVLTLB_HITS
	ss
	movl	_cpuid, %eax
	ss
	incl	_xhits(,%eax,4)
#endif /* COUNT_XINVLTLB_HITS */

	movl	%cr3, %eax		/* invalidate the TLB */
	movl	%eax, %cr3

	ss				/* stack segment, avoid %ds load */
	movl	$0, lapic_eoi		/* End Of Interrupt to APIC */

	popl	%eax
	iret


/*
 * Executed by a CPU when it receives an Xcpustop IPI from another CPU:
 *
 *  - Signals its receipt.
 *  - Waits for permission to restart.
 *  - Signals its restart.
 */
	.text
	SUPERALIGN_TEXT
	.globl	_Xcpustop
_Xcpustop:
	pushl	%eax
	pushl	%ds			/* save current data segment */

	movl	$KDSEL, %eax
	movl	%ax, %ds		/* use KERNEL data segment */

	movl	_cpuid, %eax

#ifdef COUNT_CSHITS
	incl	_cshits(,%eax,4)
#endif /* COUNT_CSHITS */

	ASMPOSTCODE_HI(0x1)

	lock
	btsl	%eax, _stopped_cpus	/* stopped_cpus |= (1<<id) */
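/*
 * The stop/restart handshake described in the header comment above,
 * as C-like pseudocode.  Illustrative sketch only: a companion
 * started_cpus bitmask, set by the CPU that requested the stop, is
 * assumed here and is not defined in this fragment; id is this CPU's
 * _cpuid, and the bit updates are performed with locked btsl/btrl:
 *
 *	stopped_cpus |= (1 << id);		// signal receipt
 *	while ((started_cpus & (1 << id)) == 0)
 *		;				// wait for permission
 *	started_cpus &= ~(1 << id);		// signal restart
 *	stopped_cpus &= ~(1 << id);
 */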