/*
 * from: vector.s, 386BSD 0.1 unknown origin
 * $Id: apic_vector.s,v 1.43 1999/07/20 06:52:35 msmith Exp $
 */


#include <machine/apic.h>
#include <machine/smp.h>

#include "i386/isa/intr_machdep.h"


#ifdef FAST_SIMPLELOCK

#define GET_FAST_INTR_LOCK \
	pushl	$_fast_intr_lock ;	/* address of lock */	\
	call	_s_lock ;		/* MP-safe */		\
	addl	$4,%esp

#define REL_FAST_INTR_LOCK \
	movl	$0, _fast_intr_lock

#else /* FAST_SIMPLELOCK */

#define GET_FAST_INTR_LOCK \
	call	_get_isrlock

#define REL_FAST_INTR_LOCK \
	pushl	$_mp_lock ;		/* GIANT_LOCK */	\
	call	_MPrellock ;					\
	add	$4, %esp

#endif /* FAST_SIMPLELOCK */

/* convert an absolute IRQ# into a bitmask */
#define IRQ_BIT(irq_num)	(1 << (irq_num))

/* make an index into the IO APIC from the IRQ# */
#define REDTBL_IDX(irq_num)	(0x10 + ((irq_num) * 2))


/*
 * Macros for interrupt entry, call to handler, and exit.
 */

#ifdef FAST_WITHOUTCPL

/*
 */
#define	FAST_INTR(irq_num, vec_name) \
	.text ;								\
	SUPERALIGN_TEXT ;						\
IDTVEC(vec_name) ;							\
	pushl	%eax ;		/* save only call-used registers */	\
	pushl	%ecx ;							\
	pushl	%edx ;							\
	pushl	%ds ;							\
	MAYBE_PUSHL_ES ;						\
	pushl	%fs ;							\
	movl	$KDSEL,%eax ;						\
	movl	%ax,%ds ;						\
	MAYBE_MOVW_AX_ES ;						\
	movl	$KPSEL,%eax ;						\
	movl	%ax,%fs ;						\
	FAKE_MCOUNT((5+ACTUALLY_PUSHED)*4(%esp)) ;			\
	pushl	_intr_unit + (irq_num) * 4 ;				\
	GET_FAST_INTR_LOCK ;						\
	call	*_intr_handler + (irq_num) * 4 ; /* do the work ASAP */ \
	REL_FAST_INTR_LOCK ;						\
	addl	$4, %esp ;						\
	movl	$0, lapic_eoi ;						\
	lock ; 								\
	incl	_cnt+V_INTR ;	/* book-keeping can wait */		\
	movl	_intr_countp + (irq_num) * 4, %eax ;			\
	lock ; 								\
	incl	(%eax) ;						\
	MEXITCOUNT ;							\
	popl	%fs ;							\
	MAYBE_POPL_ES ;							\
	popl	%ds ;							\
	popl	%edx ;							\
	popl	%ecx ;							\
	popl	%eax ;							\
	iret

#else /* FAST_WITHOUTCPL */

#define	FAST_INTR(irq_num, vec_name) \
	.text ;								\
	SUPERALIGN_TEXT ;						\
IDTVEC(vec_name) ;							\
	pushl	%eax ;		/* save only call-used registers */	\
	pushl	%ecx ;							\
	pushl	%edx ;							\
	pushl	%ds ;							\
	MAYBE_PUSHL_ES ;						\
	pushl	%fs ;							\
	movl	$KDSEL, %eax ;						\
	movl	%ax, %ds ;						\
	MAYBE_MOVW_AX_ES ;						\
	movl	$KPSEL, %eax ;						\
	movl	%ax, %fs ;						\
	FAKE_MCOUNT((5+ACTUALLY_PUSHED)*4(%esp)) ;			\
	GET_FAST_INTR_LOCK ;						\
	pushl	_intr_unit + (irq_num) * 4 ;				\
	call	*_intr_handler + (irq_num) * 4 ; /* do the work ASAP */ \
	addl	$4, %esp ;						\
	movl	$0, lapic_eoi ;						\
	lock ; 								\
	incl	_cnt+V_INTR ;	/* book-keeping can wait */		\
	movl	_intr_countp + (irq_num) * 4,%eax ;			\
	lock ; 								\
	incl	(%eax) ;						\
	movl	_cpl, %eax ;	/* unmasking pending HWIs or SWIs? */	\
	notl	%eax ;							\
	andl	_ipending, %eax ;					\
	jne	2f ; 		/* yes, maybe handle them */		\
1: ;									\
	MEXITCOUNT ;							\
	REL_FAST_INTR_LOCK ;						\
	popl	%fs ;							\
	MAYBE_POPL_ES ;							\
	popl	%ds ;							\
	popl	%edx ;							\
	popl	%ecx ;							\
	popl	%eax ;							\
	iret ;								\
;									\
	ALIGN_TEXT ;							\
2: ;									\
	cmpb	$3, _intr_nesting_level ;	/* enough stack? */	\
	jae	1b ;		/* no, return */			\
	movl	_cpl, %eax ;						\
	/* XXX next line is probably unnecessary now. */		\
	movl	$HWI_MASK|SWI_MASK, _cpl ;	/* limit nesting ... */	\
	lock ; 								\
	incb	_intr_nesting_level ;	/* ... really limit it ... */	\
	sti ;			/* to do this as early as possible */	\
	popl	%fs ;		/* discard most of thin frame ... */	\
	MAYBE_POPL_ES ;		/* discard most of thin frame ... */	\
	popl	%ecx ;		/* ... original %ds ... */		\
	popl	%edx ;							\
	xchgl	%eax, 4(%esp) ;	/* orig %eax; save cpl */		\
	pushal ;		/* build fat frame (grrr) ... */	\
	pushl	%ecx ;		/* ... actually %ds ... */		\
	pushl	%es ;							\
	pushl	%fs ;							\
	movl	$KDSEL, %eax ;						\
	movl	%ax, %es ;						\
	movl	$KPSEL, %eax ;						\
	movl	%ax, %fs ;						\
	movl	(3+8+0)*4(%esp), %ecx ;	/* %ecx from thin frame ... */	\
	movl	%ecx, (3+6)*4(%esp) ;	/* ... to fat frame ... */	\
	movl	(3+8+1)*4(%esp), %eax ;	/* ... cpl from thin frame */	\
	pushl	%eax ;							\
	subl	$4, %esp ;	/* junk for unit number */		\
	MEXITCOUNT ;							\
	jmp	_doreti

#endif /** FAST_WITHOUTCPL */


/*
 *
 */
#define PUSH_FRAME							\
	pushl	$0 ;		/* dummy error code */			\
	pushl	$0 ;		/* dummy trap type */			\
	pushal ;							\
	pushl	%ds ;		/* save data and extra segments ... */	\
	pushl	%es ;							\
	pushl	%fs

#define POP_FRAME							\
	popl	%fs ;							\
	popl	%es ;							\
	popl	%ds ;							\
	popal ;								\
	addl	$4+4,%esp

#define IOAPICADDR(irq_num) CNAME(int_to_apicintpin) + 16 * (irq_num) + 8
#define REDIRIDX(irq_num) CNAME(int_to_apicintpin) + 16 * (irq_num) + 12
	
#define MASK_IRQ(irq_num)						\
	IMASK_LOCK ;				/* into critical reg */	\
	testl	$IRQ_BIT(irq_num), _apic_imen ;				\
	jne	7f ;			/* masked, don't mask */	\
	orl	$IRQ_BIT(irq_num), _apic_imen ;	/* set the mask bit */	\
	movl	IOAPICADDR(irq_num), %ecx ;	/* ioapic addr */	\
	movl	REDIRIDX(irq_num), %eax ;	/* get the index */	\
	movl	%eax, (%ecx) ;			/* write the index */	\
	movl	IOAPIC_WINDOW(%ecx), %eax ;	/* current value */	\
	orl	$IOART_INTMASK, %eax ;		/* set the mask */	\
	movl	%eax, IOAPIC_WINDOW(%ecx) ;	/* new value */		\
7: ;						/* already masked */	\
	IMASK_UNLOCK
/*
 * Test to see whether we are handling an edge or level triggered INT.
 * Level-triggered INTs must still be masked as we don't clear the source,
 * and the EOI cycle would cause redundant INTs to occur.
 */
#define MASK_LEVEL_IRQ(irq_num)						\
	testl	$IRQ_BIT(irq_num), _apic_pin_trigger ;			\
	jz	9f ;				/* edge, don't mask */	\
	MASK_IRQ(irq_num) ;						\
9:


#ifdef APIC_INTR_REORDER
#define EOI_IRQ(irq_num)						\
	movl	_apic_isrbit_location + 8 * (irq_num), %eax ;		\
	movl	(%eax), %eax ;						\
	testl	_apic_isrbit_location + 4 + 8 * (irq_num), %eax ;	\
	jz	9f ;				/* not active */	\
	movl	$0, lapic_eoi ;						\
	APIC_ITRACE(apic_itrace_eoi, irq_num, APIC_ITRACE_EOI) ;	\
9:

#else
#define EOI_IRQ(irq_num)						\
	testl	$IRQ_BIT(irq_num), lapic_isr1;				\
	jz	9f	;			/* not active */	\
	movl	$0, lapic_eoi;						\
	APIC_ITRACE(apic_itrace_eoi, irq_num, APIC_ITRACE_EOI) ;	\
9:
#endif
	
	
/*
 * Test to see if the source is currently masked, clear if so.
 */
#define UNMASK_IRQ(irq_num)					\
	IMASK_LOCK ;				/* into critical reg */	\
	testl	$IRQ_BIT(irq_num), _apic_imen ;				\
	je	7f ;			/* bit clear, not masked */	\
	andl	$~IRQ_BIT(irq_num), _apic_imen ;/* clear mask bit */	\
	movl	IOAPICADDR(irq_num),%ecx ;	/* ioapic addr */	\
	movl	REDIRIDX(irq_num), %eax ;	/* get the index */	\
	movl	%eax,(%ecx) ;			/* write the index */	\
	movl	IOAPIC_WINDOW(%ecx),%eax ;	/* current value */	\
	andl	$~IOART_INTMASK,%eax ;		/* clear the mask */	\
	movl	%eax,IOAPIC_WINDOW(%ecx) ;	/* new value */		\
7: ;									\
	IMASK_UNLOCK

#ifdef INTR_SIMPLELOCK
#define ENLOCK
#define DELOCK
#define LATELOCK call	_get_isrlock
#else
#define ENLOCK \
	ISR_TRYLOCK ;		/* XXX this is going away... */		\
	testl	%eax, %eax ;			/* did we get it? */	\
	jz	3f
#define DELOCK	ISR_RELLOCK
#define LATELOCK
#endif

#ifdef APIC_INTR_DIAGNOSTIC
#ifdef APIC_INTR_DIAGNOSTIC_IRQ
log_intr_event:
	pushf
	cli
	pushl	$CNAME(apic_itrace_debuglock)
	call	CNAME(s_lock_np)
	addl	$4, %esp
	movl	CNAME(apic_itrace_debugbuffer_idx), %ecx
	andl	$32767, %ecx
	movl	_cpuid, %eax
	shll	$8, %eax
	orl	8(%esp), %eax
	movw	%ax, CNAME(apic_itrace_debugbuffer)(,%ecx,2)
	incl	%ecx
	andl	$32767, %ecx
	movl	%ecx, CNAME(apic_itrace_debugbuffer_idx)
	pushl	$CNAME(apic_itrace_debuglock)
	call	CNAME(s_unlock_np)
	addl	$4, %esp
	popf
	ret
	

#define APIC_ITRACE(name, irq_num, id)					\
	lock ;					/* MP-safe */		\
	incl	CNAME(name) + (irq_num) * 4 ;				\
	pushl	%eax ;							\
	pushl	%ecx ;							\
	pushl	%edx ;							\
	movl	$(irq_num), %eax ;					\
	cmpl	$APIC_INTR_DIAGNOSTIC_IRQ, %eax ;			\
	jne	7f ;							\
	pushl	$id ;							\
	call	log_intr_event ;					\
	addl	$4, %esp ;						\
7: ;									\
	popl	%edx ;							\
	popl	%ecx ;							\
	popl	%eax
#else
#define APIC_ITRACE(name, irq_num, id)					\
	lock ;					/* MP-safe */		\
	incl	CNAME(name) + (irq_num) * 4
#endif

#define APIC_ITRACE_ENTER 1
#define APIC_ITRACE_EOI 2
#define APIC_ITRACE_TRYISRLOCK 3
#define APIC_ITRACE_GOTISRLOCK 4
#define APIC_ITRACE_ENTER2 5
#define APIC_ITRACE_LEAVE 6
#define APIC_ITRACE_UNMASK 7
#define APIC_ITRACE_ACTIVE 8
#define APIC_ITRACE_MASKED 9
#define APIC_ITRACE_NOISRLOCK 10
#define APIC_ITRACE_MASKED2 11
#define APIC_ITRACE_SPLZ 12
#define APIC_ITRACE_DORETI 13
	
#else	
#define APIC_ITRACE(name, irq_num, id)
#endif
		
#ifdef CPL_AND_CML

#define	INTR(irq_num, vec_name, maybe_extra_ipending)			\
	.text ;								\
	SUPERALIGN_TEXT ;						\
/* _XintrNN: entry point used by IDT/HWIs & splz_unpend via _vec[]. */	\
IDTVEC(vec_name) ;							\
	PUSH_FRAME ;							\
	movl	$KDSEL, %eax ;	/* reload with kernel's data segment */	\
	movl	%ax, %ds ;						\
	movl	%ax, %es ;						\
	movl	$KPSEL, %eax ;						\
	movl	%ax, %fs ;						\
;									\
	maybe_extra_ipending ;						\
;									\
	APIC_ITRACE(apic_itrace_enter, irq_num, APIC_ITRACE_ENTER) ;	\
	lock ;					/* MP-safe */		\
	btsl	$(irq_num), iactive ;		/* lazy masking */	\
	jc	1f ;				/* already active */	\
;									\
	MASK_LEVEL_IRQ(irq_num) ;					\
	EOI_IRQ(irq_num) ;						\
0: ;									\
	APIC_ITRACE(apic_itrace_tryisrlock, irq_num, APIC_ITRACE_TRYISRLOCK) ;\
	ENLOCK ;							\
;									\
	APIC_ITRACE(apic_itrace_gotisrlock, irq_num, APIC_ITRACE_GOTISRLOCK) ;\
	AVCPL_LOCK ;				/* MP-safe */		\
	testl	$IRQ_BIT(irq_num), _cpl ;				\
	jne	2f ;				/* this INT masked */	\
	testl	$IRQ_BIT(irq_num), _cml ;				\
	jne	2f ;				/* this INT masked */	\
	orl	$IRQ_BIT(irq_num), _cil ;				\
	AVCPL_UNLOCK ;							\
;									\
	incb	_intr_nesting_level ;					\
;	 								\
  /* entry point used by doreti_unpend for HWIs. */			\
__CONCAT(Xresume,irq_num): ;						\
	FAKE_MCOUNT(13*4(%esp)) ;		/* XXX avoid dbl cnt */ \
	lock ;	incl	_cnt+V_INTR ;		/* tally interrupts */	\
	movl	_intr_countp + (irq_num) * 4, %eax ;			\
	lock ;	incl	(%eax) ;					\
;									\
	AVCPL_LOCK ;				/* MP-safe */		\
	movl	_cml, %eax ;						\
	pushl	%eax ;							\
	orl	_intr_mask + (irq_num) * 4, %eax ;			\
	movl	%eax, _cml ;						\
	AVCPL_UNLOCK ;							\
;									\
	pushl	_intr_unit + (irq_num) * 4 ;				\
	incl	_inside_intr ;						\
	APIC_ITRACE(apic_itrace_enter2, irq_num, APIC_ITRACE_ENTER2) ;	\
	sti ;								\
	call	*_intr_handler + (irq_num) * 4 ;			\
	cli ;								\
	APIC_ITRACE(apic_itrace_leave, irq_num, APIC_ITRACE_LEAVE) ;	\
	decl	_inside_intr ;						\
;									\
	lock ;	andl	$~IRQ_BIT(irq_num), iactive ;			\
	lock ;	andl	$~IRQ_BIT(irq_num), _cil ;			\
	UNMASK_IRQ(irq_num) ;						\
	APIC_ITRACE(apic_itrace_unmask, irq_num, APIC_ITRACE_UNMASK) ;	\
	sti ;				/* doreti repeats cli/sti */	\
	MEXITCOUNT ;							\
	LATELOCK ;							\
	jmp	_doreti ;						\
;									\
	ALIGN_TEXT ;							\
1: ;						/* active */		\
	APIC_ITRACE(apic_itrace_active, irq_num, APIC_ITRACE_ACTIVE) ;	\
	MASK_IRQ(irq_num) ;						\
	EOI_IRQ(irq_num) ;						\
	AVCPL_LOCK ;				/* MP-safe */		\
	lock ;								\
	orl	$IRQ_BIT(irq_num), _ipending ;				\
	AVCPL_UNLOCK ;							\
	lock ;								\
	btsl	$(irq_num), iactive ;		/* still active */	\
	jnc	0b ;				/* retry */		\
	POP_FRAME ;							\
	iret ;								\
;									\
	ALIGN_TEXT ;							\
2: ;						/* masked by cpl|cml */	\
	APIC_ITRACE(apic_itrace_masked, irq_num, APIC_ITRACE_MASKED) ;	\
	lock ;								\
	orl	$IRQ_BIT(irq_num), _ipending ;				\
	AVCPL_UNLOCK ;							\
	DELOCK ;		/* XXX this is going away... */		\
	POP_FRAME ;							\
	iret ;								\
	ALIGN_TEXT ;							\
3: ; 			/* other cpu has isr lock */			\
	APIC_ITRACE(apic_itrace_noisrlock, irq_num, APIC_ITRACE_NOISRLOCK) ;\
	AVCPL_LOCK ;				/* MP-safe */		\
	lock ;								\
	orl	$IRQ_BIT(irq_num), _ipending ;				\
	testl	$IRQ_BIT(irq_num), _cpl ;				\
	jne	4f ;				/* this INT masked */	\
	testl	$IRQ_BIT(irq_num), _cml ;				\
	jne	4f ;				/* this INT masked */	\
	orl	$IRQ_BIT(irq_num), _cil ;				\
	AVCPL_UNLOCK ;							\
	call	forward_irq ;	/* forward irq to lock holder */	\
	POP_FRAME ;	 			/* and return */	\
	iret ;								\
	ALIGN_TEXT ;							\
4: ;	 					/* blocked */		\
	APIC_ITRACE(apic_itrace_masked2, irq_num, APIC_ITRACE_MASKED2) ;\
	AVCPL_UNLOCK ;							\
	POP_FRAME ;	 			/* and return */	\
	iret

#else /* CPL_AND_CML */


#define	INTR(irq_num, vec_name, maybe_extra_ipending)			\
	.text ;								\
	SUPERALIGN_TEXT ;						\
/* _XintrNN: entry point used by IDT/HWIs & splz_unpend via _vec[]. */	\
IDTVEC(vec_name) ;							\
	PUSH_FRAME ;							\
	movl	$KDSEL, %eax ;	/* reload with kernel's data segment */	\
	movl	%ax, %ds ;						\
	movl	%ax, %es ;						\
	movl	$KPSEL, %eax ;						\
	movl	%ax, %fs ;						\
;									\
	maybe_extra_ipending ;						\
;									\
	APIC_ITRACE(apic_itrace_enter, irq_num, APIC_ITRACE_ENTER) ;	\
	lock ;					/* MP-safe */		\
	btsl	$(irq_num), iactive ;		/* lazy masking */	\
	jc	1f ;				/* already active */	\
;									\
	MASK_LEVEL_IRQ(irq_num) ;					\
	EOI_IRQ(irq_num) ;						\
0: ;									\
	APIC_ITRACE(apic_itrace_tryisrlock, irq_num, APIC_ITRACE_TRYISRLOCK) ;\
	ISR_TRYLOCK ;		/* XXX this is going away... */		\
	testl	%eax, %eax ;			/* did we get it? */	\
	jz	3f ;				/* no */		\
;									\
	APIC_ITRACE(apic_itrace_gotisrlock, irq_num, APIC_ITRACE_GOTISRLOCK) ;\
	AVCPL_LOCK ;				/* MP-safe */		\
	testl	$IRQ_BIT(irq_num), _cpl ;				\
	jne	2f ;				/* this INT masked */	\
	AVCPL_UNLOCK ;							\
;									\
	incb	_intr_nesting_level ;					\
;	 								\
  /* entry point used by doreti_unpend for HWIs. */			\
__CONCAT(Xresume,irq_num): ;						\
	FAKE_MCOUNT(13*4(%esp)) ;		/* XXX avoid dbl cnt */ \
	lock ;	incl	_cnt+V_INTR ;		/* tally interrupts */	\
	movl	_intr_countp + (irq_num) * 4, %eax ;			\
	lock ;	incl	(%eax) ;					\
;									\
	AVCPL_LOCK ;				/* MP-safe */		\
	movl	_cpl, %eax ;						\
	pushl	%eax ;							\
	orl	_intr_mask + (irq_num) * 4, %eax ;			\
	movl	%eax, _cpl ;						\
	lock ;								\
	andl	$~IRQ_BIT(irq_num), _ipending ;				\
	AVCPL_UNLOCK ;							\
;									\
	pushl	_intr_unit + (irq_num) * 4 ;				\
	APIC_ITRACE(apic_itrace_enter2, irq_num, APIC_ITRACE_ENTER2) ;	\
	sti ;								\
	call	*_intr_handler + (irq_num) * 4 ;			\
	cli ;								\
	APIC_ITRACE(apic_itrace_leave, irq_num, APIC_ITRACE_LEAVE) ;	\
;									\
	lock ;	andl	$~IRQ_BIT(irq_num), iactive ;			\
	UNMASK_IRQ(irq_num) ;						\
	APIC_ITRACE(apic_itrace_unmask, irq_num, APIC_ITRACE_UNMASK) ;	\
	sti ;				/* doreti repeats cli/sti */	\
	MEXITCOUNT ;							\
	jmp	_doreti ;						\
;									\
	ALIGN_TEXT ;							\
1: ;						/* active  */		\
	APIC_ITRACE(apic_itrace_active, irq_num, APIC_ITRACE_ACTIVE) ;	\
	MASK_IRQ(irq_num) ;						\
	EOI_IRQ(irq_num) ;						\
	AVCPL_LOCK ;				/* MP-safe */		\
	lock ;								\
	orl	$IRQ_BIT(irq_num), _ipending ;				\
	AVCPL_UNLOCK ;							\
	lock ;								\
	btsl	$(irq_num), iactive ;		/* still active */	\
	jnc	0b ;				/* retry */		\
	POP_FRAME ;							\
	iret ;		/* XXX:	 iactive bit might be 0 now */		\
	ALIGN_TEXT ;							\
2: ;				/* masked by cpl, leave iactive set */	\
	APIC_ITRACE(apic_itrace_masked, irq_num, APIC_ITRACE_MASKED) ;	\
	lock ;								\
	orl	$IRQ_BIT(irq_num), _ipending ;				\
	AVCPL_UNLOCK ;							\
	ISR_RELLOCK ;		/* XXX this is going away... */		\
	POP_FRAME ;							\
	iret ;								\
	ALIGN_TEXT ;							\
3: ; 			/* other cpu has isr lock */			\
	APIC_ITRACE(apic_itrace_noisrlock, irq_num, APIC_ITRACE_NOISRLOCK) ;\
	AVCPL_LOCK ;				/* MP-safe */		\
	lock ;								\
	orl	$IRQ_BIT(irq_num), _ipending ;				\
	testl	$IRQ_BIT(irq_num), _cpl ;				\
	jne	4f ;				/* this INT masked */	\
	AVCPL_UNLOCK ;							\
	call	forward_irq ;	 /* forward irq to lock holder */	\
	POP_FRAME ;	 			/* and return */	\
	iret ;								\
	ALIGN_TEXT ;							\
4: ;	 					/* blocked */		\
	APIC_ITRACE(apic_itrace_masked2, irq_num, APIC_ITRACE_MASKED2) ;\
	AVCPL_UNLOCK ;							\
	POP_FRAME ;	 			/* and return */	\
	iret

#endif /* CPL_AND_CML */


/*
 * Handle "spurious INTerrupts".
 * Notes:
 *  This is different than the "spurious INTerrupt" generated by an
 *  8259 PIC for missing INTs.  See the APIC documentation for details.
 *  This routine should NOT do an 'EOI' cycle.
 */
	.text
	SUPERALIGN_TEXT
	.globl _Xspuriousint
_Xspuriousint:

	/* No EOI cycle used here */

	iret


/*
 * Handle TLB shootdowns.
 */
	.text
	SUPERALIGN_TEXT
	.globl	_Xinvltlb
_Xinvltlb:
	pushl	%eax

#ifdef COUNT_XINVLTLB_HITS
	pushl	%fs
	movl	$KPSEL, %eax
	movl	%ax, %fs
	movl	_cpuid, %eax
	popl	%fs
	ss
	incl	_xhits(,%eax,4)
#endif /* COUNT_XINVLTLB_HITS */

	movl	%cr3, %eax		/* invalidate the TLB */
	movl	%eax, %cr3

	ss				/* stack segment, avoid %ds load */
	movl	$0, lapic_eoi		/* End Of Interrupt to APIC */

	popl	%eax
	iret


#ifdef BETTER_CLOCK

/*
 * Executed by a CPU when it receives an Xcpucheckstate IPI from another CPU,
 *
 *  - Stores current cpu state in checkstate_cpustate[cpuid]
 *      0 == user, 1 == sys, 2 == intr
 *  - Stores current process in checkstate_curproc[cpuid]
 *
 *  - Signals its receipt by setting bit cpuid in checkstate_probed_cpus.
 *
 * stack: 0->ds, 4->fs, 8->ebx, 12->eax, 16->eip, 20->cs, 24->eflags
 */

	.text
	SUPERALIGN_TEXT
	.globl _Xcpucheckstate
	.globl _checkstate_cpustate
	.globl _checkstate_curproc
	.globl _checkstate_pc
_Xcpucheckstate:
	pushl	%eax
	pushl	%ebx		
	pushl	%ds			/* save current data segment */
	pushl	%fs

	movl	$KDSEL, %eax
	movl	%ax, %ds		/* use KERNEL data segment */
	movl	$KPSEL, %eax
	movl	%ax, %fs

	movl	$0, lapic_eoi		/* End Of Interrupt to APIC */

	movl	$0, %ebx		
	movl	20(%esp), %eax	
	andl	$3, %eax
	cmpl	$3, %eax
	je	1f
	testl	$PSL_VM, 24(%esp)
	jne	1f
	incl	%ebx			/* system or interrupt */
#ifdef CPL_AND_CML	
	cmpl	$0, _inside_intr
	je	1f
	incl	%ebx			/* interrupt */
#endif
1:	
	movl	_cpuid, %eax
	movl	%ebx, _checkstate_cpustate(,%eax,4)
	movl	_curproc, %ebx
	movl	%ebx, _checkstate_curproc(,%eax,4)
	movl	16(%esp), %ebx
	movl	%ebx, _checkstate_pc(,%eax,4)

	lock				/* checkstate_probed_cpus |= (1<<id) */
	btsl	%eax, _checkstate_probed_cpus

	popl	%fs
	popl	%ds			/* restore previous data segment */
	popl	%ebx
	popl	%eax
	iret

#endif /* BETTER_CLOCK */

/*
 * Executed by a CPU when it receives an Xcpuast IPI from another CPU,
 *
 *  - Signals its receipt by clearing bit cpuid in checkstate_need_ast.
 *
 *  - We need a better method of triggering asts on other cpus.
 */

	.text
	SUPERALIGN_TEXT
	.globl _Xcpuast
_Xcpuast:
	PUSH_FRAME
	movl	$KDSEL, %eax
	movl	%ax, %ds		/* use KERNEL data segment */
	movl	%ax, %es
	movl	$KPSEL, %eax
	movl	%ax, %fs

	movl	_cpuid, %eax
	lock				/* checkstate_need_ast &= ~(1<<id) */
	btrl	%eax, _checkstate_need_ast
	movl	$0, lapic_eoi		/* End Of Interrupt to APIC */

	lock
	btsl	%eax, _checkstate_pending_ast
	jc	1f

	FAKE_MCOUNT(13*4(%esp))

	/* 
	 * Giant locks do not come cheap.
	 * A lot of cycles are going to be wasted here.
	 */
	call	_get_isrlock

	AVCPL_LOCK
#ifdef CPL_AND_CML
	movl	_cml, %eax
#else
	movl	_cpl, %eax
#endif
	pushl	%eax
	movl	$1, _astpending		/* XXX */
	AVCPL_UNLOCK
	lock
	incb	_intr_nesting_level
	sti

	pushl	$0

	movl	_cpuid, %eax
	lock	
	btrl	%eax, _checkstate_pending_ast
	lock	
	btrl	%eax, CNAME(resched_cpus)
	jnc	2f
	movl	$1, CNAME(want_resched)
	lock
	incl	CNAME(want_resched_cnt)
2:		
	lock
	incl	CNAME(cpuast_cnt)
	MEXITCOUNT
	jmp	_doreti
1:
	/* We are already in the process of delivering an ast for this CPU */
	POP_FRAME
	iret			


/*
 *	 Executed by a CPU when it receives an XFORWARD_IRQ IPI.
 */

	.text
	SUPERALIGN_TEXT
	.globl _Xforward_irq
_Xforward_irq:
	PUSH_FRAME
	movl	$KDSEL, %eax
	movl	%ax, %ds		/* use KERNEL data segment */
	movl	%ax, %es
	movl	$KPSEL, %eax
	movl	%ax, %fs

	movl	$0, lapic_eoi		/* End Of Interrupt to APIC */

	FAKE_MCOUNT(13*4(%esp))

	ISR_TRYLOCK
	testl	%eax,%eax		/* Did we get the lock ? */
	jz  1f				/* No */

	lock
	incl	CNAME(forward_irq_hitcnt)
	cmpb	$4, _intr_nesting_level
	jae	2f
	
	AVCPL_LOCK
#ifdef CPL_AND_CML
	movl	_cml, %eax
#else
	movl	_cpl, %eax
#endif
	pushl	%eax
	AVCPL_UNLOCK
	lock
	incb	_intr_nesting_level
	sti
	
	pushl	$0

	MEXITCOUNT
	jmp	_doreti			/* Handle forwarded interrupt */
1:
	lock
	incl	CNAME(forward_irq_misscnt)
	call	forward_irq	/* Oops, we've lost the isr lock */
	MEXITCOUNT
	POP_FRAME
	iret
2:
	lock
	incl	CNAME(forward_irq_toodeepcnt)
3:	
	ISR_RELLOCK
	MEXITCOUNT
	POP_FRAME
	iret

/*
 * 
 */
forward_irq:
	MCOUNT
	cmpl	$0,_invltlb_ok
	jz	4f

	cmpl	$0, CNAME(forward_irq_enabled)
	jz	4f

	movl	_mp_lock,%eax
	cmpl	$FREE_LOCK,%eax
	jne	1f
	movl	$0, %eax		/* Pick CPU #0 if noone has lock */
1:
	shrl	$24,%eax
	movl	_cpu_num_to_apic_id(,%eax,4),%ecx
	shll	$24,%ecx
	movl	lapic_icr_hi, %eax
	andl	$~APIC_ID_MASK, %eax
	orl	%ecx, %eax
	movl	%eax, lapic_icr_hi

2:
	movl	lapic_icr_lo, %eax
	andl	$APIC_DELSTAT_MASK,%eax
	jnz	2b
	movl	lapic_icr_lo, %eax
	andl	$APIC_RESV2_MASK, %eax
	orl	$(APIC_DEST_DESTFLD|APIC_DELMODE_FIXED|XFORWARD_IRQ_OFFSET), %eax
	movl	%eax, lapic_icr_lo
3:
	movl	lapic_icr_lo, %eax
	andl	$APIC_DELSTAT_MASK,%eax
	jnz	3b
4:		
	ret
	
/*
 * Executed by a CPU when it receives an Xcpustop IPI from another CPU,
 *
 *  - Signals its receipt.
 *  - Waits for permission to restart.
 *  - Signals its restart.
 */

	.text
	SUPERALIGN_TEXT
	.globl _Xcpustop
_Xcpustop:
	pushl	%ebp
	movl	%esp, %ebp
	pushl	%eax
	pushl	%ecx
	pushl	%edx
	pushl	%ds			/* save current data segment */
	pushl	%fs

	movl	$KDSEL, %eax
	movl	%ax, %ds		/* use KERNEL data segment */
	movl	$KPSEL, %eax
	movl	%ax, %fs

	movl	$0, lapic_eoi		/* End Of Interrupt to APIC */

	movl	_cpuid, %eax
	imull	$PCB_SIZE, %eax
	leal	CNAME(stoppcbs)(%eax), %eax
	pushl	%eax
	call	CNAME(savectx)		/* Save process context */
	addl	$4, %esp
	
		
	movl	_cpuid, %eax

	lock
	btsl	%eax, _stopped_cpus	/* stopped_cpus |= (1<<id) */
1:
	btl	%eax, _started_cpus	/* while (!(started_cpus & (1<<id))) */
	jnc	1b

	lock
	btrl	%eax, _started_cpus	/* started_cpus &= ~(1<<id) */
	lock
	btrl	%eax, _stopped_cpus	/* stopped_cpus &= ~(1<<id) */

	test	%eax, %eax
	jnz	2f

	movl	CNAME(cpustop_restartfunc), %eax
	test	%eax, %eax
	jz	2f
	movl	$0, CNAME(cpustop_restartfunc)	/* One-shot */

	call	%eax
2:
	popl	%fs
	popl	%ds			/* restore previous data segment */
	popl	%edx
	popl	%ecx
	popl	%eax
	movl	%ebp, %esp
	popl	%ebp
	iret


MCOUNT_LABEL(bintr)
	FAST_INTR(0,fastintr0)
	FAST_INTR(1,fastintr1)
	FAST_INTR(2,fastintr2)
	FAST_INTR(3,fastintr3)
	FAST_INTR(4,fastintr4)
	FAST_INTR(5,fastintr5)
	FAST_INTR(6,fastintr6)
	FAST_INTR(7,fastintr7)
	FAST_INTR(8,fastintr8)
	FAST_INTR(9,fastintr9)
	FAST_INTR(10,fastintr10)
	FAST_INTR(11,fastintr11)
	FAST_INTR(12,fastintr12)
	FAST_INTR(13,fastintr13)
	FAST_INTR(14,fastintr14)
	FAST_INTR(15,fastintr15)
	FAST_INTR(16,fastintr16)
	FAST_INTR(17,fastintr17)
	FAST_INTR(18,fastintr18)
	FAST_INTR(19,fastintr19)
	FAST_INTR(20,fastintr20)
	FAST_INTR(21,fastintr21)
	FAST_INTR(22,fastintr22)
	FAST_INTR(23,fastintr23)
#define	CLKINTR_PENDING	movl $1,CNAME(clkintr_pending)
	INTR(0,intr0, CLKINTR_PENDING)
	INTR(1,intr1,)
	INTR(2,intr2,)
	INTR(3,intr3,)
	INTR(4,intr4,)
	INTR(5,intr5,)
	INTR(6,intr6,)
	INTR(7,intr7,)
	INTR(8,intr8,)
	INTR(9,intr9,)
	INTR(10,intr10,)
	INTR(11,intr11,)
	INTR(12,intr12,)
	INTR(13,intr13,)
	INTR(14,intr14,)
	INTR(15,intr15,)
	INTR(16,intr16,)
	INTR(17,intr17,)
	INTR(18,intr18,)
	INTR(19,intr19,)
	INTR(20,intr20,)
	INTR(21,intr21,)
	INTR(22,intr22,)
	INTR(23,intr23,)
MCOUNT_LABEL(eintr)

/*
 * Executed by a CPU when it receives a RENDEZVOUS IPI from another CPU.
 *
 * - Calls the generic rendezvous action function.
 */
	.text
	SUPERALIGN_TEXT
	.globl	_Xrendezvous
_Xrendezvous:
	PUSH_FRAME
	movl	$KDSEL, %eax
	movl	%ax, %ds		/* use KERNEL data segment */
	movl	%ax, %es
	movl	$KPSEL, %eax
	movl	%ax, %fs

	call	smp_rendezvous_action

	movl	$0, lapic_eoi		/* End Of Interrupt to APIC */
	POP_FRAME
	iret
	
	
	.data
/*
 * Addresses of interrupt handlers.
 *	XresumeNN: Resumption addresses for HWIs.
98020267Sjoerg */ 98120267Sjoerg .globl _ihandlers 98244229Sdavidn_ihandlers: 98320267Sjoerg/* 98420267Sjoerg * used by: 98570486Sben * ipl.s: doreti_unpend 98620253Sjoerg */ 98770486Sben .long Xresume0, Xresume1, Xresume2, Xresume3 98820253Sjoerg .long Xresume4, Xresume5, Xresume6, Xresume7 98920253Sjoerg .long Xresume8, Xresume9, Xresume10, Xresume11 99020253Sjoerg .long Xresume12, Xresume13, Xresume14, Xresume15 99120253Sjoerg .long Xresume16, Xresume17, Xresume18, Xresume19 99244229Sdavidn .long Xresume20, Xresume21, Xresume22, Xresume23 99320253Sjoerg/* 99420253Sjoerg * used by: 99520253Sjoerg * ipl.s: doreti_unpend 99620253Sjoerg * apic_ipl.s: splz_unpend 99720253Sjoerg */ 99820253Sjoerg .long _swi_null, swi_net, _swi_null, _swi_null 99920253Sjoerg .long _swi_vm, _swi_null, _softclock, _swi_null 100020253Sjoerg 100120253Sjoergimasks: /* masks for interrupt handlers */ 100227831Sdavidn .space NHWI*4 /* padding; HWI masks are elsewhere */ 100320253Sjoerg 100420253Sjoerg .long SWI_TTY_MASK, SWI_NET_MASK, SWI_CAMNET_MASK, SWI_CAMBIO_MASK 100520253Sjoerg .long SWI_VM_MASK, 0, SWI_CLOCK_MASK, 0 100630259Scharnier 100720253Sjoerg/* active flag for lazy masking */ 100820253Sjoergiactive: 100920253Sjoerg .long 0 101020253Sjoerg 101120253Sjoerg#ifdef COUNT_XINVLTLB_HITS 101220253Sjoerg .globl _xhits 101320253Sjoerg_xhits: 101420253Sjoerg .space (NCPU * 4), 0 101520253Sjoerg#endif /* COUNT_XINVLTLB_HITS */ 101620253Sjoerg 101720253Sjoerg/* variables used by stop_cpus()/restart_cpus()/Xcpustop */ 101820253Sjoerg .globl _stopped_cpus, _started_cpus 101920253Sjoerg_stopped_cpus: 102020253Sjoerg .long 0 102120253Sjoerg_started_cpus: 102230259Scharnier .long 0 102320253Sjoerg 102420253Sjoerg#ifdef BETTER_CLOCK 102520253Sjoerg .globl _checkstate_probed_cpus 102620253Sjoerg_checkstate_probed_cpus: 102720253Sjoerg .long 0 102820253Sjoerg#endif /* BETTER_CLOCK */ 102920253Sjoerg .globl _checkstate_need_ast 103020253Sjoerg_checkstate_need_ast: 103120253Sjoerg .long 0 
_checkstate_pending_ast:
	.long	0
/* counters/state for IRQ forwarding, resched IPIs and cpustop restart hook */
	.globl CNAME(forward_irq_misscnt)
	.globl CNAME(forward_irq_toodeepcnt)
	.globl CNAME(forward_irq_hitcnt)
	.globl CNAME(resched_cpus)
	.globl CNAME(want_resched_cnt)
	.globl CNAME(cpuast_cnt)
	.globl CNAME(cpustop_restartfunc)
CNAME(forward_irq_misscnt):
	.long	0
CNAME(forward_irq_hitcnt):
	.long	0
CNAME(forward_irq_toodeepcnt):
	.long	0
CNAME(resched_cpus):
	.long	0
CNAME(want_resched_cnt):
	.long	0
CNAME(cpuast_cnt):
	.long	0
CNAME(cpustop_restartfunc):
	.long	0


	.globl	_apic_pin_trigger
_apic_pin_trigger:
	.long	0

	.text