/* apic_vector.s, revision 26812 */
/*
 * from: vector.s, 386BSD 0.1 unknown origin
 * $Id: apic_vector.s,v 1.2 1997/05/31 08:59:51 peter Exp $
 */


/* convert an absolute IRQ# into a bitmask */
#define IRQ_BIT(irq_num)	(1 << (irq_num))

/* make an index into the IO APIC from the IRQ# */
#define REDTBL_IDX(irq_num)	(0x10 + ((irq_num) * 2))

/*
 * 'lazy masking' code suggested by Bruce Evans <bde@zeta.org.au>
 *
 * If the IRQ is still marked active, a previous instance of the handler
 * has not finished: mask the source in the IO APIC redirection table,
 * EOI the local APIC, record the IRQ as pending and return immediately.
 * Otherwise just mark the IRQ active and fall through to the handler.
 */
#define MAYBE_MASK_IRQ(irq_num) \
	testl	$IRQ_BIT(irq_num),iactive ;	/* lazy masking */ \
	je	1f ;				/* NOT currently active */ \
	orl	$IRQ_BIT(irq_num),_imen ;	/* set the mask bit */ \
	movl	_ioapic,%ecx ;			/* ioapic[0] addr */ \
	movl	$REDTBL_IDX(irq_num),(%ecx) ;	/* write the index */ \
	movl	IOAPIC_WINDOW(%ecx),%eax ;	/* current value */ \
	orl	$IOART_INTMASK,%eax ;		/* set the mask */ \
	movl	%eax,IOAPIC_WINDOW(%ecx) ;	/* new value */ \
	movl	$lapic_eoi, %eax ;		/* signal EOI to local APIC */ \
	movl	$0, (%eax) ; \
	orl	$IRQ_BIT(irq_num), _ipending ;	/* remember for later replay */ \
	REL_MPLOCK ;				/* SMP release global lock */ \
	popl	%es ; \
	popl	%ds ; \
	popal ; \
	addl	$4+4,%esp ;			/* dummy trap type + error code */ \
	iret ; \
; \
	ALIGN_TEXT ; \
1: ; \
	orl	$IRQ_BIT(irq_num),iactive


/*
 * Undo MAYBE_MASK_IRQ: clear the active bit and, if the source was
 * masked in the IO APIC while the handler ran, unmask it again.
 */
#define MAYBE_UNMASK_IRQ(irq_num) \
	cli ;	/* must unmask _imen and icu atomically */ \
	andl	$~IRQ_BIT(irq_num),iactive ; \
	testl	$IRQ_BIT(irq_num),_imen ; \
	je	2f ;				/* not masked, nothing to do */ \
	andl	$~IRQ_BIT(irq_num),_imen ;	/* clear mask bit */ \
	movl	_ioapic,%ecx ;			/* ioapic[0] addr */ \
	movl	$REDTBL_IDX(irq_num),(%ecx) ;	/* write the index */ \
	movl	IOAPIC_WINDOW(%ecx),%eax ;	/* current value */ \
	andl	$~IOART_INTMASK,%eax ;		/* clear the mask */ \
	movl	%eax,IOAPIC_WINDOW(%ecx) ;	/* new value */ \
2: ; \
	sti ;	/* XXX _doreti repeats the cli/sti */


/*
 * Macros for interrupt entry, call to handler, and exit.
 */

/*
 * FAST_INTR: entry point for a "fast" interrupt.  Saves only the
 * call-used registers (thin frame), calls the handler with interrupts
 * still disabled, EOIs the local APIC, and normally returns directly.
 * If unmasked HWIs/SWIs became pending it converts the thin frame into
 * the fat trap frame that _doreti expects and jumps there instead.
 */
#define FAST_INTR(irq_num, vec_name) \
	.text ; \
	SUPERALIGN_TEXT ; \
IDTVEC(vec_name) ; \
	pushl	%eax ;		/* save only call-used registers */ \
	pushl	%ecx ; \
	pushl	%edx ; \
	pushl	%ds ; \
	MAYBE_PUSHL_ES ; \
	movl	$KDSEL,%eax ;	/* switch to kernel data selector */ \
	movl	%ax,%ds ; \
	MAYBE_MOVW_AX_ES ; \
	FAKE_MCOUNT((4+ACTUALLY_PUSHED)*4(%esp)) ; \
	GET_MPLOCK ;		/* SMP Spin lock */ \
	pushl	_intr_unit + (irq_num) * 4 ; \
	call	*_intr_handler + (irq_num) * 4 ; /* do the work ASAP */ \
	movl	$lapic_eoi, %eax ;	/* signal EOI to local APIC */ \
	movl	$0, (%eax) ; \
	addl	$4,%esp ;	/* pop unit argument */ \
	incl	_cnt+V_INTR ;	/* book-keeping can wait */ \
	movl	_intr_countp + (irq_num) * 4,%eax ; \
	incl	(%eax) ; \
	movl	_cpl,%eax ;	/* unmasking pending HWIs or SWIs? */ \
	notl	%eax ; \
	andl	_ipending,%eax ; \
	jne	2f ;		/* yes, maybe handle them */ \
1: ; \
	MEXITCOUNT ; \
	REL_MPLOCK ;		/* SMP release global lock */ \
	MAYBE_POPL_ES ; \
	popl	%ds ; \
	popl	%edx ; \
	popl	%ecx ; \
	popl	%eax ; \
	iret ; \
; \
	ALIGN_TEXT ; \
2: ; \
	cmpb	$3,_intr_nesting_level ;	/* enough stack? */ \
	jae	1b ;		/* no, return */ \
	movl	_cpl,%eax ; \
	/* XXX next line is probably unnecessary now. */ \
	movl	$HWI_MASK|SWI_MASK,_cpl ;	/* limit nesting ... */ \
	incb	_intr_nesting_level ;	/* ... really limit it ... */ \
	sti ;			/* to do this as early as possible */ \
	MAYBE_POPL_ES ;		/* discard most of thin frame ... */ \
	popl	%ecx ;		/* ... original %ds ... */ \
	popl	%edx ; \
	xchgl	%eax,4(%esp) ;	/* orig %eax; save cpl */ \
	pushal ;		/* build fat frame (grrr) ... */ \
	pushl	%ecx ;		/* ... actually %ds ... */ \
	pushl	%es ; \
	movl	$KDSEL,%eax ; \
	movl	%ax,%es ; \
	movl	(2+8+0)*4(%esp),%ecx ;	/* %ecx from thin frame ... */ \
	movl	%ecx,(2+6)*4(%esp) ;	/* ... to fat frame ... */ \
	movl	(2+8+1)*4(%esp),%eax ;	/* ... cpl from thin frame */ \
	pushl	%eax ; \
	subl	$4,%esp ;	/* junk for unit number */ \
	MEXITCOUNT ; \
	jmp	_doreti

/*
 * INTR: entry point for a normal interrupt.  Builds the full trap
 * frame immediately, performs lazy masking, raises _cpl by the
 * interrupt's mask while the handler runs, and exits via _doreti.
 * Xresume<irq_num> is the point _doreti re-enters to replay an IRQ
 * that was recorded in _ipending.
 */
#define INTR(irq_num, vec_name) \
	.text ; \
	SUPERALIGN_TEXT ; \
IDTVEC(vec_name) ; \
	pushl	$0 ;		/* dummy error code */ \
	pushl	$0 ;		/* dummy trap type */ \
	pushal ; \
	pushl	%ds ;		/* save data and extra segments ... */ \
	pushl	%es ; \
	movl	$KDSEL,%eax ;	/* ... and reload with kernel's ... */ \
	movl	%ax,%ds ;	/* ... early for obsolete reasons */ \
	movl	%ax,%es ; \
	GET_MPLOCK ;		/* SMP Spin lock */ \
	MAYBE_MASK_IRQ(irq_num) ; \
	movl	$lapic_eoi, %eax ;	/* signal EOI to local APIC */ \
	movl	$0, (%eax) ; \
	movl	_cpl,%eax ; \
	testl	$IRQ_BIT(irq_num), %eax ;	/* masked at current priority? */ \
	jne	3f ; \
	incb	_intr_nesting_level ; \
__CONCAT(Xresume,irq_num): ; \
	FAKE_MCOUNT(12*4(%esp)) ;	/* XXX late to avoid dbl cnt */ \
	incl	_cnt+V_INTR ;	/* tally interrupts */ \
	movl	_intr_countp + (irq_num) * 4,%eax ; \
	incl	(%eax) ; \
	movl	_cpl,%eax ;	/* save old priority for _doreti ... */ \
	pushl	%eax ; \
	pushl	_intr_unit + (irq_num) * 4 ; \
	orl	_intr_mask + (irq_num) * 4,%eax ;	/* ... and raise it */ \
	movl	%eax,_cpl ; \
	sti ; \
	call	*_intr_handler + (irq_num) * 4 ; \
	MAYBE_UNMASK_IRQ(irq_num) ; \
	MEXITCOUNT ; \
	jmp	_doreti ; \
; \
	ALIGN_TEXT ; \
3: ; \
	/* XXX skip mcounting here to avoid double count */ \
	orl	$IRQ_BIT(irq_num), _ipending ;	/* replay when unmasked */ \
	REL_MPLOCK ;		/* SMP release global lock */ \
	popl	%es ; \
	popl	%ds ; \
	popal ; \
	addl	$4+4,%esp ;	/* dummy trap type + error code */ \
	iret

/*
 * Xinvltlb: inter-processor TLB shootdown.  Reloads %cr3 to flush this
 * CPU's TLB, then EOIs the local APIC.  Uses an %ss-relative store so
 * %ds never has to be loaded.
 */
	.text
	SUPERALIGN_TEXT
	.globl	_Xinvltlb
_Xinvltlb:
	pushl	%eax
	movl	%cr3, %eax		/* invalidate the TLB */
	movl	%eax, %cr3
	movl	$lapic_eoi, %eax
	ss				/* stack segment, avoid %ds load */
	movl	$0, (%eax)		/* End Of Interrupt to APIC */
	popl	%eax
	iret

MCOUNT_LABEL(bintr)
	FAST_INTR(0,fastintr0)
	FAST_INTR(1,fastintr1)
	FAST_INTR(2,fastintr2)
	FAST_INTR(3,fastintr3)
	FAST_INTR(4,fastintr4)
FAST_INTR(5,fastintr5) 188 FAST_INTR(6,fastintr6) 189 FAST_INTR(7,fastintr7) 190 FAST_INTR(8,fastintr8) 191 FAST_INTR(9,fastintr9) 192 FAST_INTR(10,fastintr10) 193 FAST_INTR(11,fastintr11) 194 FAST_INTR(12,fastintr12) 195 FAST_INTR(13,fastintr13) 196 FAST_INTR(14,fastintr14) 197 FAST_INTR(15,fastintr15) 198 FAST_INTR(16,fastintr16) 199 FAST_INTR(17,fastintr17) 200 FAST_INTR(18,fastintr18) 201 FAST_INTR(19,fastintr19) 202 FAST_INTR(20,fastintr20) 203 FAST_INTR(21,fastintr21) 204 FAST_INTR(22,fastintr22) 205 FAST_INTR(23,fastintr23) 206 INTR(0,intr0) 207 INTR(1,intr1) 208 INTR(2,intr2) 209 INTR(3,intr3) 210 INTR(4,intr4) 211 INTR(5,intr5) 212 INTR(6,intr6) 213 INTR(7,intr7) 214 INTR(8,intr8) 215 INTR(9,intr9) 216 INTR(10,intr10) 217 INTR(11,intr11) 218 INTR(12,intr12) 219 INTR(13,intr13) 220 INTR(14,intr14) 221 INTR(15,intr15) 222 INTR(16,intr16) 223 INTR(17,intr17) 224 INTR(18,intr18) 225 INTR(19,intr19) 226 INTR(20,intr20) 227 INTR(21,intr21) 228 INTR(22,intr22) 229 INTR(23,intr23) 230MCOUNT_LABEL(eintr) 231 232 .data 233ihandlers: /* addresses of interrupt handlers */ 234 /* actually resumption addresses for HWI's */ 235 .long Xresume0, Xresume1, Xresume2, Xresume3 236 .long Xresume4, Xresume5, Xresume6, Xresume7 237 .long Xresume8, Xresume9, Xresume10, Xresume11 238 .long Xresume12, Xresume13, Xresume14, Xresume15 239 .long Xresume16, Xresume17, Xresume18, Xresume19 240 .long Xresume20, Xresume21, Xresume22, Xresume23 241 .long swi_tty, swi_net 242 .long 0, 0, 0, 0 243 .long _softclock, swi_ast 244 245imasks: /* masks for interrupt handlers */ 246 .space NHWI*4 /* padding; HWI masks are elsewhere */ 247 248 .long SWI_TTY_MASK, SWI_NET_MASK 249 .long 0, 0, 0, 0 250 .long SWI_CLOCK_MASK, SWI_AST_MASK 251 252 .globl _ivectors 253_ivectors: 254 .long _Xintr0, _Xintr1, _Xintr2, _Xintr3 255 .long _Xintr4, _Xintr5, _Xintr6, _Xintr7 256 .long _Xintr8, _Xintr9, _Xintr10, _Xintr11 257 .long _Xintr12, _Xintr13, _Xintr14, _Xintr15 258 .long _Xintr16, _Xintr17, _Xintr18, 
_Xintr19 259 .long _Xintr20, _Xintr21, _Xintr22, _Xintr23 260 261/* active flag for lazy masking */ 262iactive: 263 .long 0 264 265 266/* 267 * Interrupt counters and names. The format of these and the label names 268 * must agree with what vmstat expects. The tables are indexed by device 269 * ids so that we don't have to move the names around as devices are 270 * attached. 271 */ 272#include "vector.h" 273 .globl _intrcnt, _eintrcnt 274_intrcnt: 275 .space (NR_DEVICES + ICU_LEN) * 4 276_eintrcnt: 277 278 .globl _intrnames, _eintrnames 279_intrnames: 280 .ascii DEVICE_NAMES 281 .asciz "stray irq0" 282 .asciz "stray irq1" 283 .asciz "stray irq2" 284 .asciz "stray irq3" 285 .asciz "stray irq4" 286 .asciz "stray irq5" 287 .asciz "stray irq6" 288 .asciz "stray irq7" 289 .asciz "stray irq8" 290 .asciz "stray irq9" 291 .asciz "stray irq10" 292 .asciz "stray irq11" 293 .asciz "stray irq12" 294 .asciz "stray irq13" 295 .asciz "stray irq14" 296 .asciz "stray irq15" 297 .asciz "stray irq16" 298 .asciz "stray irq17" 299 .asciz "stray irq18" 300 .asciz "stray irq19" 301 .asciz "stray irq20" 302 .asciz "stray irq21" 303 .asciz "stray irq22" 304 .asciz "stray irq23" 305_eintrnames: 306 307 .text 308