/* apic_vector.s, revision 27007 */
/*
 * from: vector.s, 386BSD 0.1	unknown origin
 * $Id: apic_vector.s,v 1.1 1997/06/26 17:52:12 smp Exp smp $
 *
 * APIC interrupt entry points and tables for the SMP i386 kernel.
 * Provides the fast/normal hardware-interrupt vector macros (EOI via the
 * local APIC, lazy masking via the I/O APIC redirection table), the TLB
 * shootdown IPI handler, and the CPU-stop IPI handler.
 */


#include <machine/smptests.h>		/** TEST_CPUSTOP */

/* convert an absolute IRQ# into a bitmask */
#define IRQ_BIT(irq_num)	(1 << (irq_num))

/* make an index into the IO APIC from the IRQ# (redirection table entries
 * start at register 0x10, two 32-bit registers per entry) */
#define REDTBL_IDX(irq_num)	(0x10 + ((irq_num) * 2))

/*
 * 'lazy masking' code suggested by Bruce Evans <bde@zeta.org.au>
 *
 * If the IRQ is already being serviced (iactive bit set), mask it in the
 * I/O APIC, EOI it, mark it pending, and return from the interrupt
 * immediately; otherwise mark it active and fall through to the handler.
 */
#define MAYBE_MASK_IRQ(irq_num) \
	testl	$IRQ_BIT(irq_num),iactive ;	/* lazy masking */ \
	je	1f ;			/* NOT currently active */ \
	orl	$IRQ_BIT(irq_num),_imen ;	/* set the mask bit */ \
	movl	_ioapic,%ecx ;		/* ioapic[0] addr */ \
	movl	$REDTBL_IDX(irq_num),(%ecx) ;	/* write the index */ \
	movl	IOAPIC_WINDOW(%ecx),%eax ;	/* current value */ \
	orl	$IOART_INTMASK,%eax ;	/* set the mask */ \
	movl	%eax,IOAPIC_WINDOW(%ecx) ;	/* new value */ \
	movl	$lapic_eoi, %eax ;	/* EOI the local APIC */ \
	movl	$0, (%eax) ; \
	orl	$IRQ_BIT(irq_num), _ipending ; \
	REL_MPLOCK ;			/* SMP release global lock */ \
	popl	%es ; \
	popl	%ds ; \
	popal ; \
	addl	$4+4,%esp ;		/* pop dummy trap type + error code */ \
	iret ; \
; \
	ALIGN_TEXT ; \
1: ; \
	orl	$IRQ_BIT(irq_num),iactive


/*
 * Undo MAYBE_MASK_IRQ after the handler has run: clear the active bit
 * and, if the IRQ was masked while we serviced it, unmask it again in
 * the I/O APIC redirection table.
 */
#define MAYBE_UNMASK_IRQ(irq_num) \
	cli ;	/* must unmask _imen and icu atomically */ \
	andl	$~IRQ_BIT(irq_num),iactive ; \
	testl	$IRQ_BIT(irq_num),_imen ; \
	je	2f ; \
	andl	$~IRQ_BIT(irq_num),_imen ;	/* clear mask bit */ \
	movl	_ioapic,%ecx ;		/* ioapic[0] addr */ \
	movl	$REDTBL_IDX(irq_num),(%ecx) ;	/* write the index */ \
	movl	IOAPIC_WINDOW(%ecx),%eax ;	/* current value */ \
	andl	$~IOART_INTMASK,%eax ;	/* clear the mask */ \
	movl	%eax,IOAPIC_WINDOW(%ecx) ;	/* new value */ \
2: ; \
	sti ;	/* XXX _doreti repeats the cli/sti */


/*
 * Macros for interrupt entry, call to handler, and exit.
 */

/*
 * FAST_INTR: minimal-overhead vector.  Saves only call-used registers,
 * calls the handler with interrupts blocked, EOIs the local APIC, then
 * either returns directly or falls into _doreti if unmasked HWIs/SWIs
 * became pending.
 */
#define	FAST_INTR(irq_num, vec_name) \
	.text ; \
	SUPERALIGN_TEXT ; \
IDTVEC(vec_name) ; \
	pushl	%eax ;		/* save only call-used registers */ \
	pushl	%ecx ; \
	pushl	%edx ; \
	pushl	%ds ; \
	MAYBE_PUSHL_ES ; \
	movl	$KDSEL,%eax ; \
	movl	%ax,%ds ; \
	MAYBE_MOVW_AX_ES ; \
	FAKE_MCOUNT((4+ACTUALLY_PUSHED)*4(%esp)) ; \
	GET_MPLOCK ;		/* SMP Spin lock */ \
	pushl	_intr_unit + (irq_num) * 4 ; \
	call	*_intr_handler + (irq_num) * 4 ; /* do the work ASAP */ \
	movl	$lapic_eoi, %eax ;	/* EOI the local APIC */ \
	movl	$0, (%eax) ; \
	addl	$4,%esp ;	/* discard handler arg (unit) */ \
	incl	_cnt+V_INTR ;	/* book-keeping can wait */ \
	movl	_intr_countp + (irq_num) * 4,%eax ; \
	incl	(%eax) ; \
	movl	_cpl,%eax ;	/* unmasking pending HWIs or SWIs? */ \
	notl	%eax ; \
	andl	_ipending,%eax ; \
	jne	2f ;		/* yes, maybe handle them */ \
1: ; \
	MEXITCOUNT ; \
	REL_MPLOCK ;		/* SMP release global lock */ \
	MAYBE_POPL_ES ; \
	popl	%ds ; \
	popl	%edx ; \
	popl	%ecx ; \
	popl	%eax ; \
	iret ; \
; \
	ALIGN_TEXT ; \
2: ; \
	cmpb	$3,_intr_nesting_level ;	/* enough stack? */ \
	jae	1b ;		/* no, return */ \
	movl	_cpl,%eax ; \
	/* XXX next line is probably unnecessary now. */ \
	movl	$HWI_MASK|SWI_MASK,_cpl ;	/* limit nesting ... */ \
	incb	_intr_nesting_level ;	/* ... really limit it ... */ \
	sti ;			/* to do this as early as possible */ \
	MAYBE_POPL_ES ;		/* discard most of thin frame ... */ \
	popl	%ecx ;		/* ... original %ds ... */ \
	popl	%edx ; \
	xchgl	%eax,4(%esp) ;	/* orig %eax; save cpl */ \
	pushal ;		/* build fat frame (grrr) ... */ \
	pushl	%ecx ;		/* ... actually %ds ... */ \
	pushl	%es ; \
	movl	$KDSEL,%eax ; \
	movl	%ax,%es ; \
	movl	(2+8+0)*4(%esp),%ecx ;	/* %ecx from thin frame ... */ \
	movl	%ecx,(2+6)*4(%esp) ;	/* ... to fat frame ... */ \
	movl	(2+8+1)*4(%esp),%eax ;	/* ... cpl from thin frame */ \
	pushl	%eax ; \
	subl	$4,%esp ;	/* junk for unit number */ \
	MEXITCOUNT ; \
	jmp	_doreti

/*
 * INTR: full-frame vector for normal hardware interrupts.  Builds a trap
 * frame, applies lazy masking, EOIs the local APIC, raises _cpl by the
 * IRQ's interrupt mask, calls the handler with interrupts enabled, and
 * exits through _doreti.  Xresume<N> is the resumption entry used when a
 * masked-then-pending interrupt is replayed.
 */
#define	INTR(irq_num, vec_name) \
	.text ; \
	SUPERALIGN_TEXT ; \
IDTVEC(vec_name) ; \
	pushl	$0 ;		/* dummy error code */ \
	pushl	$0 ;		/* dummy trap type */ \
	pushal ; \
	pushl	%ds ;		/* save data and extra segments ... */ \
	pushl	%es ; \
	movl	$KDSEL,%eax ;	/* ... and reload with kernel's ... */ \
	movl	%ax,%ds ;	/* ... early for obsolete reasons */ \
	movl	%ax,%es ; \
	GET_MPLOCK ;		/* SMP Spin lock */ \
	MAYBE_MASK_IRQ(irq_num) ; \
	movl	$lapic_eoi, %eax ;	/* EOI the local APIC */ \
	movl	$0, (%eax) ; \
	movl	_cpl,%eax ; \
	testl	$IRQ_BIT(irq_num), %eax ;	/* blocked by current cpl? */ \
	jne	3f ;		/* yes, just mark it pending */ \
	incb	_intr_nesting_level ; \
__CONCAT(Xresume,irq_num): ; \
	FAKE_MCOUNT(12*4(%esp)) ;	/* XXX late to avoid dbl cnt */ \
	incl	_cnt+V_INTR ;	/* tally interrupts */ \
	movl	_intr_countp + (irq_num) * 4,%eax ; \
	incl	(%eax) ; \
	movl	_cpl,%eax ; \
	pushl	%eax ;		/* cpl restored by _doreti */ \
	pushl	_intr_unit + (irq_num) * 4 ; \
	orl	_intr_mask + (irq_num) * 4,%eax ; \
	movl	%eax,_cpl ; \
	sti ; \
	call	*_intr_handler + (irq_num) * 4 ; \
	MAYBE_UNMASK_IRQ(irq_num) ; \
	MEXITCOUNT ; \
	jmp	_doreti ; \
; \
	ALIGN_TEXT ; \
3: ; \
	/* XXX skip mcounting here to avoid double count */ \
	orl	$IRQ_BIT(irq_num), _ipending ; \
	REL_MPLOCK ;		/* SMP release global lock */ \
	popl	%es ; \
	popl	%ds ; \
	popal ; \
	addl	$4+4,%esp ;	/* pop dummy trap type + error code */ \
	iret

/*
 * Xinvltlb: TLB-shootdown IPI.  Reloading %cr3 flushes the TLB; the EOI
 * store uses an explicit %ss override so %ds need not be reloaded.
 */
	.text
	SUPERALIGN_TEXT
	.globl	_Xinvltlb
_Xinvltlb:
	pushl	%eax
	movl	%cr3, %eax		/* invalidate the TLB */
	movl	%eax, %cr3
	movl	$lapic_eoi, %eax
	ss				/* stack segment, avoid %ds load */
	movl	$0, (%eax)		/* End Of Interrupt to APIC */
	popl	%eax
	iret

#ifdef TEST_CPUSTOP
/*
 * Executed by a CPU when it receives an Xcpustop IPI from another CPU,
 *
 *  - Signals its receipt.
 *  - Waits for permission to restart.
 *  - Signals its restart.
 */

	.text
	SUPERALIGN_TEXT
	.globl _Xcpustop
_Xcpustop:
	pushl	%eax
	pushl	%ds			/* save current data segment */

#ifdef DEBUG_CPUSTOP
	movb	$0x50, %al
	outb	%al, $POST_ADDR
#endif

	movl	$KDSEL, %eax
	movl	%ax, %ds		/* use KERNEL data segment */

	movl	_cpuid, %eax		/* id */

	lock				/* atomic wrt other CPUs */
	btsl	%eax, _stopped_cpus	/* stopped_cpus |= (1<<id) */

#ifdef DEBUG_CPUSTOP
	movb	$0x51, %al
	outb	%al, $POST_ADDR
	movl	_cpuid, %eax		/* RESTORE id */
#endif

1:
	btl	%eax, _started_cpus	/* while (!(started_cpus & (1<<id))) */
	jnc	1b

#ifdef DEBUG_CPUSTOP
	movb	$0x52, %al
	outb	%al, $POST_ADDR
	movl	_cpuid, %eax		/* RESTORE id */
#endif

	lock
	btrl	%eax, _started_cpus	/* started_cpus &= ~(1<<id) */

#ifdef DEBUG_CPUSTOP
	movb	$0x53, %al
	outb	%al, $POST_ADDR
#endif

	movl	$lapic_eoi, %eax
	movl	$0, (%eax)		/* End Of Interrupt to APIC */

	popl	%ds			/* restore previous data segment */
	popl	%eax

	iret
#endif /* TEST_CPUSTOP */


/* Instantiate the 24 fast and 24 normal interrupt vectors. */
MCOUNT_LABEL(bintr)
	FAST_INTR(0,fastintr0)
	FAST_INTR(1,fastintr1)
	FAST_INTR(2,fastintr2)
	FAST_INTR(3,fastintr3)
	FAST_INTR(4,fastintr4)
	FAST_INTR(5,fastintr5)
	FAST_INTR(6,fastintr6)
	FAST_INTR(7,fastintr7)
	FAST_INTR(8,fastintr8)
	FAST_INTR(9,fastintr9)
	FAST_INTR(10,fastintr10)
	FAST_INTR(11,fastintr11)
	FAST_INTR(12,fastintr12)
	FAST_INTR(13,fastintr13)
	FAST_INTR(14,fastintr14)
	FAST_INTR(15,fastintr15)
	FAST_INTR(16,fastintr16)
	FAST_INTR(17,fastintr17)
	FAST_INTR(18,fastintr18)
	FAST_INTR(19,fastintr19)
	FAST_INTR(20,fastintr20)
	FAST_INTR(21,fastintr21)
	FAST_INTR(22,fastintr22)
	FAST_INTR(23,fastintr23)
	INTR(0,intr0)
	INTR(1,intr1)
	INTR(2,intr2)
	INTR(3,intr3)
	INTR(4,intr4)
	INTR(5,intr5)
	INTR(6,intr6)
	INTR(7,intr7)
	INTR(8,intr8)
	INTR(9,intr9)
	INTR(10,intr10)
	INTR(11,intr11)
	INTR(12,intr12)
	INTR(13,intr13)
	INTR(14,intr14)
	INTR(15,intr15)
	INTR(16,intr16)
	INTR(17,intr17)
	INTR(18,intr18)
	INTR(19,intr19)
	INTR(20,intr20)
	INTR(21,intr21)
	INTR(22,intr22)
	INTR(23,intr23)
MCOUNT_LABEL(eintr)

	.data
ihandlers:			/* addresses of interrupt handlers */
				/* actually resumption addresses for HWI's */
	.long	Xresume0, Xresume1, Xresume2, Xresume3
	.long	Xresume4, Xresume5, Xresume6, Xresume7
	.long	Xresume8, Xresume9, Xresume10, Xresume11
	.long	Xresume12, Xresume13, Xresume14, Xresume15
	.long	Xresume16, Xresume17, Xresume18, Xresume19
	.long	Xresume20, Xresume21, Xresume22, Xresume23
	.long	swi_tty, swi_net
	.long	0, 0, 0, 0
	.long	_softclock, swi_ast

imasks:				/* masks for interrupt handlers */
	.space	NHWI*4		/* padding; HWI masks are elsewhere */

	.long	SWI_TTY_MASK, SWI_NET_MASK
	.long	0, 0, 0, 0
	.long	SWI_CLOCK_MASK, SWI_AST_MASK

	.globl _ivectors
_ivectors:
	.long	_Xintr0, _Xintr1, _Xintr2, _Xintr3
	.long	_Xintr4, _Xintr5, _Xintr6, _Xintr7
	.long	_Xintr8, _Xintr9, _Xintr10, _Xintr11
	.long	_Xintr12, _Xintr13, _Xintr14, _Xintr15
	.long	_Xintr16, _Xintr17, _Xintr18, _Xintr19
	.long	_Xintr20, _Xintr21, _Xintr22, _Xintr23

/* active flag for lazy masking */
iactive:
	.long	0

#ifdef TEST_CPUSTOP
	.globl _stopped_cpus
_stopped_cpus:
	.long	0

	.globl _started_cpus
_started_cpus:
	.long	0
#endif /* TEST_CPUSTOP */


/*
 * Interrupt counters and names.  The format of these and the label names
 * must agree with what vmstat expects.  The tables are indexed by device
 * ids so that we don't have to move the names around as devices are
 * attached.
 */
#include "vector.h"
	.globl	_intrcnt, _eintrcnt
_intrcnt:
	.space	(NR_DEVICES + ICU_LEN) * 4
_eintrcnt:

	.globl	_intrnames, _eintrnames
_intrnames:
	.ascii	DEVICE_NAMES
	.asciz	"stray irq0"
	.asciz	"stray irq1"
	.asciz	"stray irq2"
	.asciz	"stray irq3"
	.asciz	"stray irq4"
	.asciz	"stray irq5"
	.asciz	"stray irq6"
	.asciz	"stray irq7"
	.asciz	"stray irq8"
	.asciz	"stray irq9"
	.asciz	"stray irq10"
	.asciz	"stray irq11"
	.asciz	"stray irq12"
	.asciz	"stray irq13"
	.asciz	"stray irq14"
	.asciz	"stray irq15"
	.asciz	"stray irq16"
	.asciz	"stray irq17"
	.asciz	"stray irq18"
	.asciz	"stray irq19"
	.asciz	"stray irq20"
	.asciz	"stray irq21"
	.asciz	"stray irq22"
	.asciz	"stray irq23"
_eintrnames:

	.text