apic_vector.s revision 35393
/*
 * from: vector.s, 386BSD 0.1 unknown origin
 * $Id: apic_vector.s,v 1.28 1998/03/05 21:45:53 tegge Exp $
 *
 * Interrupt entry/exit code for SMP i386 kernels using the local and
 * I/O APICs: "fast" interrupt stubs, normal hardware-interrupt stubs
 * with lazy masking, and the inter-processor interrupt (IPI) handlers.
 */


#include <machine/apic.h>
#include <machine/smp.h>

#include "i386/isa/intr_machdep.h"


#ifdef FAST_SIMPLELOCK

/* Acquire/release a dedicated spin lock for fast interrupts. */
#define GET_FAST_INTR_LOCK						\
	pushl	$_fast_intr_lock ;		/* address of lock */	\
	call	_s_lock ;			/* MP-safe */		\
	addl	$4,%esp

#define REL_FAST_INTR_LOCK						\
	pushl	$_fast_intr_lock ;		/* address of lock */	\
	call	_s_unlock ;			/* MP-safe */		\
	addl	$4,%esp

#else /* FAST_SIMPLELOCK */

/* Fast interrupts take the Giant ISR lock instead. */
#define GET_FAST_INTR_LOCK						\
	call	_get_isrlock

#define REL_FAST_INTR_LOCK						\
	pushl	$_mp_lock ;			/* GIANT_LOCK */	\
	call	_MPrellock ;					 	\
	add	$4, %esp

#endif /* FAST_SIMPLELOCK */

/* convert an absolute IRQ# into a bitmask */
#define IRQ_BIT(irq_num)	(1 << (irq_num))

/* make an index into the IO APIC from the IRQ# */
#define REDTBL_IDX(irq_num)	(0x10 + ((irq_num) * 2))


/*
 * Macros for interrupt entry, call to handler, and exit.
 */

#ifdef FAST_WITHOUTCPL

/*
 * FAST_INTR(irq_num, vec_name):
 * "Fast" interrupt stub.  Saves only the call-used registers, runs the
 * handler immediately at interrupt time, then EOIs the local APIC.
 * This variant never examines the cpl/ipending soft-masking state.
 */
#define	FAST_INTR(irq_num, vec_name)					\
	.text ;								\
	SUPERALIGN_TEXT ;						\
IDTVEC(vec_name) ;							\
	pushl	%eax ;		/* save only call-used registers */	\
	pushl	%ecx ;							\
	pushl	%edx ;							\
	pushl	%ds ;							\
	MAYBE_PUSHL_ES ;						\
	movl	$KDSEL,%eax ;						\
	movl	%ax,%ds ;						\
	MAYBE_MOVW_AX_ES ;						\
	FAKE_MCOUNT((4+ACTUALLY_PUSHED)*4(%esp)) ;			\
	pushl	_intr_unit + (irq_num) * 4 ;				\
	GET_FAST_INTR_LOCK ;						\
	call	*_intr_handler + (irq_num) * 4 ; /* do the work ASAP */ \
	REL_FAST_INTR_LOCK ;						\
	addl	$4, %esp ;						\
	movl	$0, lapic_eoi ;						\
	lock ; 								\
	incl	_cnt+V_INTR ;	/* book-keeping can wait */		\
	movl	_intr_countp + (irq_num) * 4, %eax ;			\
	lock ; 								\
	incl	(%eax) ;						\
	MEXITCOUNT ;							\
	MAYBE_POPL_ES ;							\
	popl	%ds ;							\
	popl	%edx ;							\
	popl	%ecx ;							\
	popl	%eax ;							\
	iret

#else /* FAST_WITHOUTCPL */

/*
 * FAST_INTR(irq_num, vec_name):
 * As above, but after the handler runs check whether ~cpl & ipending
 * shows newly unmasked HWIs/SWIs; if so, and there is nesting headroom,
 * convert the thin frame into a full trap frame and exit via _doreti.
 */
#define	FAST_INTR(irq_num, vec_name)					\
	.text ;								\
	SUPERALIGN_TEXT ;						\
IDTVEC(vec_name) ;							\
	pushl	%eax ;		/* save only call-used registers */	\
	pushl	%ecx ;							\
	pushl	%edx ;							\
	pushl	%ds ;							\
	MAYBE_PUSHL_ES ;						\
	movl	$KDSEL, %eax ;						\
	movl	%ax, %ds ;						\
	MAYBE_MOVW_AX_ES ;						\
	FAKE_MCOUNT((4+ACTUALLY_PUSHED)*4(%esp)) ;			\
	GET_FAST_INTR_LOCK ;						\
	pushl	_intr_unit + (irq_num) * 4 ;				\
	call	*_intr_handler + (irq_num) * 4 ; /* do the work ASAP */ \
	addl	$4, %esp ;						\
	movl	$0, lapic_eoi ;						\
	lock ; 								\
	incl	_cnt+V_INTR ;	/* book-keeping can wait */		\
	movl	_intr_countp + (irq_num) * 4,%eax ;			\
	lock ; 								\
	incl	(%eax) ;						\
	movl	_cpl, %eax ;	/* unmasking pending HWIs or SWIs? */	\
	notl	%eax ;							\
	andl	_ipending, %eax ;					\
	jne	2f ; 		/* yes, maybe handle them */		\
1: ;									\
	MEXITCOUNT ;							\
	REL_FAST_INTR_LOCK ;						\
	MAYBE_POPL_ES ;							\
	popl	%ds ;							\
	popl	%edx ;							\
	popl	%ecx ;							\
	popl	%eax ;							\
	iret ;								\
;									\
	ALIGN_TEXT ;							\
2: ;									\
	cmpb	$3, _intr_nesting_level ;	/* enough stack? */	\
	jae	1b ;		/* no, return */			\
	movl	_cpl, %eax ;						\
	/* XXX next line is probably unnecessary now. */		\
	movl	$HWI_MASK|SWI_MASK, _cpl ;	/* limit nesting ... */	\
	lock ; 								\
	incb	_intr_nesting_level ;	/* ... really limit it ... */	\
	sti ;			/* to do this as early as possible */	\
	MAYBE_POPL_ES ;		/* discard most of thin frame ... */	\
	popl	%ecx ;		/* ... original %ds ... */		\
	popl	%edx ;							\
	xchgl	%eax, 4(%esp) ;	/* orig %eax; save cpl */		\
	pushal ;		/* build fat frame (grrr) ... */	\
	pushl	%ecx ;		/* ... actually %ds ... */		\
	pushl	%es ;							\
	movl	$KDSEL, %eax ;						\
	movl	%ax, %es ;						\
	movl	(2+8+0)*4(%esp), %ecx ;	/* %ecx from thin frame ... */	\
	movl	%ecx, (2+6)*4(%esp) ;	/* ... to fat frame ... */	\
	movl	(2+8+1)*4(%esp), %eax ;	/* ... cpl from thin frame */	\
	pushl	%eax ;							\
	subl	$4, %esp ;	/* junk for unit number */		\
	MEXITCOUNT ;							\
	jmp	_doreti

#endif /** FAST_WITHOUTCPL */


/*
 * Trap-frame helpers: build/tear down the frame layout that _doreti
 * and the C interrupt code expect (dummy trap and error codes, all
 * general registers, then %ds and %es).
 */
#define PUSH_FRAME							\
	pushl	$0 ;		/* dummy error code */			\
	pushl	$0 ;		/* dummy trap type */			\
	pushal ;							\
	pushl	%ds ;		/* save data and extra segments ... */	\
	pushl	%es

#define POP_FRAME							\
	popl	%es ;							\
	popl	%ds ;							\
	popal ;								\
	addl	$4+4,%esp

/*
 * Mask the IRQ's redirection entry in IO APIC #0 (and record the fact
 * in _apic_imen), unless it is already masked.
 */
#define MASK_IRQ(irq_num)						\
	IMASK_LOCK ;				/* into critical reg */	\
	testl	$IRQ_BIT(irq_num), _apic_imen ;				\
	jne	7f ;			/* masked, don't mask */	\
	orl	$IRQ_BIT(irq_num), _apic_imen ;	/* set the mask bit */	\
	movl	_ioapic, %ecx ;			/* ioapic[0] addr */	\
	movl	$REDTBL_IDX(irq_num), (%ecx) ;	/* write the index */	\
	movl	IOAPIC_WINDOW(%ecx), %eax ;	/* current value */	\
	orl	$IOART_INTMASK, %eax ;		/* set the mask */	\
	movl	%eax, IOAPIC_WINDOW(%ecx) ;	/* new value */		\
7: ;					/* already masked */		\
	IMASK_UNLOCK
/*
 * Test to see whether we are handling an edge or level triggered INT.
 * Level-triggered INTs must still be masked as we don't clear the source,
 * and the EOI cycle would cause redundant INTs to occur.
 */
#define MASK_LEVEL_IRQ(irq_num)						\
	testl	$IRQ_BIT(irq_num), _apic_pin_trigger ;			\
	jz	9f ;			/* edge, don't mask */		\
	MASK_IRQ(irq_num) ;						\
9:


#ifdef APIC_INTR_REORDER
/*
 * EOI_IRQ(irq_num): issue an EOI to the local APIC, but only if this
 * IRQ's in-service bit is actually set (looked up via the per-IRQ
 * ISR-bit location table when reordering is enabled).
 */
#define EOI_IRQ(irq_num)						\
	movl	_apic_isrbit_location + 8 * (irq_num), %eax ;		\
	movl	(%eax), %eax ;						\
	testl	_apic_isrbit_location + 4 + 8 * (irq_num), %eax ;	\
	jz	9f ;				/* not active */	\
	movl	$0, lapic_eoi ;						\
	APIC_ITRACE(apic_itrace_eoi, irq_num, APIC_ITRACE_EOI) ;	\
9:

#else
/* EOI_IRQ(irq_num): EOI the local APIC if the ISR bit is set. */
#define EOI_IRQ(irq_num)						\
	testl	$IRQ_BIT(irq_num), lapic_isr1;				\
	jz	9f ;				/* not active */	\
	movl	$0, lapic_eoi;						\
	APIC_ITRACE(apic_itrace_eoi, irq_num, APIC_ITRACE_EOI) ;	\
9:
#endif


/*
 * Test to see if the source is currently masked, clear if so.
 */
#define UNMASK_IRQ(irq_num)						\
	IMASK_LOCK ;				/* into critical reg */	\
	testl	$IRQ_BIT(irq_num), _apic_imen ;				\
	je	7f ;			/* bit clear, not masked */	\
	andl	$~IRQ_BIT(irq_num), _apic_imen ;/* clear mask bit */	\
	movl	_ioapic,%ecx ;			/* ioapic[0]addr */	\
	movl	$REDTBL_IDX(irq_num),(%ecx) ;	/* write the index */	\
	movl	IOAPIC_WINDOW(%ecx),%eax ;	/* current value */	\
	andl	$~IOART_INTMASK,%eax ;		/* clear the mask */	\
	movl	%eax,IOAPIC_WINDOW(%ecx) ;	/* new value */		\
7: ;									\
	IMASK_UNLOCK

#ifdef INTR_SIMPLELOCK
/* With per-ISR simple locks the Giant lock is taken late, if at all. */
#define ENLOCK
#define DELOCK
#define LATELOCK	call	_get_isrlock
#else
/* Try for the Giant ISR lock on entry; 3f = "some other CPU has it". */
#define ENLOCK								\
	ISR_TRYLOCK ;		/* XXX this is going away... */		\
	testl	%eax, %eax ;			/* did we get it? */	\
	jz	3f
#define DELOCK		ISR_RELLOCK
#define LATELOCK
#endif

#ifdef APIC_INTR_DIAGNOSTIC
#ifdef APIC_INTR_DIAGNOSTIC_IRQ
/*
 * log_intr_event: record a (cpuid << 8 | event-id) entry in the
 * circular apic_itrace debug buffer, under its own spin lock with
 * interrupts disabled.  The event id is passed on the stack.
 */
log_intr_event:
	pushf
	cli
	pushl	$CNAME(apic_itrace_debuglock)
	call	_s_lock_np
	addl	$4, %esp
	movl	CNAME(apic_itrace_debugbuffer_idx), %ecx
	andl	$32767, %ecx
	movl	_cpuid, %eax
	shll	$8, %eax
	orl	8(%esp), %eax
	movw	%ax, CNAME(apic_itrace_debugbuffer)(,%ecx,2)
	incl	%ecx
	andl	$32767, %ecx
	movl	%ecx, CNAME(apic_itrace_debugbuffer_idx)
	pushl	$CNAME(apic_itrace_debuglock)
	call	_s_unlock_np
	addl	$4, %esp
	popf
	ret


/*
 * APIC_ITRACE: bump the per-IRQ event counter, and for the one IRQ
 * under scrutiny also log the event into the debug buffer.
 */
#define APIC_ITRACE(name, irq_num, id)					\
	lock ;					/* MP-safe */		\
	incl	CNAME(name) + (irq_num) * 4 ;				\
	pushl	%eax ;							\
	pushl	%ecx ;							\
	pushl	%edx ;							\
	movl	$(irq_num), %eax ;					\
	cmpl	$APIC_INTR_DIAGNOSTIC_IRQ, %eax ;			\
	jne	7f ;							\
	pushl	$id ;							\
	call	log_intr_event ;					\
	addl	$4, %esp ;						\
7: ;									\
	popl	%edx ;							\
	popl	%ecx ;							\
	popl	%eax
#else
/* Diagnostics without per-IRQ tracing: just count the event. */
#define APIC_ITRACE(name, irq_num, id)					\
	lock ;					/* MP-safe */		\
	incl	CNAME(name) + (irq_num) * 4
#endif

/* Event ids logged by the tracing above. */
#define APIC_ITRACE_ENTER 1
#define APIC_ITRACE_EOI 2
#define APIC_ITRACE_TRYISRLOCK 3
#define APIC_ITRACE_GOTISRLOCK 4
#define APIC_ITRACE_ENTER2 5
#define APIC_ITRACE_LEAVE 6
#define APIC_ITRACE_UNMASK 7
#define APIC_ITRACE_ACTIVE 8
#define APIC_ITRACE_MASKED 9
#define APIC_ITRACE_NOISRLOCK 10
#define APIC_ITRACE_MASKED2 11
#define APIC_ITRACE_SPLZ 12
#define APIC_ITRACE_DORETI 13

#else
#define APIC_ITRACE(name, irq_num, id)
#endif

#ifdef CPL_AND_CML

/*
 * INTR(irq_num, vec_name):
 * Normal hardware-interrupt stub (cpl+cml variant).  Uses the iactive
 * bitmap for lazy masking, EOIs/masks the source as needed, takes the
 * ISR lock, checks both _cpl and _cml soft masks, runs the handler
 * with _cml raised, then unmasks and exits through _doreti.
 */
#define	INTR(irq_num, vec_name)						\
	.text ;								\
	SUPERALIGN_TEXT ;						\
/* _XintrNN: entry point used by IDT/HWIs & splz_unpend via _vec[]. */	\
IDTVEC(vec_name) ;							\
	PUSH_FRAME ;							\
	movl	$KDSEL, %eax ;	/* reload with kernel's data segment */	\
	movl	%ax, %ds ;						\
	movl	%ax, %es ;						\
;									\
	APIC_ITRACE(apic_itrace_enter, irq_num, APIC_ITRACE_ENTER) ;	\
	lock ;					/* MP-safe */		\
	btsl	$(irq_num), iactive ;		/* lazy masking */	\
	jc	1f ;				/* already active */	\
;									\
	MASK_LEVEL_IRQ(irq_num) ;					\
	EOI_IRQ(irq_num) ;						\
0: ;									\
	APIC_ITRACE(apic_itrace_tryisrlock, irq_num, APIC_ITRACE_TRYISRLOCK) ;\
	ENLOCK ;							\
;									\
	APIC_ITRACE(apic_itrace_gotisrlock, irq_num, APIC_ITRACE_GOTISRLOCK) ;\
	AVCPL_LOCK ;				/* MP-safe */		\
	testl	$IRQ_BIT(irq_num), _cpl ;				\
	jne	2f ;				/* this INT masked */	\
	testl	$IRQ_BIT(irq_num), _cml ;				\
	jne	2f ;				/* this INT masked */	\
	orl	$IRQ_BIT(irq_num), _cil ;				\
	AVCPL_UNLOCK ;							\
;									\
	incb	_intr_nesting_level ;					\
;									\
	/* entry point used by doreti_unpend for HWIs. */		\
__CONCAT(Xresume,irq_num): ;						\
	FAKE_MCOUNT(12*4(%esp)) ;		/* XXX avoid dbl cnt */	\
	lock ;	incl	_cnt+V_INTR ;		/* tally interrupts */	\
	movl	_intr_countp + (irq_num) * 4, %eax ;			\
	lock ;	incl	(%eax) ;					\
;									\
	AVCPL_LOCK ;				/* MP-safe */		\
	movl	_cml, %eax ;						\
	pushl	%eax ;							\
	orl	_intr_mask + (irq_num) * 4, %eax ;			\
	movl	%eax, _cml ;						\
	AVCPL_UNLOCK ;							\
;									\
	pushl	_intr_unit + (irq_num) * 4 ;				\
	incl	_inside_intr ;						\
	APIC_ITRACE(apic_itrace_enter2, irq_num, APIC_ITRACE_ENTER2) ;	\
	sti ;								\
	call	*_intr_handler + (irq_num) * 4 ;			\
	cli ;								\
	APIC_ITRACE(apic_itrace_leave, irq_num, APIC_ITRACE_LEAVE) ;	\
	decl	_inside_intr ;						\
;									\
	lock ;	andl	$~IRQ_BIT(irq_num), iactive ;			\
	lock ;	andl	$~IRQ_BIT(irq_num), _cil ;			\
	UNMASK_IRQ(irq_num) ;						\
	APIC_ITRACE(apic_itrace_unmask, irq_num, APIC_ITRACE_UNMASK) ;	\
	sti ;				/* doreti repeats cli/sti */	\
	MEXITCOUNT ;							\
	LATELOCK ;							\
	jmp	_doreti ;						\
;									\
	ALIGN_TEXT ;							\
1: ;						/* active */		\
	APIC_ITRACE(apic_itrace_active, irq_num, APIC_ITRACE_ACTIVE) ;	\
	MASK_IRQ(irq_num) ;						\
	EOI_IRQ(irq_num) ;						\
	AVCPL_LOCK ;				/* MP-safe */		\
	orl	$IRQ_BIT(irq_num), _ipending ;				\
	AVCPL_UNLOCK ;							\
	lock ;								\
	btsl	$(irq_num), iactive ;		/* still active */	\
	jnc	0b ;				/* retry */		\
	POP_FRAME ;							\
	iret ;								\
;									\
	ALIGN_TEXT ;							\
2: ;						/* masked by cpl|cml */	\
	APIC_ITRACE(apic_itrace_masked, irq_num, APIC_ITRACE_MASKED) ;	\
	orl	$IRQ_BIT(irq_num), _ipending ;				\
	AVCPL_UNLOCK ;							\
	DELOCK ;		/* XXX this is going away... */		\
	POP_FRAME ;							\
	iret ;								\
	ALIGN_TEXT ;							\
3: ;					/* other cpu has isr lock */	\
	APIC_ITRACE(apic_itrace_noisrlock, irq_num, APIC_ITRACE_NOISRLOCK) ;\
	AVCPL_LOCK ;				/* MP-safe */		\
	orl	$IRQ_BIT(irq_num), _ipending ;				\
	testl	$IRQ_BIT(irq_num), _cpl ;				\
	jne	4f ;				/* this INT masked */	\
	testl	$IRQ_BIT(irq_num), _cml ;				\
	jne	4f ;				/* this INT masked */	\
	orl	$IRQ_BIT(irq_num), _cil ;				\
	AVCPL_UNLOCK ;							\
	call	forward_irq ;	/* forward irq to lock holder */	\
	POP_FRAME ;				/* and return */	\
	iret ;								\
	ALIGN_TEXT ;							\
4: ;						/* blocked */		\
	APIC_ITRACE(apic_itrace_masked2, irq_num, APIC_ITRACE_MASKED2) ;\
	AVCPL_UNLOCK ;							\
	POP_FRAME ;				/* and return */	\
	iret

#else /* CPL_AND_CML */


/*
 * INTR(irq_num, vec_name):
 * Normal hardware-interrupt stub (cpl-only variant).  Same lazy-masking
 * scheme as above, but only the _cpl soft mask is consulted and the
 * handler runs with _cpl raised by this IRQ's interrupt mask.
 */
#define	INTR(irq_num, vec_name)						\
	.text ;								\
	SUPERALIGN_TEXT ;						\
/* _XintrNN: entry point used by IDT/HWIs & splz_unpend via _vec[]. */	\
IDTVEC(vec_name) ;							\
	PUSH_FRAME ;							\
	movl	$KDSEL, %eax ;	/* reload with kernel's data segment */	\
	movl	%ax, %ds ;						\
	movl	%ax, %es ;						\
;									\
	APIC_ITRACE(apic_itrace_enter, irq_num, APIC_ITRACE_ENTER) ;	\
	lock ;					/* MP-safe */		\
	btsl	$(irq_num), iactive ;		/* lazy masking */	\
	jc	1f ;				/* already active */	\
;									\
	MASK_LEVEL_IRQ(irq_num) ;					\
	EOI_IRQ(irq_num) ;						\
0: ;									\
	APIC_ITRACE(apic_itrace_tryisrlock, irq_num, APIC_ITRACE_TRYISRLOCK) ;\
	ISR_TRYLOCK ;		/* XXX this is going away... */		\
	testl	%eax, %eax ;			/* did we get it? */	\
	jz	3f ;				/* no */		\
;									\
	APIC_ITRACE(apic_itrace_gotisrlock, irq_num, APIC_ITRACE_GOTISRLOCK) ;\
	AVCPL_LOCK ;				/* MP-safe */		\
	testl	$IRQ_BIT(irq_num), _cpl ;				\
	jne	2f ;				/* this INT masked */	\
	AVCPL_UNLOCK ;							\
;									\
	incb	_intr_nesting_level ;					\
;									\
	/* entry point used by doreti_unpend for HWIs. */		\
__CONCAT(Xresume,irq_num): ;						\
	FAKE_MCOUNT(12*4(%esp)) ;		/* XXX avoid dbl cnt */	\
	lock ;	incl	_cnt+V_INTR ;		/* tally interrupts */	\
	movl	_intr_countp + (irq_num) * 4, %eax ;			\
	lock ;	incl	(%eax) ;					\
;									\
	AVCPL_LOCK ;				/* MP-safe */		\
	movl	_cpl, %eax ;						\
	pushl	%eax ;							\
	orl	_intr_mask + (irq_num) * 4, %eax ;			\
	movl	%eax, _cpl ;						\
	andl	$~IRQ_BIT(irq_num), _ipending ;				\
	AVCPL_UNLOCK ;							\
;									\
	pushl	_intr_unit + (irq_num) * 4 ;				\
	APIC_ITRACE(apic_itrace_enter2, irq_num, APIC_ITRACE_ENTER2) ;	\
	sti ;								\
	call	*_intr_handler + (irq_num) * 4 ;			\
	cli ;								\
	APIC_ITRACE(apic_itrace_leave, irq_num, APIC_ITRACE_LEAVE) ;	\
;									\
	lock ;	andl	$~IRQ_BIT(irq_num), iactive ;			\
	UNMASK_IRQ(irq_num) ;						\
	APIC_ITRACE(apic_itrace_unmask, irq_num, APIC_ITRACE_UNMASK) ;	\
	sti ;				/* doreti repeats cli/sti */	\
	MEXITCOUNT ;							\
	jmp	_doreti ;						\
;									\
	ALIGN_TEXT ;							\
1: ;						/* active */		\
	APIC_ITRACE(apic_itrace_active, irq_num, APIC_ITRACE_ACTIVE) ;	\
	MASK_IRQ(irq_num) ;						\
	EOI_IRQ(irq_num) ;						\
	AVCPL_LOCK ;				/* MP-safe */		\
	orl	$IRQ_BIT(irq_num), _ipending ;				\
	AVCPL_UNLOCK ;							\
	lock ;								\
	btsl	$(irq_num), iactive ;		/* still active */	\
	jnc	0b ;				/* retry */		\
	POP_FRAME ;							\
	iret ;			/* XXX: iactive bit might be 0 now */	\
	ALIGN_TEXT ;							\
2: ;				/* masked by cpl, leave iactive set */	\
	APIC_ITRACE(apic_itrace_masked, irq_num, APIC_ITRACE_MASKED) ;	\
	orl	$IRQ_BIT(irq_num), _ipending ;				\
	AVCPL_UNLOCK ;							\
	ISR_RELLOCK ;		/* XXX this is going away... */		\
	POP_FRAME ;							\
	iret ;								\
	ALIGN_TEXT ;							\
3: ;					/* other cpu has isr lock */	\
	APIC_ITRACE(apic_itrace_noisrlock, irq_num, APIC_ITRACE_NOISRLOCK) ;\
	AVCPL_LOCK ;				/* MP-safe */		\
	orl	$IRQ_BIT(irq_num), _ipending ;				\
	testl	$IRQ_BIT(irq_num), _cpl ;				\
	jne	4f ;				/* this INT masked */	\
	AVCPL_UNLOCK ;							\
	call	forward_irq ;	/* forward irq to lock holder */	\
	POP_FRAME ;				/* and return */	\
	iret ;								\
	ALIGN_TEXT ;							\
4: ;						/* blocked */		\
	APIC_ITRACE(apic_itrace_masked2, irq_num, APIC_ITRACE_MASKED2) ;\
	AVCPL_UNLOCK ;							\
	POP_FRAME ;				/* and return */	\
	iret

#endif /* CPL_AND_CML */


/*
 * Handle "spurious INTerrupts".
 * Notes:
 *  This is different than the "spurious INTerrupt" generated by an
 *  8259 PIC for missing INTs.  See the APIC documentation for details.
 *  This routine should NOT do an 'EOI' cycle.
 */
	.text
	SUPERALIGN_TEXT
	.globl _Xspuriousint
_Xspuriousint:

	/* No EOI cycle used here */

	iret


/*
 * Handle TLB shootdowns.
 */
	.text
	SUPERALIGN_TEXT
	.globl	_Xinvltlb
_Xinvltlb:
	pushl	%eax

#ifdef COUNT_XINVLTLB_HITS
	ss
	movl	_cpuid, %eax
	ss
	incl	_xhits(,%eax,4)
#endif /* COUNT_XINVLTLB_HITS */

	movl	%cr3, %eax		/* invalidate the TLB */
	movl	%eax, %cr3

	ss				/* stack segment, avoid %ds load */
	movl	$0, lapic_eoi		/* End Of Interrupt to APIC */

	popl	%eax
	iret


#ifdef BETTER_CLOCK

/*
 * Executed by a CPU when it receives an Xcpucheckstate IPI from another CPU,
 *
 *  - Stores current cpu state in checkstate_cpustate[cpuid]
 *      0 == user, 1 == sys, 2 == intr
 *  - Stores current process in checkstate_curproc[cpuid]
 *
 *  - Signals its receipt by setting bit cpuid in checkstate_probed_cpus.
 *
 * stack: 0 -> ds, 4 -> ebx, 8 -> eax, 12 -> eip, 16 -> cs, 20 -> eflags
 */

	.text
	SUPERALIGN_TEXT
	.globl _Xcpucheckstate
	.globl _checkstate_cpustate
	.globl _checkstate_curproc
	.globl _checkstate_pc
_Xcpucheckstate:
	pushl	%eax
	pushl	%ebx
	pushl	%ds			/* save current data segment */

	movl	$KDSEL, %eax
	movl	%ax, %ds		/* use KERNEL data segment */

	movl	$0, lapic_eoi		/* End Of Interrupt to APIC */

	movl	$0, %ebx
	movl	16(%esp), %eax		/* interrupted code's %cs */
	andl	$3, %eax
	cmpl	$3, %eax		/* RPL 3 => was in user mode */
	je	1f
#ifdef VM86
	testl	$PSL_VM, 20(%esp)	/* vm86 also counts as user */
	jne	1f
#endif
	incl	%ebx			/* system or interrupt */
#ifdef CPL_AND_CML
	cmpl	$0, _inside_intr
	je	1f
	incl	%ebx			/* interrupt */
#endif
1:
	movl	_cpuid, %eax
	movl	%ebx, _checkstate_cpustate(,%eax,4)
	movl	_curproc, %ebx
	movl	%ebx, _checkstate_curproc(,%eax,4)
	movl	12(%esp), %ebx		/* interrupted %eip */
	movl	%ebx, _checkstate_pc(,%eax,4)

	lock				/* checkstate_probed_cpus |= (1<<id) */
	btsl	%eax, _checkstate_probed_cpus

	popl	%ds			/* restore previous data segment */
	popl	%ebx
	popl	%eax
	iret

#endif /* BETTER_CLOCK */

/*
 * Executed by a CPU when it receives an Xcpuast IPI from another CPU,
 *
 *  - Signals its receipt by clearing bit cpuid in checkstate_need_ast.
 *
 *  - We need a better method of triggering asts on other cpus.
 */

	.text
	SUPERALIGN_TEXT
	.globl _Xcpuast
_Xcpuast:
	PUSH_FRAME
	movl	$KDSEL, %eax
	movl	%ax, %ds		/* use KERNEL data segment */
	movl	%ax, %es

	movl	_cpuid, %eax
	lock				/* checkstate_need_ast &= ~(1<<id) */
	btrl	%eax, _checkstate_need_ast
	movl	$0, lapic_eoi		/* End Of Interrupt to APIC */

	lock
	btsl	%eax, _checkstate_pending_ast
	jc	1f			/* an ast is already being delivered */

	FAKE_MCOUNT(12*4(%esp))

	/*
	 * Giant locks do not come cheap.
	 * A lot of cycles are going to be wasted here.
	 */
	call	_get_isrlock

	AVCPL_LOCK
#ifdef CPL_AND_CML
	movl	_cml, %eax
#else
	movl	_cpl, %eax
#endif
	pushl	%eax			/* cpl for doreti */
	orl	$SWI_AST_PENDING, _ipending
	AVCPL_UNLOCK
	lock
	incb	_intr_nesting_level
	sti

	pushl	$0			/* dummy unit number */

	movl	_cpuid, %eax
	lock
	btrl	%eax, _checkstate_pending_ast

	MEXITCOUNT
	jmp	_doreti
1:
	/* We are already in the process of delivering an ast for this CPU */
	POP_FRAME
	iret


/*
 * Executed by a CPU when it receives an XFORWARD_IRQ IPI.
 */

	.text
	SUPERALIGN_TEXT
	.globl _Xforward_irq
_Xforward_irq:
	PUSH_FRAME
	movl	$KDSEL, %eax
	movl	%ax, %ds		/* use KERNEL data segment */
	movl	%ax, %es

	movl	$0, lapic_eoi		/* End Of Interrupt to APIC */

	FAKE_MCOUNT(12*4(%esp))

	ISR_TRYLOCK
	testl	%eax,%eax		/* Did we get the lock ? */
	jz	1f			/* No */

	lock
	incl	CNAME(forward_irq_hitcnt)
	cmpb	$4, _intr_nesting_level
	jae	2f			/* too deeply nested */

	jmp	3f			/* NOTE(review): unconditionally skips
					 * the _doreti dispatch below, leaving
					 * it (and label 4:) unreachable --
					 * looks deliberately disabled; confirm
					 * against revision history. */

	AVCPL_LOCK
#ifdef CPL_AND_CML
	movl	_cml, %eax
#else
	movl	_cpl, %eax
#endif
	pushl	%eax
	AVCPL_UNLOCK
	lock
	incb	_intr_nesting_level
	sti

	pushl	$0

	MEXITCOUNT
	jmp	_doreti			/* Handle forwarded interrupt */
4:
	lock
	decb	_intr_nesting_level
	ISR_RELLOCK
	MEXITCOUNT
	addl	$8, %esp
	POP_FRAME
	iret
1:
	lock
	incl	CNAME(forward_irq_misscnt)
	call	forward_irq	/* Oops, we've lost the isr lock */
	MEXITCOUNT
	POP_FRAME
	iret
2:
	lock
	incl	CNAME(forward_irq_toodeepcnt)
3:
	ISR_RELLOCK
	MEXITCOUNT
	POP_FRAME
	iret

/*
 * forward_irq: send an XFORWARD_IRQ IPI to the CPU that currently
 * holds the Giant lock (CPU #0 if it is free), and wait for the
 * local APIC to report the IPI delivered.  No-op when invltlb is
 * unsafe or forwarding is administratively disabled.
 */
forward_irq:
	MCOUNT
	cmpl	$0,_invltlb_ok
	jz	4f

	cmpl	$0, CNAME(forward_irq_enabled)
	jz	4f

	movl	_mp_lock,%eax		/* top byte = holder's CPU id */
	cmpl	$FREE_LOCK,%eax
	jne	1f
	movl	$0, %eax		/* Pick CPU #0 if no one has lock */
1:
	shrl	$24,%eax
	movl	_cpu_num_to_apic_id(,%eax,4),%ecx
	shll	$24,%ecx
	movl	lapic_icr_hi, %eax	/* set destination in ICR high */
	andl	$~APIC_ID_MASK, %eax
	orl	%ecx, %eax
	movl	%eax, lapic_icr_hi

2:
	movl	lapic_icr_lo, %eax	/* wait for idle delivery status */
	andl	$APIC_DELSTAT_MASK,%eax
	jnz	2b
	movl	lapic_icr_lo, %eax	/* fire the fixed-mode IPI */
	andl	$APIC_RESV2_MASK, %eax
	orl	$(APIC_DEST_DESTFLD|APIC_DELMODE_FIXED|XFORWARD_IRQ_OFFSET), %eax
	movl	%eax, lapic_icr_lo
3:
	movl	lapic_icr_lo, %eax	/* wait for it to go out */
	andl	$APIC_DELSTAT_MASK,%eax
	jnz	3b
4:
	ret

/*
 * Executed by a CPU when it receives an Xcpustop IPI from another CPU,
 *
 *  - Signals its receipt.
 *  - Waits for permission to restart.
 *  - Signals its restart.
 */

	.text
	SUPERALIGN_TEXT
	.globl _Xcpustop
_Xcpustop:
	pushl	%eax
	pushl	%ds			/* save current data segment */

	movl	$KDSEL, %eax
	movl	%ax, %ds		/* use KERNEL data segment */

	movl	_cpuid, %eax

	lock
	btsl	%eax, _stopped_cpus	/* stopped_cpus |= (1<<id) */
1:
	btl	%eax, _started_cpus	/* while (!(started_cpus & (1<<id))) */
	jnc	1b

	lock
	btrl	%eax, _started_cpus	/* started_cpus &= ~(1<<id) */

	movl	$0, lapic_eoi		/* End Of Interrupt to APIC */

	popl	%ds			/* restore previous data segment */
	popl	%eax
	iret


MCOUNT_LABEL(bintr)
	FAST_INTR(0,fastintr0)
	FAST_INTR(1,fastintr1)
	FAST_INTR(2,fastintr2)
	FAST_INTR(3,fastintr3)
	FAST_INTR(4,fastintr4)
	FAST_INTR(5,fastintr5)
	FAST_INTR(6,fastintr6)
	FAST_INTR(7,fastintr7)
	FAST_INTR(8,fastintr8)
	FAST_INTR(9,fastintr9)
	FAST_INTR(10,fastintr10)
	FAST_INTR(11,fastintr11)
	FAST_INTR(12,fastintr12)
	FAST_INTR(13,fastintr13)
	FAST_INTR(14,fastintr14)
	FAST_INTR(15,fastintr15)
	FAST_INTR(16,fastintr16)
	FAST_INTR(17,fastintr17)
	FAST_INTR(18,fastintr18)
	FAST_INTR(19,fastintr19)
	FAST_INTR(20,fastintr20)
	FAST_INTR(21,fastintr21)
	FAST_INTR(22,fastintr22)
	FAST_INTR(23,fastintr23)
	INTR(0,intr0)
	INTR(1,intr1)
	INTR(2,intr2)
	INTR(3,intr3)
	INTR(4,intr4)
	INTR(5,intr5)
	INTR(6,intr6)
	INTR(7,intr7)
	INTR(8,intr8)
	INTR(9,intr9)
	INTR(10,intr10)
	INTR(11,intr11)
	INTR(12,intr12)
	INTR(13,intr13)
	INTR(14,intr14)
	INTR(15,intr15)
	INTR(16,intr16)
	INTR(17,intr17)
	INTR(18,intr18)
	INTR(19,intr19)
	INTR(20,intr20)
	INTR(21,intr21)
	INTR(22,intr22)
	INTR(23,intr23)
MCOUNT_LABEL(eintr)

	.data
/*
 * Addresses of interrupt handlers.
 *  XresumeNN: Resumption addresses for HWIs.
 */
	.globl _ihandlers
_ihandlers:
ihandlers:
/*
 * used by:
 *  ipl.s:	doreti_unpend
 */
	.long	Xresume0, Xresume1, Xresume2, Xresume3
	.long	Xresume4, Xresume5, Xresume6, Xresume7
	.long	Xresume8, Xresume9, Xresume10, Xresume11
	.long	Xresume12, Xresume13, Xresume14, Xresume15
	.long	Xresume16, Xresume17, Xresume18, Xresume19
	.long	Xresume20, Xresume21, Xresume22, Xresume23
/*
 * used by:
 *  ipl.s:	doreti_unpend
 *  apic_ipl.s:	splz_unpend
 */
	.long	swi_tty, swi_net
	.long	dummycamisr, dummycamisr
	.long	_swi_vm, 0
	.long	_softclock, swi_ast

imasks:				/* masks for interrupt handlers */
	.space	NHWI*4		/* padding; HWI masks are elsewhere */

	.long	SWI_TTY_MASK, SWI_NET_MASK
	.long	SWI_CAMNET_MASK, SWI_CAMBIO_MASK
	.long	SWI_VM_MASK, 0
	.long	SWI_CLOCK_MASK, SWI_AST_MASK

/* active flag for lazy masking */
iactive:
	.long	0

#ifdef COUNT_XINVLTLB_HITS
	.globl	_xhits
_xhits:
	.space	(NCPU * 4), 0
#endif /* COUNT_XINVLTLB_HITS */

/* variables used by stop_cpus()/restart_cpus()/Xcpustop */
	.globl _stopped_cpus, _started_cpus
_stopped_cpus:
	.long	0
_started_cpus:
	.long	0

#ifdef BETTER_CLOCK
	.globl _checkstate_probed_cpus
_checkstate_probed_cpus:
	.long	0
#endif /* BETTER_CLOCK */
	.globl _checkstate_need_ast
_checkstate_need_ast:
	.long	0
_checkstate_pending_ast:
	.long	0
	.globl CNAME(forward_irq_misscnt)
	.globl CNAME(forward_irq_toodeepcnt)
	.globl CNAME(forward_irq_hitcnt)
CNAME(forward_irq_misscnt):
	.long 0
CNAME(forward_irq_hitcnt):
	.long 0
CNAME(forward_irq_toodeepcnt):
	.long 0


	.globl	_apic_pin_trigger
_apic_pin_trigger:
	.space	(NAPIC * 4), 0


/*
 * Interrupt counters and names.  The format of these and the label names
 * must agree with what vmstat expects.  The tables are indexed by device
 * ids so that we don't have to move the names around as devices are
 * attached.
 */
#include "vector.h"
	.globl	_intrcnt, _eintrcnt
_intrcnt:
	.space	(NR_DEVICES + ICU_LEN) * 4
_eintrcnt:

	.globl	_intrnames, _eintrnames
_intrnames:
	.ascii	DEVICE_NAMES
	.asciz	"stray irq0"
	.asciz	"stray irq1"
	.asciz	"stray irq2"
	.asciz	"stray irq3"
	.asciz	"stray irq4"
	.asciz	"stray irq5"
	.asciz	"stray irq6"
	.asciz	"stray irq7"
	.asciz	"stray irq8"
	.asciz	"stray irq9"
	.asciz	"stray irq10"
	.asciz	"stray irq11"
	.asciz	"stray irq12"
	.asciz	"stray irq13"
	.asciz	"stray irq14"
	.asciz	"stray irq15"
	.asciz	"stray irq16"
	.asciz	"stray irq17"
	.asciz	"stray irq18"
	.asciz	"stray irq19"
	.asciz	"stray irq20"
	.asciz	"stray irq21"
	.asciz	"stray irq22"
	.asciz	"stray irq23"
_eintrnames:

	.text