/* swtch.s revision 25243 */
/*-
 * Copyright (c) 1990 The Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * William Jolitz.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $Id: swtch.s,v 1.48 1997/04/26 11:45:24 peter Exp $
 */

#include "npx.h"
#include "opt_user_ldt.h"
#include "opt_smp.h"
#include "opt_smp_privpages.h"

#include <sys/rtprio.h>

#include <machine/asmacros.h>
#include <machine/spl.h>
#include <machine/smpasm.h>
#include <machine/smptests.h>		/** TEST_LOPRIO */

#if defined(SMP) && defined(SMP_PRIVPAGES)
#include <machine/pmap.h>
#endif

#include "assym.s"


/*****************************************************************************/
/*                               Scheduling                                  */
/*****************************************************************************/

/*
 * The following primitives manipulate the run queues.
 * _whichqs tells which of the 32 queues _qs
 * have processes in them.  setrunqueue puts processes into queues, Remrq
 * removes them from queues.  The running process is on no queue,
 * other processes are on a queue related to p->p_priority, divided by 4
 * actually to shrink the 0-127 range of priorities into the 32 available
 * queues.
 */
	.data
#ifndef SMP
	.globl	_curpcb
_curpcb:	.long	0		/* pointer to curproc's PCB area */
#endif
	.globl	_whichqs, _whichrtqs, _whichidqs

_whichqs:	.long	0		/* which run queues have data */
_whichrtqs:	.long	0		/* which realtime run queues have data */
_whichidqs:	.long	0		/* which idletime run queues have data */
	.globl	_hlt_vector
_hlt_vector:	.long	_default_halt	/* pointer to halt routine */


	.globl	_qs,_cnt,_panic

	.globl	_want_resched
_want_resched:	.long	0		/* we need to re-run the scheduler */

	.text
/*
 * setrunqueue(p)
 *
 * Link process p onto the tail of the run queue chosen by its scheduling
 * class: the realtime queues (_rtqs), the idle-priority queues (_idqs),
 * or the normal queues (_qs, indexed by p_priority / 4).  The matching
 * bit in _whichrtqs / _whichidqs / _whichqs is set to mark the queue
 * non-empty.  Queues are circular doubly-linked lists headed by an
 * 8-byte header (hence the shll $3 to scale the queue index).
 *
 * In:     4(%esp) = p (struct proc *)
 * Uses:   %eax = p, %edx = queue index then queue header, %ecx = old tail
 *
 * Call should be made at spl6(), and p->p_stat should be SRUN
 */
ENTRY(setrunqueue)
	movl	4(%esp),%eax
#ifdef DIAGNOSTIC
	cmpb	$SRUN,P_STAT(%eax)	/* panic unless p is runnable */
	je	set1
	pushl	$set2
	call	_panic
set1:
#endif
	cmpw	$RTP_PRIO_NORMAL,P_RTPRIO_TYPE(%eax) /* normal priority process? */
	je	set_nort

	movzwl	P_RTPRIO_PRIO(%eax),%edx /* %edx = realtime/idle queue index */

	cmpw	$RTP_PRIO_REALTIME,P_RTPRIO_TYPE(%eax) /* realtime priority? */
	jne	set_id			/* must be idle priority */

set_rt:
	btsl	%edx,_whichrtqs		/* set q full bit */
	shll	$3,%edx			/* index * sizeof(queue header) */
	addl	$_rtqs,%edx		/* locate q hdr */
	movl	%edx,P_FORW(%eax)	/* link process on tail of q */
	movl	P_BACK(%edx),%ecx	/* %ecx = old tail */
	movl	%ecx,P_BACK(%eax)
	movl	%eax,P_BACK(%edx)	/* header now points back at p */
	movl	%eax,P_FORW(%ecx)	/* old tail now points forward at p */
	ret

set_id:
	btsl	%edx,_whichidqs		/* set q full bit */
	shll	$3,%edx			/* index * sizeof(queue header) */
	addl	$_idqs,%edx		/* locate q hdr */
	movl	%edx,P_FORW(%eax)	/* link process on tail of q */
	movl	P_BACK(%edx),%ecx
	movl	%ecx,P_BACK(%eax)
	movl	%eax,P_BACK(%edx)
	movl	%eax,P_FORW(%ecx)
	ret

set_nort:				/* Normal (RTOFF) code */
	movzbl	P_PRI(%eax),%edx
	shrl	$2,%edx			/* shrink 0-127 priority to 0-31 queue */
	btsl	%edx,_whichqs		/* set q full bit */
	shll	$3,%edx			/* index * sizeof(queue header) */
	addl	$_qs,%edx		/* locate q hdr */
	movl	%edx,P_FORW(%eax)	/* link process on tail of q */
	movl	P_BACK(%edx),%ecx
	movl	%ecx,P_BACK(%eax)
	movl	%eax,P_BACK(%edx)
	movl	%eax,P_FORW(%ecx)
	ret

set2:	.asciz	"setrunqueue"		/* panic message for DIAGNOSTIC check */

/*
 * Remrq(p)
 *
 * Unlink process p from whichever run queue it is on.  The queue's
 * "full" bit is cleared first (panic if it was already clear), then
 * re-set afterwards if the queue is still non-empty.  Queue selection
 * mirrors setrunqueue above.
 *
 * In:     4(%esp) = p (struct proc *)
 * Uses:   %eax = p, %edx = queue index (saved on the stack across the
 *         unlink, where %edx is reused as scratch), %ecx = scratch
 *
 * Call should be made at spl6().
 */
ENTRY(remrq)
	movl	4(%esp),%eax
	cmpw	$RTP_PRIO_NORMAL,P_RTPRIO_TYPE(%eax) /* normal priority process? */
	je	rem_nort

	movzwl	P_RTPRIO_PRIO(%eax),%edx

	cmpw	$RTP_PRIO_REALTIME,P_RTPRIO_TYPE(%eax) /* realtime priority process? */
	jne	rem_id

	btrl	%edx,_whichrtqs		/* clear full bit, panic if clear already */
	jb	rem1rt			/* CF = old bit value */
	pushl	$rem3rt
	call	_panic
rem1rt:
	pushl	%edx			/* preserve queue index over unlink */
	movl	P_FORW(%eax),%ecx	/* unlink process */
	movl	P_BACK(%eax),%edx
	movl	%edx,P_BACK(%ecx)	/* p->forw->back = p->back */
	movl	P_BACK(%eax),%ecx
	movl	P_FORW(%eax),%edx
	movl	%edx,P_FORW(%ecx)	/* p->back->forw = p->forw */
	popl	%edx
	movl	$_rtqs,%ecx
	shll	$3,%edx
	addl	%edx,%ecx		/* %ecx = queue header */
	cmpl	P_FORW(%ecx),%ecx	/* q still has something? */
	je	rem2rt
	shrl	$3,%edx			/* yes, set bit as still full */
	btsl	%edx,_whichrtqs
rem2rt:
	ret
rem_id:
	btrl	%edx,_whichidqs		/* clear full bit, panic if clear already */
	jb	rem1id
	pushl	$rem3id
	call	_panic
rem1id:
	pushl	%edx			/* preserve queue index over unlink */
	movl	P_FORW(%eax),%ecx	/* unlink process */
	movl	P_BACK(%eax),%edx
	movl	%edx,P_BACK(%ecx)
	movl	P_BACK(%eax),%ecx
	movl	P_FORW(%eax),%edx
	movl	%edx,P_FORW(%ecx)
	popl	%edx
	movl	$_idqs,%ecx
	shll	$3,%edx
	addl	%edx,%ecx		/* %ecx = queue header */
	cmpl	P_FORW(%ecx),%ecx	/* q still has something? */
	je	rem2id
	shrl	$3,%edx			/* yes, set bit as still full */
	btsl	%edx,_whichidqs
rem2id:
	ret

rem_nort:
	movzbl	P_PRI(%eax),%edx
	shrl	$2,%edx			/* shrink 0-127 priority to 0-31 queue */
	btrl	%edx,_whichqs		/* clear full bit, panic if clear already */
	jb	rem1
	pushl	$rem3
	call	_panic
rem1:
	pushl	%edx			/* preserve queue index over unlink */
	movl	P_FORW(%eax),%ecx	/* unlink process */
	movl	P_BACK(%eax),%edx
	movl	%edx,P_BACK(%ecx)
	movl	P_BACK(%eax),%ecx
	movl	P_FORW(%eax),%edx
	movl	%edx,P_FORW(%ecx)
	popl	%edx
	movl	$_qs,%ecx
	shll	$3,%edx
	addl	%edx,%ecx		/* %ecx = queue header */
	cmpl	P_FORW(%ecx),%ecx	/* q still has something? */
	je	rem2
	shrl	$3,%edx			/* yes, set bit as still full */
	btsl	%edx,_whichqs
rem2:
	ret

rem3:	.asciz	"remrq"			/* panic messages for the full-bit checks */
rem3rt:	.asciz	"remrq.rt"
rem3id:	.asciz	"remrq.id"

/*
 * When no processes are on the runq, cpu_switch() branches to _idle
 * to wait for something to come ready.
 *
 * NOTE: on an SMP system this routine is a startup-only code path.
 * once initialization is over, meaning the idle procs have been
 * created, we should NEVER branch here.
 */
	ALIGN_TEXT
_idle:
#ifdef SMP
	movl	_smp_active, %eax	/* past startup? then _idle is a bug */
	cmpl	$0, %eax
	jnz	badsw
#endif /* SMP */
	xorl	%ebp,%ebp
	movl	$HIDENAME(tmpstk),%esp	/* run on the temporary stack */
	movl	_IdlePTD,%ecx
	movl	%ecx,%cr3		/* switch to the idle page tables */

	/* update common_tss.tss_esp0 pointer */
#ifdef SMP
	GETCPUID(%eax)
	movl	_SMPcommon_tss_ptr(,%eax,4), %eax /* this cpu's TSS */
#else
	movl	$_common_tss, %eax
#endif
	movl	%esp, TSS_ESP0(%eax)

#ifdef TSS_IS_CACHED			/* example only */
	/* Reload task register to force reload of selector */
	movl	_tssptr, %ebx
	andb	$~0x02, 5(%ebx)		/* Flip 386BSY -> 386TSS */
	movl	_gsel_tss, %ebx
	ltr	%bx
#endif

	sti

	/*
	 * XXX callers of cpu_switch() do a bogus splclock().  Locking should
	 * be left to cpu_switch().
	 */
	movl	$SWI_AST_MASK,_cpl
	testl	$~SWI_AST_MASK,_ipending /* any pending interrupts? */
	je	idle_loop
	call	_splz			/* service them first */

	ALIGN_TEXT
idle_loop:
	cli
	movb	$1,_intr_nesting_level	/* charge Intr if we leave */
	cmpl	$0,_whichrtqs		/* real-time queue */
	CROSSJUMP(jne, sw1a, je)
	cmpl	$0,_whichqs		/* normal queue */
	CROSSJUMP(jne, nortqr, je)
	cmpl	$0,_whichidqs		/* 'idle' queue */
	CROSSJUMP(jne, idqr, je)
	movb	$0,_intr_nesting_level	/* charge Idle for this loop */
	call	_vm_page_zero_idle	/* pre-zero a page; nonzero = did work */
	testl	%eax, %eax
	jnz	idle_loop
	sti
	call	*_hlt_vector		/* wait for interrupt */
	jmp	idle_loop

CROSSJUMPTARGET(_idle)

/*
 * default_halt()
 *
 * Default member of _hlt_vector: halt the CPU until the next interrupt.
 */
ENTRY(default_halt)
	hlt
	ret

/*
 * cpu_switch()
 *
 * Save the current process's context (if any) into its PCB, pick the
 * highest-priority runnable process from the realtime, normal, and
 * idle-priority queues (in that order, idling if all are empty),
 * switch address space and kernel stack to it, and restore its
 * register context so that the ret returns into the new process.
 */
ENTRY(cpu_switch)

	/* switch to new process. first, save context as needed */
	GETCURPROC(%ecx)

	/* if no process to save, don't bother */
	testl	%ecx,%ecx
	je	sw1

#ifdef SMP
	movb	P_ONCPU(%ecx), %al	/* save "last" cpu */
	movb	%al, P_LASTCPU(%ecx)
	movb	$0xff, P_ONCPU(%ecx)	/* "leave" the cpu */
#endif

	movl	P_ADDR(%ecx),%ecx	/* %ecx = outgoing process's PCB */

	movl	(%esp),%eax		/* Hardware registers */
	movl	%eax,PCB_EIP(%ecx)	/* our return address = resume point */
	movl	%ebx,PCB_EBX(%ecx)
	movl	%esp,PCB_ESP(%ecx)
	movl	%ebp,PCB_EBP(%ecx)
	movl	%esi,PCB_ESI(%ecx)
	movl	%edi,PCB_EDI(%ecx)

#ifdef SMP
	movl	_mp_lock, %eax
	cmpl	$0xffffffff, %eax	/* is it free? */
	je	badsw			/* yes, bad medicine! */
	andl	$0x00ffffff, %eax	/* clear CPU portion */
	movl	%eax,PCB_MPNEST(%ecx)	/* store it */
#endif /* SMP */

#if NNPX > 0
	/* have we used fp, and need a save? */
	GETCURPROC(%eax)
	GETNPXPROC(%ebx)
	cmp	%eax,%ebx		/* fp state belongs to curproc? */
	jne	1f
	addl	$PCB_SAVEFPU,%ecx	/* h/w bugs make saving complicated */
	pushl	%ecx
	call	_npxsave		/* do it in a big C function */
	popl	%eax
1:
#endif	/* NNPX > 0 */

	movb	$1,_intr_nesting_level	/* charge Intr, not Sys/Idle */

	SETCURPROC($0, %edi)		/* no current process during the switch */

	/* save is done, now choose a new process or idle */
sw1:
	cli
sw1a:
	movl	_whichrtqs,%edi		/* pick next p. from rtqs */
	testl	%edi,%edi
	jz	nortqr			/* no realtime procs */

	/* XXX - bsf is sloow */
	bsfl	%edi,%ebx		/* find a full q (lowest set bit) */
	jz	nortqr			/* no proc on rt q - try normal ... */

	/* XX update whichqs? */
	btrl	%ebx,%edi		/* clear q full status */
	leal	_rtqs(,%ebx,8),%eax	/* select q */
	movl	%eax,%esi		/* %esi = queue header, for empty test */

	movl	P_FORW(%eax),%ecx	/* unlink from front of process q */
	movl	P_FORW(%ecx),%edx
	movl	%edx,P_FORW(%eax)
	movl	P_BACK(%ecx),%eax
	movl	%eax,P_BACK(%edx)

	cmpl	P_FORW(%ecx),%esi	/* q empty */
	je	rt3
	btsl	%ebx,%edi		/* nope, set to indicate not empty */
rt3:
	movl	%edi,_whichrtqs		/* update q status */
	jmp	swtch_com

	/* old sw1a */
/* Normal process priority's */
nortqr:
	movl	_whichqs,%edi
2:
	/* XXX - bsf is sloow */
	bsfl	%edi,%ebx		/* find a full q (lowest set bit) */
	jz	idqr			/* if none, idle */

	/* XX update whichqs? */
	btrl	%ebx,%edi		/* clear q full status */
	leal	_qs(,%ebx,8),%eax	/* select q */
	movl	%eax,%esi		/* %esi = queue header, for empty test */

	movl	P_FORW(%eax),%ecx	/* unlink from front of process q */
	movl	P_FORW(%ecx),%edx
	movl	%edx,P_FORW(%eax)
	movl	P_BACK(%ecx),%eax
	movl	%eax,P_BACK(%edx)

	cmpl	P_FORW(%ecx),%esi	/* q empty */
	je	3f
	btsl	%ebx,%edi		/* nope, set to indicate not empty */
3:
	movl	%edi,_whichqs		/* update q status */
	jmp	swtch_com

idqr: /* was sw1a */
	movl	_whichidqs,%edi		/* pick next p. from idqs */

	/* XXX - bsf is sloow */
	bsfl	%edi,%ebx		/* find a full q (lowest set bit) */
	CROSSJUMP(je, _idle, jne)	/* if no proc, idle */

	/* XX update whichqs? */
	btrl	%ebx,%edi		/* clear q full status */
	leal	_idqs(,%ebx,8),%eax	/* select q */
	movl	%eax,%esi		/* %esi = queue header, for empty test */

	movl	P_FORW(%eax),%ecx	/* unlink from front of process q */
	movl	P_FORW(%ecx),%edx
	movl	%edx,P_FORW(%eax)
	movl	P_BACK(%ecx),%eax
	movl	%eax,P_BACK(%edx)

	cmpl	P_FORW(%ecx),%esi	/* q empty */
	je	id3
	btsl	%ebx,%edi		/* nope, set to indicate not empty */
id3:
	movl	%edi,_whichidqs		/* update q status */

swtch_com:				/* %ecx = chosen process from here on */
	movl	$0,%eax
	movl	%eax,_want_resched	/* we are rescheduling right now */

#ifdef	DIAGNOSTIC
	cmpl	%eax,P_WCHAN(%ecx)	/* must not be sleeping on anything */
	jne	badsw
	cmpb	$SRUN,P_STAT(%ecx)	/* and must be runnable */
	jne	badsw
#endif

	movl	%eax,P_BACK(%ecx) 	/* isolate process to run */
	movl	P_ADDR(%ecx),%edx	/* %edx = new process's PCB */
	movl	PCB_CR3(%edx),%ebx	/* %ebx = new page directory */

#if defined(SMP) && defined(SMP_PRIVPAGES)
	/* Grab the private PT pointer from the outgoing process's PTD */
	movl	$_PTD,%esi
	movl	4*MPPTDI(%esi), %eax	/* fetch cpu's prv pt */
#endif

	/* switch address space */
	movl	%ebx,%cr3

#if defined(SMP) && defined(SMP_PRIVPAGES)
	/* Copy the private PT to the new process's PTD */
	/* XXX yuck, the _PTD changes when we switch, so we have to
	 * reload %cr3 after changing the address space.
	 * We need to fix this by storing a pointer to the virtual
	 * location of the per-process PTD in the PCB or something quick.
	 * Dereferencing proc->vm_map->pmap->p_pdir[] is painful in asm.
	 */
	movl	$_PTD,%esi
	movl	%eax, 4*MPPTDI(%esi)	/* restore cpu's prv page */

	/* XXX: we have just changed the page tables.. reload.. */
	movl	%ebx,%cr3
#endif

#ifdef HOW_TO_SWITCH_TSS		/* example only */
	/* Fix up tss pointer to floating pcb/stack structure */
	/* XXX probably lots faster to store the 64 bits of tss entry
	 * in the pcb somewhere and copy them on activation.
	 */
	movl	_tssptr, %ebx
	movl	%edx, %eax		/* edx = pcb/tss */
	movw	%ax, 2(%ebx)		/* store bits 0->15 */
	roll	$16, %eax		/* swap upper and lower */
	movb	%al, 4(%ebx)		/* store bits 16->23 */
	movb	%ah, 7(%ebx)		/* store bits 24->31 */
	andb	$~0x02, 5(%ebx)		/* Flip 386BSY -> 386TSS */
#endif

	/* update common_tss.tss_esp0 pointer */
#ifdef SMP
	GETCPUID(%eax)
	movl	_SMPcommon_tss_ptr(,%eax,4), %eax /* this cpu's TSS */
#else
	movl	$_common_tss, %eax
#endif
	movl	%edx, %ebx		/* pcb */
	addl	$(UPAGES * PAGE_SIZE), %ebx /* top of the U area = kernel stack */
	movl	%ebx, TSS_ESP0(%eax)

#ifdef TSS_IS_CACHED			/* example only */
	/* Reload task register to force reload of selector */
	movl	_tssptr, %ebx
	andb	$~0x02, 5(%ebx)		/* Flip 386BSY -> 386TSS */
	movl	_gsel_tss, %ebx
	ltr	%bx
#endif

	/* restore context */
	movl	PCB_EBX(%edx),%ebx
	movl	PCB_ESP(%edx),%esp	/* now on the new process's stack */
	movl	PCB_EBP(%edx),%ebp
	movl	PCB_ESI(%edx),%esi
	movl	PCB_EDI(%edx),%edi
	movl	PCB_EIP(%edx),%eax
	movl	%eax,(%esp)		/* ret below resumes the new process */

#ifdef SMP
	GETCPUID(%eax)
	movb	%al, P_ONCPU(%ecx)	/* new process owns this cpu */
#endif
	SETCURPCB(%edx, %eax)
	SETCURPROC(%ecx, %eax)

	movb	$0,_intr_nesting_level
#ifdef SMP
	movl	_apic_base, %eax	/* base addr of LOCAL APIC */
#if defined(TEST_LOPRIO)
	pushl	%edx
	movl	APIC_TPR(%eax), %edx	/* get TPR register contents */
	andl	$~0xff, %edx		/* clear the prio field */
	movl	%edx, APIC_TPR(%eax)	/* now hold loprio for INTs */
	popl	%edx
#endif /* TEST_LOPRIO */
	movl	APIC_ID(%eax), %eax	/* APIC ID register */
	andl	$APIC_ID_MASK, %eax	/* extract ID portion */
	orl	PCB_MPNEST(%edx), %eax	/* add count from PROC */
	movl	%eax, _mp_lock		/* load the mp_lock */
#endif /* SMP */

#ifdef	USER_LDT
	cmpl	$0, PCB_USERLDT(%edx)	/* does the new process have its own LDT? */
	jnz	1f
	movl	__default_ldt,%eax	/* no: use the default LDT ... */
	cmpl	_currentldt,%eax	/* ... unless it is already loaded */
	je	2f
	lldt	__default_ldt
	movl	%eax,_currentldt
	jmp	2f
1:	pushl	%edx			/* yes: install the per-process LDT */
	call	_set_user_ldt
	popl	%edx
2:
#endif

	sti
	ret				/* "return" into the new process */

CROSSJUMPTARGET(idqr)
CROSSJUMPTARGET(nortqr)
CROSSJUMPTARGET(sw1a)

badsw:					/* common panic for impossible states */
	pushl	$sw0
	call	_panic

sw0:	.asciz	"cpu_switch"

/*
 * savectx(pcb)
 * Update pcb, saving current processor state.
 *
 * In:     4(%esp) = pcb (struct pcb *)
 * Saves the callee-visible register state (eip/ebx/esp/ebp/esi/edi)
 * into the pcb, plus the npx (FPU) state when npxproc is set.
 */
ENTRY(savectx)
	/* fetch PCB */
	movl	4(%esp),%ecx

	/* caller's return address - child won't execute this routine */
	movl	(%esp),%eax
	movl	%eax,PCB_EIP(%ecx)

	movl	%ebx,PCB_EBX(%ecx)
	movl	%esp,PCB_ESP(%ecx)
	movl	%ebp,PCB_EBP(%ecx)
	movl	%esi,PCB_ESI(%ecx)
	movl	%edi,PCB_EDI(%ecx)

#if NNPX > 0
	/*
	 * If npxproc == NULL, then the npx h/w state is irrelevant and the
	 * state had better already be in the pcb.  This is true for forks
	 * but not for dumps (the old book-keeping with FP flags in the pcb
	 * always lost for dumps because the dump pcb has 0 flags).
	 *
	 * If npxproc != NULL, then we have to save the npx h/w state to
	 * npxproc's pcb and copy it to the requested pcb, or save to the
	 * requested pcb and reload.  Copying is easier because we would
	 * have to handle h/w bugs for reloading.  We used to lose the
	 * parent's npx state for forks by forgetting to reload.
	 */
	GETNPXPROC(%eax)
	testl	%eax,%eax
	je	1f

	pushl	%ecx			/* preserve target pcb */
	movl	P_ADDR(%eax),%eax
	leal	PCB_SAVEFPU(%eax),%eax	/* npxproc's FPU save area */
	pushl	%eax
	pushl	%eax			/* one copy survives as bcopy src */
	call	_npxsave
	addl	$4,%esp
	popl	%eax			/* %eax = npxproc's save area */
	popl	%ecx			/* %ecx = target pcb */

	pushl	$PCB_SAVEFPU_SIZE	/* bcopy(src, dst, size) */
	leal	PCB_SAVEFPU(%ecx),%ecx
	pushl	%ecx
	pushl	%eax
	call	_bcopy
	addl	$12,%esp
#endif	/* NNPX > 0 */

1:
	ret