cpu_switch.S revision 69536
/*-
 * Copyright (c) 1990 The Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * William Jolitz.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: head/sys/amd64/amd64/cpu_switch.S 69536 2000-12-03 01:09:59Z jake $
 */

#include "npx.h"
#include "opt_user_ldt.h"

#include <sys/rtprio.h>

#include <machine/asmacros.h>
#include <machine/ipl.h>

#ifdef SMP
#include <machine/pmap.h>
#include <machine/apic.h>
#include <machine/smptests.h>		/** GRAB_LOPRIO */
#include <machine/lock.h>
#endif /* SMP */

#include "assym.s"

/*****************************************************************************/
/* Scheduling */
/*****************************************************************************/

	.data

	.globl	_panic

#if defined(SWTCH_OPTIM_STATS)
	.globl	_swtch_optim_stats, _tlb_flush_count
_swtch_optim_stats:	.long	0		/* number of _swtch_optims */
_tlb_flush_count:	.long	0
#endif

	.text

/*
 * cpu_throw()
 *
 * Switch to a new process without saving the outgoing context: jump
 * straight to the "choose a new process" half of cpu_switch() below,
 * skipping the save of the current process's state.
 */
ENTRY(cpu_throw)
	jmp	sw1

/*
 * cpu_switch()
 *
 * Save the current process's register context into its PCB, pick the
 * next process to run, switch address space / TSS / LDT as required,
 * then restore the new process's context.  The final "ret" resumes the
 * new process at its saved PCB_EIP.
 */
ENTRY(cpu_switch)

	/* switch to new process. first, save context as needed */
	movl	_curproc,%ecx

	/* if no process to save, don't bother */
	testl	%ecx,%ecx
	jz	sw1

#ifdef SMP
	movb	P_ONCPU(%ecx), %al		/* save "last" cpu */
	movb	%al, P_LASTCPU(%ecx)
	movb	$0xff, P_ONCPU(%ecx)		/* "leave" the cpu */
#endif /* SMP */
	movl	P_VMSPACE(%ecx), %edx
#ifdef SMP
	movl	_cpuid, %eax
#else
	xorl	%eax, %eax			/* UP: cpu id is always 0 */
#endif /* SMP */
	/* clear this cpu's bit in the old vmspace's pmap active mask */
	btrl	%eax, VM_PMAP+PM_ACTIVE(%edx)

	movl	P_ADDR(%ecx),%edx		/* %edx = old process's PCB */

	/*
	 * Top of stack is still our caller's return address (nothing has
	 * been pushed since entry); pop it directly into pcb_eip.
	 */
	popl	PCB_EIP(%edx)			/* Hardware registers */
	movl	%ebx,PCB_EBX(%edx)
	movl	%esp,PCB_ESP(%edx)
	movl	%ebp,PCB_EBP(%edx)
	movl	%esi,PCB_ESI(%edx)
	movl	%edi,PCB_EDI(%edx)
	movl	%gs,PCB_GS(%edx)

	/* test if debug registers should be saved */
	movb	PCB_FLAGS(%edx),%al
	andb	$PCB_DBREGS,%al
	jz	1f				/* no, skip over */
	movl	%dr7,%eax			/* yes, do the save */
	movl	%eax,PCB_DR7(%edx)
	andl	$0x0000ff00, %eax		/* disable all watchpoints */
	movl	%eax,%dr7
	movl	%dr6,%eax
	movl	%eax,PCB_DR6(%edx)
	movl	%dr3,%eax
	movl	%eax,PCB_DR3(%edx)
	movl	%dr2,%eax
	movl	%eax,PCB_DR2(%edx)
	movl	%dr1,%eax
	movl	%eax,PCB_DR1(%edx)
	movl	%dr0,%eax
	movl	%eax,PCB_DR0(%edx)
1:

	/* save sched_lock recursion count */
	movl	_sched_lock+MTX_RECURSE,%eax
	movl	%eax,PCB_SCHEDNEST(%edx)

#ifdef SMP
	/* XXX FIXME: we should be saving the local APIC TPR */
#endif /* SMP */

#if NNPX > 0
	/* have we used fp, and need a save? */
	cmpl	%ecx,_npxproc
	jne	1f
	addl	$PCB_SAVEFPU,%edx		/* h/w bugs make saving complicated */
	pushl	%edx
	call	_npxsave			/* do it in a big C function */
	popl	%eax
1:
#endif	/* NNPX > 0 */

	/* save is done, now choose a new process */
sw1:

#ifdef SMP
	/* Stop scheduling if smp_active goes zero and we are not BSP */
	cmpl	$0,_smp_active
	jne	1f
	cmpl	$0,_cpuid
	je	1f

	movl	_idleproc, %eax			/* park this AP on idleproc */
	jmp	sw1b
1:
#endif

	/*
	 * Choose a new process to schedule.  chooseproc() returns idleproc
	 * if it cannot find another process to run.
	 */
sw1a:
	call	_chooseproc			/* trash ecx, edx, ret eax*/

#ifdef INVARIANTS
	testl	%eax,%eax			/* no process? */
	jz	badsw3				/* no, panic */
#endif
sw1b:
	movl	%eax,%ecx			/* %ecx = incoming process */

	xorl	%eax,%eax
	andl	$~AST_RESCHED,_astpending	/* reschedule request satisfied */

#ifdef	INVARIANTS
	cmpb	$SRUN,P_STAT(%ecx)
	jne	badsw2
#endif

	movl	P_ADDR(%ecx),%edx		/* %edx = new process's PCB */

#if defined(SWTCH_OPTIM_STATS)
	incl	_swtch_optim_stats
#endif
	/* switch address space (skip reload if %cr3 is unchanged) */
	movl	%cr3,%ebx
	cmpl	PCB_CR3(%edx),%ebx
	je	4f
#if defined(SWTCH_OPTIM_STATS)
	decl	_swtch_optim_stats
	incl	_tlb_flush_count
#endif
	movl	PCB_CR3(%edx),%ebx		/* reload %cr3; flushes TLB */
	movl	%ebx,%cr3
4:

#ifdef SMP
	movl	_cpuid, %esi
#else
	xorl	%esi, %esi
#endif
	cmpl	$0, PCB_EXT(%edx)		/* has pcb extension? */
	je	1f
	btsl	%esi, _private_tss		/* mark use of private tss */
	movl	PCB_EXT(%edx), %edi		/* new tss descriptor */
	jmp	2f
1:

	/* update common_tss.tss_esp0 pointer */
	movl	%edx, %ebx			/* pcb */
	addl	$(UPAGES * PAGE_SIZE - 16), %ebx
	movl	%ebx, _common_tss + TSS_ESP0

	/* if we were using a private tss before, switch back to common */
	btrl	%esi, _private_tss
	jae	3f				/* bit was clear: tr already ok */
#ifdef SMP
	movl	$gd_common_tssd, %edi		/* per-cpu tssd via %fs base */
	addl	%fs:0, %edi
#else
	movl	$_common_tssd, %edi
#endif
2:
	/* move correct tss descriptor into GDT slot, then reload tr */
	movl	_tss_gdt, %ebx			/* entry in GDT */
	movl	0(%edi), %eax			/* copy 8-byte descriptor */
	movl	%eax, 0(%ebx)
	movl	4(%edi), %eax
	movl	%eax, 4(%ebx)
	movl	$GPROC0_SEL*8, %esi		/* GSEL(entry, SEL_KPL) */
	ltr	%si
3:
	movl	P_VMSPACE(%ecx), %ebx
#ifdef SMP
	movl	_cpuid, %eax
#else
	xorl	%eax, %eax
#endif
	/* set this cpu's bit in the new vmspace's pmap active mask */
	btsl	%eax, VM_PMAP+PM_ACTIVE(%ebx)

	/* restore context */
	movl	PCB_EBX(%edx),%ebx
	movl	PCB_ESP(%edx),%esp
	movl	PCB_EBP(%edx),%ebp
	movl	PCB_ESI(%edx),%esi
	movl	PCB_EDI(%edx),%edi
	pushl	PCB_EIP(%edx)			/* resume point for final ret */

#ifdef SMP
#ifdef GRAB_LOPRIO				/* hold LOPRIO for INTs */
#ifdef CHEAP_TPR
	movl	$0, lapic_tpr
#else
	andl	$~APIC_TPR_PRIO, lapic_tpr
#endif /** CHEAP_TPR */
#endif /** GRAB_LOPRIO */
	movl	_cpuid,%eax
	movb	%al, P_ONCPU(%ecx)		/* new process now owns this cpu */
#endif /* SMP */
	movl	%edx, _curpcb
	movl	%ecx, _curproc			/* into next process */

#ifdef SMP
	/* XXX FIXME: we should be restoring the local APIC TPR */
#endif /* SMP */

#ifdef	USER_LDT
	cmpl	$0, PCB_USERLDT(%edx)
	jnz	1f
	movl	__default_ldt,%eax
	cmpl	_currentldt,%eax
	je	2f				/* default ldt already loaded */
	lldt	__default_ldt
	movl	%eax,_currentldt
	jmp	2f
1:	pushl	%edx				/* process has a private LDT */
	call	_set_user_ldt
	popl	%edx
2:
#endif

	/* This must be done after loading the user LDT. */
	.globl	cpu_switch_load_gs
cpu_switch_load_gs:
	movl	PCB_GS(%edx),%gs

	/* test if debug registers should be restored */
	movb	PCB_FLAGS(%edx),%al
	andb	$PCB_DBREGS,%al
	jz	1f				/* no, skip over */
	movl	PCB_DR6(%edx),%eax		/* yes, do the restore */
	movl	%eax,%dr6
	movl	PCB_DR3(%edx),%eax
	movl	%eax,%dr3
	movl	PCB_DR2(%edx),%eax
	movl	%eax,%dr2
	movl	PCB_DR1(%edx),%eax
	movl	%eax,%dr1
	movl	PCB_DR0(%edx),%eax
	movl	%eax,%dr0
	movl	PCB_DR7(%edx),%eax		/* dr7 last: re-arms watchpoints */
	movl	%eax,%dr7
1:

	/*
	 * restore sched_lock recursion count and transfer ownership to
	 * new process
	 */
	movl	PCB_SCHEDNEST(%edx),%eax
	movl	%eax,_sched_lock+MTX_RECURSE

	movl	_curproc,%eax
	movl	%eax,_sched_lock+MTX_LOCK

	ret					/* returns to pushed PCB_EIP */

CROSSJUMPTARGET(sw1a)

#ifdef INVARIANTS
badsw2:
	pushl	$sw0_2
	call	_panic

sw0_2:	.asciz	"cpu_switch: not SRUN"

badsw3:
	pushl	$sw0_3
	call	_panic

sw0_3:	.asciz	"cpu_switch: chooseproc returned NULL"
#endif

/*
 * savectx(pcb)
 * Update pcb, saving current processor state.
 */
ENTRY(savectx)
	/* fetch PCB */
	movl	4(%esp),%ecx

	/* caller's return address - child won't execute this routine */
	movl	(%esp),%eax
	movl	%eax,PCB_EIP(%ecx)

	movl	%cr3,%eax
	movl	%eax,PCB_CR3(%ecx)

	movl	%ebx,PCB_EBX(%ecx)
	movl	%esp,PCB_ESP(%ecx)
	movl	%ebp,PCB_EBP(%ecx)
	movl	%esi,PCB_ESI(%ecx)
	movl	%edi,PCB_EDI(%ecx)
	movl	%gs,PCB_GS(%ecx)

#if NNPX > 0
	/*
	 * If npxproc == NULL, then the npx h/w state is irrelevant and the
	 * state had better already be in the pcb.  This is true for forks
	 * but not for dumps (the old book-keeping with FP flags in the pcb
	 * always lost for dumps because the dump pcb has 0 flags).
	 *
	 * If npxproc != NULL, then we have to save the npx h/w state to
	 * npxproc's pcb and copy it to the requested pcb, or save to the
	 * requested pcb and reload.  Copying is easier because we would
	 * have to handle h/w bugs for reloading.  We used to lose the
	 * parent's npx state for forks by forgetting to reload.
	 */
	movl	_npxproc,%eax
	testl	%eax,%eax
	je	1f				/* no live npx state: done */

	pushl	%ecx
	movl	P_ADDR(%eax),%eax		/* %eax = npxproc's save area */
	leal	PCB_SAVEFPU(%eax),%eax
	pushl	%eax
	pushl	%eax
	call	_npxsave			/* save h/w state into npxproc */
	addl	$4,%esp
	popl	%eax				/* %eax = npxproc's save area */
	popl	%ecx				/* %ecx = requested pcb */

	/* bcopy(npxproc save area, &pcb->pcb_savefpu, PCB_SAVEFPU_SIZE) */
	pushl	$PCB_SAVEFPU_SIZE
	leal	PCB_SAVEFPU(%ecx),%ecx
	pushl	%ecx
	pushl	%eax
	call	_bcopy
	addl	$12,%esp
#endif	/* NNPX > 0 */

1:
	ret