swtch.S revision 284115
/*	$NetBSD: cpuswitch.S,v 1.41 2003/11/15 08:44:18 scw Exp $	*/

/*-
 * Copyright 2003 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Steve C. Woodford for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
/*-
 * Copyright (c) 1994-1998 Mark Brinicombe.
 * Copyright (c) 1994 Brini.
 * All rights reserved.
 *
 * This code is derived from software written for Brini by Mark Brinicombe
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Brini.
 * 4. The name of the company nor the name of the author may be used to
 *    endorse or promote products derived from this software without specific
 *    prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY BRINI ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL BRINI OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * RiscBSD kernel project
 *
 * cpuswitch.S
 *
 * cpu switching functions
 *
 * Created      : 15/10/94
 *
 */

#include "assym.s"
#include "opt_sched.h"

#include <machine/acle-compat.h>
#include <machine/asm.h>
#include <machine/asmacros.h>
#include <machine/armreg.h>
#include <machine/vfp.h>

__FBSDID("$FreeBSD: head/sys/arm/arm/swtch.S 284115 2015-06-07 13:59:02Z andrew $");

#if __ARM_ARCH >= 6 && defined(SMP)
#define GET_PCPU(tmp, tmp2) \
	mrc	p15, 0, tmp, c0, c0, 5;	\
	and	tmp, tmp, #0xf;		\
	ldr	tmp2, .Lcurpcpu+4;	\
	mul	tmp, tmp, tmp2;		\
	ldr	tmp2, .Lcurpcpu;	\
	add	tmp, tmp, tmp2;
#else

#define GET_PCPU(tmp, tmp2) \
	ldr	tmp, .Lcurpcpu
#endif
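
/*
 * Note: in the SMP variant of GET_PCPU above, the MPIDR register
 * (CP15 c0, c0, 5) supplies the CPU number in its low affinity bits;
 * that number is multiplied by PCPU_SIZE and added to the address of
 * the __pcpu array, i.e. the result is &__pcpu[cpuid].
 */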

#ifdef VFP
	.fpu vfp	/* allow VFP instructions */
#endif

.Lcurpcpu:
	.word	_C_LABEL(__pcpu)
	.word	PCPU_SIZE
.Lblocked_lock:
	.word	_C_LABEL(blocked_lock)


#ifndef ARM_NEW_PMAP

#define DOMAIN_CLIENT	0x01

.Lcpufuncs:
	.word	_C_LABEL(cpufuncs)

/*
 * cpu_throw(oldtd, newtd)
 *
 * Remove current thread state, then select the next thread to run
 * and load its state.
 * r0 = oldtd
 * r1 = newtd
 */
ENTRY(cpu_throw)
	mov	r5, r1

	/*
	 * r0 = oldtd
	 * r5 = newtd
	 */

#ifdef VFP				/* This thread is dying, disable */
	bl	_C_LABEL(vfp_discard)	/* VFP without preserving state. */
#endif

	GET_PCPU(r7, r9)
	ldr	r7, [r5, #(TD_PCB)]	/* r7 = new thread's PCB */

	/* Switch to the new thread's context. */

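	/*
	 * Calls through the cpufuncs table below use the classic
	 * "mov lr, pc; ldr pc, [rN, #CF_xxx]" sequence.  Reading the pc
	 * in ARM state yields the address of the current instruction
	 * plus 8, so lr ends up pointing at the instruction after the
	 * ldr, which turns the pair into an indirect function call.
	 */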
	ldr	r9, .Lcpufuncs
#if !defined(CPU_ARM11) && !defined(CPU_CORTEXA) && !defined(CPU_MV_PJ4B) && !defined(CPU_KRAIT)
	mov	lr, pc
	ldr	pc, [r9, #CF_IDCACHE_WBINV_ALL]
#endif
	ldr	r0, [r7, #(PCB_PL1VEC)]
	ldr	r1, [r7, #(PCB_DACR)]
	/*
	 * r0 = Pointer to L1 slot for vector_page (or NULL)
	 * r1 = new thread's DACR
	 * r5 = newtd
	 * r7 = new thread's PCB
	 * r9 = cpufuncs
	 */

	/*
	 * Ensure the vector table is accessible by fixing up the new
	 * thread's L1.
	 */
	cmp	r0, #0			/* No need to fixup vector table? */
	ldrne	r3, [r0]		/* But if yes, fetch current value */
	ldrne	r2, [r7, #(PCB_L1VEC)]	/* Fetch new vector_page value */
	mcr	p15, 0, r1, c3, c0, 0	/* Update DACR for the new context */
	cmpne	r3, r2			/* Stuffing the same value? */
	strne	r2, [r0]		/* Store if not. */

#ifdef PMAP_INCLUDE_PTE_SYNC
	/*
	 * Need to sync the cache to make sure that last store is
	 * visible to the MMU.
	 */
	movne	r1, #4
	movne	lr, pc
	ldrne	pc, [r9, #CF_DCACHE_WB_RANGE]
#endif /* PMAP_INCLUDE_PTE_SYNC */

	/*
	 * Note: We don't do the same optimisation as cpu_switch() with
	 * respect to avoiding flushing the TLB if we're switching to
	 * the same L1, since this process' VM space may be about to go
	 * away, so we don't want *any* turds left in the TLB.
	 */

	/* Switch the memory to the new process */
	ldr	r0, [r7, #(PCB_PAGEDIR)]
	mov	lr, pc
	ldr	pc, [r9, #CF_CONTEXT_SWITCH]

	GET_PCPU(r6, r4)
	/* Hook in a new pcb */
	str	r7, [r6, #PC_CURPCB]
	/* We have a new curthread now, so make a note of it */
	str	r5, [r6, #PC_CURTHREAD]
#if __ARM_ARCH >= 6
	mcr	p15, 0, r5, c13, c0, 4
#endif
	/* Set the new tp */
	ldr	r6, [r5, #(TD_MD + MD_TP)]
#if __ARM_ARCH >= 6
	mcr	p15, 0, r6, c13, c0, 3
#else
	ldr	r4, =ARM_TP_ADDRESS
	str	r6, [r4]
	ldr	r6, [r5, #(TD_MD + MD_RAS_START)]
	str	r6, [r4, #4]	/* ARM_RAS_START */
	ldr	r6, [r5, #(TD_MD + MD_RAS_END)]
	str	r6, [r4, #8]	/* ARM_RAS_END */
#endif
	/* Restore all the saved registers and exit */
	add	r3, r7, #PCB_R4
	ldmia	r3, {r4-r12, sp, pc}
END(cpu_throw)

/*
 * cpu_switch(oldtd, newtd, lock)
 *
 * Save the current thread state, then select the next thread to run
 * and load its state.
 * r0 = oldtd
 * r1 = newtd
 * r2 = lock (new lock for old thread)
 */
ENTRY(cpu_switch)
	/* Interrupts are disabled. */
	/* Save all the registers in the old thread's pcb. */
	ldr	r3, [r0, #(TD_PCB)]
	add	r3, #(PCB_R4)
	stmia	r3, {r4-r12, sp, lr, pc}

	mov	r6, r2			/* Save the mutex */

	/* rem: r0 = oldtd */
	/* rem: interrupts are disabled */

	/* Process is now on a processor. */
	/* We have a new curthread now, so make a note of it */
	GET_PCPU(r7, r2)
	str	r1, [r7, #PC_CURTHREAD]
#if __ARM_ARCH >= 6
	mcr	p15, 0, r1, c13, c0, 4
#endif

	/* Hook in a new pcb */
	ldr	r2, [r1, #TD_PCB]
	str	r2, [r7, #PC_CURPCB]

	/* Stage two : Save old context */

	/* Get the PCB (user structure) of the old thread. */
	ldr	r2, [r0, #(TD_PCB)]
	mov	r4, r0			/* Save the old thread. */

#if __ARM_ARCH >= 6
	/*
	 * Set new tp. No need to store the old one first, userland can't
	 * change it directly on armv6.
	 */
	ldr	r9, [r1, #(TD_MD + MD_TP)]
	mcr	p15, 0, r9, c13, c0, 3
#else
	/* Store the old tp; userland can change it on armv4. */
	ldr	r3, =ARM_TP_ADDRESS
	ldr	r9, [r3]
	str	r9, [r0, #(TD_MD + MD_TP)]
	ldr	r9, [r3, #4]
	str	r9, [r0, #(TD_MD + MD_RAS_START)]
	ldr	r9, [r3, #8]
	str	r9, [r0, #(TD_MD + MD_RAS_END)]

	/* Set the new tp */
	ldr	r9, [r1, #(TD_MD + MD_TP)]
	str	r9, [r3]
	ldr	r9, [r1, #(TD_MD + MD_RAS_START)]
	str	r9, [r3, #4]
	ldr	r9, [r1, #(TD_MD + MD_RAS_END)]
	str	r9, [r3, #8]
#endif

	/* Get the PCB of the new thread in r9 */
	ldr	r9, [r1, #(TD_PCB)]

	/* rem: r2 = old PCB */
	/* rem: r9 = new PCB */
	/* rem: interrupts are enabled */

#ifdef VFP
	fmrx	r0, fpexc		/* If the VFP is enabled */
	tst	r0, #(VFPEXC_EN)	/* the current thread has */
	movne	r1, #1			/* used it, so go save */
	addne	r0, r2, #(PCB_VFPSTATE)	/* the state into the PCB */
	blne	_C_LABEL(vfp_store)	/* and disable the VFP. */
#endif

	/* r0-r3 now free! */

	/* Third phase : restore saved context */

	/* rem: r2 = old PCB */
	/* rem: r9 = new PCB */

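	/*
	 * The DACR holds a two-bit access field for each of the 16
	 * protection domains (0b01 = client).  A new DACR that grants
	 * client access to PMAP_DOMAIN_KERNEL only (the comparison
	 * below) means we are switching to a kernel thread, so the
	 * cache flush and TTB reload can be skipped.
	 */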
	ldr	r5, [r9, #(PCB_DACR)]	/* r5 = new DACR */
	mov	r2, #DOMAIN_CLIENT
	cmp	r5, r2, lsl #(PMAP_DOMAIN_KERNEL * 2) /* Sw to kernel thread? */
	beq	.Lcs_context_switched	/* Yup. Don't flush cache */
	mrc	p15, 0, r0, c3, c0, 0	/* r0 = old DACR */
	/*
	 * Get the new L1 table pointer into r11.  If we're switching to
	 * an LWP with the same address space as the outgoing one, we can
	 * skip the cache purge and the TTB load.
	 *
	 * To avoid data dep stalls that would happen anyway, we try
	 * to get some useful work done in the mean time.
	 */
	mrc	p15, 0, r10, c2, c0, 0	/* r10 = old L1 */
	ldr	r11, [r9, #(PCB_PAGEDIR)] /* r11 = new L1 */

	teq	r10, r11		/* Same L1? */
	cmpeq	r0, r5			/* Same DACR? */
	beq	.Lcs_context_switched	/* yes! */

#if !defined(CPU_ARM11) && !defined(CPU_CORTEXA) && !defined(CPU_MV_PJ4B) && !defined(CPU_KRAIT)
	/*
	 * Definitely need to flush the cache.
	 */

	ldr	r1, .Lcpufuncs
	mov	lr, pc
	ldr	pc, [r1, #CF_IDCACHE_WBINV_ALL]
#endif
.Lcs_cache_purge_skipped:
	/* rem: r6 = lock */
	/* rem: r9 = new PCB */
	/* rem: r10 = old L1 */
	/* rem: r11 = new L1 */

	mov	r2, #0x00000000
	ldr	r7, [r9, #(PCB_PL1VEC)]

	/*
	 * Ensure the vector table is accessible by fixing up the L1
	 */
	cmp	r7, #0			/* No need to fixup vector table? */
	ldrne	r2, [r7]		/* But if yes, fetch current value */
	ldrne	r0, [r9, #(PCB_L1VEC)]	/* Fetch new vector_page value */
	mcr	p15, 0, r5, c3, c0, 0	/* Update DACR for new context */
	cmpne	r2, r0			/* Stuffing the same value? */
#ifndef PMAP_INCLUDE_PTE_SYNC
	strne	r0, [r7]		/* Nope, update it */
#else
	beq	.Lcs_same_vector
	str	r0, [r7]		/* Otherwise, update it */

	/*
	 * Need to sync the cache to make sure that last store is
	 * visible to the MMU.
	 */
	ldr	r2, .Lcpufuncs
	mov	r0, r7
	mov	r1, #4
	mov	lr, pc
	ldr	pc, [r2, #CF_DCACHE_WB_RANGE]

.Lcs_same_vector:
#endif /* PMAP_INCLUDE_PTE_SYNC */

	cmp	r10, r11		/* Switching to the same L1? */
	ldr	r10, .Lcpufuncs
	beq	.Lcs_same_l1		/* Yup. */
	/*
	 * Do a full context switch, including full TLB flush.
	 */
	mov	r0, r11
	mov	lr, pc
	ldr	pc, [r10, #CF_CONTEXT_SWITCH]

	b	.Lcs_context_switched

	/*
	 * We're switching to a different process in the same L1.
	 * In this situation, we only need to flush the TLB for the
	 * vector_page mapping, and even then only if r7 is non-NULL.
	 */
.Lcs_same_l1:
	cmp	r7, #0
	movne	r0, #0			/* We *know* vector_page's VA is 0x0 */
	movne	lr, pc
	ldrne	pc, [r10, #CF_TLB_FLUSHID_SE]

.Lcs_context_switched:

	/* Release the old thread */
	str	r6, [r4, #TD_LOCK]
#if defined(SCHED_ULE) && defined(SMP)
	ldr	r6, .Lblocked_lock
	GET_CURTHREAD_PTR(r3)
1:
	ldr	r4, [r3, #TD_LOCK]
	cmp	r4, r6
	beq	1b
#endif

	/* XXXSCW: Safe to re-enable FIQs here */

	/* rem: r9 = new PCB */

	/* Restore all the saved registers and exit */
	add	r3, r9, #PCB_R4
	ldmia	r3, {r4-r12, sp, pc}
END(cpu_switch)


#else /* !ARM_NEW_PMAP */
#include <machine/sysreg.h>

ENTRY(cpu_context_switch)	/* QQQ: Should this be a macro rather than a function? */
	DSB
	mcr	CP15_TTBR0(r0)		/* set the new TTB */
	ISB
	mov	r0, #(CPU_ASID_KERNEL)
	mcr	CP15_TLBIASID(r0)	/* flush non-global TLB entries */
	/*
	 * Flush the entire Branch Target Cache because the branch predictor
	 * is not architecturally invisible. See ARM Architecture Reference
	 * Manual ARMv7-A and ARMv7-R edition, page B2-1264(65), the "Branch
	 * predictors" and "Requirements for branch predictor maintenance
	 * operations" sections.
	 *
	 * QQQ: The predictor is virtually addressed and holds virtual target
	 * addresses, so if a mapping is changed the predictor cache must be
	 * flushed.  That flush is part of the full i-cache invalidation that
	 * is always performed when a code mapping is changed, so this is the
	 * only place in the kernel where a standalone predictor flush must be
	 * executed (except for the self-modifying code case).
	 */
	mcr	CP15_BPIALL		/* and flush entire Branch Target Cache */
	DSB
	mov	pc, lr
END(cpu_context_switch)

/*
 * cpu_throw(oldtd, newtd)
 *
 * Remove current thread state, then select the next thread to run
 * and load its state.
 * r0 = oldtd
 * r1 = newtd
 */
ENTRY(cpu_throw)
	mov	r10, r0			/* r10 = oldtd */
	mov	r11, r1			/* r11 = newtd */

#ifdef VFP				/* This thread is dying, disable */
	bl	_C_LABEL(vfp_discard)	/* VFP without preserving state. */
#endif
	GET_PCPU(r8, r9)		/* r8 = current pcpu */
	ldr	r4, [r8, #PC_CPUID]	/* r4 = current cpu id */

	cmp	r10, #0			/* old thread? */
	beq	2f			/* no, skip */

	/* Remove this CPU from the active list. */
	ldr	r5, [r8, #PC_CURPMAP]
	mov	r0, #(PM_ACTIVE)
	add	r5, r0			/* r5 = old pm_active */

	/* Compute position and mask. */
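	/*
	 * pm_active is a bitmap with one bit per cpuid.  When it spans
	 * more than one 32-bit word, the byte offset of the word that
	 * holds this CPU's bit is (cpuid / 32) * 4, computed below as
	 * (cpuid >> 3) & ~3, and the bit within that word is
	 * 1 << (cpuid & 31).
	 */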
#if _NCPUWORDS > 1
	lsr	r0, r4, #3
	bic	r0, #3
	add	r5, r0			/* r5 = position in old pm_active */
	mov	r2, #1
	and	r0, r4, #31
	lsl	r2, r0			/* r2 = mask */
#else
	mov	r2, #1
	lsl	r2, r4			/* r2 = mask */
#endif
	/* Clear cpu from old active list. */
#ifdef SMP
1:	ldrex	r0, [r5]
	bic	r0, r2
	strex	r1, r0, [r5]
	teq	r1, #0
	bne	1b
#else
	ldr	r0, [r5]
	bic	r0, r2
	str	r0, [r5]
#endif

2:
#ifdef INVARIANTS
	cmp	r11, #0			/* new thread? */
	beq	badsw1			/* no, panic */
#endif
	ldr	r7, [r11, #(TD_PCB)]	/* r7 = new PCB */

	/*
	 * Registers at this point
	 *   r4  = current cpu id
	 *   r7  = new PCB
	 *   r8  = current pcpu
	 *   r11 = newtd
	 */

	/* MMU switch to new thread. */
	ldr	r0, [r7, #(PCB_PAGEDIR)]
#ifdef INVARIANTS
	cmp	r0, #0			/* new pagedir? */
	beq	badsw4			/* no, panic */
#endif
	bl	_C_LABEL(cpu_context_switch)

	/*
	 * Set new PMAP as current one.
	 * Insert cpu into new active list.
	 */

	ldr	r6, [r11, #(TD_PROC)]	/* newtd->proc */
	ldr	r6, [r6, #(P_VMSPACE)]	/* newtd->proc->vmspace */
	add	r6, #VM_PMAP		/* newtd->proc->vmspace->pmap */
	str	r6, [r8, #PC_CURPMAP]	/* store to curpmap */

	mov	r0, #PM_ACTIVE
	add	r6, r0			/* r6 = new pm_active */

	/* compute position and mask */
#if _NCPUWORDS > 1
	lsr	r0, r4, #3
	bic	r0, #3
	add	r6, r0			/* r6 = position in new pm_active */
	mov	r2, #1
	and	r0, r4, #31
	lsl	r2, r0			/* r2 = mask */
#else
	mov	r2, #1
	lsl	r2, r4			/* r2 = mask */
#endif
	/* Set cpu in new active list. */
#ifdef SMP
1:	ldrex	r0, [r6]
	orr	r0, r2
	strex	r1, r0, [r6]
	teq	r1, #0
	bne	1b
#else
	ldr	r0, [r6]
	orr	r0, r2
	str	r0, [r6]
#endif
	/*
	 * Registers at this point.
	 *   r7  = new PCB
	 *   r8  = current pcpu
	 *   r11 = newtd
	 * They must match the ones at the sw1 position !!!
	 */
	DMB
	b	sw1		/* share new thread init with cpu_switch() */
END(cpu_throw)

/*
 * cpu_switch(oldtd, newtd, lock)
 *
 * Save the current thread state, then select the next thread to run
 * and load its state.
 * r0 = oldtd
 * r1 = newtd
 * r2 = lock (new lock for old thread)
 */
ENTRY(cpu_switch)
	/* Interrupts are disabled. */
#ifdef INVARIANTS
	cmp	r0, #0			/* old thread? */
	beq	badsw2			/* no, panic */
#endif
	/* Save all the registers in the old thread's pcb. */
	ldr	r3, [r0, #(TD_PCB)]
	add	r3, #(PCB_R4)
	stmia	r3, {r4-r12, sp, lr, pc}
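	/*
	 * The stmia above stores the callee-saved registers, sp, lr and
	 * pc into the pcb starting at PCB_R4.  The matching ldmia at the
	 * end of cpu_switch() loads {r4-r12, sp, pc}, so pc is reloaded
	 * from the slot in which lr (the return address of the
	 * cpu_switch() call) was saved: when this thread is eventually
	 * switched back in, it resumes in its caller as if cpu_switch()
	 * had just returned.
	 */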
#ifdef INVARIANTS
	cmp	r1, #0			/* new thread? */
	beq	badsw3			/* no, panic */
#endif
	/*
	 * Save arguments.  Note that we can now use r0-r14 until
	 * it is time to restore them for the new thread.  However,
	 * some registers are not preserved across function calls.
	 */
	mov	r9, r2			/* r9 = lock */
	mov	r10, r0			/* r10 = oldtd */
	mov	r11, r1			/* r11 = newtd */

	GET_PCPU(r8, r3)		/* r8 = current PCPU */
	ldr	r7, [r11, #(TD_PCB)]	/* r7 = newtd->td_pcb */


#ifdef VFP
	ldr	r3, [r10, #(TD_PCB)]
	fmrx	r0, fpexc		/* If the VFP is enabled */
	tst	r0, #(VFPEXC_EN)	/* the current thread has */
	movne	r1, #1			/* used it, so go save */
	addne	r0, r3, #(PCB_VFPSTATE)	/* the state into the PCB */
	blne	_C_LABEL(vfp_store)	/* and disable the VFP. */
#endif

	/*
	 * MMU switch. If we're switching to a thread with the same
	 * address space as the outgoing one, we can skip the MMU switch.
	 */
	mrc	CP15_TTBR0(r1)		/* r1 = old TTB */
	ldr	r0, [r7, #(PCB_PAGEDIR)] /* r0 = new TTB */
	cmp	r0, r1			/* Switching to the same TTB? */
	beq	sw0			/* same TTB, skip */

#ifdef INVARIANTS
	cmp	r0, #0			/* new pagedir? */
	beq	badsw4			/* no, panic */
#endif

	bl	cpu_context_switch	/* new TTB as argument */

	/*
	 * Registers at this point
	 *   r7  = new PCB
	 *   r8  = current pcpu
	 *   r9  = lock
	 *   r10 = oldtd
	 *   r11 = newtd
	 */

	/*
	 * Set new PMAP as current one.
	 * Update active list on PMAPs.
	 */
	ldr	r6, [r11, #TD_PROC]	/* newtd->proc */
	ldr	r6, [r6, #P_VMSPACE]	/* newtd->proc->vmspace */
	add	r6, #VM_PMAP		/* newtd->proc->vmspace->pmap */

	ldr	r5, [r8, #PC_CURPMAP]	/* get old curpmap */
	str	r6, [r8, #PC_CURPMAP]	/* and save new one */

	mov	r0, #PM_ACTIVE
	add	r5, r0			/* r5 = old pm_active */
	add	r6, r0			/* r6 = new pm_active */

	/* Compute position and mask. */
	ldr	r4, [r8, #PC_CPUID]
#if _NCPUWORDS > 1
	lsr	r0, r4, #3
	bic	r0, #3
	add	r5, r0			/* r5 = position in old pm_active */
	add	r6, r0			/* r6 = position in new pm_active */
	mov	r2, #1
	and	r0, r4, #31
	lsl	r2, r0			/* r2 = mask */
#else
	mov	r2, #1
	lsl	r2, r4			/* r2 = mask */
#endif
	/* Clear cpu from old active list. */
#ifdef SMP
1:	ldrex	r0, [r5]
	bic	r0, r2
	strex	r1, r0, [r5]
	teq	r1, #0
	bne	1b
#else
	ldr	r0, [r5]
	bic	r0, r2
	str	r0, [r5]
#endif
	/* Set cpu in new active list. */
#ifdef SMP
1:	ldrex	r0, [r6]
	orr	r0, r2
	strex	r1, r0, [r6]
	teq	r1, #0
	bne	1b
#else
	ldr	r0, [r6]
	orr	r0, r2
	str	r0, [r6]
#endif

sw0:
	/*
	 * Registers at this point
	 *   r7  = new PCB
	 *   r8  = current pcpu
	 *   r9  = lock
	 *   r10 = oldtd
	 *   r11 = newtd
	 */

	/* Change the old thread lock. */
	add	r5, r10, #TD_LOCK
	DMB
1:	ldrex	r0, [r5]
	strex	r1, r9, [r5]
	teq	r1, #0
	bne	1b
	DMB

sw1:
	clrex
	/*
	 * Registers at this point
	 *   r7  = new PCB
	 *   r8  = current pcpu
	 *   r11 = newtd
	 */

#if defined(SMP) && defined(SCHED_ULE)
	/*
	 * i386 and amd64 do the blocked lock test only for SMP and SCHED_ULE.
	 * QQQ: What does it mean in reality and why is it done?
	 */
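	/*
	 * Presumably this mirrors the i386/amd64 behaviour: under
	 * SCHED_ULE on SMP, a thread that is being switched out has its
	 * td_lock pointed at blocked_lock until the old CPU has finished
	 * saving its context.  Spinning here until td_lock no longer
	 * points at blocked_lock ensures we never start running a thread
	 * whose saved state may still be being written on another CPU.
	 */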
	ldr	r6, =blocked_lock
1:
	ldr	r3, [r11, #TD_LOCK]	/* atomic write regular read */
	cmp	r3, r6
	beq	1b
#endif
	/* Set the new tls */
	ldr	r0, [r11, #(TD_MD + MD_TP)]
	mcr	CP15_TPIDRURO(r0)	/* write tls thread reg 2 */

	/* We have a new curthread now, so make a note of it */
	str	r11, [r8, #PC_CURTHREAD]
	mcr	CP15_TPIDRPRW(r11)

	/* store pcb in per cpu structure */
	str	r7, [r8, #PC_CURPCB]

	/*
	 * Restore all saved registers and return. Note that some saved
	 * registers can be changed when either cpu_fork(), cpu_set_upcall(),
	 * cpu_set_fork_handler(), or makectx() was called.
	 */
	add	r3, r7, #PCB_R4
	ldmia	r3, {r4-r12, sp, pc}

#ifdef INVARIANTS
badsw1:
	ldr	r0, =sw1_panic_str
	bl	_C_LABEL(panic)
1:	nop
	b	1b

badsw2:
	ldr	r0, =sw2_panic_str
	bl	_C_LABEL(panic)
1:	nop
	b	1b

badsw3:
	ldr	r0, =sw3_panic_str
	bl	_C_LABEL(panic)
1:	nop
	b	1b

badsw4:
	ldr	r0, =sw4_panic_str
	bl	_C_LABEL(panic)
1:	nop
	b	1b

sw1_panic_str:
	.asciz	"cpu_throw: no newthread supplied.\n"
sw2_panic_str:
	.asciz	"cpu_switch: no curthread supplied.\n"
sw3_panic_str:
	.asciz	"cpu_switch: no newthread supplied.\n"
sw4_panic_str:
	.asciz	"cpu_switch: new pagedir is NULL.\n"
#endif
END(cpu_switch)


#endif /* !ARM_NEW_PMAP */

ENTRY(savectx)
	stmfd	sp!, {lr}
	sub	sp, sp, #4

	/* Store all the registers in the thread's pcb */
	add	r3, r0, #(PCB_R4)
	stmia	r3, {r4-r12, sp, lr, pc}
#ifdef VFP
	fmrx	r2, fpexc		/* If the VFP is enabled */
	tst	r2, #(VFPEXC_EN)	/* the current thread has */
	movne	r1, #1			/* used it, so go save */
	addne	r0, r0, #(PCB_VFPSTATE)	/* the state into the PCB */
	blne	_C_LABEL(vfp_store)	/* and disable the VFP. */
#endif
	add	sp, sp, #4
	ldmfd	sp!, {pc}
END(savectx)

ENTRY(fork_trampoline)
	STOP_UNWINDING	/* EABI: Don't unwind beyond the thread entry point. */
	mov	fp, #0		/* OABI: Stack traceback via fp stops here. */
	mov	r2, sp
	mov	r1, r5
	mov	r0, r4
	ldr	lr, =swi_exit		/* Go finish forking, then return */
	b	_C_LABEL(fork_exit)	/* to userland via swi_exit code. */
END(fork_trampoline)