/* swtch.S revision 287536 */
/*-
 * Copyright (c) 2014 Andrew Turner
 * Copyright (c) 2014 The FreeBSD Foundation
 * All rights reserved.
 *
 * This software was developed by Andrew Turner under sponsorship from
 * the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

#include "assym.s"			/* PCB_*, PC_*, TD_*, TF_* offsets */
#include "opt_kstack_pages.h"
#include "opt_sched.h"

#include <machine/asm.h>

__FBSDID("$FreeBSD: head/sys/arm64/arm64/swtch.S 287536 2015-09-07 14:01:18Z andrew $");

/*
 * void cpu_throw(struct thread *old, struct thread *new)
 *
 * Switch to the context saved in the new thread's pcb without saving
 * the old one (the old thread is being discarded).
 *
 * In:	x0 = old (unused here), x1 = new
 * x18 is indexed with PC_* offsets below; it presumably holds the
 * per-CPU (pcpu) pointer on this platform -- NOTE(review): confirm
 * against machine/pcpu.h.
 */
ENTRY(cpu_throw)
#ifdef VFP
	/* Backup the new thread pointer around a call to C code */
	mov	x19, x1			/* x19 is callee-saved across the call */
	bl	vfp_discard		/* drop the old thread's FP state */
	mov	x1, x19
#endif

	/* Store the new curthread */
	str	x1, [x18, #PC_CURTHREAD]
	/* And the new pcb */
	ldr	x4, [x1, #TD_PCB]
	str	x4, [x18, #PC_CURPCB]

	/*
	 * TODO: We may need to flush the cache here.
	 */

	/* Switch to the new pmap: install the new L1 table base */
	ldr	x5, [x4, #PCB_L1ADDR]
	msr	ttbr0_el1, x5
	isb				/* make the new ttbr0 visible before the TLB op */

	/* Invalidate the TLB (all EL1 entries, inner shareable) */
	dsb	sy
	tlbi	vmalle1is
	dsb	sy				/* wait for TLB invalidation to complete */
	isb

	/*
	 * Restore the saved context: kernel sp and userspace TLS pointer
	 * (tpidr_el0) are stored as a pair at PCB_SP.
	 */
	ldp	x5, x6, [x4, #PCB_SP]
	mov	sp, x5
	msr	tpidr_el0, x6
	ldp	x8, x9, [x4, #PCB_REGS + 8 * 8]
	ldp	x10, x11, [x4, #PCB_REGS + 10 * 8]
	ldp	x12, x13, [x4, #PCB_REGS + 12 * 8]
	ldp	x14, x15, [x4, #PCB_REGS + 14 * 8]
	ldp	x16, x17, [x4, #PCB_REGS + 16 * 8]
	/* x18 is deliberately NOT restored: it keeps its per-CPU role */
	ldr	x19, [x4, #PCB_REGS + 19 * 8]
	ldp	x20, x21, [x4, #PCB_REGS + 20 * 8]
	ldp	x22, x23, [x4, #PCB_REGS + 22 * 8]
	ldp	x24, x25, [x4, #PCB_REGS + 24 * 8]
	ldp	x26, x27, [x4, #PCB_REGS + 26 * 8]
	ldp	x28, x29, [x4, #PCB_REGS + 28 * 8]
	ldr	x30, [x4, #PCB_REGS + 30 * 8]	/* lr: where the new thread resumes */

	ret
END(cpu_throw)

/*
 * void cpu_switch(struct thread *old, struct thread *new, struct mtx *mtx)
 *
 * Save the old thread's context into its pcb, hand the old thread the
 * mutex in x2, and resume the new thread from its pcb.
 *
 * x0 = old
 * x1 = new
 * x2 = mtx
 * x3 to x7, x16 and x17 are caller saved
 */
ENTRY(cpu_switch)
	/* Store the new curthread */
	str	x1, [x18, #PC_CURTHREAD]
	/* And the new pcb */
	ldr	x4, [x1, #TD_PCB]
	str	x4, [x18, #PC_CURPCB]

	/*
	 * Save the old context.
	 */
	ldr	x4, [x0, #TD_PCB]

	/* Store the callee-saved registers */
	stp	x8, x9, [x4, #PCB_REGS + 8 * 8]
	stp	x10, x11, [x4, #PCB_REGS + 10 * 8]
	stp	x12, x13, [x4, #PCB_REGS + 12 * 8]
	stp	x14, x15, [x4, #PCB_REGS + 14 * 8]
	stp	x16, x17, [x4, #PCB_REGS + 16 * 8]
	stp	x18, x19, [x4, #PCB_REGS + 18 * 8]
	stp	x20, x21, [x4, #PCB_REGS + 20 * 8]
	stp	x22, x23, [x4, #PCB_REGS + 22 * 8]
	stp	x24, x25, [x4, #PCB_REGS + 24 * 8]
	stp	x26, x27, [x4, #PCB_REGS + 26 * 8]
	stp	x28, x29, [x4, #PCB_REGS + 28 * 8]
	str	x30, [x4, #PCB_REGS + 30 * 8]
	/* And the old stack pointer, paired with the TLS base (tpidr_el0) */
	mov	x5, sp
	mrs	x6, tpidr_el0
	stp	x5, x6, [x4, #PCB_SP]

#ifdef VFP
	/* Preserve the argument registers in callee-saved regs across the call */
	mov	x19, x0
	mov	x20, x1
	mov	x21, x2
	/* Load the pcb address */
	mov	x1, x4
	bl	vfp_save_state		/* save old thread's FP state into its pcb */
	mov	x2, x21
	mov	x1, x20
	mov	x0, x19
#endif

	/*
	 * Restore the saved context.
	 */
	ldr	x4, [x1, #TD_PCB]

	/*
	 * TODO: We may need to flush the cache here if switching
	 * to a user process.
	 */

	/* Switch to the new pmap */
	ldr	x5, [x4, #PCB_L1ADDR]
	msr	ttbr0_el1, x5
	isb

	/* Invalidate the TLB */
	dsb	sy
	tlbi	vmalle1is
	dsb	sy
	isb

	/*
	 * Release the old thread. This doesn't need to be a store-release
	 * as the above dsb instruction will provide release semantics.
	 */
	str	x2, [x0, #TD_LOCK]
#if defined(SCHED_ULE) && defined(SMP)
	/*
	 * Spin until the new thread's lock is no longer blocked_lock,
	 * i.e. until the CPU releasing it has finished switching away.
	 */
	/* Read the value in blocked_lock */
	ldr	x0, =_C_LABEL(blocked_lock)
	ldr	x2, [x0]
1:
	ldar	x3, [x1, #TD_LOCK]	/* load-acquire pairs with the releasing store */
	cmp	x3, x2
	b.eq	1b
#endif

	/* Restore the registers */
	ldp	x5, x6, [x4, #PCB_SP]
	mov	sp, x5
	msr	tpidr_el0, x6
	ldp	x8, x9, [x4, #PCB_REGS + 8 * 8]
	ldp	x10, x11, [x4, #PCB_REGS + 10 * 8]
	ldp	x12, x13, [x4, #PCB_REGS + 12 * 8]
	ldp	x14, x15, [x4, #PCB_REGS + 14 * 8]
	ldp	x16, x17, [x4, #PCB_REGS + 16 * 8]
	/* x18 is skipped: it keeps the current CPU's per-CPU pointer */
	ldr	x19, [x4, #PCB_REGS + 19 * 8]
	ldp	x20, x21, [x4, #PCB_REGS + 20 * 8]
	ldp	x22, x23, [x4, #PCB_REGS + 22 * 8]
	ldp	x24, x25, [x4, #PCB_REGS + 24 * 8]
	ldp	x26, x27, [x4, #PCB_REGS + 26 * 8]
	ldp	x28, x29, [x4, #PCB_REGS + 28 * 8]
	ldr	x30, [x4, #PCB_REGS + 30 * 8]

	/* Clear the saved x18 slot in the pcb */
	str	xzr, [x4, #PCB_REGS + 18 * 8]
	ret
.Lcpu_switch_panic_str:
	/* NOTE(review): not referenced by the code visible in this file */
	.asciz "cpu_switch: %p\0"
END(cpu_switch)

/*
 * First-return path for a newly forked thread: call fork_exit() and
 * then return to userspace via eret from the trapframe at sp.
 *
 * x8/x9 were restored from the new thread's pcb by cpu_switch;
 * presumably x8 = fork_exit callout and x9 = its argument, set up by
 * the fork code -- NOTE(review): confirm against cpu_fork().
 */
ENTRY(fork_trampoline)
	mov	x0, x8			/* arg0: callout function */
	mov	x1, x9			/* arg1: callout argument */
	mov	x2, sp			/* arg2: trapframe */
	mov	fp, #0			/* Stack traceback stops here. */
	bl	_C_LABEL(fork_exit)

	/* Restore sp and lr from the trapframe */
	ldp	x0, x1, [sp]
	msr	sp_el0, x0
	mov	lr, x1

	/* Restore the registers other than x0 and x1 */
	ldp	x2, x3, [sp, #TF_X + 2 * 8]
	ldp	x4, x5, [sp, #TF_X + 4 * 8]
	ldp	x6, x7, [sp, #TF_X + 6 * 8]
	ldp	x8, x9, [sp, #TF_X + 8 * 8]
	ldp	x10, x11, [sp, #TF_X + 10 * 8]
	ldp	x12, x13, [sp, #TF_X + 12 * 8]
	ldp	x14, x15, [sp, #TF_X + 14 * 8]
	ldp	x16, x17, [sp, #TF_X + 16 * 8]
	/* x18 is restored later, after interrupts are masked */
	ldr	x19, [sp, #TF_X + 19 * 8]
	ldp	x20, x21, [sp, #TF_X + 20 * 8]
	ldp	x22, x23, [sp, #TF_X + 22 * 8]
	ldp	x24, x25, [sp, #TF_X + 24 * 8]
	ldp	x26, x27, [sp, #TF_X + 26 * 8]
	ldp	x28, x29, [sp, #TF_X + 28 * 8]
	/* Skip x30 as it was restored above as lr */

	/*
	 * Disable interrupts to avoid
	 * overwriting spsr_el1 by an IRQ exception.
	 */
	msr	daifset, #2

	/* Restore elr and spsr from the trapframe */
	ldp	x0, x1, [sp, #16]
	msr	elr_el1, x0
	msr	spsr_el1, x1

	/* Finally x0 and x1 */
	ldp	x0, x1, [sp, #TF_X + 0 * 8]
	ldr	x18, [sp, #TF_X + 18 * 8]

	/*
	 * No need for interrupts reenabling since PSR
	 * will be set to the desired value anyway.
	 */
	eret

END(fork_trampoline)

/*
 * void savectx(struct pcb *pcb)
 *
 * Save the current CPU context (callee-saved registers, sp, tpidr_el0
 * and, with VFP, the FP state) into the pcb passed in x0.
 */
ENTRY(savectx)
	/* Store the callee-saved registers */
	stp	x8, x9, [x0, #PCB_REGS + 8 * 8]
	stp	x10, x11, [x0, #PCB_REGS + 10 * 8]
	stp	x12, x13, [x0, #PCB_REGS + 12 * 8]
	stp	x14, x15, [x0, #PCB_REGS + 14 * 8]
	stp	x16, x17, [x0, #PCB_REGS + 16 * 8]
	stp	x18, x19, [x0, #PCB_REGS + 18 * 8]
	stp	x20, x21, [x0, #PCB_REGS + 20 * 8]
	stp	x22, x23, [x0, #PCB_REGS + 22 * 8]
	stp	x24, x25, [x0, #PCB_REGS + 24 * 8]
	stp	x26, x27, [x0, #PCB_REGS + 26 * 8]
	stp	x28, x29, [x0, #PCB_REGS + 28 * 8]
	str	x30, [x0, #PCB_REGS + 30 * 8]
	/* And the old stack pointer, paired with tpidr_el0 */
	mov	x5, sp
	mrs	x6, tpidr_el0
	stp	x5, x6, [x0, #PCB_SP]

	/* Store the VFP registers */
#ifdef VFP
	mov	x28, lr			/* preserve lr across the call (x28 saved above) */
	mov	x1, x0			/* move pcb to the correct register */
	mov	x0, xzr			/* td = NULL */
	bl	vfp_save_state
	mov	lr, x28
#endif

	ret
END(savectx)