/* swtch.S revision 286225 */
/*-
 * Copyright (c) 2014 Andrew Turner
 * Copyright (c) 2014 The FreeBSD Foundation
 * All rights reserved.
 *
 * This software was developed by Andrew Turner under sponsorship from
 * the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

#include "assym.s"
#include "opt_kstack_pages.h"
#include "opt_sched.h"

#include <machine/asm.h>

__FBSDID("$FreeBSD: head/sys/arm64/arm64/swtch.S 286225 2015-08-03 11:05:02Z andrew $");

/*
 * void cpu_throw(struct thread *old, struct thread *new)
 *
 * Switch to the "new" thread without saving any state from the old one.
 *   x0 = old
 *   x1 = new
 * Does not return to the caller: the final ret jumps through the lr
 * value loaded from the new thread's pcb.
 */
ENTRY(cpu_throw)
#ifdef VFP
	/* Backup the new thread pointer around a call to C code */
	mov	x19, x1
	bl	vfp_discard		/* drop the outgoing FP state */
	mov	x1, x19
#endif

	/* Store the new curthread (x18 holds the per-CPU data pointer) */
	str	x1, [x18, #PC_CURTHREAD]
	/* And the new pcb */
	ldr	x4, [x1, #TD_PCB]
	str	x4, [x18, #PC_CURPCB]

	/*
	 * TODO: We may need to flush the cache here.
	 */

	/* Switch to the new pmap: install the new thread's L1 table base */
	ldr	x5, [x4, #PCB_L1ADDR]
	msr	ttbr0_el1, x5
	isb

	/* Invalidate the TLB (all EL1 entries, inner-shareable) */
	dsb	sy
	tlbi	vmalle1is
	dsb	sy
	isb

	/* Restore the registers: stack pointer and TLS base first */
	ldp	x5, x6, [x4, #PCB_SP]
	mov	sp, x5
	msr	tpidr_el0, x6
	ldp	x8, x9, [x4, #PCB_REGS + 8 * 8]
	ldp	x10, x11, [x4, #PCB_REGS + 10 * 8]
	ldp	x12, x13, [x4, #PCB_REGS + 12 * 8]
	ldp	x14, x15, [x4, #PCB_REGS + 14 * 8]
	ldp	x16, x17, [x4, #PCB_REGS + 16 * 8]
	/* x18 is deliberately skipped: it must keep this CPU's pcpu pointer */
	ldr	x19, [x4, #PCB_REGS + 19 * 8]
	ldp	x20, x21, [x4, #PCB_REGS + 20 * 8]
	ldp	x22, x23, [x4, #PCB_REGS + 22 * 8]
	ldp	x24, x25, [x4, #PCB_REGS + 24 * 8]
	ldp	x26, x27, [x4, #PCB_REGS + 26 * 8]
	ldp	x28, x29, [x4, #PCB_REGS + 28 * 8]
	ldr	x30, [x4, #PCB_REGS + 30 * 8]	/* lr: where the new thread resumes */

	ret
END(cpu_throw)

/*
 * void cpu_switch(struct thread *old, struct thread *new, struct mtx *mtx)
 *
 * Save the current context into old's pcb, release old by storing mtx
 * into its td_lock, then restore new's context and return into it.
 *
 * x0 = old
 * x1 = new
 * x2 = mtx
 * x3 to x7, x16 and x17 are caller saved
 */
ENTRY(cpu_switch)
	/* Store the new curthread (x18 holds the per-CPU data pointer) */
	str	x1, [x18, #PC_CURTHREAD]
	/* And the new pcb */
	ldr	x4, [x1, #TD_PCB]
	str	x4, [x18, #PC_CURPCB]

	/*
	 * Save the old context.
	 */
	ldr	x4, [x0, #TD_PCB]

	/* Store the callee-saved registers (x8-x17 are stored as well) */
	stp	x8, x9, [x4, #PCB_REGS + 8 * 8]
	stp	x10, x11, [x4, #PCB_REGS + 10 * 8]
	stp	x12, x13, [x4, #PCB_REGS + 12 * 8]
	stp	x14, x15, [x4, #PCB_REGS + 14 * 8]
	stp	x16, x17, [x4, #PCB_REGS + 16 * 8]
	stp	x18, x19, [x4, #PCB_REGS + 18 * 8]
	stp	x20, x21, [x4, #PCB_REGS + 20 * 8]
	stp	x22, x23, [x4, #PCB_REGS + 22 * 8]
	stp	x24, x25, [x4, #PCB_REGS + 24 * 8]
	stp	x26, x27, [x4, #PCB_REGS + 26 * 8]
	stp	x28, x29, [x4, #PCB_REGS + 28 * 8]
	str	x30, [x4, #PCB_REGS + 30 * 8]
	/* And the old stack pointer and TLS base */
	mov	x5, sp
	mrs	x6, tpidr_el0
	stp	x5, x6, [x4, #PCB_SP]

#ifdef VFP
	/* Preserve old/new/mtx in callee-saved regs across the C call */
	mov	x19, x0
	mov	x20, x1
	mov	x21, x2
	/* Load the pcb address */
	mov	x1, x4
	bl	vfp_save_state		/* x0 still = old thread */
	mov	x2, x21
	mov	x1, x20
	mov	x0, x19
#endif

	/*
	 * Restore the saved context.
	 */
	ldr	x4, [x1, #TD_PCB]

	/*
	 * TODO: We may need to flush the cache here if switching
	 * to a user process.
	 */

	/* Switch to the new pmap: install the new thread's L1 table base */
	ldr	x5, [x4, #PCB_L1ADDR]
	msr	ttbr0_el1, x5
	isb

	/* Invalidate the TLB (all EL1 entries, inner-shareable) */
	dsb	sy
	tlbi	vmalle1is
	dsb	sy
	isb

	/* Release the old thread: hand its lock (mtx) back */
	str	x2, [x0, #TD_LOCK]
#if defined(SCHED_ULE) && defined(SMP)
	/*
	 * Spin while the incoming thread's td_lock still equals
	 * blocked_lock, i.e. until the CPU that previously ran it has
	 * finished switching away from it.
	 */
	/* Read the value in blocked_lock */
	ldr	x0, =_C_LABEL(blocked_lock)
	ldr	x1, [x0]
	/* Load curthread */
	ldr	x2, [x18, #PC_CURTHREAD]
1:
	ldr	x3, [x2, #TD_LOCK]
	cmp	x3, x1
	b.eq	1b
#endif

	/* Restore the registers: stack pointer and TLS base first */
	ldp	x5, x6, [x4, #PCB_SP]
	mov	sp, x5
	msr	tpidr_el0, x6
	ldp	x8, x9, [x4, #PCB_REGS + 8 * 8]
	ldp	x10, x11, [x4, #PCB_REGS + 10 * 8]
	ldp	x12, x13, [x4, #PCB_REGS + 12 * 8]
	ldp	x14, x15, [x4, #PCB_REGS + 14 * 8]
	ldp	x16, x17, [x4, #PCB_REGS + 16 * 8]
	/* x18 is deliberately skipped: it must keep this CPU's pcpu pointer */
	ldr	x19, [x4, #PCB_REGS + 19 * 8]
	ldp	x20, x21, [x4, #PCB_REGS + 20 * 8]
	ldp	x22, x23, [x4, #PCB_REGS + 22 * 8]
	ldp	x24, x25, [x4, #PCB_REGS + 24 * 8]
	ldp	x26, x27, [x4, #PCB_REGS + 26 * 8]
	ldp	x28, x29, [x4, #PCB_REGS + 28 * 8]
	ldr	x30, [x4, #PCB_REGS + 30 * 8]

	/* Scrub the saved-x18 slot: it was stored above but is never restored */
	str	xzr, [x4, #PCB_REGS + 18 * 8]
	ret
.Lcpu_switch_panic_str:
	/* NOTE(review): unused — no panic path in this file references it */
	.asciz "cpu_switch: %p\0"
END(cpu_switch)

/*
 * fork_trampoline: first-run entry point of a newly forked thread.
 * Calls fork_exit(callout, arg, trapframe) with the callout and its
 * argument taken from x8/x9 (presumably placed there by the MD fork
 * code — TODO confirm against cpu_fork), then rebuilds userland state
 * from the trapframe at sp and erets to it.
 */
ENTRY(fork_trampoline)
	mov	x0, x8			/* arg0: callout function */
	mov	x1, x9			/* arg1: callout argument */
	mov	x2, sp			/* arg2: trapframe */
	mov	fp, #0			/* Stack traceback stops here. */
	bl	_C_LABEL(fork_exit)

	/* Restore sp and lr (first two slots of the trapframe) */
	ldp	x0, x1, [sp]
	msr	sp_el0, x0
	mov	lr, x1

	/* Restore the registers other than x0 and x1 */
	ldp	x2, x3, [sp, #TF_X + 2 * 8]
	ldp	x4, x5, [sp, #TF_X + 4 * 8]
	ldp	x6, x7, [sp, #TF_X + 6 * 8]
	ldp	x8, x9, [sp, #TF_X + 8 * 8]
	ldp	x10, x11, [sp, #TF_X + 10 * 8]
	ldp	x12, x13, [sp, #TF_X + 12 * 8]
	ldp	x14, x15, [sp, #TF_X + 14 * 8]
	ldp	x16, x17, [sp, #TF_X + 16 * 8]
	/* x18 is skipped here (restored last, below); x19 loaded singly */
	ldr	x19, [sp, #TF_X + 19 * 8]
	ldp	x20, x21, [sp, #TF_X + 20 * 8]
	ldp	x22, x23, [sp, #TF_X + 22 * 8]
	ldp	x24, x25, [sp, #TF_X + 24 * 8]
	ldp	x26, x27, [sp, #TF_X + 26 * 8]
	ldp	x28, x29, [sp, #TF_X + 28 * 8]
	/* Skip x30 as it was restored above as lr */

	/*
	 * Disable interrupts to avoid
	 * overwriting spsr_el1 by an IRQ exception.
	 */
	msr	daifset, #2

	/* Restore elr and spsr */
	ldp	x0, x1, [sp, #16]
	msr	elr_el1, x0
	msr	spsr_el1, x1

	/* Finally x0 and x1 */
	ldp	x0, x1, [sp, #TF_X + 0 * 8]
	ldr	x18, [sp, #TF_X + 18 * 8]

	/*
	 * No need for interrupts reenabling since PSR
	 * will be set to the desired value anyway.
	 */
	eret

END(fork_trampoline)

/*
 * void savectx(struct pcb *pcb)
 *
 * Save the current register context into *pcb (x0) and return normally.
 * NOTE(review): under VFP, x28 is used to stash lr across the call to
 * vfp_save_state and is not restored for the caller afterwards — the
 * value written to the pcb predates the clobber, but the caller's live
 * x28 (callee-saved per AAPCS64) is trashed; verify callers tolerate it.
 */
ENTRY(savectx)
	/* Store the callee-saved registers (x8-x17 are stored as well) */
	stp	x8, x9, [x0, #PCB_REGS + 8 * 8]
	stp	x10, x11, [x0, #PCB_REGS + 10 * 8]
	stp	x12, x13, [x0, #PCB_REGS + 12 * 8]
	stp	x14, x15, [x0, #PCB_REGS + 14 * 8]
	stp	x16, x17, [x0, #PCB_REGS + 16 * 8]
	stp	x18, x19, [x0, #PCB_REGS + 18 * 8]
	stp	x20, x21, [x0, #PCB_REGS + 20 * 8]
	stp	x22, x23, [x0, #PCB_REGS + 22 * 8]
	stp	x24, x25, [x0, #PCB_REGS + 24 * 8]
	stp	x26, x27, [x0, #PCB_REGS + 26 * 8]
	stp	x28, x29, [x0, #PCB_REGS + 28 * 8]
	str	x30, [x0, #PCB_REGS + 30 * 8]
	/* And the old stack pointer and TLS base */
	mov	x5, sp
	mrs	x6, tpidr_el0
	stp	x5, x6, [x0, #PCB_SP]

	/* Store the VFP registers */
#ifdef VFP
	mov	x28, lr			/* bl clobbers lr; stash it */
	mov	x1, x0			/* move pcb to the correct register */
	mov	x0, xzr			/* td = NULL */
	bl	vfp_save_state
	mov	lr, x28
#endif

	ret
END(savectx)