/**
 * \file
 * \brief Dispatcher architecture-specific implementation.
 */

/*
 * Copyright (c) 2007-2010,2012, ETH Zurich.
 * Copyright (c) 2015, Hewlett Packard Enterprise Development LP.
 * All rights reserved.
 *
 * This file is distributed under the terms in the attached LICENSE file.
 * If you do not find this file, copies can be found by writing to:
 * ETH Zurich D-INFK, Universitaetstr. 6, CH-8092 Zurich. Attn: Systems Group.
 */

#include <barrelfish/barrelfish.h>
#include <barrelfish/dispatch.h>
#include <barrelfish/dispatcher_arch.h>
#include <barrelfish/syscalls.h>
#include <barrelfish/static_assert.h>
#include "threads_priv.h"
#include <stdio.h> // for debugging printf

#include <asmoffsets.h>
#ifndef OFFSETOF_DISP_DISABLED
#error "asmoffsets.h does not define OFFSETOF_DISP_DISABLED"
#endif

/* entry points defined in assembler code */
extern void run_entry(void);
extern void pagefault_entry(void);
extern void disabled_pagefault_entry(void);
extern void trap_entry(void);

void __attribute__ ((visibility ("hidden"))) disp_resume_context_epilog(void);
void __attribute__ ((visibility ("hidden"))) disp_switch_epilog(void);
void __attribute__ ((visibility ("hidden"))) disp_save_epilog(void);
void __attribute__ ((visibility ("hidden"))) disp_save_rm_kcb_epilog(void);

///////////////////////////////////////////////////////////////////////////////
//
// Low level "C" context switch related code
//

STATIC_ASSERT(PC_REG   == 32, "broken context assumption");
STATIC_ASSERT(SPSR_REG == 33, "broken context assumption");
STATIC_ASSERT(NUM_REGS == 98, "broken context assumption");

#define NZCV_MASK (MASK(4) << 28)

/*
 * XXX: there is no guarantee that the context has been set up by
 * disp_save_context, so we cannot cut corners by skipping the registers
 * that disp_save_context clobbers. For example, when a new thread is
 * created it is started through this function, with x0 and x1 carrying
 * its arguments.
 */
static void __attribute__((noinline, optimize(2)))
disp_resume_context(struct dispatcher_shared_generic *disp, uint64_t *regs)
{
    /* Re-enable dispatcher once we pass the end of the critical section. */
    disp->disabled = 0;

    __asm volatile(
        /* Restore the PSTATE condition bits (NZCV), and load the resume
         * address into x18, our platform scratch register. */
        "ldp x18, x2, [%[regs], #("XTR(PC_REG)" * 8)]\n"
        "and x2, x2, #0xf0000000\n" /* Keep only the top four bits: NZCV. */
        "msr nzcv, x2\n"
        /* Restore the stack pointer, and x30 while we're at it. */
        "ldp x30, x2, [%[regs], #(30 * 8)]\n"
        "mov sp, x2\n"
        /* Restore everything else. */
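        /* Layout of the save area assumed below (cf. the STATIC_ASSERTs
         * above): x0-x30 occupy 8-byte slots 0-30, SP slot 31, PC slot 32
         * and SPSR slot 33; the 128-bit registers q0-q31 follow from slot
         * 34, two slots each, giving NUM_REGS == 98. */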
        "ldp q30, q31, [%[regs], #(94 * 8)]\n"
        "ldp q28, q29, [%[regs], #(90 * 8)]\n"
        "ldp q26, q27, [%[regs], #(86 * 8)]\n"
        "ldp q24, q25, [%[regs], #(82 * 8)]\n"
        "ldp q22, q23, [%[regs], #(78 * 8)]\n"
        "ldp q20, q21, [%[regs], #(74 * 8)]\n"
        "ldp q18, q19, [%[regs], #(70 * 8)]\n"
        "ldp q16, q17, [%[regs], #(66 * 8)]\n"
        "ldp q14, q15, [%[regs], #(62 * 8)]\n"
        "ldp q12, q13, [%[regs], #(58 * 8)]\n"
        "ldp q10, q11, [%[regs], #(54 * 8)]\n"
        "ldp q8, q9, [%[regs], #(50 * 8)]\n"
        "ldp q6, q7, [%[regs], #(46 * 8)]\n"
        "ldp q4, q5, [%[regs], #(42 * 8)]\n"
        "ldp q2, q3, [%[regs], #(38 * 8)]\n"
        "ldp q0, q1, [%[regs], #(34 * 8)]\n"

        "ldp x28, x29, [%[regs], #(28 * 8)]\n"
        "ldp x26, x27, [%[regs], #(26 * 8)]\n"
        "ldp x24, x25, [%[regs], #(24 * 8)]\n"
        "ldp x22, x23, [%[regs], #(22 * 8)]\n"
        "ldp x20, x21, [%[regs], #(20 * 8)]\n"
        "ldr x19, [%[regs], #(19 * 8)]\n" /* n.b. don't reload x18 */
        "ldp x16, x17, [%[regs], #(16 * 8)]\n"
        "ldp x14, x15, [%[regs], #(14 * 8)]\n"
        "ldp x12, x13, [%[regs], #(12 * 8)]\n"
        "ldp x10, x11, [%[regs], #(10 * 8)]\n"
        "ldp x8, x9, [%[regs], #( 8 * 8)]\n"
        "ldp x6, x7, [%[regs], #( 6 * 8)]\n"
        "ldp x4, x5, [%[regs], #( 4 * 8)]\n"
        "ldp x2, x3, [%[regs], #( 2 * 8)]\n"
        "ldp x0, x1, [%[regs], #( 0 * 8)]\n" /* n.b. this clobbers x0 & x1 */
        /* Return to the thread. */
        "br x18\n"
        /* This is the end of the critical section. This code isn't executed,
         * but we need to make sure it's a valid address. */
        "disp_resume_context_epilog:\n"
        "nop\n"
        :: [regs] "r" (regs));
}

/* See context.S */
void disp_save_context(uint64_t *regs);

///////////////////////////////////////////////////////////////////////////////

/**
 * \brief Resume execution of a given register state
 *
 * This function resumes the execution of the given register state on the
 * current dispatcher. It may only be called while the dispatcher is disabled.
 *
 * \param handle   Current dispatcher handle
 * \param archregs Register state snapshot to resume
 */
void
disp_resume(dispatcher_handle_t handle,
            arch_registers_state_t *archregs)
{
    struct dispatcher_shared_aarch64 *disp =
        get_dispatcher_shared_aarch64(handle);

    // XXX - the word "flakey" shouldn't appear in this code -DC
    // The definition of disp_resume_context_epilog is totally flakey. The
    // system uses the location of the PC to determine where to spill the
    // thread context for exceptions and interrupts. There are two safe ways
    // of doing this:
    //
    // 1) Write this entire function in assembler.
    // 2) Write this function in C and write a linker script to emit
    //    function bounds.

    assert_disabled(curdispatcher() == handle);
    assert_disabled(disp->d.disabled);
    assert_disabled(disp->d.haswork);

#ifdef CONFIG_DEBUG_DEADLOCKS
    ((struct disp_priv *)disp)->yieldcount = 0;
#endif

    disp_resume_context(&disp->d, archregs->regs);
}
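
/*
 * Note on the *_epilog labels used by the functions below: each of
 * disp_switch, disp_save and disp_save_rm_kcb snapshots the current
 * registers with disp_save_context() and then overwrites the saved PC with
 * a label that inline asm places just after the hand-off. If that saved
 * state is later resumed, execution continues at the label, so the call
 * appears to return normally to its caller.
 */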

/**
 * \brief Switch execution between two register states
 *
 * This function saves as much as necessary of the current register state
 * (which, when resumed, will return to the caller), and switches execution
 * by resuming the given register state. It may only be called while the
 * dispatcher is disabled.
 *
 * \param handle     Current dispatcher handle
 * \param from_state Location to save the current register state
 * \param to_state   Location from which to resume the new register state
 */
void disp_switch(dispatcher_handle_t handle,
                 arch_registers_state_t *from_state,
                 arch_registers_state_t *to_state)
{
    struct dispatcher_shared_aarch64 *disp =
        get_dispatcher_shared_aarch64(handle);

    assert_disabled(curdispatcher() == handle);
    assert_disabled(disp->d.disabled);
    assert_disabled(disp->d.haswork);
    assert_disabled(to_state != NULL);

    disp_save_context(from_state->regs);
    from_state->named.pc = (lvaddr_t)disp_switch_epilog;
    disp_resume_context(&disp->d, to_state->regs);

    __asm volatile("disp_switch_epilog:");
}

/**
 * \brief Save the current register state and optionally yield the CPU
 *
 * This function saves as much as necessary of the current register state
 * (which, when resumed, will return to the caller), and then either
 * re-enters the thread scheduler or yields the CPU.
 * It may only be called while the dispatcher is disabled.
 *
 * \param handle   Current dispatcher handle
 * \param state    Location to save the current register state
 * \param yield    If true, yield the CPU to the kernel; otherwise re-run the
 *                 thread scheduler
 * \param yield_to Endpoint capability for the dispatcher to which we want to
 *                 yield
 */
void disp_save(dispatcher_handle_t handle,
               arch_registers_state_t *state,
               bool yield, capaddr_t yield_to)
{
    struct dispatcher_shared_aarch64 *disp =
        get_dispatcher_shared_aarch64(handle);

    assert_disabled(curdispatcher() == handle);
    assert_disabled(disp->d.disabled);

    disp_save_context(state->regs);
    state->named.pc = (lvaddr_t)disp_save_epilog;

    if (yield) {
        sys_yield(yield_to);
        // may fail if the target doesn't exist; if so, just fall through
    }
    // this code won't run if the yield succeeded

    // enter the thread scheduler again
    // this doesn't return, and will call disp_yield if there's nothing to do
    thread_run_disabled(handle);

    __asm volatile("disp_save_epilog:");
}

void disp_save_rm_kcb(void)
{
    dispatcher_handle_t handle = disp_disable();
    struct dispatcher_shared_aarch64 *disp =
        get_dispatcher_shared_aarch64(handle);
    arch_registers_state_t *state =
        dispatcher_get_enabled_save_area(handle);

    assert_disabled(curdispatcher() == handle);
    assert_disabled(disp->d.disabled);

    disp_save_context(state->regs);
    state->named.pc = (lvaddr_t)disp_save_rm_kcb_epilog;

    sys_suspend(false);

    // enter the thread scheduler again
    // this doesn't return, and will call disp_yield if there's nothing to do
    thread_run_disabled(handle);

    __asm volatile("disp_save_rm_kcb_epilog:");
}

/**
 * \brief Architecture-specific dispatcher initialisation
 */
void disp_arch_init(dispatcher_handle_t handle)
{
    struct dispatcher_shared_aarch64 *disp =
        get_dispatcher_shared_aarch64(handle);

    disp->d.dispatcher_run = (lvaddr_t)run_entry;
    disp->d.dispatcher_pagefault = (lvaddr_t)pagefault_entry;
    disp->d.dispatcher_pagefault_disabled = (lvaddr_t)disabled_pagefault_entry;
    disp->d.dispatcher_trap = (lvaddr_t)trap_entry;
    disp->crit_pc_low = (lvaddr_t)disp_resume_context;
    disp->crit_pc_high = (lvaddr_t)disp_resume_context_epilog;
}
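
/*
 * The crit_pc_low/crit_pc_high bounds registered above delimit the body of
 * disp_resume_context(). The intent, per the comments in that function, is
 * that a trap or upcall whose saved PC lies inside this window is handled
 * as if the dispatcher were still disabled, even though d.disabled has
 * already been cleared at the start of the critical section.
 */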