/**
 * \file
 * \brief Dispatcher architecture-specific implementation.
 */

/*
 * Copyright (c) 2007, 2008, 2009, 2010, 2012, ETH Zurich.
 * All rights reserved.
 *
 * This file is distributed under the terms in the attached LICENSE file.
 * If you do not find this file, copies can be found by writing to:
 * ETH Zurich D-INFK, Haldeneggsteig 4, CH-8092 Zurich. Attn: Systems Group.
 */

#include <barrelfish/barrelfish.h>
#include <barrelfish/dispatch.h>
#include <barrelfish/dispatcher_arch.h>
#include <barrelfish/syscalls.h>
#include <barrelfish/static_assert.h>
#include "threads_priv.h"
#include <stdio.h> // for debugging printf

#include <asmoffsets.h>
// asmoffsets.h is generated; if this offset is missing the generated
// header is stale or was not produced correctly.
#ifndef OFFSETOF_DISP_DISABLED
#error "Pants!"
#endif

/* entry points defined in assembler code */
extern void run_entry(void);
extern void pagefault_entry(void);
extern void disabled_pagefault_entry(void);
extern void trap_entry(void);

/*
 * These "epilog" symbols are not ordinary C functions: each is an
 * assembler label defined inside the inline assembly of the matching
 * disp_* routine below. They are declared here only so their addresses
 * can be taken from C (to patch a saved PC, or to publish crit_pc_high).
 * Never call them.
 */
void __attribute__ ((visibility ("hidden"))) disp_resume_context_epilog(void);
void __attribute__ ((visibility ("hidden"))) disp_switch_epilog(void);
void __attribute__ ((visibility ("hidden"))) disp_save_epilog(void);
void __attribute__ ((visibility ("hidden"))) disp_save_rm_kcb_epilog(void);

///////////////////////////////////////////////////////////////////////////////
//
// Low level "C" context switch related code
//

/*
 * The hand-written assembly below hard-codes the register save-area
 * layout: CPSR at word 0, r0-r14 at words 1-15, PC at word 16 (byte
 * offset 64), FPSCR at word 17 (byte offset 68), and d0-d31 as 64 words
 * from byte offset 72 (18 + 64 = 82 words total). These asserts catch
 * any drift in that layout.
 */
STATIC_ASSERT(CPSR_REG == 0, "broken context assumption");
STATIC_ASSERT(NUM_REGS == 82, "broken context assumption");
STATIC_ASSERT(PC_REG == 16, "broken context assumption");


/*
 * XXX: there is no guarantee that the context has been set up by
 * disp_save_context, so we can not cut corners by not restoring registers
 * clobbered in disp_save_context.
 * e.g. when a new thread is created, it is started using this function,
 * with r0 and r1 being arguments.
 */
static void __attribute__((naked)) __attribute__((noinline))
disp_resume_context(struct dispatcher_shared_generic *disp, uint32_t *regs)
{
    // Naked function: per AAPCS, r0 = disp and r1 = regs on entry.
    // Does not return to the caller -- the final ldmia loads the saved
    // PC directly into r15.
    __asm volatile(
        " clrex\n\t"
        /* Re-enable dispatcher: clear the 'disabled' flag in the shared
         * dispatcher structure (r0) before handing control to the thread */
        " mov r2, #0 \n\t"
        " str r2, [r0, # " XTR(OFFSETOF_DISP_DISABLED) "] \n\t"
        /* Restore VFP registers: FPSCR is at byte offset 68, d0-d31
         * follow from byte offset 72 (see layout asserts above) */
        " add r2, r1, #68 \n\t"
        " ldr r3, [r2], #4 \n\t"
        " vmsr fpscr, r3 \n\t"
        " vldmia r2!, {d0-d15} \n\t"
        " vldmia r2, {d16-d31} \n\t"
        /* Restore cpsr condition bits from regs[0]; post-increment leaves
         * r1 pointing at regs[1] (saved r0) */
        " ldr r0, [r1], #4 \n\t"
        " msr cpsr, r0 \n\t"
        /* Restore registers r0-r14 and jump by loading the saved PC
         * into r15 */
        " ldmia r1, {r0-r15} \n\t"
        "disp_resume_context_epilog: \n\t"
        " mov r0, r0 ; nop \n\t"
        ::: "r0", "r1", "r2", "r3");
}


static void __attribute__((naked))
disp_save_context(uint32_t *regs)
{
    // Naked function: r0 = regs. Saves the caller's register state such
    // that resuming the snapshot makes this function appear to return a
    // second time: the saved PC points at disp_save_context_resume.
    // use normal arm assembly
    __asm volatile(
        " clrex\n\t"
        /* r1 := CPSR, r2 := address to resume the snapshot at */
        " mrs r1, cpsr \n\t"
        " adr r2, disp_save_context_resume \n\t"
        /* regs[1..15] = r0-r14 (stmib skips word 0, reserved for CPSR) */
        " stmib r0, {r0-r14} \n\t"
        /* regs[17] = FPSCR (byte offset 68), d0-d31 from byte offset 72 */
        " vmrs r3, fpscr \n\t"
        " str r3, [r0, #68] \n\t"
        " add r3, r0, #72 \n\t"
        " vstmia r3!, {d0-d15} \n\t"
        " vstmia r3, {d16-d31} \n\t"
        /* regs[0] = CPSR, regs[16] = resume PC */
        " str r1, [r0] \n\t"
        " str r2, [r0, # (" XTR(PC_REG) " * 4)] \n\t"
        "disp_save_context_resume: \n\t"
        " bx lr \n\t"
        ::: "r0", "r1", "r2", "r3");
}

///////////////////////////////////////////////////////////////////////////////

/**
 * \brief Resume execution of a given register state
 *
 * This function resumes the execution of the given register state on the
 * current dispatcher. It may only be called while the dispatcher is disabled.
 * It does not return.
 *
 * \param handle   Current dispatcher handle
 * \param archregs Register state snapshot to resume
 */
void
disp_resume(dispatcher_handle_t handle,
            arch_registers_state_t *archregs)

{
    struct dispatcher_shared_arm *disp =
        get_dispatcher_shared_arm(handle);

    // The definition of disp_resume_end is a totally flakey.
The system
    // uses the location of the PC to determine where to spill the thread
    // context for exceptions and interrupts. There are two safe ways of doing
    // this:
    //
    // 1) Write this entire function in assembler.
    // 2) Write this function in C and write a linker script to emit
    //    function bounds.

    assert_disabled(curdispatcher() == handle);
    assert_disabled(disp->d.disabled);
    assert_disabled(disp->d.haswork);

#ifdef CONFIG_DEBUG_DEADLOCKS
    // Reset the yield counter whenever a context is actually resumed.
    ((struct disp_priv *)disp)->yieldcount = 0;
#endif
    // Does not return: restores the snapshot and jumps to its saved PC.
    disp_resume_context(&disp->d, archregs->regs);
}

/**
 * \brief Switch execution between two register states
 *
 * This function saves as much as necessary of the current register state
 * (which, when resumed will return to the caller), and switches execution
 * by resuming the given register state. It may only be called while the
 * dispatcher is disabled.
 *
 * \param handle     Current dispatcher handle
 * \param from_state Location to save current register state
 * \param to_state   Location from which to resume new register state
 */
void disp_switch(dispatcher_handle_t handle,
                 arch_registers_state_t *from_state,
                 arch_registers_state_t *to_state)
{
    struct dispatcher_shared_arm *disp =
        get_dispatcher_shared_arm(handle);

    assert_disabled(curdispatcher() == handle);
    assert_disabled(disp->d.disabled);
    assert_disabled(disp->d.haswork);
    assert_disabled(to_state != NULL);

    // Save our state, then redirect its resume PC past the context switch
    // below, so that a later resume of 'from_state' continues at
    // disp_switch_epilog and returns to our caller.
    disp_save_context(from_state->regs);
    from_state->named.pc = (lvaddr_t)disp_switch_epilog;
    disp_resume_context(&disp->d, to_state->regs);

    // Label targeted by the patched PC above; only reached when
    // 'from_state' is resumed.
    __asm volatile("disp_switch_epilog:");
}

/**
 * \brief Save the current register state and optionally yield the CPU
 *
 * This function saves as much as necessary of the current register state
 * (which, when resumed will return to the caller), and then either
 * re-enters the thread scheduler or
yields the CPU.
 * It may only be called while the dispatcher is disabled.
 *
 * \param handle   Current dispatcher handle
 * \param state    Location to save current register state
 * \param yield    If true, yield CPU to kernel; otherwise re-run thread scheduler
 * \param yield_to Endpoint capability for dispatcher to which we want to yield
 */
void disp_save(dispatcher_handle_t handle,
               arch_registers_state_t *state,
               bool yield, capaddr_t yield_to)
{
    struct dispatcher_shared_arm *disp =
        get_dispatcher_shared_arm(handle);

    assert_disabled(curdispatcher() == handle);
    assert_disabled(disp->d.disabled);

    // Snapshot our state and point its resume PC past this function's
    // body, so that resuming 'state' makes disp_save appear to return.
    disp_save_context(state->regs);
    state->named.pc = (lvaddr_t)disp_save_epilog;

    if (yield) {
        sys_yield(yield_to);
        // may fail if target doesn't exist; if so, just fall through
    }
    // this code won't run if the yield succeeded

    // enter thread scheduler again
    // this doesn't return, and will call disp_yield if there's nothing to do
    thread_run_disabled(handle);

    // Label targeted by the patched PC above; only reached when 'state'
    // is resumed.
    __asm volatile("disp_save_epilog:");
}

/**
 * \brief Save the current thread state and ask the kernel to suspend.
 *
 * Disables the dispatcher, saves the currently-enabled thread's register
 * state with its resume PC pointing at disp_save_rm_kcb_epilog, then calls
 * sys_suspend(false). NOTE(review): presumably this is the KCB-removal
 * path, as the name suggests -- confirm against the kernel's sys_suspend
 * semantics. If the saved state is later resumed, this function returns
 * normally.
 */
void disp_save_rm_kcb(void)
{
    dispatcher_handle_t handle = disp_disable();
    struct dispatcher_shared_arm *disp =
        get_dispatcher_shared_arm(handle);
    arch_registers_state_t *state =
        dispatcher_get_enabled_save_area(handle);

    assert_disabled(curdispatcher() == handle);
    assert_disabled(disp->d.disabled);

    // Snapshot our state and point its resume PC at the epilog label below.
    disp_save_context(state->regs);
    state->named.pc = (lvaddr_t)disp_save_rm_kcb_epilog;

    sys_suspend(false);

    // enter thread scheduler again
    // this doesn't return, and will call disp_yield if there's nothing to do
    thread_run_disabled(handle);

    // Label targeted by the patched PC above.
    __asm volatile("disp_save_rm_kcb_epilog:");
}


/**
 * \brief Architecture-specific dispatcher initialisation
 *
 * Publishes the assembler entry points and the critical-section PC range
 * (disp_resume_context .. disp_resume_context_epilog) in the shared
 * dispatcher structure, so the kernel can recognise when the dispatcher
 * was interrupted mid-resume.
 *
 * \param handle Dispatcher handle to initialise
 */
void disp_arch_init(dispatcher_handle_t handle)
{
    struct dispatcher_shared_arm *disp =
        get_dispatcher_shared_arm(handle);

    disp->d.dispatcher_run = (lvaddr_t)run_entry;
    disp->d.dispatcher_pagefault = (lvaddr_t)pagefault_entry;
    disp->d.dispatcher_pagefault_disabled = (lvaddr_t)disabled_pagefault_entry;
    disp->d.dispatcher_trap = (lvaddr_t)trap_entry;
    // Bounds of the uninterruptible resume sequence; crit_pc_high is the
    // asm label immediately after the final ldmia in disp_resume_context.
    disp->crit_pc_low = (lvaddr_t)disp_resume_context;
    disp->crit_pc_high = (lvaddr_t)disp_resume_context_epilog;
}