/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * Copyright SUSE Linux Products GmbH 2009
 *
 * Authors: Alexander Graf <agraf@suse.de>
 */

#include <asm/ppc_asm.h>
#include <asm/kvm_asm.h>
#include <asm/reg.h>
#include <asm/page.h>
#include <asm/asm-offsets.h>
#include <asm/exception-64s.h>

/*
 * Per-subarch helpers.  Book3S-64 keeps the shadow vcpu in the PACA
 * (reached via r13); Book3S-32 keeps a pointer to it in the thread
 * struct (reached via r2 = current).
 */
#if defined(CONFIG_PPC_BOOK3S_64)

#define ULONG_SIZE 8
#define FUNC(name) GLUE(.,name)

#define GET_SHADOW_VCPU(reg)	\
	addi	reg, r13, PACA_KVM_SVCPU

/*
 * Clear MSR[EE] (bit 0x8000) without touching anything else:
 * rotate left 48 moves EE into the MSB, the rldicl mask (bits 1..63)
 * drops it, and the rotate left 16 completes the full 64-bit rotation
 * so every other bit ends up back where it started.
 */
#define DISABLE_INTERRUPTS	\
	mfmsr	r0;		\
	rldicl	r0,r0,48,1;	\
	rotldi	r0,r0,16;	\
	mtmsrd	r0,1;		\

#elif defined(CONFIG_PPC_BOOK3S_32)

#define ULONG_SIZE 4
#define FUNC(name) name

#define GET_SHADOW_VCPU(reg)	\
	lwz	reg, (THREAD + THREAD_KVM_SVCPU)(r2)

/*
 * Clear MSR[EE]: the rlwinm mask 17..15 wraps around and keeps every
 * bit except IBM bit 16 (= 0x8000, EE).
 */
#define DISABLE_INTERRUPTS	\
	mfmsr	r0;		\
	rlwinm	r0,r0,0,17,15;	\
	mtmsr	r0;		\

#endif /* CONFIG_PPC_BOOK3S_XX */


/* Byte offset of guest GPR n inside the vcpu struct's GPR array. */
#define VCPU_GPR(n)	(VCPU_GPRS + (n * ULONG_SIZE))

/* Load the guest's non-volatile GPRs (r14-r31) from the vcpu struct. */
#define VCPU_LOAD_NVGPRS(vcpu) \
	PPC_LL	r14, VCPU_GPR(r14)(vcpu); \
	PPC_LL	r15, VCPU_GPR(r15)(vcpu); \
	PPC_LL	r16, VCPU_GPR(r16)(vcpu); \
	PPC_LL	r17, VCPU_GPR(r17)(vcpu); \
	PPC_LL	r18, VCPU_GPR(r18)(vcpu); \
	PPC_LL	r19, VCPU_GPR(r19)(vcpu); \
	PPC_LL	r20, VCPU_GPR(r20)(vcpu); \
	PPC_LL	r21, VCPU_GPR(r21)(vcpu); \
	PPC_LL	r22, VCPU_GPR(r22)(vcpu); \
	PPC_LL	r23, VCPU_GPR(r23)(vcpu); \
	PPC_LL	r24, VCPU_GPR(r24)(vcpu); \
	PPC_LL	r25, VCPU_GPR(r25)(vcpu); \
	PPC_LL	r26, VCPU_GPR(r26)(vcpu); \
	PPC_LL	r27, VCPU_GPR(r27)(vcpu); \
	PPC_LL	r28, VCPU_GPR(r28)(vcpu); \
	PPC_LL	r29, VCPU_GPR(r29)(vcpu); \
	PPC_LL	r30, VCPU_GPR(r30)(vcpu); \
	PPC_LL	r31, VCPU_GPR(r31)(vcpu); \

/*****************************************************************************
 *                                                                           *
 *     Guest entry / exit code that is in kernel module memory (highmem)     *
 *                                                                           *
 ****************************************************************************/

/* Registers:
 *  r3: kvm_run pointer
 *  r4: vcpu pointer
 */
_GLOBAL(__kvmppc_vcpu_entry)

kvm_start_entry:
	/* Write correct stack frame */
	mflr	r0
	PPC_STL	r0,PPC_LR_STKOFF(r1)

	/* Save host state to the stack */
	PPC_STLU r1, -SWITCH_FRAME_SIZE(r1)

	/* Save r3 (kvm_run) and r4 (vcpu) */
	SAVE_2GPRS(3, r1)

	/* Save non-volatile registers (r14 - r31) */
	SAVE_NVGPRS(r1)

	/* Save LR */
	PPC_STL	r0, _LINK(r1)

	/* Load non-volatile guest state from the vcpu */
	VCPU_LOAD_NVGPRS(r4)

	/* r5 = shadow vcpu (PACA-based on 64-bit, thread-based on 32-bit) */
	GET_SHADOW_VCPU(r5)

	/* Save R1/R2 in the PACA so the exit path can find the host stack */
	PPC_STL	r1, SVCPU_HOST_R1(r5)
	PPC_STL	r2, SVCPU_HOST_R2(r5)

	/* Tell the shadow vcpu where to come back to on guest exit
	 * (kvmppc_handler_highmem, stored in the vcpu by the module) */
	PPC_LL	r3, VCPU_HIGHMEM_HANDLER(r4)
	PPC_STL	r3, SVCPU_VMHANDLER(r5)

kvm_start_lightweight:

	PPC_LL	r10, VCPU_SHADOW_MSR(r4)	/* r10 = vcpu->arch.shadow_msr */

	/* From here on we must not take a host interrupt until the guest
	 * context is fully installed */
	DISABLE_INTERRUPTS

#ifdef CONFIG_PPC_BOOK3S_64
	/* Some guests may need to have dcbz set to 32 byte length.
	 *
	 * Usually we ensure that by patching the guest's instructions
	 * to trap on dcbz and emulate it in the hypervisor.
	 *
	 * If we can, we should tell the CPU to use 32 byte dcbz though,
	 * because that's a lot faster.
	 */

	PPC_LL	r3, VCPU_HFLAGS(r4)
	rldicl.	r3, r3, 0, 63		/* CR = ((r3 & 1) == 0) */
	beq	no_dcbz32_on

	/* Set the 32-byte-dcbz enable bit (0x80) in HID5 */
	mfspr	r3,SPRN_HID5
	ori	r3, r3, 0x80
	mtspr	SPRN_HID5,r3

no_dcbz32_on:

#endif /* CONFIG_PPC_BOOK3S_64 */

	/* CTR = real-mode call helper; it is entered via bctr below
	 * after the MSR in r4 takes translation off */
	PPC_LL	r6, VCPU_RMCALL(r4)
	mtctr	r6

	/* r3 = lowmem trampoline entry, r4 = MSR with IR/DR cleared
	 * (real mode) for the jump into the guest */
	PPC_LL	r3, VCPU_TRAMPOLINE_ENTER(r4)
	LOAD_REG_IMMEDIATE(r4, MSR_KERNEL & ~(MSR_IR | MSR_DR))

	/* Jump to segment patching handler and into our guest */
	bctr

/*
 * This is the handler in module memory. It gets jumped at from the
 * lowmem trampoline code, so it's basically the guest exit code.
 *
 */

.global kvmppc_handler_highmem
kvmppc_handler_highmem:

	/*
	 * Register usage at this point:
	 *
	 * R1       = host R1
	 * R2       = host R2
	 * R12      = exit handler id
	 * R13      = PACA
	 * SVCPU.*  = guest *
	 *
	 */

	/* R7 = vcpu (saved on the host stack by kvm_start_entry) */
	PPC_LL	r7, GPR4(r1)

#ifdef CONFIG_PPC_BOOK3S_64

	/* Undo the 32-byte-dcbz HID5 setting from the entry path, if it
	 * was enabled for this guest */
	PPC_LL	r5, VCPU_HFLAGS(r7)
	rldicl.	r5, r5, 0, 63		/* CR = ((r5 & 1) == 0) */
	beq	no_dcbz32_off

	li	r4, 0
	mfspr	r5,SPRN_HID5
	rldimi	r5,r4,6,56		/* insert zeroes over the dcbz32 bit */
	mtspr	SPRN_HID5,r5

no_dcbz32_off:

#endif /* CONFIG_PPC_BOOK3S_64 */

	/* Save the guest's non-volatile GPRs back into the vcpu */
	PPC_STL	r14, VCPU_GPR(r14)(r7)
	PPC_STL	r15, VCPU_GPR(r15)(r7)
	PPC_STL	r16, VCPU_GPR(r16)(r7)
	PPC_STL	r17, VCPU_GPR(r17)(r7)
	PPC_STL	r18, VCPU_GPR(r18)(r7)
	PPC_STL	r19, VCPU_GPR(r19)(r7)
	PPC_STL	r20, VCPU_GPR(r20)(r7)
	PPC_STL	r21, VCPU_GPR(r21)(r7)
	PPC_STL	r22, VCPU_GPR(r22)(r7)
	PPC_STL	r23, VCPU_GPR(r23)(r7)
	PPC_STL	r24, VCPU_GPR(r24)(r7)
	PPC_STL	r25, VCPU_GPR(r25)(r7)
	PPC_STL	r26, VCPU_GPR(r26)(r7)
	PPC_STL	r27, VCPU_GPR(r27)(r7)
	PPC_STL	r28, VCPU_GPR(r28)(r7)
	PPC_STL	r29, VCPU_GPR(r29)(r7)
	PPC_STL	r30, VCPU_GPR(r30)(r7)
	PPC_STL	r31, VCPU_GPR(r31)(r7)

	/* Restore host msr -> SRR1 */
	PPC_LL	r6, VCPU_HOST_MSR(r7)

	/*
	 * For some interrupts, we need to call the real Linux
	 * handler, so it can do work for us. This has to happen
	 * as if the interrupt arrived from the kernel though,
	 * so let's fake it here where most state is restored.
	 *
	 * Call Linux for hardware interrupts/decrementer
	 * r3 = address of interrupt handler (exit reason)
	 */

	cmpwi	r12, BOOK3S_INTERRUPT_EXTERNAL
	beq	call_linux_handler
	cmpwi	r12, BOOK3S_INTERRUPT_DECREMENTER
	beq	call_linux_handler
	cmpwi	r12, BOOK3S_INTERRUPT_PERFMON
	beq	call_linux_handler

	/* Back to EE=1 */
	mtmsr	r6
	sync
	b	kvm_return_point

call_linux_handler:

	/*
	 * If we land here we need to jump back to the handler we
	 * came from.
	 *
	 * We have a page that we can access from real mode, so let's
	 * jump back to that and use it as a trampoline to get back into the
	 * interrupt handler!
	 *
	 * R3 still contains the exit code,
	 * R5 VCPU_HOST_RETIP and
	 * R6 VCPU_HOST_MSR
	 */

	/* Restore host IP -> SRR0 */
	PPC_LL	r5, VCPU_HOST_RETIP(r7)

	/* NOTE(review): LR is loaded with r12, the exit handler id —
	 * presumably so the lowmem trampoline can dispatch on it; the
	 * trampoline code is not visible here, so confirm against it. */
	mtlr	r12

	/* RFI into the lowmem trampoline in real mode (IR/DR off) */
	PPC_LL	r4, VCPU_TRAMPOLINE_LOWMEM(r7)
	mtsrr0	r4
	LOAD_REG_IMMEDIATE(r3, MSR_KERNEL & ~(MSR_IR | MSR_DR))
	mtsrr1	r3

	RFI

.global kvm_return_point
kvm_return_point:

	/* Jump back to lightweight entry if we're supposed to */
	/* go back into the guest */

	/* Pass the exit number as 3rd argument to kvmppc_handle_exit */
	mr	r5, r12

	/* Restore r3 (kvm_run) and r4 (vcpu) */
	REST_2GPRS(3, r1)
	bl	FUNC(kvmppc_handle_exit)

	/* If RESUME_GUEST, get back in the loop */
	cmpwi	r3, RESUME_GUEST
	beq	kvm_loop_lightweight

	cmpwi	r3, RESUME_GUEST_NV
	beq	kvm_loop_heavyweight

kvm_exit_loop:
	/* Not resuming the guest: restore host LR and non-volatile
	 * registers, pop the switch frame and return to the caller
	 * of __kvmppc_vcpu_entry */

	PPC_LL	r4, _LINK(r1)
	mtlr	r4

	/* Restore non-volatile host registers (r14 - r31) */
	REST_NVGPRS(r1)

	addi	r1, r1, SWITCH_FRAME_SIZE
	blr

kvm_loop_heavyweight:
	/* RESUME_GUEST_NV: the NV GPRs were clobbered, so re-save the
	 * return address above the frame and reload full guest state */

	PPC_LL	r4, _LINK(r1)
	PPC_STL	r4, (PPC_LR_STKOFF + SWITCH_FRAME_SIZE)(r1)

	/* Load vcpu and cpu_run */
	REST_2GPRS(3, r1)

	/* Load non-volatile guest state from the vcpu */
	VCPU_LOAD_NVGPRS(r4)

	/* Jump back into the beginning of this function */
	b	kvm_start_lightweight

kvm_loop_lightweight:
	/* RESUME_GUEST: guest NV GPRs are still live in the registers */

	/* We'll need the vcpu pointer */
	REST_GPR(4, r1)

	/* Jump back into the beginning of this function */
	b	kvm_start_lightweight