Lines matching defs:svm (identifier search over KVM's AMD SEV/SEV-ES support, arch/x86/kvm/svm/sev.c; the number on each match is its line in that file)

30 #include "svm.h"
581 static int sev_es_sync_vmsa(struct vcpu_svm *svm)
583 struct sev_es_save_area *save = svm->sev_es.vmsa;
586 if (svm->vcpu.guest_debug || (svm->vmcb->save.dr7 & ~DR7_FIXED_1))
595 memcpy(save, &svm->vmcb->save, sizeof(svm->vmcb->save));
598 save->rax = svm->vcpu.arch.regs[VCPU_REGS_RAX];
599 save->rbx = svm->vcpu.arch.regs[VCPU_REGS_RBX];
600 save->rcx = svm->vcpu.arch.regs[VCPU_REGS_RCX];
601 save->rdx = svm->vcpu.arch.regs[VCPU_REGS_RDX];
602 save->rsp = svm->vcpu.arch.regs[VCPU_REGS_RSP];
603 save->rbp = svm->vcpu.arch.regs[VCPU_REGS_RBP];
604 save->rsi = svm->vcpu.arch.regs[VCPU_REGS_RSI];
605 save->rdi = svm->vcpu.arch.regs[VCPU_REGS_RDI];
607 save->r8 = svm->vcpu.arch.regs[VCPU_REGS_R8];
608 save->r9 = svm->vcpu.arch.regs[VCPU_REGS_R9];
609 save->r10 = svm->vcpu.arch.regs[VCPU_REGS_R10];
610 save->r11 = svm->vcpu.arch.regs[VCPU_REGS_R11];
611 save->r12 = svm->vcpu.arch.regs[VCPU_REGS_R12];
612 save->r13 = svm->vcpu.arch.regs[VCPU_REGS_R13];
613 save->r14 = svm->vcpu.arch.regs[VCPU_REGS_R14];
614 save->r15 = svm->vcpu.arch.regs[VCPU_REGS_R15];
616 save->rip = svm->vcpu.arch.regs[VCPU_REGS_RIP];
619 save->xcr0 = svm->vcpu.arch.xcr0;
620 save->pkru = svm->vcpu.arch.pkru;
621 save->xss = svm->vcpu.arch.ia32_xss;
622 save->dr6 = svm->vcpu.arch.dr6;
640 struct vcpu_svm *svm = to_svm(vcpu);
649 ret = sev_es_sync_vmsa(svm);
658 clflush_cache_range(svm->sev_es.vmsa, PAGE_SIZE);
662 vmsa.address = __sme_pa(svm->sev_es.vmsa);
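
These matches cover sev_es_sync_vmsa() and its caller in the LAUNCH_UPDATE_VMSA path: before the PSP encrypts and measures the VMSA, the cached vCPU register state is copied into the save area, and the page is flushed (658) because the PSP accesses memory directly and must not miss dirty cache lines. A sketch of the surrounding command issue, assuming the field layout of struct sev_data_launch_update_vmsa from include/linux/psp-sev.h (error handling trimmed; not a verbatim copy):

    struct sev_data_launch_update_vmsa vmsa;

    /* Sync and flush first (sev_es_sync_vmsa + clflush_cache_range),
     * then hand the PSP the system physical address of the VMSA page. */
    vmsa.reserved = 0;
    vmsa.handle = to_kvm_svm(kvm)->sev_info.handle;   /* from LAUNCH_START */
    vmsa.address = __sme_pa(svm->sev_es.vmsa);
    vmsa.len = PAGE_SIZE;
    ret = sev_issue_cmd(kvm, SEV_CMD_LAUNCH_UPDATE_VMSA, &vmsa, &argp->error);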
2377 struct vcpu_svm *svm;
2382 svm = to_svm(vcpu);
2385 sev_flush_encrypted_page(vcpu, svm->sev_es.vmsa);
2387 __free_page(virt_to_page(svm->sev_es.vmsa));
2389 if (svm->sev_es.ghcb_sa_free)
2390 kvfree(svm->sev_es.ghcb_sa);
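
The teardown matches (2377-2390) free per-vCPU SEV-ES state. The VMSA page was encrypted with the guest's key, so sev_flush_encrypted_page() flushes it with the C-bit set before the page is returned to the allocator; otherwise stale encrypted cache lines could later be written back over the reused page. A sketch of the guard, assuming vcpu->arch.guest_state_protected is set only once the VMSA has actually been encrypted:

    /* Only a VMSA that was really encrypted needs the C-bit flush. */
    if (vcpu->arch.guest_state_protected)
            sev_flush_encrypted_page(vcpu, svm->sev_es.vmsa);
    __free_page(virt_to_page(svm->sev_es.vmsa));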
2393 static void dump_ghcb(struct vcpu_svm *svm)
2395 struct ghcb *ghcb = svm->sev_es.ghcb;
2406 pr_err("GHCB (GPA=%016llx):\n", svm->vmcb->control.ghcb_gpa);
2418 static void sev_es_sync_to_ghcb(struct vcpu_svm *svm)
2420 struct kvm_vcpu *vcpu = &svm->vcpu;
2421 struct ghcb *ghcb = svm->sev_es.ghcb;
2437 static void sev_es_sync_from_ghcb(struct vcpu_svm *svm)
2439 struct vmcb_control_area *control = &svm->vmcb->control;
2440 struct kvm_vcpu *vcpu = &svm->vcpu;
2441 struct ghcb *ghcb = svm->sev_es.ghcb;
2458 BUILD_BUG_ON(sizeof(svm->sev_es.valid_bitmap) != sizeof(ghcb->save.valid_bitmap));
2459 memcpy(&svm->sev_es.valid_bitmap, &ghcb->save.valid_bitmap, sizeof(ghcb->save.valid_bitmap));
2461 vcpu->arch.regs[VCPU_REGS_RAX] = kvm_ghcb_get_rax_if_valid(svm, ghcb);
2462 vcpu->arch.regs[VCPU_REGS_RBX] = kvm_ghcb_get_rbx_if_valid(svm, ghcb);
2463 vcpu->arch.regs[VCPU_REGS_RCX] = kvm_ghcb_get_rcx_if_valid(svm, ghcb);
2464 vcpu->arch.regs[VCPU_REGS_RDX] = kvm_ghcb_get_rdx_if_valid(svm, ghcb);
2465 vcpu->arch.regs[VCPU_REGS_RSI] = kvm_ghcb_get_rsi_if_valid(svm, ghcb);
2467 svm->vmcb->save.cpl = kvm_ghcb_get_cpl_if_valid(svm, ghcb);
2469 if (kvm_ghcb_xcr0_is_valid(svm)) {
2480 svm->sev_es.sw_scratch = kvm_ghcb_get_sw_scratch_if_valid(svm, ghcb);
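
sev_es_sync_from_ghcb() (2437-2480) first snapshots the guest-written valid_bitmap (2458-2459) and then pulls in only the fields the guest marked valid; anything else reads as zero. The kvm_ghcb_*_is_valid()/kvm_ghcb_get_*_if_valid() helpers are macro-generated per field; a sketch of the generated shape for RAX, assuming GHCB_BITMAP_IDX() maps a field name to its bit index as defined by the GHCB spec:

    /* Illustrative expansion of the accessor macros (see
     * DEFINE_KVM_GHCB_ACCESSORS in svm.h); not the literal code. */
    static inline bool kvm_ghcb_rax_is_valid(struct vcpu_svm *svm)
    {
            return test_bit(GHCB_BITMAP_IDX(rax),
                            (unsigned long *)&svm->sev_es.valid_bitmap);
    }

    static inline u64 kvm_ghcb_get_rax_if_valid(struct vcpu_svm *svm,
                                                struct ghcb *ghcb)
    {
            return kvm_ghcb_rax_is_valid(svm) ? ghcb->save.rax : 0;
    }

Testing a private snapshot of the bitmap, rather than re-reading the shared GHCB page, keeps the guest from flipping valid bits while KVM is mid-validation.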
2491 static int sev_es_validate_vmgexit(struct vcpu_svm *svm)
2493 struct vmcb_control_area *control = &svm->vmcb->control;
2494 struct kvm_vcpu *vcpu = &svm->vcpu;
2505 if (svm->sev_es.ghcb->ghcb_usage) {
2512 if (!kvm_ghcb_sw_exit_code_is_valid(svm) ||
2513 !kvm_ghcb_sw_exit_info_1_is_valid(svm) ||
2514 !kvm_ghcb_sw_exit_info_2_is_valid(svm))
2521 if (!kvm_ghcb_rax_is_valid(svm))
2527 if (!kvm_ghcb_rcx_is_valid(svm))
2531 if (!kvm_ghcb_rax_is_valid(svm) ||
2532 !kvm_ghcb_rcx_is_valid(svm))
2535 if (!kvm_ghcb_xcr0_is_valid(svm))
2542 if (!kvm_ghcb_sw_scratch_is_valid(svm))
2546 if (!kvm_ghcb_rax_is_valid(svm))
2551 if (!kvm_ghcb_rcx_is_valid(svm))
2554 if (!kvm_ghcb_rax_is_valid(svm) ||
2555 !kvm_ghcb_rdx_is_valid(svm))
2560 if (!kvm_ghcb_rax_is_valid(svm) ||
2561 !kvm_ghcb_cpl_is_valid(svm))
2569 if (!kvm_ghcb_rax_is_valid(svm) ||
2570 !kvm_ghcb_rcx_is_valid(svm) ||
2571 !kvm_ghcb_rdx_is_valid(svm))
2575 if (!kvm_ghcb_rax_is_valid(svm) ||
2576 !kvm_ghcb_rcx_is_valid(svm))
2581 if (!kvm_ghcb_sw_scratch_is_valid(svm))
2599 svm->sev_es.ghcb->ghcb_usage);
2606 dump_ghcb(svm);
2609 ghcb_set_sw_exit_info_1(svm->sev_es.ghcb, 2);
2610 ghcb_set_sw_exit_info_2(svm->sev_es.ghcb, reason);
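
sev_es_validate_vmgexit() (2491 onward) enforces, per SW exit code, exactly which GHCB fields the guest must have supplied; the checks above are a representative subset. On failure KVM does not kill the guest: following the GHCB spec's error convention it reports malformed input back through the GHCB, which is what the matches at 2609-2610 do. The overall shape, condensed:

    switch (exit_code) {
    case SVM_EXIT_CPUID:
            if (!kvm_ghcb_rax_is_valid(svm) ||
                !kvm_ghcb_rcx_is_valid(svm))
                    goto vmgexit_err;
            break;
    /* ... one case per supported exit code ... */
    }
    return 0;

    vmgexit_err:
            /* SW_EXITINFO1 = 2 flags malformed input per the GHCB spec;
             * SW_EXITINFO2 carries the specific reason back to the guest. */
            ghcb_set_sw_exit_info_1(svm->sev_es.ghcb, 2);
            ghcb_set_sw_exit_info_2(svm->sev_es.ghcb, reason);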
2616 void sev_es_unmap_ghcb(struct vcpu_svm *svm)
2618 if (!svm->sev_es.ghcb)
2621 if (svm->sev_es.ghcb_sa_free) {
2627 if (svm->sev_es.ghcb_sa_sync) {
2628 kvm_write_guest(svm->vcpu.kvm,
2629 svm->sev_es.sw_scratch,
2630 svm->sev_es.ghcb_sa,
2631 svm->sev_es.ghcb_sa_len);
2632 svm->sev_es.ghcb_sa_sync = false;
2635 kvfree(svm->sev_es.ghcb_sa);
2636 svm->sev_es.ghcb_sa = NULL;
2637 svm->sev_es.ghcb_sa_free = false;
2640 trace_kvm_vmgexit_exit(svm->vcpu.vcpu_id, svm->sev_es.ghcb);
2642 sev_es_sync_to_ghcb(svm);
2644 kvm_vcpu_unmap(&svm->vcpu, &svm->sev_es.ghcb_map, true);
2645 svm->sev_es.ghcb = NULL;
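
sev_es_unmap_ghcb() (2616-2645) runs before re-entering the guest. If a bounce scratch buffer is still live and was registered for sync, e.g. an MMIO read whose data userspace supplied after the VMGEXIT handler returned, the buffer is written back to the guest's scratch GPA before being freed, and only then is the GHCB itself synced and unmapped. The flag pairing is the invariant to keep in mind:

    /* Scratch bounce-buffer invariants (sketch of 2621-2637):
     *   ghcb_sa_free: ghcb_sa was allocated by KVM and must be kvfree'd
     *   ghcb_sa_sync: guest memory at sw_scratch must be refreshed from
     *                 ghcb_sa before the buffer disappears              */
    if (svm->sev_es.ghcb_sa_sync) {
            kvm_write_guest(svm->vcpu.kvm, svm->sev_es.sw_scratch,
                            svm->sev_es.ghcb_sa, svm->sev_es.ghcb_sa_len);
            svm->sev_es.ghcb_sa_sync = false;
    }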
2648 void pre_sev_run(struct vcpu_svm *svm, int cpu)
2651 unsigned int asid = sev_get_asid(svm->vcpu.kvm);
2654 svm->asid = asid;
2662 if (sd->sev_vmcbs[asid] == svm->vmcb &&
2663 svm->vcpu.arch.last_vmentry_cpu == cpu)
2666 sd->sev_vmcbs[asid] = svm->vmcb;
2667 svm->vmcb->control.tlb_ctl = TLB_CONTROL_FLUSH_ASID;
2668 vmcb_mark_dirty(svm->vmcb, VMCB_ASID);
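
pre_sev_run() (2648-2668) installs the guest's SEV ASID and decides whether to flush: ASIDs are a shared hardware resource, so unless this exact VMCB was the most recent user of the ASID on this physical CPU, stale TLB entries may exist. The early-return form above reads more naturally inverted; an equivalent sketch, with sd assumed to be this CPU's svm_cpu_data and sev_vmcbs[] its "last VMCB per ASID" table:

    if (sd->sev_vmcbs[asid] != svm->vmcb ||
        svm->vcpu.arch.last_vmentry_cpu != cpu) {
            sd->sev_vmcbs[asid] = svm->vmcb;
            svm->vmcb->control.tlb_ctl = TLB_CONTROL_FLUSH_ASID;
            vmcb_mark_dirty(svm->vmcb, VMCB_ASID);
    }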
2672 static int setup_vmgexit_scratch(struct vcpu_svm *svm, bool sync, u64 len)
2674 struct vmcb_control_area *control = &svm->vmcb->control;
2679 scratch_gpa_beg = svm->sev_es.sw_scratch;
2710 scratch_va = (void *)svm->sev_es.ghcb;
2726 if (kvm_read_guest(svm->vcpu.kvm, scratch_gpa_beg, scratch_va, len)) {
2740 svm->sev_es.ghcb_sa_sync = sync;
2741 svm->sev_es.ghcb_sa_free = true;
2744 svm->sev_es.ghcb_sa = scratch_va;
2745 svm->sev_es.ghcb_sa_len = len;
2750 ghcb_set_sw_exit_info_1(svm->sev_es.ghcb, 2);
2751 ghcb_set_sw_exit_info_2(svm->sev_es.ghcb, GHCB_ERR_INVALID_SCRATCH_AREA);
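
setup_vmgexit_scratch() (2672-2751) resolves the guest's SW_SCRATCH pointer into a host virtual address, with two cases visible above: a scratch area inside the GHCB's shared buffer reuses the already-mapped GHCB hva (2710), while one elsewhere in guest memory is bounced through a host allocation (2726) and flagged for write-back and freeing at unmap time (2740-2741). A condensed sketch; scratch_in_ghcb_shared_buffer() is a hypothetical stand-in for the bounds checks against struct ghcb's shared_buffer region:

    if (scratch_in_ghcb_shared_buffer(scratch_gpa_beg, len)) {
            /* Already mapped along with the GHCB itself. */
            scratch_va = (void *)svm->sev_es.ghcb +
                         (scratch_gpa_beg - control->ghcb_gpa);
    } else {
            /* Bounce buffer: copy in now, sync back on unmap if needed. */
            scratch_va = kvzalloc(len, GFP_KERNEL_ACCOUNT);
            if (!scratch_va)
                    return -ENOMEM;
            if (kvm_read_guest(svm->vcpu.kvm, scratch_gpa_beg, scratch_va, len))
                    goto e_scratch;
            svm->sev_es.ghcb_sa_sync = sync;
            svm->sev_es.ghcb_sa_free = true;
    }
    svm->sev_es.ghcb_sa = scratch_va;
    svm->sev_es.ghcb_sa_len = len;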
2756 static void set_ghcb_msr_bits(struct vcpu_svm *svm, u64 value, u64 mask,
2759 svm->vmcb->control.ghcb_gpa &= ~(mask << pos);
2760 svm->vmcb->control.ghcb_gpa |= (value & mask) << pos;
2763 static u64 get_ghcb_msr_bits(struct vcpu_svm *svm, u64 mask, unsigned int pos)
2765 return (svm->vmcb->control.ghcb_gpa >> pos) & mask;
2768 static void set_ghcb_msr(struct vcpu_svm *svm, u64 value)
2770 svm->vmcb->control.ghcb_gpa = value;
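
set_ghcb_msr_bits()/get_ghcb_msr_bits() (2756-2770) implement the GHCB MSR protocol's packing: per the GHCB specification, bits 11:0 of the GHCB MSR carry the operation code (GHCBInfo) and bits 63:12 carry operation-specific data, so every protocol exchange is a handful of mask-and-shift operations on one MSR value. Example, encoding a CPUID response with the kernel's sev-common.h constants (result value in bits 63:32, response code 0x005 in bits 11:0):

    set_ghcb_msr_bits(svm, cpuid_value,
                      GHCB_MSR_CPUID_VALUE_MASK, GHCB_MSR_CPUID_VALUE_POS);
    set_ghcb_msr_bits(svm, GHCB_MSR_CPUID_RESP,
                      GHCB_MSR_INFO_MASK, GHCB_MSR_INFO_POS);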
2773 static int sev_handle_vmgexit_msr_protocol(struct vcpu_svm *svm)
2775 struct vmcb_control_area *control = &svm->vmcb->control;
2776 struct kvm_vcpu *vcpu = &svm->vcpu;
2782 trace_kvm_vmgexit_msr_protocol_enter(svm->vcpu.vcpu_id,
2787 set_ghcb_msr(svm, GHCB_MSR_SEV_INFO(GHCB_VERSION_MAX,
2794 cpuid_fn = get_ghcb_msr_bits(svm,
2808 cpuid_reg = get_ghcb_msr_bits(svm,
2820 set_ghcb_msr_bits(svm, cpuid_value,
2824 set_ghcb_msr_bits(svm, GHCB_MSR_CPUID_RESP,
2832 reason_set = get_ghcb_msr_bits(svm,
2835 reason_code = get_ghcb_msr_bits(svm,
2853 trace_kvm_vmgexit_msr_protocol_exit(svm->vcpu.vcpu_id,
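
sev_handle_vmgexit_msr_protocol() (2773-2853) services guests that have no mapped GHCB yet (early boot) and must communicate entirely through the GHCB MSR. The matches at 2794 and 2808 decode a GHCB_MSR_CPUID_REQ; the field positions below follow the GHCB spec as mirrored in sev-common.h:

    /* CPUID function in bits 63:32, target register (0=EAX..3=EDX)
     * in bits 31:30 of the guest-written GHCB MSR value. */
    cpuid_fn  = get_ghcb_msr_bits(svm, GHCB_MSR_CPUID_FUNC_MASK,
                                  GHCB_MSR_CPUID_FUNC_POS);
    cpuid_reg = get_ghcb_msr_bits(svm, GHCB_MSR_CPUID_REG_MASK,
                                  GHCB_MSR_CPUID_REG_POS);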
2861 struct vcpu_svm *svm = to_svm(vcpu);
2862 struct vmcb_control_area *control = &svm->vmcb->control;
2869 return sev_handle_vmgexit_msr_protocol(svm);
2878 if (kvm_vcpu_map(vcpu, ghcb_gpa >> PAGE_SHIFT, &svm->sev_es.ghcb_map)) {
2887 svm->sev_es.ghcb = svm->sev_es.ghcb_map.hva;
2889 trace_kvm_vmgexit_enter(vcpu->vcpu_id, svm->sev_es.ghcb);
2891 sev_es_sync_from_ghcb(svm);
2892 ret = sev_es_validate_vmgexit(svm);
2896 ghcb_set_sw_exit_info_1(svm->sev_es.ghcb, 0);
2897 ghcb_set_sw_exit_info_2(svm->sev_es.ghcb, 0);
2902 ret = setup_vmgexit_scratch(svm, true, control->exit_info_2);
2909 svm->sev_es.ghcb_sa);
2912 ret = setup_vmgexit_scratch(svm, false, control->exit_info_2);
2919 svm->sev_es.ghcb_sa);
2923 svm->nmi_masked = false;
2940 ghcb_set_sw_exit_info_2(svm->sev_es.ghcb, sev->ap_jump_table);
2943 pr_err("svm: vmgexit: unsupported AP jump table request - exit_info_1=%#llx\n",
2945 ghcb_set_sw_exit_info_1(svm->sev_es.ghcb, 2);
2946 ghcb_set_sw_exit_info_2(svm->sev_es.ghcb, GHCB_ERR_INVALID_INPUT);
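
sev_handle_vmgexit() (2861 onward) is the top-level handler: fall back to the MSR protocol when the GHCB GPA carries protocol bits (2869), map the guest's GHCB page (2878), sync and validate it, pre-set SW_EXITINFO1/2 to zero for the success case (2896-2897), then dispatch on the SW exit code, e.g. routing MMIO reads and writes through the scratch buffer (2902, 2912). The AP jump table case at 2940-2946 dispatches on exit_info_1; condensed:

    switch (control->exit_info_1) {
    case 0:         /* set AP jump table address from exit_info_2 */
            sev->ap_jump_table = control->exit_info_2;
            break;
    case 1:         /* get AP jump table address */
            ghcb_set_sw_exit_info_2(svm->sev_es.ghcb, sev->ap_jump_table);
            break;
    default:        /* anything else is malformed input */
            ghcb_set_sw_exit_info_1(svm->sev_es.ghcb, 2);
            ghcb_set_sw_exit_info_2(svm->sev_es.ghcb, GHCB_ERR_INVALID_INPUT);
    }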
2965 int sev_es_string_io(struct vcpu_svm *svm, int size, unsigned int port, int in)
2971 if (svm->vmcb->control.exit_info_2 > INT_MAX)
2974 count = svm->vmcb->control.exit_info_2;
2978 r = setup_vmgexit_scratch(svm, in, bytes);
2982 return kvm_sev_es_string_io(&svm->vcpu, size, port, svm->sev_es.ghcb_sa,
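
sev_es_string_io() (2965-2982) sizes the scratch buffer as count * size, where count comes from guest-controlled exit_info_2, so the INT_MAX guard at 2971 is paired with an overflow-checked multiply before the allocation; without it a huge count could wrap the size and under-allocate. The shape of the check, using include/linux/overflow.h:

    if (svm->vmcb->control.exit_info_2 > INT_MAX)
            return -EINVAL;

    count = svm->vmcb->control.exit_info_2;
    if (unlikely(check_mul_overflow(count, size, &bytes)))
            return -EINVAL;

    r = setup_vmgexit_scratch(svm, in, bytes);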
2986 static void sev_es_vcpu_after_set_cpuid(struct vcpu_svm *svm)
2988 struct kvm_vcpu *vcpu = &svm->vcpu;
2994 set_msr_interception(vcpu, svm->msrpm, MSR_TSC_AUX, v_tsc_aux, v_tsc_aux);
3012 set_msr_interception(vcpu, svm->msrpm, MSR_IA32_XSS, 1, 1);
3014 set_msr_interception(vcpu, svm->msrpm, MSR_IA32_XSS, 0, 0);
3017 void sev_vcpu_after_set_cpuid(struct vcpu_svm *svm)
3019 struct kvm_vcpu *vcpu = &svm->vcpu;
3027 if (sev_es_guest(svm->vcpu.kvm))
3028 sev_es_vcpu_after_set_cpuid(svm);
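
sev_es_vcpu_after_set_cpuid() (2986-3014) retunes MSR interception to the guest's CPUID. In set_msr_interception() a value of 1 means "do not intercept" (direct guest access), so MSR_TSC_AUX is passed through only when hardware can virtualize it and the guest can actually consume it, and MSR_IA32_XSS passthrough tracks XSAVES. A sketch of the TSC_AUX decision behind the match at 2994:

    if (boot_cpu_has(X86_FEATURE_V_TSC_AUX)) {
            bool v_tsc_aux = guest_cpuid_has(vcpu, X86_FEATURE_RDTSCP) ||
                             guest_cpuid_has(vcpu, X86_FEATURE_RDPID);

            set_msr_interception(vcpu, svm->msrpm, MSR_TSC_AUX,
                                 v_tsc_aux, v_tsc_aux);
    }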
3031 static void sev_es_init_vmcb(struct vcpu_svm *svm)
3033 struct vmcb *vmcb = svm->vmcb01.ptr;
3034 struct kvm_vcpu *vcpu = &svm->vcpu;
3036 svm->vmcb->control.nested_ctl |= SVM_NESTED_CTL_SEV_ES_ENABLE;
3037 svm->vmcb->control.virt_ext |= LBR_CTL_ENABLE_MASK;
3046 if (svm->sev_es.vmsa)
3047 svm->vmcb->control.vmsa_pa = __pa(svm->sev_es.vmsa);
3050 svm_clr_intercept(svm, INTERCEPT_CR0_READ);
3051 svm_clr_intercept(svm, INTERCEPT_CR4_READ);
3052 svm_clr_intercept(svm, INTERCEPT_CR8_READ);
3053 svm_clr_intercept(svm, INTERCEPT_CR0_WRITE);
3054 svm_clr_intercept(svm, INTERCEPT_CR4_WRITE);
3055 svm_clr_intercept(svm, INTERCEPT_CR8_WRITE);
3057 svm_clr_intercept(svm, INTERCEPT_SELECTIVE_CR0);
3060 svm_set_intercept(svm, TRAP_EFER_WRITE);
3061 svm_set_intercept(svm, TRAP_CR0_WRITE);
3062 svm_set_intercept(svm, TRAP_CR4_WRITE);
3063 svm_set_intercept(svm, TRAP_CR8_WRITE);
3069 recalc_intercepts(svm);
3080 clr_exception_intercept(svm, DB_VECTOR);
3084 svm_clr_intercept(svm, INTERCEPT_XSETBV);
3087 set_msr_interception(vcpu, svm->msrpm, MSR_EFER, 1, 1);
3088 set_msr_interception(vcpu, svm->msrpm, MSR_IA32_CR_PAT, 1, 1);
3089 set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTBRANCHFROMIP, 1, 1);
3090 set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTBRANCHTOIP, 1, 1);
3091 set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTINTFROMIP, 1, 1);
3092 set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTINTTOIP, 1, 1);
3095 void sev_init_vmcb(struct vcpu_svm *svm)
3097 svm->vmcb->control.nested_ctl |= SVM_NESTED_CTL_SEV_ENABLE;
3098 clr_exception_intercept(svm, UD_VECTOR);
3104 clr_exception_intercept(svm, GP_VECTOR);
3106 if (sev_es_guest(svm->vcpu.kvm))
3107 sev_es_init_vmcb(svm);
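
sev_es_init_vmcb() (3031-3092) swaps the classic CR read/write intercepts (cleared at 3050-3057) for trap-style intercepts (3060-3063): with an encrypted VMSA, KVM can neither read the register nor veto the write, so the hardware completes the write first and then exits with the new value in exit_info_1 for KVM to mirror into its shadow state. Likewise sev_init_vmcb() clears the #UD and #GP exception intercepts (3098, 3104) because KVM cannot decrypt guest memory to decode and emulate a faulting instruction. A loose sketch of a CR write trap handler, modeled on svm.c's cr_trap() and simplified to the CR0 case:

    static int cr_trap(struct kvm_vcpu *vcpu)
    {
            struct vcpu_svm *svm = to_svm(vcpu);
            unsigned long old, new = svm->vmcb->control.exit_info_1;
            int cr = svm->vmcb->control.exit_code - SVM_EXIT_CR0_WRITE_TRAP;

            if (cr == 0) {          /* CR4/CR8 handled analogously */
                    old = kvm_read_cr0(vcpu);
                    svm_set_cr0(vcpu, new);
                    kvm_post_set_cr0(vcpu, old, new);
            }
            return kvm_complete_insn_gp(vcpu, 0);
    }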
3110 void sev_es_vcpu_reset(struct vcpu_svm *svm)
3116 set_ghcb_msr(svm, GHCB_MSR_SEV_INFO(GHCB_VERSION_MAX,
3163 struct vcpu_svm *svm = to_svm(vcpu);
3166 if (!svm->sev_es.received_first_sipi) {
3167 svm->sev_es.received_first_sipi = true;
3176 if (!svm->sev_es.ghcb)
3179 ghcb_set_sw_exit_info_2(svm->sev_es.ghcb, 1);
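
The final matches implement the AP reset hold contract: the very first SIPI is absorbed (3166-3167) because the vCPU boots from VMM-provided state, while any later SIPI completes a pending AP Reset Hold VMGEXIT by writing a non-zero SW_EXITINFO2 (3179), which is the wake signal the parked AP polls for. A hypothetical guest-side view of that handshake (illustrative only, not kernel code):

    ghcb_set_sw_exit_code(ghcb, SVM_VMGEXIT_AP_HLT_LOOP);
    do {
            VMGEXIT();                              /* park in the host */
    } while (!ghcb->save.sw_exit_info_2);           /* non-zero => SIPI */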