Lines Matching defs:save

93 kvm_init_shadow_npt_mmu(vcpu, X86_CR0_PG, svm->vmcb01.ptr->save.cr4,
94 svm->vmcb01.ptr->save.efer,
152 if (!(svm->vmcb01.ptr->save.rflags & X86_EFLAGS_IF))
285 struct vmcb_save_area_cached *save)
287 if (CC(!(save->efer & EFER_SVME)))
290 if (CC((save->cr0 & X86_CR0_CD) == 0 && (save->cr0 & X86_CR0_NW)) ||
291 CC(save->cr0 & ~0xffffffffULL))
294 if (CC(!kvm_dr6_valid(save->dr6)) || CC(!kvm_dr7_valid(save->dr7)))
302 if ((save->efer & EFER_LME) && (save->cr0 & X86_CR0_PG)) {
303 if (CC(!(save->cr4 & X86_CR4_PAE)) ||
304 CC(!(save->cr0 & X86_CR0_PE)) ||
305 CC(!kvm_vcpu_is_legal_cr3(vcpu, save->cr3)))
310 if (CC(!__kvm_is_valid_cr4(vcpu, save->cr4)))
313 if (CC(!kvm_valid_efer(vcpu, save->efer)))
322 struct vmcb_save_area_cached *save = &svm->nested.save;
324 return __nested_vmcb_check_save(vcpu, save);
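
These hits (file lines 285-313, apparently from arch/x86/kvm/svm/nested.c) are the consistency checks KVM applies to the cached copy of vmcb12's save area before letting a nested VMRUN proceed. A condensed reassembly of how they fit together, assuming the kernel's CC() tracing macro and the validity helpers named in the hits; surrounding code and return paths are paraphrased rather than quoted:

    /* Sketch: validity checks on the cached vmcb12 save area. */
    static bool __nested_vmcb_check_save(struct kvm_vcpu *vcpu,
                                         struct vmcb_save_area_cached *save)
    {
            /* SVM must be enabled in the nested guest's EFER. */
            if (CC(!(save->efer & EFER_SVME)))
                    return false;

            /* CR0.CD=0 with CR0.NW=1 is illegal, and CR0's upper 32 bits
             * must be clear. */
            if (CC((save->cr0 & X86_CR0_CD) == 0 && (save->cr0 & X86_CR0_NW)) ||
                CC(save->cr0 & ~0xffffffffULL))
                    return false;

            /* No reserved bits may be set in DR6/DR7. */
            if (CC(!kvm_dr6_valid(save->dr6)) || CC(!kvm_dr7_valid(save->dr7)))
                    return false;

            /* Long mode with paging needs PAE, protected mode and a legal CR3. */
            if ((save->efer & EFER_LME) && (save->cr0 & X86_CR0_PG)) {
                    if (CC(!(save->cr4 & X86_CR4_PAE)) ||
                        CC(!(save->cr0 & X86_CR0_PE)) ||
                        CC(!kvm_vcpu_is_legal_cr3(vcpu, save->cr3)))
                            return false;
            }

            /* CR4 and EFER must also pass the generic KVM checks. */
            if (CC(!__kvm_is_valid_cr4(vcpu, save->cr4)) ||
                CC(!kvm_valid_efer(vcpu, save->efer)))
                    return false;

            return true;
    }
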
405 struct vmcb_save_area *save)
407 __nested_copy_vmcb_save_to_cache(&svm->nested.save, save);
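
File lines 322, 405 and 407 tie those checks to a cached snapshot: the fields KVM validates (and later consumes) are copied out of vmcb12 once, so the guest cannot flip them between the check and their use. A minimal sketch of the caching step; the exact layout of struct vmcb_save_area_cached is an assumption inferred from the fields checked above:

    /* Assumed shape of the cached save area (only validated fields). */
    struct vmcb_save_area_cached {
            u64 efer;
            u64 cr4;
            u64 cr3;
            u64 cr0;
            u64 dr7;
            u64 dr6;
    };

    static void __nested_copy_vmcb_save_to_cache(struct vmcb_save_area_cached *to,
                                                 struct vmcb_save_area *from)
    {
            /* Snapshot only what VMRUN emulation validates and consumes. */
            to->efer = from->efer;
            to->cr0  = from->cr0;
            to->cr3  = from->cr3;
            to->cr4  = from->cr4;
            to->dr6  = from->dr6;
            to->dr7  = from->dr7;
    }
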
534 svm->nested.vmcb02.ptr->save.g_pat = svm->vmcb01.ptr->save.g_pat;
554 vmcb02->save.es = vmcb12->save.es;
555 vmcb02->save.cs = vmcb12->save.cs;
556 vmcb02->save.ss = vmcb12->save.ss;
557 vmcb02->save.ds = vmcb12->save.ds;
558 vmcb02->save.cpl = vmcb12->save.cpl;
563 vmcb02->save.gdtr = vmcb12->save.gdtr;
564 vmcb02->save.idtr = vmcb12->save.idtr;
568 kvm_set_rflags(vcpu, vmcb12->save.rflags | X86_EFLAGS_FIXED);
570 svm_set_efer(vcpu, svm->nested.save.efer);
572 svm_set_cr0(vcpu, svm->nested.save.cr0);
573 svm_set_cr4(vcpu, svm->nested.save.cr4);
575 svm->vcpu.arch.cr2 = vmcb12->save.cr2;
577 kvm_rax_write(vcpu, vmcb12->save.rax);
578 kvm_rsp_write(vcpu, vmcb12->save.rsp);
579 kvm_rip_write(vcpu, vmcb12->save.rip);
582 vmcb02->save.rax = vmcb12->save.rax;
583 vmcb02->save.rsp = vmcb12->save.rsp;
584 vmcb02->save.rip = vmcb12->save.rip;
588 vmcb02->save.dr7 = svm->nested.save.dr7 | DR7_FIXED_1;
589 svm->vcpu.arch.dr6 = svm->nested.save.dr6 | DR6_ACTIVE_LOW;
600 vmcb02->save.dbgctl &= ~DEBUGCTL_RESERVED_BITS;
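
File lines 534-600 build vmcb02's save area for the nested guest: g_pat is inherited from vmcb01, segment registers and descriptor tables come straight from vmcb12, while RFLAGS/EFER/CR0/CR4 go through the regular kvm/svm setters so KVM's caches and checks stay in sync; RAX/RSP/RIP are mirrored into both the vCPU and vmcb02, the debug registers pick up their architecturally fixed bits, and reserved DebugCtl bits are masked off. A condensed sketch of that flow, with a hypothetical helper name and some surrounding conditions dropped:

    /* Hypothetical condensed helper: populate vmcb02's save area from vmcb12. */
    static void prepare_vmcb02_save(struct vcpu_svm *svm, struct vmcb *vmcb12)
    {
            struct kvm_vcpu *vcpu = &svm->vcpu;
            struct vmcb *vmcb02 = svm->nested.vmcb02.ptr;

            /* Segments and descriptor tables are taken verbatim from vmcb12. */
            vmcb02->save.es   = vmcb12->save.es;
            vmcb02->save.cs   = vmcb12->save.cs;
            vmcb02->save.ss   = vmcb12->save.ss;
            vmcb02->save.ds   = vmcb12->save.ds;
            vmcb02->save.cpl  = vmcb12->save.cpl;
            vmcb02->save.gdtr = vmcb12->save.gdtr;
            vmcb02->save.idtr = vmcb12->save.idtr;

            /* Control state goes through KVM's setters; the values come from
             * the snapshot taken at VMRUN, not from guest memory. */
            kvm_set_rflags(vcpu, vmcb12->save.rflags | X86_EFLAGS_FIXED);
            svm_set_efer(vcpu, svm->nested.save.efer);
            svm_set_cr0(vcpu, svm->nested.save.cr0);
            svm_set_cr4(vcpu, svm->nested.save.cr4);

            vcpu->arch.cr2 = vmcb12->save.cr2;

            /* RAX/RSP/RIP are mirrored into the vCPU and into vmcb02. */
            kvm_rax_write(vcpu, vmcb12->save.rax);
            kvm_rsp_write(vcpu, vmcb12->save.rsp);
            kvm_rip_write(vcpu, vmcb12->save.rip);
            vmcb02->save.rax = vmcb12->save.rax;
            vmcb02->save.rsp = vmcb12->save.rsp;
            vmcb02->save.rip = vmcb12->save.rip;

            /* Debug state: enforce fixed bits, strip reserved DebugCtl bits. */
            vmcb02->save.dr7 = svm->nested.save.dr7 | DR7_FIXED_1;
            vcpu->arch.dr6   = svm->nested.save.dr6 | DR6_ACTIVE_LOW;
            vmcb02->save.dbgctl &= ~DEBUGCTL_RESERVED_BITS;
    }
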
782 to_vmcb->save.spec_ctrl = from_vmcb->save.spec_ctrl;
791 trace_kvm_nested_vmenter(svm->vmcb->save.rip,
793 vmcb12->save.rip,
798 vmcb12->save.cr3,
816 nested_vmcb02_prepare_control(svm, vmcb12->save.rip, vmcb12->save.cs.base);
819 ret = nested_svm_load_cr3(&svm->vcpu, svm->nested.save.cr3,
863 vmcb12_gpa = svm->vmcb->save.rax;
880 nested_copy_vmcb_save_to_cache(svm, &vmcb12->save);
895 vmcb01->save.efer = vcpu->arch.efer;
896 vmcb01->save.cr0 = kvm_read_cr0(vcpu);
897 vmcb01->save.cr4 = vcpu->arch.cr4;
898 vmcb01->save.rflags = kvm_get_rflags(vcpu);
899 vmcb01->save.rip = kvm_rip_read(vcpu);
902 vmcb01->save.cr3 = kvm_read_cr3(vcpu);
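
File lines 863-902 come from the emulated VMRUN path: the GPA of vmcb12 is taken from the guest's RAX, the interesting save-area fields are snapshotted into svm->nested.save, and L1's current register state is written into vmcb01's save area so the next nested #VMEXIT can restore it (the earlier hits at 782-819 show the other side of entry: tracing, preparing vmcb02's controls from vmcb12's RIP/CS.base, and loading the nested CR3). A compressed sketch of the save handling, with the mapping of vmcb12 and all error paths omitted; nested_svm_vmrun() is assumed to be the surrounding function:

    /* Sketch: save-area bookkeeping on emulated VMRUN (vmcb12 already mapped). */
    vmcb12_gpa = svm->vmcb->save.rax;                    /* VMRUN takes the VMCB GPA in RAX */
    nested_copy_vmcb_save_to_cache(svm, &vmcb12->save);  /* snapshot before validation */

    /* Stash L1 state in vmcb01 so it can be restored on nested #VMEXIT. */
    vmcb01->save.efer   = vcpu->arch.efer;
    vmcb01->save.cr0    = kvm_read_cr0(vcpu);
    vmcb01->save.cr4    = vcpu->arch.cr4;
    vmcb01->save.rflags = kvm_get_rflags(vcpu);
    vmcb01->save.rip    = kvm_rip_read(vcpu);
    vmcb01->save.cr3    = kvm_read_cr3(vcpu);  /* guarded by a condition in the original
                                                * (note the gap between lines 899 and 902) */
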
930 /* Copy state save area fields which are handled by VMRUN */
953 to_vmcb->save.fs = from_vmcb->save.fs;
954 to_vmcb->save.gs = from_vmcb->save.gs;
955 to_vmcb->save.tr = from_vmcb->save.tr;
956 to_vmcb->save.ldtr = from_vmcb->save.ldtr;
957 to_vmcb->save.kernel_gs_base = from_vmcb->save.kernel_gs_base;
958 to_vmcb->save.star = from_vmcb->save.star;
959 to_vmcb->save.lstar = from_vmcb->save.lstar;
960 to_vmcb->save.cstar = from_vmcb->save.cstar;
961 to_vmcb->save.sfmask = from_vmcb->save.sfmask;
962 to_vmcb->save.sysenter_cs = from_vmcb->save.sysenter_cs;
963 to_vmcb->save.sysenter_esp = from_vmcb->save.sysenter_esp;
964 to_vmcb->save.sysenter_eip = from_vmcb->save.sysenter_eip;
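
File lines 953-964 copy exactly the save-area fields that the VMLOAD/VMSAVE instructions manage (FS/GS/TR/LDTR, KERNEL_GS_BASE, the SYSCALL MSRs and the SYSENTER MSRs); in current kernels this looks like the body of svm_copy_vmloadsave_state(). A sketch of how such a helper is typically invoked from VMLOAD/VMSAVE emulation; the local names (map, guest_vmcb, is_vmload) are placeholders, not the kernel's actual variables:

    /* Sketch: VMLOAD pulls state from the guest's VMCB into the current one,
     * VMSAVE pushes it in the opposite direction. Mapping/error handling omitted. */
    struct vmcb *guest_vmcb = map.hva;   /* VMCB located at the GPA in guest RAX */

    if (is_vmload)
            svm_copy_vmloadsave_state(svm->vmcb, guest_vmcb);
    else
            svm_copy_vmloadsave_state(guest_vmcb, svm->vmcb);
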
997 vmcb12->save.es = vmcb02->save.es;
998 vmcb12->save.cs = vmcb02->save.cs;
999 vmcb12->save.ss = vmcb02->save.ss;
1000 vmcb12->save.ds = vmcb02->save.ds;
1001 vmcb12->save.gdtr = vmcb02->save.gdtr;
1002 vmcb12->save.idtr = vmcb02->save.idtr;
1003 vmcb12->save.efer = svm->vcpu.arch.efer;
1004 vmcb12->save.cr0 = kvm_read_cr0(vcpu);
1005 vmcb12->save.cr3 = kvm_read_cr3(vcpu);
1006 vmcb12->save.cr2 = vmcb02->save.cr2;
1007 vmcb12->save.cr4 = svm->vcpu.arch.cr4;
1008 vmcb12->save.rflags = kvm_get_rflags(vcpu);
1009 vmcb12->save.rip = kvm_rip_read(vcpu);
1010 vmcb12->save.rsp = kvm_rsp_read(vcpu);
1011 vmcb12->save.rax = kvm_rax_read(vcpu);
1012 vmcb12->save.dr7 = vmcb02->save.dr7;
1013 vmcb12->save.dr6 = svm->vcpu.arch.dr6;
1014 vmcb12->save.cpl = vmcb02->save.cpl;
1111 kvm_set_rflags(vcpu, vmcb01->save.rflags);
1112 svm_set_efer(vcpu, vmcb01->save.efer);
1113 svm_set_cr0(vcpu, vmcb01->save.cr0 | X86_CR0_PE);
1114 svm_set_cr4(vcpu, vmcb01->save.cr4);
1115 kvm_rax_write(vcpu, vmcb01->save.rax);
1116 kvm_rsp_write(vcpu, vmcb01->save.rsp);
1117 kvm_rip_write(vcpu, vmcb01->save.rip);
1135 rc = nested_svm_load_cr3(vcpu, vmcb01->save.cr3, false, true);
1153 if (unlikely(vmcb01->save.rflags & X86_EFLAGS_TF))
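
File lines 997-1153 are the two halves of the save-area work on a nested #VMEXIT: first the L2 state held in vmcb02 (and in the vCPU's cached registers) is copied back into the guest's vmcb12, then L1's own state is reloaded from vmcb01 through the regular setters, CR3 is switched back with nested_svm_load_cr3(), and a trap is raised if L1 had RFLAGS.TF set when it executed VMRUN. A condensed sketch of the restore half, assuming it sits inside the nested #VMEXIT handler; the #DB injection at the end is an assumption about what follows line 1153:

    /* Sketch: restore L1 register state from vmcb01 after a nested #VMEXIT. */
    kvm_set_rflags(vcpu, vmcb01->save.rflags);
    svm_set_efer(vcpu, vmcb01->save.efer);
    svm_set_cr0(vcpu, vmcb01->save.cr0 | X86_CR0_PE);  /* CR0.PE is forced back on */
    svm_set_cr4(vcpu, vmcb01->save.cr4);
    kvm_rax_write(vcpu, vmcb01->save.rax);
    kvm_rsp_write(vcpu, vmcb01->save.rsp);
    kvm_rip_write(vcpu, vmcb01->save.rip);

    /* Switch the MMU back to L1's CR3 (nested_npt=false, reload_pdptrs=true). */
    rc = nested_svm_load_cr3(vcpu, vmcb01->save.cr3, false, true);

    /* If L1 was single-stepping over VMRUN, deliver the pending #DB now
     * (assumed follow-up to the RFLAGS.TF test above). */
    if (unlikely(vmcb01->save.rflags & X86_EFLAGS_TF))
            kvm_queue_exception(vcpu, DB_VECTOR);
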
1364 if (to_svm(vcpu)->vmcb->save.cpl) {
1488 trace_kvm_nested_intr_vmexit(svm->vmcb->save.rip);
1638 if (copy_to_user(&user_vmcb->save, &svm->vmcb01.ptr->save,
1639 sizeof(user_vmcb->save)))
1653 struct vmcb_save_area *save;
1697 save = kzalloc(sizeof(*save), GFP_KERNEL_ACCOUNT);
1698 if (!ctl || !save)
1704 if (copy_from_user(save, &user_vmcb->save, sizeof(*save)))
1724 __nested_copy_vmcb_save_to_cache(&save_cached, save);
1725 if (!(save->cr0 & X86_CR0_PG) ||
1726 !(save->cr0 & X86_CR0_PE) ||
1727 (save->rflags & X86_EFLAGS_VM) ||
1735 * vmcb02, and the L1 save state which we store in vmcb01.
1742 svm->nested.vmcb02.ptr->save = svm->vmcb01.ptr->save;
1751 svm_copy_vmrun_state(&svm->vmcb01.ptr->save, save);
1755 nested_vmcb02_prepare_control(svm, svm->vmcb->save.rip, svm->vmcb->save.cs.base);
1774 kfree(save);
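
The final block (file lines 1638-1774) is the save-area handling for the nested-state ioctls: on the 'get' side vmcb01's save area is copied out to userspace, and on the 'set' side a scratch vmcb_save_area is allocated, filled from userspace, run through the same cached checks as a real VMRUN, required to describe paged protected mode without virtual-8086, and only then merged into vmcb01 with svm_copy_vmrun_state() while vmcb02 is re-seeded from vmcb01 and its controls rebuilt. A heavily condensed sketch of the 'set' side (control-area handling and most error paths omitted; the exact error codes are assumptions):

    /* Sketch of the save-area portion of restoring nested state from userspace. */
    struct vmcb_save_area *save;
    struct vmcb_save_area_cached save_cached;
    int ret;

    save = kzalloc(sizeof(*save), GFP_KERNEL_ACCOUNT);
    if (!save)
            return -ENOMEM;

    ret = -EFAULT;
    if (copy_from_user(save, &user_vmcb->save, sizeof(*save)))
            goto out_free;

    /* Validate the restored L1 state exactly like a nested VMRUN would. */
    ret = -EINVAL;
    __nested_copy_vmcb_save_to_cache(&save_cached, save);
    if (!(save->cr0 & X86_CR0_PG) ||
        !(save->cr0 & X86_CR0_PE) ||
        (save->rflags & X86_EFLAGS_VM) ||
        !__nested_vmcb_check_save(vcpu, &save_cached))
            goto out_free;

    /* vmcb02 starts from the current vmcb01 contents, the restored L1 state
     * replaces vmcb01's save area, and vmcb02's controls are rebuilt. */
    svm->nested.vmcb02.ptr->save = svm->vmcb01.ptr->save;
    svm_copy_vmrun_state(&svm->vmcb01.ptr->save, save);
    nested_vmcb02_prepare_control(svm, svm->vmcb->save.rip, svm->vmcb->save.cs.base);
    ret = 0;

    out_free:
            kfree(save);
            return ret;
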