Lines matching refs: ctl — references to the cached VMCB12 control area (svm->nested.ctl) in KVM's nested SVM code

62 u64 cr3 = svm->nested.ctl.nested_cr3;
77 return svm->nested.ctl.nested_cr3;
95 svm->nested.ctl.nested_cr3);
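
Lines 62-95 all read vmcb12's nested CR3 out of the cache; with nested paging enabled it becomes the TDP root for L2. A minimal sketch of the accessor pattern, using a hypothetical trimmed-down stand-in for struct vmcb_ctrl_area_cached:

    #include <stdint.h>

    /* Hypothetical trimmed stand-in for struct vmcb_ctrl_area_cached. */
    struct ctl_cache {
            uint64_t nested_cr3;  /* vmcb12's nCR3, snapshotted from guest memory */
    };

    /* The pattern at line 77: L2's TDP root is whatever L1 put in vmcb12,
     * as seen in the kernel-private snapshot (never re-read from the guest). */
    static uint64_t nested_tdp_cr3(const struct ctl_cache *ctl)
    {
            return ctl->nested_cr3;
    }
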
116 if (!(svm->nested.ctl.virt_ext & VIRTUAL_VMLOAD_VMSAVE_ENABLE_MASK))
135 g = &svm->nested.ctl;
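
Line 116 gates virtualized VMLOAD/VMSAVE for L2 on an enable bit that L1 set in vmcb12's virt_ext. A self-contained sketch of that feature-bit test; the bit positions below are my reading of the kernel's SVM headers and should be treated as assumptions here:

    #include <stdint.h>
    #include <stdbool.h>

    /* Assumed positions (in the kernel these live in asm/svm.h):
     * bit 0 = LBR virtualization enable, bit 1 = virtual VMLOAD/VMSAVE. */
    #define LBR_CTL_ENABLE_MASK                (1ULL << 0)
    #define VIRTUAL_VMLOAD_VMSAVE_ENABLE_MASK  (1ULL << 1)

    /* Pattern from line 116: L2 only gets hardware-virtualized VMLOAD/VMSAVE
     * if L1 enabled it in vmcb12's virt_ext, as seen in the cache. */
    static bool nested_vls_enabled(uint64_t cached_virt_ext)
    {
            return cached_virt_ext & VIRTUAL_VMLOAD_VMSAVE_ENABLE_MASK;
    }
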
202 struct hv_vmcb_enlightenments *hve = &svm->nested.ctl.hv_enlightenments;
206 (svm->nested.ctl.clean & BIT(HV_VMCB_NESTED_ENLIGHTENMENTS)))
211 if (!(vmcb12_is_intercept(&svm->nested.ctl, INTERCEPT_MSR_PROT)))
227 offset = svm->nested.ctl.msrpm_base_pa + (p * 4);
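
Lines 211 and 227 (and 1256/1270 below) implement nested MSR interception: when L1 intercepts MSRs at all, KVM consults L1's MSR permission bitmap at msrpm_base_pa, where each MSR owns two adjacent bits (read intercept, then write intercept), sixteen MSRs per 32-bit chunk. A standalone model of the per-chunk test, assuming that 2-bits-per-MSR layout:

    #include <stdint.h>
    #include <stdbool.h>

    /* 'chunk' stands in for the 4-byte value read from L1's bitmap at
     * msrpm_base_pa + offset (cf. lines 227 and 1270). Within a chunk,
     * MSR n owns bits 2*(n & 0xf) (read) and 2*(n & 0xf) + 1 (write). */
    static bool msr_intercepted(uint32_t chunk, uint32_t msr, bool write)
    {
            uint32_t bit = 2 * (msr & 0xf) + (write ? 1 : 0);

            return chunk & (1u << bit);
    }
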
330 struct vmcb_ctrl_area_cached *ctl = &svm->nested.ctl;
332 return __nested_vmcb_check_controls(vcpu, ctl);
385 __nested_copy_vmcb_control_to_cache(&svm->vcpu, &svm->nested.ctl, control);
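
Lines 330-385 show the design idea behind svm->nested.ctl: vmcb12 sits in guest memory that L1 can rewrite at any moment, so KVM snapshots the control fields into a kernel-owned cache once, then validates and consumes only the snapshot, closing the time-of-check/time-of-use window. A hedged sketch of that copy-then-check shape, with hypothetical trimmed structs (the real ones carry many more fields):

    #include <stdint.h>
    #include <stdbool.h>

    /* Hypothetical trimmed versions of the guest-writable control area and
     * the kernel-private cache. */
    struct vmcb_control { uint64_t nested_cr3; uint32_t int_ctl; uint32_t event_inj; };
    struct ctl_cache    { uint64_t nested_cr3; uint32_t int_ctl; uint32_t event_inj; };

    /* Snapshot the guest-controlled fields into kernel memory... */
    static void cache_controls(struct ctl_cache *dst, const struct vmcb_control *src)
    {
            dst->nested_cr3 = src->nested_cr3;
            dst->int_ctl    = src->int_ctl;
            dst->event_inj  = src->event_inj;
    }

    /* ...then validate and consume only the snapshot, never guest memory. */
    static bool check_cached_controls(const struct ctl_cache *ctl)
    {
            (void)ctl;  /* real checks: reserved bits, required intercepts, ... */
            return true;
    }
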
417 svm->nested.ctl.event_inj = svm->vmcb->control.event_inj;
418 svm->nested.ctl.event_inj_err = svm->vmcb->control.event_inj_err;
431 !test_bit(INTERCEPT_VINTR, (unsigned long *)svm->nested.ctl.intercepts))
440 svm->nested.ctl.int_ctl &= ~mask;
441 svm->nested.ctl.int_ctl |= svm->vmcb->control.int_ctl & mask;
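
Lines 440-441 are a masked merge: only the bits in 'mask' (interrupt-virtualization state the CPU may have updated while running L2) are refreshed from the live VMCB; every other cached int_ctl bit is preserved. The idiom in isolation:

    #include <stdint.h>

    /* Copy only the masked bits of 'live' into 'cached', leaving the rest of
     * 'cached' untouched (the pattern at lines 440-441). A caller would do
     * something like: ctl.int_ctl = merge_masked(ctl.int_ctl, live, mask);
     * where 'mask' covers e.g. the V_IRQ and V_TPR bits. */
    static uint32_t merge_masked(uint32_t cached, uint32_t live, uint32_t mask)
    {
            return (cached & ~mask) | (live & mask);
    }
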
594 (svm->nested.ctl.virt_ext & LBR_CTL_ENABLE_MASK))) {
651 (svm->nested.ctl.int_ctl & V_GIF_ENABLE_MASK))
683 svm->nested.ctl.tsc_offset,
693 (svm->nested.ctl.int_ctl & int_ctl_vmcb12_bits) |
696 vmcb02->control.int_vector = svm->nested.ctl.int_vector;
697 vmcb02->control.int_state = svm->nested.ctl.int_state;
698 vmcb02->control.event_inj = svm->nested.ctl.event_inj;
699 vmcb02->control.event_inj_err = svm->nested.ctl.event_inj_err;
710 vmcb02->control.next_rip = svm->nested.ctl.next_rip;
720 svm->soft_int_next_rip = svm->nested.ctl.next_rip;
729 (svm->nested.ctl.virt_ext & LBR_CTL_ENABLE_MASK);
735 pause_count12 = svm->nested.ctl.pause_filter_count;
739 pause_thresh12 = svm->nested.ctl.pause_filter_thresh;
753 if (vmcb12_is_intercept(&svm->nested.ctl, INTERCEPT_PAUSE)) {
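
Lines 683-753 build vmcb02, the control area the CPU actually runs for L2: int_ctl is composed from L1-owned bits (cached from vmcb12) and KVM-owned bits (from vmcb01), while the injection and interrupt-state fields pass through verbatim. A sketch of that composition; the bit partition is a placeholder (the real split depends on V_GIF, V_INTR_MASKING, and so on, and vmcb01's mask is defined separately, not simply as the complement):

    #include <stdint.h>

    struct intr_fields {
            uint32_t int_ctl, int_vector, int_state, event_inj, event_inj_err;
    };

    static struct intr_fields compose_vmcb02(const struct intr_fields *cached12,
                                             const struct intr_fields *vmcb01,
                                             uint32_t vmcb12_bits)
    {
            struct intr_fields out;

            /* Split int_ctl ownership between L1 and KVM (cf. line 693). */
            out.int_ctl       = (cached12->int_ctl & vmcb12_bits) |
                                (vmcb01->int_ctl & ~vmcb12_bits);
            out.int_vector    = cached12->int_vector;     /* line 696 */
            out.int_state     = cached12->int_state;      /* line 697 */
            out.event_inj     = cached12->event_inj;      /* line 698 */
            out.event_inj_err = cached12->event_inj_err;  /* line 699 */
            return out;
    }
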
1028 vmcb12->control.int_ctl = svm->nested.ctl.int_ctl;
1029 vmcb12->control.event_inj = svm->nested.ctl.event_inj;
1030 vmcb12->control.event_inj_err = svm->nested.ctl.event_inj_err;
1065 (svm->nested.ctl.virt_ext & LBR_CTL_ENABLE_MASK))) {
1106 svm->nested.ctl.nested_cr3 = 0;
1170 if (!vmcb12_is_intercept(&svm->nested.ctl, INTERCEPT_SHUTDOWN))
1256 if (!(vmcb12_is_intercept(&svm->nested.ctl, INTERCEPT_MSR_PROT)))
1270 if (kvm_vcpu_read_guest(&svm->vcpu, svm->nested.ctl.msrpm_base_pa + offset, &value, 4))
1283 if (!(vmcb12_is_intercept(&svm->nested.ctl, INTERCEPT_IOIO_PROT)))
1289 gpa = svm->nested.ctl.iopm_base_pa + (port / 8);
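
Lines 1283-1289 do the same permission-map dance for I/O ports: one bit per port, located at iopm_base_pa + port/8, and a multi-byte access is intercepted if any covered port's bit is set (which is why the real code may read two bytes when the bit run crosses a byte boundary). A standalone model of the bit test:

    #include <stdint.h>
    #include <stdbool.h>

    /* 'bytes' stands in for the byte(s) read from L1's IOPM at
     * iopm_base_pa + port / 8. 'size' is the access width in {1, 2, 4}. */
    static bool io_intercepted(uint16_t bytes, uint16_t port, unsigned int size)
    {
            unsigned int start_bit = port % 8;              /* bit within the byte */
            uint16_t mask = (0xf >> (4 - size)) << start_bit; /* 'size' consecutive bits */

            return bytes & mask;
    }
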
1314 if (vmcb12_is_intercept(&svm->nested.ctl, exit_code))
1319 if (vmcb12_is_intercept(&svm->nested.ctl, exit_code))
1337 if (vmcb12_is_intercept(&svm->nested.ctl, exit_code))
1377 return (svm->nested.ctl.intercepts[INTERCEPT_EXCEPTION] & BIT(vector));
1418 return vmcb12_is_intercept(&svm->nested.ctl, INTERCEPT_INIT);
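
Lines 1314-1418 all funnel through the same bitmap test: vmcb12_is_intercept() treats the cached intercepts[] words as one flat bit array, and line 1377's exception check is the same idea with the word (INTERCEPT_EXCEPTION) picked explicitly and the exception vector as the bit. A standalone equivalent:

    #include <stdint.h>
    #include <stdbool.h>

    #define MAX_INTERCEPT 5  /* illustrative array size, not the kernel's value */

    /* A flat bit number indexes into an array of 32-bit words:
     * word = bit / 32, bit-in-word = bit % 32. */
    static bool intercept_set(const uint32_t intercepts[MAX_INTERCEPT], uint32_t bit)
    {
            return intercepts[bit / 32] & (1u << (bit % 32));
    }
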
1583 struct vmcb_control_area *ctl;
1627 ctl = kzalloc(sizeof(*ctl), GFP_KERNEL);
1628 if (!ctl)
1631 nested_copy_vmcb_cache_to_control(ctl, &svm->nested.ctl);
1632 r = copy_to_user(&user_vmcb->control, ctl,
1634 kfree(ctl);
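
Lines 1583-1634 are the save-to-userspace direction: the validated cache is expanded back into a full vmcb_control_area in a zeroed bounce buffer, copied out, and freed. A kernel-style sketch of that shape, reusing the helper names visible in the listing; error handling is compressed and the function signature is illustrative:

    /* Kernel-style sketch, not the verbatim function. */
    static int get_nested_ctl(struct vcpu_svm *svm,
                              struct vmcb_control_area __user *dst)
    {
            struct vmcb_control_area *ctl;
            int r = 0;

            ctl = kzalloc(sizeof(*ctl), GFP_KERNEL);
            if (!ctl)
                    return -ENOMEM;

            /* Expand the kernel-private cache back to the full layout... */
            nested_copy_vmcb_cache_to_control(ctl, &svm->nested.ctl);

            /* ...and hand it out via a bounce buffer; kzalloc keeps padding
             * and unused fields from leaking kernel memory to userspace. */
            if (copy_to_user(dst, ctl, sizeof(*ctl)))
                    r = -EFAULT;

            kfree(ctl);
            return r;
    }
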
1652 struct vmcb_control_area *ctl;
1696 ctl = kzalloc(sizeof(*ctl), GFP_KERNEL_ACCOUNT);
1698 if (!ctl || !save)
1702 if (copy_from_user(ctl, &user_vmcb->control, sizeof(*ctl)))
1708 __nested_copy_vmcb_control_to_cache(vcpu, &ctl_cached, ctl);
1752 nested_copy_vmcb_control_to_cache(svm, ctl);
1775 kfree(ctl);
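
Lines 1652-1775 are the restore direction, and the ordering is the point: copy the userspace blob into kernel memory, convert it to the cached form so the same __nested_vmcb_check_controls() validation used at VMRUN applies, and only commit to svm->nested.ctl once the checks pass. A kernel-style sketch under the same caveats as above (the real code also restores the save area, omitted here):

    static int set_nested_ctl(struct kvm_vcpu *vcpu, struct vcpu_svm *svm,
                              const struct vmcb_control_area __user *src)
    {
            struct vmcb_ctrl_area_cached ctl_cached;
            struct vmcb_control_area *ctl;
            int ret;

            ctl = kzalloc(sizeof(*ctl), GFP_KERNEL_ACCOUNT);
            if (!ctl)
                    return -ENOMEM;

            ret = -EFAULT;
            if (copy_from_user(ctl, src, sizeof(*ctl)))
                    goto out;

            /* Validate the cached form with the same checks VMRUN uses. */
            __nested_copy_vmcb_control_to_cache(vcpu, &ctl_cached, ctl);
            ret = -EINVAL;
            if (!__nested_vmcb_check_controls(vcpu, &ctl_cached))
                    goto out;

            /* Only now commit to svm->nested.ctl (cf. line 1752). */
            nested_copy_vmcb_control_to_cache(svm, ctl);
            ret = 0;
    out:
            kfree(ctl);
            return ret;
    }
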