Searched refs:nested (Results 26 - 50 of 62) sorted by path

/linux-master/arch/x86/kvm/svm/
hyperv.h
18 struct hv_vmcb_enlightenments *hve = &svm->nested.ctl.hv_enlightenments;
24 hv_vcpu->nested.pa_page_gpa = hve->partition_assist_page;
25 hv_vcpu->nested.vm_id = hve->hv_vm_id;
26 hv_vcpu->nested.vp_id = hve->hv_vp_id;
32 struct hv_vmcb_enlightenments *hve = &svm->nested.ctl.hv_enlightenments;
nested.c
44 * TODO: track the cause of the nested page fault, and
62 u64 cr3 = svm->nested.ctl.nested_cr3;
77 return svm->nested.ctl.nested_cr3;
95 svm->nested.ctl.nested_cr3);
116 if (!(svm->nested.ctl.virt_ext & VIRTUAL_VMLOAD_VMSAVE_ENABLE_MASK))
135 g = &svm->nested.ctl;
157 * We want to see VMMCALLs from a nested guest only when Hyper-V L2 TLB
201 if (!svm->nested.force_msr_bitmap_recalc) {
202 struct hv_vmcb_enlightenments *hve = &svm->nested.ctl.hv_enlightenments;
206 (svm->nested
[all...]
svm.c
195 * Use nested page tables by default. Note, NPT may get forced off by
201 /* allow nested virtualization in KVM/SVM */
202 static int nested = true; variable
203 module_param(nested, int, 0444);
319 * Free the nested guest state, unless we are in SMM.
320 * In this case we will return to the nested guest
484 /* Use _safe variants to not break nested virtualization */
802 * For non-nested case:
806 * For nested case:
810 msrpm = is_guest_mode(vcpu) ? to_svm(vcpu)->nested
[all...]
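
Aside: the svm.c hits above (static int nested = true; module_param(nested, int, 0444);) are an instance of the standard module-parameter pattern. A minimal, self-contained sketch follows; the module name, description, and default value are illustrative, not taken from the results.

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/printk.h>

/* Read-only (0444) parameter: settable on the module command line
 * ("modprobe example nested=0") and visible under
 * /sys/module/example/parameters/nested, but not writable at runtime. */
static int nested = 1;
module_param(nested, int, 0444);
MODULE_PARM_DESC(nested, "Enable nested virtualization support (illustrative)");

static int __init example_init(void)
{
	pr_info("example: nested=%d\n", nested);
	return 0;
}
module_init(example_init);

static void __exit example_exit(void)
{
}
module_exit(example_exit);

MODULE_LICENSE("GPL");
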
svm.h
172 * we cannot inject a nested vmexit yet. */
242 struct svm_nested_state nested; member in struct:vcpu_svm
458 (svm->nested.ctl.int_ctl & V_GIF_ENABLE_MASK);
467 return svm->nested.vmcb02.ptr;
504 return svm->nested.ctl.nested_ctl & SVM_NESTED_CTL_NP_ENABLE;
510 (svm->nested.ctl.int_ctl & V_NMI_ENABLE_MASK);
572 /* nested.c */
575 #define NESTED_EXIT_DONE 1 /* Exit caused nested vmexit */
582 return is_guest_mode(vcpu) && (svm->nested.ctl.int_ctl & V_INTR_MASKING_MASK);
587 return vmcb12_is_intercept(&svm->nested
[all...]
/linux-master/arch/x86/kvm/vmx/
capabilities.h
67 struct nested_vmx_msrs nested; member in struct:vmcs_config
hyperv.c
9 #include "nested.h"
41 (!vcpu || to_vmx(vcpu)->nested.enlightened_vmcs_enabled))
203 vmx->nested.enlightened_vmcs_enabled = true;
215 struct hv_enlightened_vmcs *evmcs = vmx->nested.hv_evmcs;
hyperv.h
27 return evmptr_is_valid(vmx->nested.hv_evmcs_vmptr);
37 return evmptr_is_set(vmx->nested.hv_evmcs_vmptr);
42 return vmx->nested.hv_evmcs;
52 to_vmx(vcpu)->nested.enlightened_vmcs_enabled;
nested.c
13 #include "nested.h"
183 to_vmx(vcpu)->nested.need_vmcs12_to_shadow_sync = true;
196 if (vmx->nested.current_vmptr == INVALID_GPA &&
207 pr_debug_ratelimited("nested vmx abort, indicator %d\n", indicator);
224 vmx->nested.need_vmcs12_to_shadow_sync = false;
234 kvm_vcpu_unmap(vcpu, &vmx->nested.hv_evmcs_map, true);
235 vmx->nested.hv_evmcs = NULL;
238 vmx->nested.hv_evmcs_vmptr = EVMPTR_INVALID;
241 hv_vcpu->nested.pa_page_gpa = INVALID_GPA;
242 hv_vcpu->nested
[all...]
nested.h
42 return to_vmx(vcpu)->nested.cached_vmcs12;
47 return to_vmx(vcpu)->nested.cached_shadow_vmcs12;
52 * in vmx_set_nested_state; if it is satisfied, the nested state must include
60 return vmx->nested.current_vmptr != -1ull ||
68 return vmx->nested.vpid02 ? vmx->nested.vpid02 : vmx->vpid;
83 * Return the cr0/4 value that a nested guest would read. This is a combination
101 return vmx_misc_cr3_count(to_vmx(vcpu)->nested.msrs.misc_low);
111 return to_vmx(vcpu)->nested.msrs.misc_low &
117 return to_vmx(vcpu)->nested
[all...]
vmx.c
63 #include "nested.h"
118 * If nested=1, nested virtualization is supported, i.e., guests may use
119 * VMX and be a hypervisor for its own guests. If nested=0, guests may not
122 static bool __read_mostly nested = 1; variable
123 module_param(nested, bool, 0444);
300 * order to protect against KSM in the nested
898 /* When we are running a nested L2 guest and L1 specified for it a
1311 if (vmx->nested.need_vmcs12_to_shadow_sync)
1447 * performs IBPB on nested V
[all...]
vmx.h
117 * for correct emulation of VMX (i.e., nested VMX) on this vcpu.
175 * is delayed until L1 actually attempts to run a nested VM.
223 * userspace restores MSRs before nested state. If userspace restores
224 * MSRs after nested state, the snapshot holds garbage, but KVM can't
291 * non-nested (L1) guest, it always points to vmcs01. For a nested
331 /* Support for a guest hypervisor (nested VMX) */
332 struct nested_vmx nested; member in struct:vcpu_vmx
/linux-master/drivers/gpu/drm/nouveau/nvkm/subdev/bios/
init.c
44 '0' + (init->nested - 1) : ' ', ##args); \
2306 init->nested++;
2317 init->nested--;
/linux-master/drivers/iommu/intel/
Makefile
3 obj-$(CONFIG_INTEL_IOMMU) += iommu.o pasid.o nested.o cache.o
/linux-master/drivers/mtd/ubi/
wl.c
590 * @nested: denotes whether the work_sem is already held
596 int vol_id, int lnum, int torture, bool nested)
615 if (nested)
647 static int ensure_wear_leveling(struct ubi_device *ubi, int nested);
1016 * @nested: set to non-zero if this function is called from UBI worker
1022 static int ensure_wear_leveling(struct ubi_device *ubi, int nested)
1074 if (nested)
595 schedule_erase(struct ubi_device *ubi, struct ubi_wl_entry *e, int vol_id, int lnum, int torture, bool nested) argument
/linux-master/drivers/net/wireless/realtek/rtw89/
debug.c
370 bool nested; member in struct:txpwr_ent
387 { .nested = true, .ptr = __txpwr_ent_##_e, \
691 if (ent->nested) {
/linux-master/drivers/usb/fotg210/
fotg210-hcd.c
3087 static void start_iaa_cycle(struct fotg210_hcd *fotg210, bool nested) argument
3102 if (!nested) /* Avoid recursion */
/linux-master/fs/ocfs2/
journal.c
374 int ret, nested; local
379 nested = handle->h_ref > 1;
384 if (!nested) {
/linux-master/include/net/
sock.h
1886 int __sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested,
1889 const int nested)
1891 return __sk_receive_skb(sk, skb, nested, 1, true);
1888 sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested) argument
/linux-master/kernel/irq/
manage.c
713 * nested.
730 * nested.
752 * nested.
778 * nested.
1045 * Primary handler for nested threaded interrupts. Should never be
1050 WARN(1, "Primary handler called for nested irq %d\n", irq);
1514 int ret, nested, shared = 0; local
1537 nested = irq_settings_is_nested_thread(desc);
1538 if (nested) {
1545 * the driver for non nested interrup
[all...]
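
Aside: the manage.c hits above concern threaded and nested-threaded interrupt handling. A hedged sketch of the driver-side pattern follows; the names, handler body, and flags choice are illustrative, only request_threaded_irq() and the irq_set_nested_thread() mechanism mentioned in the comment come from the kernel API.

#include <linux/interrupt.h>

/* Thread function: runs in process context, so it may sleep (e.g. for I2C
 * accesses). For an interrupt marked nested (irq_set_nested_thread() set by
 * a demultiplexing irq_chip driver), no separate thread is created and this
 * function is invoked from the parent handler's thread instead. */
static irqreturn_t example_thread_fn(int irq, void *dev_id)
{
	return IRQ_HANDLED;
}

static int example_request_irq(unsigned int irq, void *dev)
{
	/* NULL primary handler + IRQF_ONESHOT: the core installs a default
	 * primary handler and keeps the line masked until the thread finishes. */
	return request_threaded_irq(irq, NULL, example_thread_fn,
				    IRQF_ONESHOT, "example", dev);
}
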
/linux-master/kernel/sched/
core.c
10143 unsigned int nested = preempt_count(); local
10145 nested += rcu_preempt_depth() << MIGHT_RESCHED_RCU_SHIFT;
10147 return nested == offsets;
/linux-master/kernel/trace/
ring_buffer.c
399 * Structure to hold event state and handle nested events.
659 * In the highly unlikely case that the event was nested more than
1439 * only happens in the outer most nested commit.
3201 * ring_buffer_nest_start - Allow to trace while nested
3211 * call ring_buffer_nest_end() after the nested ring_buffer_unlock_commit().
3227 * ring_buffer_nest_end - Allow to trace while nested
6605 static __init int rb_write_something(struct rb_test_data *data, bool nested) argument
6615 /* Have nested writes different that what is written */
6616 cnt = data->cnt + (nested ? 27 : 0);
6631 if (nested)
[all...]
/linux-master/net/core/
sock.c
546 const int nested, unsigned int trim_cap, bool refcounted)
559 if (nested)
545 __sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested, unsigned int trim_cap, bool refcounted) argument
/linux-master/net/netfilter/ipset/
ip_set_bitmap_gen.h
95 struct nlattr *nested; local
98 nested = nla_nest_start(skb, IPSET_ATTR_DATA);
99 if (!nested)
108 nla_nest_end(skb, nested);
207 struct nlattr *adt, *nested; local
229 nested = nla_nest_start(skb, IPSET_ATTR_DATA);
230 if (!nested) {
243 nla_nest_end(skb, nested);
253 nla_nest_cancel(skb, nested);
ip_set_hash_gen.h
1268 struct nlattr *nested; local
1281 nested = nla_nest_start(skb, IPSET_ATTR_DATA);
1282 if (!nested)
1320 nla_nest_end(skb, nested);
1357 struct nlattr *atd, *nested; local
1390 nested = nla_nest_start(skb, IPSET_ATTR_DATA);
1391 if (!nested) {
1403 nla_nest_end(skb, nested);
ip_set_list_set.c
459 struct nlattr *nested; local
462 nested = nla_nest_start(skb, IPSET_ATTR_DATA);
463 if (!nested)
472 nla_nest_end(skb, nested);
484 struct nlattr *atd, *nested; local
502 nested = nla_nest_start(skb, IPSET_ATTR_DATA);
503 if (!nested)
510 nla_nest_end(skb, nested);
520 nla_nest_cancel(skb, nested);
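
Aside: the three ipset hits above all follow the same netlink nested-attribute pattern. A minimal sketch of that pattern follows; the attribute payload and error handling are illustrative, not lifted from the files above.

#include <net/netlink.h>
#include <linux/netfilter/ipset/ip_set.h>

/* Open an IPSET_ATTR_DATA nest, add attributes into it, and either close
 * the nest (which patches up its length) or cancel it on failure so no
 * partial nest is left in the message. */
static int example_fill_data(struct sk_buff *skb)
{
	struct nlattr *nested;

	nested = nla_nest_start(skb, IPSET_ATTR_DATA);
	if (!nested)
		return -EMSGSIZE;

	if (nla_put_net32(skb, IPSET_ATTR_TIMEOUT, htonl(42))) {
		nla_nest_cancel(skb, nested);
		return -EMSGSIZE;
	}

	nla_nest_end(skb, nested);
	return 0;
}
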

Completed in 389 milliseconds