Lines matching defs:vcpu (arch/x86/kvm/x86.h)

void kvm_service_local_tlb_flush_requests(struct kvm_vcpu *vcpu);
int kvm_check_nested_events(struct kvm_vcpu *vcpu);

static inline bool kvm_vcpu_has_run(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.last_vmentry_cpu != -1;
}

static inline bool kvm_is_exception_pending(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.exception.pending ||
	       vcpu->arch.exception_vmexit.pending ||
	       kvm_test_request(KVM_REQ_TRIPLE_FAULT, vcpu);
}

static inline void kvm_clear_exception_queue(struct kvm_vcpu *vcpu)
{
	vcpu->arch.exception.pending = false;
	vcpu->arch.exception.injected = false;
	vcpu->arch.exception_vmexit.pending = false;
}

static inline void kvm_queue_interrupt(struct kvm_vcpu *vcpu, u8 vector,
				       bool soft)
{
	vcpu->arch.interrupt.injected = true;
	vcpu->arch.interrupt.soft = soft;
	vcpu->arch.interrupt.nr = vector;
}

static inline void kvm_clear_interrupt_queue(struct kvm_vcpu *vcpu)
{
	vcpu->arch.interrupt.injected = false;
}

static inline bool kvm_event_needs_reinjection(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.exception.injected || vcpu->arch.interrupt.injected ||
		vcpu->arch.nmi_injected;
}

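/*
 * Illustrative sketch (not part of this header): how the queue helpers
 * above typically combine on an event-injection path. The two helper
 * names below are hypothetical; only the kvm_* calls they make are real.
 */
static inline void kvm_reset_event_queues(struct kvm_vcpu *vcpu)
{
	/* Drop anything pending or already injected, e.g. across a reset. */
	kvm_clear_exception_queue(vcpu);
	kvm_clear_interrupt_queue(vcpu);
}

static inline bool kvm_can_inject_new_event(struct kvm_vcpu *vcpu)
{
	/* Re-injecting a prior event takes priority over queueing a new one. */
	return !kvm_event_needs_reinjection(vcpu) &&
	       !kvm_is_exception_pending(vcpu);
}
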
static inline bool is_protmode(struct kvm_vcpu *vcpu)
{
	return kvm_is_cr0_bit_set(vcpu, X86_CR0_PE);
}

static inline bool is_long_mode(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_X86_64
	return !!(vcpu->arch.efer & EFER_LMA);
#else
	return false;
#endif
}

static inline bool is_64_bit_mode(struct kvm_vcpu *vcpu)
{
	int cs_db, cs_l;

	WARN_ON_ONCE(vcpu->arch.guest_state_protected);

	if (!is_long_mode(vcpu))
		return false;
	static_call(kvm_x86_get_cs_db_l_bits)(vcpu, &cs_db, &cs_l);
	return cs_l;
}

static inline bool is_64_bit_hypercall(struct kvm_vcpu *vcpu)
{
	/*
	 * When guest state is protected, CS can't be read, but hypercall
	 * arguments must have been passed in 64-bit registers, so assume
	 * the guest is in 64-bit mode.
	 */
	return vcpu->arch.guest_state_protected || is_64_bit_mode(vcpu);
}

static inline bool mmu_is_nested(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.walk_mmu == &vcpu->arch.nested_mmu;
}

static inline bool is_pae(struct kvm_vcpu *vcpu)
{
	return kvm_is_cr4_bit_set(vcpu, X86_CR4_PAE);
}

static inline bool is_pse(struct kvm_vcpu *vcpu)
{
	return kvm_is_cr4_bit_set(vcpu, X86_CR4_PSE);
}

static inline bool is_paging(struct kvm_vcpu *vcpu)
{
	return likely(kvm_is_cr0_bit_set(vcpu, X86_CR0_PG));
}

static inline bool is_pae_paging(struct kvm_vcpu *vcpu)
{
	return !is_long_mode(vcpu) && is_pae(vcpu) && is_paging(vcpu);
}

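/*
 * Illustrative sketch: is_pae_paging() above is precisely the mode in
 * which the guest uses the four architectural PDPTE registers, so a
 * hypothetical helper gating PDPTR (re)loads on CR3 writes could be:
 */
static inline bool kvm_guest_has_pdptrs(struct kvm_vcpu *vcpu)
{
	/* Only 32-bit PAE paging (not long mode) has architectural PDPTRs. */
	return is_pae_paging(vcpu);
}
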
static inline u8 vcpu_virt_addr_bits(struct kvm_vcpu *vcpu)
{
	return kvm_is_cr4_bit_set(vcpu, X86_CR4_LA57) ? 57 : 48;
}

static inline bool is_noncanonical_address(u64 la, struct kvm_vcpu *vcpu)
{
	return !__is_canonical_address(la, vcpu_virt_addr_bits(vcpu));
}

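/*
 * Illustrative sketch: a typical consumer of is_noncanonical_address() is
 * emulation of an MSR that holds a linear address (e.g. a segment base),
 * where writing a non-canonical value must raise #GP. Hypothetical helper:
 */
static inline bool kvm_msr_base_is_valid(struct kvm_vcpu *vcpu, u64 data)
{
	return !is_noncanonical_address(data, vcpu);
}
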
static inline void vcpu_cache_mmio_info(struct kvm_vcpu *vcpu,
					gva_t gva, gfn_t gfn, unsigned access)
{
	u64 gen = kvm_memslots(vcpu->kvm)->generation;

	if (unlikely(gen & KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS))
		return;

	/* For a shadow nested page table, the "GVA" is actually an nGPA. */
	vcpu->arch.mmio_gva = mmu_is_nested(vcpu) ? 0 : gva & PAGE_MASK;
	vcpu->arch.mmio_access = access;
	vcpu->arch.mmio_gfn = gfn;
	vcpu->arch.mmio_gen = gen;
}

static inline bool vcpu_match_mmio_gen(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.mmio_gen == kvm_memslots(vcpu->kvm)->generation;
}

static inline void vcpu_clear_mmio_info(struct kvm_vcpu *vcpu, gva_t gva)
{
	if (gva != MMIO_GVA_ANY && vcpu->arch.mmio_gva != (gva & PAGE_MASK))
		return;

	vcpu->arch.mmio_gva = 0;
}

static inline bool vcpu_match_mmio_gva(struct kvm_vcpu *vcpu, unsigned long gva)
{
	if (vcpu_match_mmio_gen(vcpu) && vcpu->arch.mmio_gva &&
	    vcpu->arch.mmio_gva == (gva & PAGE_MASK))
		return true;

	return false;
}

static inline bool vcpu_match_mmio_gpa(struct kvm_vcpu *vcpu, gpa_t gpa)
{
	if (vcpu_match_mmio_gen(vcpu) && vcpu->arch.mmio_gfn &&
	    vcpu->arch.mmio_gfn == gpa >> PAGE_SHIFT)
		return true;

	return false;
}

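/*
 * Illustrative sketch: the cache filled by vcpu_cache_mmio_info() lets a
 * repeated fault on the same MMIO page skip the page-table walk. The
 * helper name below is hypothetical; a direct MMU matches on GPA, a
 * shadowed MMU on GVA:
 */
static inline bool kvm_mmio_fault_hits_cache(struct kvm_vcpu *vcpu,
					     u64 cr2_or_gpa, bool direct)
{
	return direct ? vcpu_match_mmio_gpa(vcpu, cr2_or_gpa) :
			vcpu_match_mmio_gva(vcpu, cr2_or_gpa);
}
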
static inline unsigned long kvm_register_read(struct kvm_vcpu *vcpu, int reg)
{
	unsigned long val = kvm_register_read_raw(vcpu, reg);

	return is_64_bit_mode(vcpu) ? val : (u32)val;
}

static inline void kvm_register_write(struct kvm_vcpu *vcpu,
				      int reg, unsigned long val)
{
	if (!is_64_bit_mode(vcpu))
		val = (u32)val;
	return kvm_register_write_raw(vcpu, reg, val);
}

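/*
 * Illustrative sketch: the wrappers above enforce that outside 64-bit mode
 * only the low 32 bits of a GPR are architecturally visible. A hypothetical
 * emulation step incrementing RAX therefore wraps at 32 bits for a 32-bit
 * guest:
 */
static inline void kvm_emulate_inc_rax(struct kvm_vcpu *vcpu)
{
	unsigned long rax = kvm_register_read(vcpu, VCPU_REGS_RAX);

	/* The write truncates back to 32 bits when not in 64-bit mode. */
	kvm_register_write(vcpu, VCPU_REGS_RAX, rax + 1);
}
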
void kvm_inject_realmode_interrupt(struct kvm_vcpu *vcpu, int irq, int inc_eip);

int kvm_read_guest_virt(struct kvm_vcpu *vcpu,
			gva_t addr, void *val, unsigned int bytes,
			struct x86_exception *exception);
int kvm_write_guest_virt_system(struct kvm_vcpu *vcpu,
				gva_t addr, void *val, unsigned int bytes,
				struct x86_exception *exception);

int handle_ud(struct kvm_vcpu *vcpu);

void kvm_deliver_exception_payload(struct kvm_vcpu *vcpu,
				   struct kvm_queued_exception *ex);

void kvm_vcpu_mtrr_init(struct kvm_vcpu *vcpu);
u8 kvm_mtrr_get_guest_memory_type(struct kvm_vcpu *vcpu, gfn_t gfn);
int kvm_mtrr_set_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data);
int kvm_mtrr_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata);
bool kvm_mtrr_check_gfn_range_consistency(struct kvm_vcpu *vcpu, gfn_t gfn,
					  int page_num);

void kvm_fixup_and_inject_pf_error(struct kvm_vcpu *vcpu, gva_t gva, u16 error_code);
int x86_decode_emulated_instruction(struct kvm_vcpu *vcpu, int emulation_type,
				    void *insn, int insn_len);
int x86_emulate_instruction(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
			    int emulation_type, void *insn, int insn_len);
fastpath_t handle_fastpath_set_msr_irqoff(struct kvm_vcpu *vcpu);

static inline void kvm_pr_unimpl_wrmsr(struct kvm_vcpu *vcpu, u32 msr, u64 data)
{
	vcpu_unimpl(vcpu, "Unhandled WRMSR(0x%x) = 0x%llx\n", msr, data);
}

static inline void kvm_pr_unimpl_rdmsr(struct kvm_vcpu *vcpu, u32 msr)
{
	vcpu_unimpl(vcpu, "Unhandled RDMSR(0x%x)\n", msr);
}

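/*
 * Illustrative sketch: these printers support KVM's "ignore unknown MSRs"
 * behavior, logging the access instead of injecting #GP. A hypothetical
 * default write handler:
 */
static inline int kvm_ignore_msr_write(struct kvm_vcpu *vcpu, u32 msr, u64 data)
{
	kvm_pr_unimpl_wrmsr(vcpu, msr, data);
	return 0;	/* Report success so the guest continues. */
}
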
static inline u64 nsec_to_cycles(struct kvm_vcpu *vcpu, u64 nsec)
{
	return pvclock_scale_delta(nsec, vcpu->arch.virtual_tsc_mult,
				   vcpu->arch.virtual_tsc_shift);
}

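/*
 * Illustrative sketch: nsec_to_cycles() scales host nanoseconds by the
 * vCPU's virtual TSC frequency. For example, a hypothetical helper
 * computing how many guest TSC cycles fit in one millisecond:
 */
static inline u64 kvm_guest_cycles_per_msec(struct kvm_vcpu *vcpu)
{
	return nsec_to_cycles(vcpu, NSEC_PER_MSEC);
}
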
static __always_inline void kvm_before_interrupt(struct kvm_vcpu *vcpu,
						 enum kvm_intr_type intr)
{
	WRITE_ONCE(vcpu->arch.handling_intr_from_guest, (u8)intr);
}

static __always_inline void kvm_after_interrupt(struct kvm_vcpu *vcpu)
{
	WRITE_ONCE(vcpu->arch.handling_intr_from_guest, 0);
}

static inline bool kvm_handling_nmi_from_guest(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.handling_intr_from_guest == KVM_HANDLING_NMI;
}

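/*
 * Illustrative sketch: the before/after pair brackets the window in which
 * a host IRQ or NMI is handled on the guest's behalf, so code such as the
 * PMI handler can consult kvm_handling_nmi_from_guest(). Hypothetical
 * caller shape:
 */
static inline void kvm_handle_guest_irq(struct kvm_vcpu *vcpu)
{
	kvm_before_interrupt(vcpu, KVM_HANDLING_IRQ);
	local_irq_enable();	/* Window in which the pending IRQ runs. */
	local_irq_disable();
	kvm_after_interrupt(vcpu);
}
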
void kvm_load_guest_xsave_state(struct kvm_vcpu *vcpu);
void kvm_load_host_xsave_state(struct kvm_vcpu *vcpu);

bool __kvm_is_valid_cr4(struct kvm_vcpu *vcpu, unsigned long cr4);
int kvm_handle_memory_failure(struct kvm_vcpu *vcpu, int r,
			      struct x86_exception *e);
int kvm_handle_invpcid(struct kvm_vcpu *vcpu, unsigned long type, gva_t gva);
bool kvm_msr_allowed(struct kvm_vcpu *vcpu, u32 index, u32 type);

int kvm_sev_es_mmio_write(struct kvm_vcpu *vcpu, gpa_t src, unsigned int bytes,
			  void *dst);
int kvm_sev_es_mmio_read(struct kvm_vcpu *vcpu, gpa_t src, unsigned int bytes,
			 void *dst);
int kvm_sev_es_string_io(struct kvm_vcpu *vcpu, unsigned int size,
			 unsigned int port, void *data, unsigned int count,
			 int in);