/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __KVM_X86_VMX_X86_OPS_H
#define __KVM_X86_VMX_X86_OPS_H

#include <linux/kvm_host.h>

#include "x86.h"

__init int vmx_hardware_setup(void);

extern struct kvm_x86_ops vt_x86_ops __initdata;
extern struct kvm_x86_init_ops vt_init_ops __initdata;

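/*
 * Context for the declarations below (a minimal sketch, not part of the
 * upstream header): the vmx_* functions exported here are meant to be wired
 * into the vt_x86_ops / vt_init_ops callback tables declared above, roughly
 * as follows.  Field names assume the upstream kvm_x86_ops and
 * kvm_x86_init_ops layouts; the actual tables are defined elsewhere
 * (e.g. in main.c), so treat this as an illustration only.
 *
 *	struct kvm_x86_ops vt_x86_ops __initdata = {
 *		.hardware_unsetup	= vmx_hardware_unsetup,
 *		.hardware_enable	= vmx_hardware_enable,
 *		.hardware_disable	= vmx_hardware_disable,
 *		.vcpu_create		= vmx_vcpu_create,
 *		.vcpu_run		= vmx_vcpu_run,
 *		.handle_exit		= vmx_handle_exit,
 *		...
 *	};
 *
 *	struct kvm_x86_init_ops vt_init_ops __initdata = {
 *		.hardware_setup		= vmx_hardware_setup,
 *		.runtime_ops		= &vt_x86_ops,
 *	};
 */
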
void vmx_hardware_unsetup(void);
int vmx_check_processor_compat(void);
int vmx_hardware_enable(void);
void vmx_hardware_disable(void);
int vmx_vm_init(struct kvm *kvm);
void vmx_vm_destroy(struct kvm *kvm);
int vmx_vcpu_precreate(struct kvm *kvm);
int vmx_vcpu_create(struct kvm_vcpu *vcpu);
int vmx_vcpu_pre_run(struct kvm_vcpu *vcpu);
fastpath_t vmx_vcpu_run(struct kvm_vcpu *vcpu, bool force_immediate_exit);
void vmx_vcpu_free(struct kvm_vcpu *vcpu);
void vmx_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event);
void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
void vmx_vcpu_put(struct kvm_vcpu *vcpu);
int vmx_handle_exit(struct kvm_vcpu *vcpu, fastpath_t exit_fastpath);
void vmx_handle_exit_irqoff(struct kvm_vcpu *vcpu);
int vmx_skip_emulated_instruction(struct kvm_vcpu *vcpu);
void vmx_update_emulated_instruction(struct kvm_vcpu *vcpu);
int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info);
#ifdef CONFIG_KVM_SMM
int vmx_smi_allowed(struct kvm_vcpu *vcpu, bool for_injection);
int vmx_enter_smm(struct kvm_vcpu *vcpu, union kvm_smram *smram);
int vmx_leave_smm(struct kvm_vcpu *vcpu, const union kvm_smram *smram);
void vmx_enable_smi_window(struct kvm_vcpu *vcpu);
#endif
int vmx_check_emulate_instruction(struct kvm_vcpu *vcpu, int emul_type,
				  void *insn, int insn_len);
int vmx_check_intercept(struct kvm_vcpu *vcpu,
			struct x86_instruction_info *info,
			enum x86_intercept_stage stage,
			struct x86_exception *exception);
bool vmx_apic_init_signal_blocked(struct kvm_vcpu *vcpu);
void vmx_migrate_timers(struct kvm_vcpu *vcpu);
void vmx_set_virtual_apic_mode(struct kvm_vcpu *vcpu);
void vmx_apicv_pre_state_restore(struct kvm_vcpu *vcpu);
bool vmx_check_apicv_inhibit_reasons(enum kvm_apicv_inhibit reason);
void vmx_hwapic_irr_update(struct kvm_vcpu *vcpu, int max_irr);
void vmx_hwapic_isr_update(int max_isr);
bool vmx_guest_apic_has_interrupt(struct kvm_vcpu *vcpu);
int vmx_sync_pir_to_irr(struct kvm_vcpu *vcpu);
void vmx_deliver_interrupt(struct kvm_lapic *apic, int delivery_mode,
			   int trig_mode, int vector);
void vmx_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu);
bool vmx_has_emulated_msr(struct kvm *kvm, u32 index);
void vmx_msr_filter_changed(struct kvm_vcpu *vcpu);
void vmx_prepare_switch_to_guest(struct kvm_vcpu *vcpu);
void vmx_update_exception_bitmap(struct kvm_vcpu *vcpu);
int vmx_get_msr_feature(struct kvm_msr_entry *msr);
int vmx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info);
u64 vmx_get_segment_base(struct kvm_vcpu *vcpu, int seg);
void vmx_get_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg);
void vmx_set_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg);
int vmx_get_cpl(struct kvm_vcpu *vcpu);
void vmx_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l);
bool vmx_is_valid_cr0(struct kvm_vcpu *vcpu, unsigned long cr0);
void vmx_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0);
void vmx_load_mmu_pgd(struct kvm_vcpu *vcpu, hpa_t root_hpa, int root_level);
void vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4);
bool vmx_is_valid_cr4(struct kvm_vcpu *vcpu, unsigned long cr4);
int vmx_set_efer(struct kvm_vcpu *vcpu, u64 efer);
void vmx_get_idt(struct kvm_vcpu *vcpu, struct desc_ptr *dt);
void vmx_set_idt(struct kvm_vcpu *vcpu, struct desc_ptr *dt);
void vmx_get_gdt(struct kvm_vcpu *vcpu, struct desc_ptr *dt);
void vmx_set_gdt(struct kvm_vcpu *vcpu, struct desc_ptr *dt);
void vmx_set_dr7(struct kvm_vcpu *vcpu, unsigned long val);
void vmx_sync_dirty_debug_regs(struct kvm_vcpu *vcpu);
void vmx_cache_reg(struct kvm_vcpu *vcpu, enum kvm_reg reg);
unsigned long vmx_get_rflags(struct kvm_vcpu *vcpu);
void vmx_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags);
bool vmx_get_if_flag(struct kvm_vcpu *vcpu);
void vmx_flush_tlb_all(struct kvm_vcpu *vcpu);
void vmx_flush_tlb_current(struct kvm_vcpu *vcpu);
void vmx_flush_tlb_gva(struct kvm_vcpu *vcpu, gva_t addr);
void vmx_flush_tlb_guest(struct kvm_vcpu *vcpu);
void vmx_set_interrupt_shadow(struct kvm_vcpu *vcpu, int mask);
u32 vmx_get_interrupt_shadow(struct kvm_vcpu *vcpu);
void vmx_patch_hypercall(struct kvm_vcpu *vcpu, unsigned char *hypercall);
void vmx_inject_irq(struct kvm_vcpu *vcpu, bool reinjected);
void vmx_inject_nmi(struct kvm_vcpu *vcpu);
void vmx_inject_exception(struct kvm_vcpu *vcpu);
void vmx_cancel_injection(struct kvm_vcpu *vcpu);
int vmx_interrupt_allowed(struct kvm_vcpu *vcpu, bool for_injection);
int vmx_nmi_allowed(struct kvm_vcpu *vcpu, bool for_injection);
bool vmx_get_nmi_mask(struct kvm_vcpu *vcpu);
void vmx_set_nmi_mask(struct kvm_vcpu *vcpu, bool masked);
void vmx_enable_nmi_window(struct kvm_vcpu *vcpu);
void vmx_enable_irq_window(struct kvm_vcpu *vcpu);
void vmx_update_cr8_intercept(struct kvm_vcpu *vcpu, int tpr, int irr);
void vmx_set_apic_access_page_addr(struct kvm_vcpu *vcpu);
void vmx_refresh_apicv_exec_ctrl(struct kvm_vcpu *vcpu);
void vmx_load_eoi_exitmap(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap);
int vmx_set_tss_addr(struct kvm *kvm, unsigned int addr);
int vmx_set_identity_map_addr(struct kvm *kvm, u64 ident_addr);
u8 vmx_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio);
void vmx_get_exit_info(struct kvm_vcpu *vcpu, u32 *reason,
		       u64 *info1, u64 *info2, u32 *intr_info, u32 *error_code);
u64 vmx_get_l2_tsc_offset(struct kvm_vcpu *vcpu);
u64 vmx_get_l2_tsc_multiplier(struct kvm_vcpu *vcpu);
void vmx_write_tsc_offset(struct kvm_vcpu *vcpu);
void vmx_write_tsc_multiplier(struct kvm_vcpu *vcpu);
void vmx_request_immediate_exit(struct kvm_vcpu *vcpu);
void vmx_sched_in(struct kvm_vcpu *vcpu, int cpu);
void vmx_update_cpu_dirty_logging(struct kvm_vcpu *vcpu);
#ifdef CONFIG_X86_64
int vmx_set_hv_timer(struct kvm_vcpu *vcpu, u64 guest_deadline_tsc,
		     bool *expired);
void vmx_cancel_hv_timer(struct kvm_vcpu *vcpu);
#endif
void vmx_setup_mce(struct kvm_vcpu *vcpu);

#endif /* __KVM_X86_VMX_X86_OPS_H */