Lines Matching refs:kvm

40 #include <linux/kvm.h>
54 * used in kvm, other bits are visible to userspace and are defined in
193 bool kvm_make_vcpus_request_mask(struct kvm *kvm, unsigned int req,
195 bool kvm_make_all_cpus_request(struct kvm *kvm, unsigned int req);
196 bool kvm_make_all_cpus_request_except(struct kvm *kvm, unsigned int req,
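A hedged usage sketch (not part of the header): broadcast a request to every vCPU, then consume it in the vCPU run loop. kvm_make_all_cpus_request(), kvm_check_request() and KVM_REQ_TLB_FLUSH are real kvm_host.h symbols; the flush helper is hypothetical.

    /* Sender: post the request and kick all vCPUs out of guest mode. */
    kvm_make_all_cpus_request(kvm, KVM_REQ_TLB_FLUSH);

    /* vCPU side, typically early in the arch run loop: */
    if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu))
            arch_flush_vcpu_tlb(vcpu);      /* hypothetical arch helper */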
233 int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
235 int kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
237 struct kvm_io_device *kvm_io_bus_get_dev(struct kvm *kvm, enum kvm_bus bus_idx,
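A hedged registration sketch for the in-kernel I/O bus: struct kvm_io_device_ops and kvm_iodevice_init() come from virt/kvm/iodevice.h, and (un)registration is done under kvm->slots_lock; my_mmio_read/my_mmio_write and the dev layout are hypothetical.

    static const struct kvm_io_device_ops my_mmio_ops = {
            .read  = my_mmio_read,
            .write = my_mmio_write,
    };

    kvm_iodevice_init(&dev->iodev, &my_mmio_ops);
    mutex_lock(&kvm->slots_lock);
    ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, addr, len, &dev->iodev);
    mutex_unlock(&kvm->slots_lock);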
273 bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range);
274 bool kvm_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range);
275 bool kvm_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range);
276 bool kvm_set_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range);
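A minimal sketch of how an architecture might back one of these mmu-notifier hooks, assuming a hypothetical arch_zap_gfn() helper; the return value tells generic code whether a remote TLB flush is needed.

    bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range)
    {
            bool flush = false;
            gfn_t gfn;

            /* Called with kvm->mmu_lock held by the generic notifier path. */
            for (gfn = range->start; gfn < range->end; gfn++)
                    flush |= arch_zap_gfn(kvm, range->slot, gfn);

            return flush;
    }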
328 struct kvm *kvm;
334 int vcpu_idx; /* index into kvm->vcpu_array */
655 struct kvm *kvm, int irq_source_id, int level,
688 bool kvm_arch_irqchip_in_kernel(struct kvm *kvm);
698 static inline int kvm_arch_nr_memslot_as_ids(struct kvm *kvm)
714 static inline bool kvm_arch_has_private_mem(struct kvm *kvm)
737 struct kvm {
749 * kvm->srcu critical section where acquiring the slots_lock would
777 * created_vcpus is protected by kvm->lock, and is incremented
850 pr_err("kvm [%i]: " fmt, task_pid_nr(current), ## __VA_ARGS__)
852 pr_info("kvm [%i]: " fmt, task_pid_nr(current), ## __VA_ARGS__)
854 pr_debug("kvm [%i]: " fmt, task_pid_nr(current), ## __VA_ARGS__)
856 pr_debug_ratelimited("kvm [%i]: " fmt, task_pid_nr(current), \
859 pr_err_ratelimited("kvm [%i]: " fmt, \
875 static inline void kvm_vm_dead(struct kvm *kvm)
877 kvm->vm_dead = true;
878 kvm_make_all_cpus_request(kvm, KVM_REQ_VM_DEAD);
881 static inline void kvm_vm_bugged(struct kvm *kvm)
883 kvm->vm_bugged = true;
884 kvm_vm_dead(kvm);
888 #define KVM_BUG(cond, kvm, fmt...) \
892 if (WARN_ONCE(__ret && !(kvm)->vm_bugged, fmt)) \
893 kvm_vm_bugged(kvm); \
897 #define KVM_BUG_ON(cond, kvm) \
901 if (WARN_ON_ONCE(__ret && !(kvm)->vm_bugged)) \
902 kvm_vm_bugged(kvm); \
914 #define KVM_BUG_ON_DATA_CORRUPTION(cond, kvm) \
920 else if (WARN_ON_ONCE(__ret && !(kvm)->vm_bugged)) \
921 kvm_vm_bugged(kvm); \
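Usage sketch for the bug macros: KVM_BUG_ON() evaluates to the condition, warns once, and marks the whole VM dead so subsequent ioctls and vCPU runs bail out; root_pt is an illustrative local.

    if (KVM_BUG_ON(!root_pt, vcpu->kvm))
            return -EIO;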
931 vcpu->____srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
936 srcu_read_unlock(&vcpu->kvm->srcu, vcpu->____srcu_idx);
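Sketch of the vCPU-context SRCU helpers shown above (kvm_vcpu_srcu_read_lock()/unlock() wrap ____srcu_idx): memslot-backed guest accesses outside of KVM_RUN need a kvm->srcu read-side section; gpa and buf are illustrative.

    kvm_vcpu_srcu_read_lock(vcpu);
    ret = kvm_read_guest(vcpu->kvm, gpa, buf, sizeof(buf));
    kvm_vcpu_srcu_read_unlock(vcpu);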
944 static inline bool kvm_dirty_log_manual_protect_and_init_set(struct kvm *kvm)
946 return !!(kvm->manual_dirty_log_protect & KVM_DIRTY_LOG_INITIALLY_SET);
949 static inline struct kvm_io_bus *kvm_get_bus(struct kvm *kvm, enum kvm_bus idx)
951 return srcu_dereference_check(kvm->buses[idx], &kvm->srcu,
952 lockdep_is_held(&kvm->slots_lock) ||
953 !refcount_read(&kvm->users_count));
956 static inline struct kvm_vcpu *kvm_get_vcpu(struct kvm *kvm, int i)
958 int num_vcpus = atomic_read(&kvm->online_vcpus);
963 return xa_load(&kvm->vcpu_array, i);
966 #define kvm_for_each_vcpu(idx, vcpup, kvm) \
967 xa_for_each_range(&kvm->vcpu_array, idx, vcpup, 0, \
968 (atomic_read(&kvm->online_vcpus) - 1))
970 static inline struct kvm_vcpu *kvm_get_vcpu_by_id(struct kvm *kvm, int id)
978 vcpu = kvm_get_vcpu(kvm, id);
981 kvm_for_each_vcpu(i, vcpu, kvm)
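Iteration sketch: kvm_for_each_vcpu() walks the vcpu_array xarray up to online_vcpus, while kvm_get_vcpu_by_id() matches vcpu->vcpu_id, which need not equal the array index. Kicking every vCPU is just an illustrative payload.

    unsigned long i;
    struct kvm_vcpu *vcpu;

    kvm_for_each_vcpu(i, vcpu, kvm)
            kvm_vcpu_kick(vcpu);    /* force each vCPU out of guest mode */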
987 void kvm_destroy_vcpus(struct kvm *kvm);
993 void kvm_arch_post_irq_ack_notifier_list_update(struct kvm *kvm);
994 void kvm_arch_post_irq_routing_update(struct kvm *kvm);
996 static inline void kvm_arch_post_irq_ack_notifier_list_update(struct kvm *kvm)
999 static inline void kvm_arch_post_irq_routing_update(struct kvm *kvm)
1020 void kvm_get_kvm(struct kvm *kvm);
1021 bool kvm_get_kvm_safe(struct kvm *kvm);
1022 void kvm_put_kvm(struct kvm *kvm);
1024 void kvm_put_kvm_no_destroy(struct kvm *kvm);
1026 static inline struct kvm_memslots *__kvm_memslots(struct kvm *kvm, int as_id)
1029 return srcu_dereference_check(kvm->memslots[as_id], &kvm->srcu,
1030 lockdep_is_held(&kvm->slots_lock) ||
1031 !refcount_read(&kvm->users_count));
1034 static inline struct kvm_memslots *kvm_memslots(struct kvm *kvm)
1036 return __kvm_memslots(kvm, 0);
1043 return __kvm_memslots(vcpu->kvm, as_id);
1051 bool kvm_are_all_memslots_empty(struct kvm *kvm);
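Lookup sketch for the memslot accessors: __kvm_memslots() is checked against kvm->srcu and slots_lock, so non-vCPU callers bracket the lookup with an SRCU read-side section; gfn is an illustrative input.

    int idx = srcu_read_lock(&kvm->srcu);
    struct kvm_memory_slot *slot = gfn_to_memslot(kvm, gfn);

    if (slot) {
            /* ... use slot->base_gfn, slot->npages, slot->userspace_addr ... */
    }
    srcu_read_unlock(&kvm->srcu, idx);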
1184 int kvm_set_memory_region(struct kvm *kvm,
1186 int __kvm_set_memory_region(struct kvm *kvm,
1188 void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *slot);
1189 void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen);
1190 int kvm_arch_prepare_memory_region(struct kvm *kvm,
1194 void kvm_arch_commit_memory_region(struct kvm *kvm,
1199 void kvm_arch_flush_shadow_all(struct kvm *kvm);
1201 void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
1207 struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn);
1208 unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn);
1209 unsigned long gfn_to_hva_prot(struct kvm *kvm, gfn_t gfn, bool *writable);
1216 kvm_pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn);
1217 kvm_pfn_t gfn_to_pfn_prot(struct kvm *kvm, gfn_t gfn, bool write_fault,
1231 int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset,
1233 int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len);
1234 int kvm_read_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
1236 int kvm_read_guest_offset_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
1239 int kvm_write_guest_page(struct kvm *kvm, gfn_t gfn, const void *data,
1241 int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data,
1243 int kvm_write_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
1245 int kvm_write_guest_offset_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
1248 int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
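Sketch of a cached guest write: a gfn_to_hva_cache avoids re-translating the gfn for a frequently touched guest structure; ghc, gpa and val are illustrative locals.

    struct gfn_to_hva_cache ghc;
    u64 val = 0;

    if (!kvm_gfn_to_hva_cache_init(kvm, &ghc, gpa, sizeof(val)))
            kvm_write_guest_cached(kvm, &ghc, &val, sizeof(val));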
1251 #define __kvm_get_guest(kvm, gfn, offset, v) \
1253 unsigned long __addr = gfn_to_hva(kvm, gfn); \
1262 #define kvm_get_guest(kvm, gpa, v) \
1265 struct kvm *__kvm = kvm; \
1271 #define __kvm_put_guest(kvm, gfn, offset, v) \
1273 unsigned long __addr = gfn_to_hva(kvm, gfn); \
1280 mark_page_dirty(kvm, gfn); \
1284 #define kvm_put_guest(kvm, gpa, v) \
1287 struct kvm *__kvm = kvm; \
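Usage sketch for the typed put/get macros: the access width comes from the value's type, the return value is 0 or -EFAULT, and a successful put marks the page dirty; status is illustrative.

    ret = kvm_put_guest(kvm, gpa, (u32)status);     /* 32-bit guest store */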
1293 int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len);
1294 struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn);
1295 bool kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn);
1298 void mark_page_dirty_in_slot(struct kvm *kvm, const struct kvm_memory_slot *memslot, gfn_t gfn);
1299 void mark_page_dirty(struct kvm *kvm, gfn_t gfn);
1325 * @kvm: pointer to kvm instance.
1331 void kvm_gpc_init(struct gfn_to_pfn_cache *gpc, struct kvm *kvm);
1345 * This primes a gfn_to_pfn_cache and links it into the @gpc->kvm's list for
1381 * still hold a read lock on kvm->srcu for the memslot checks.
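Access-pattern sketch for the gfn_to_pfn_cache, assuming the two-argument kvm_gpc_check()/kvm_gpc_refresh() API that matches the kvm_gpc_init() prototype above: validate under gpc->lock, refresh outside it, then touch the mapping through gpc->khva. data, len and flags are illustrative.

    read_lock_irqsave(&gpc->lock, flags);
    while (!kvm_gpc_check(gpc, PAGE_SIZE)) {
            read_unlock_irqrestore(&gpc->lock, flags);
            if (kvm_gpc_refresh(gpc, PAGE_SIZE))
                    return;                         /* mapping unavailable */
            read_lock_irqsave(&gpc->lock, flags);
    }
    memcpy(gpc->khva, data, len);                   /* guest page mapped here */
    read_unlock_irqrestore(&gpc->lock, flags);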
1435 void kvm_flush_remote_tlbs(struct kvm *kvm);
1436 void kvm_flush_remote_tlbs_range(struct kvm *kvm, gfn_t gfn, u64 nr_pages);
1437 void kvm_flush_remote_tlbs_memslot(struct kvm *kvm,
1448 void kvm_mmu_invalidate_begin(struct kvm *kvm);
1449 void kvm_mmu_invalidate_range_add(struct kvm *kvm, gfn_t start, gfn_t end);
1450 void kvm_mmu_invalidate_end(struct kvm *kvm);
1451 bool kvm_mmu_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range);
1459 int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext);
1461 void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm,
1465 void kvm_arch_sync_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot);
1468 int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log);
1469 int kvm_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log,
1473 int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_level,
1475 int kvm_vm_ioctl_enable_cap(struct kvm *kvm,
1505 int kvm_arch_vcpu_precreate(struct kvm *kvm, unsigned int id);
1511 int kvm_arch_pm_notifier(struct kvm *kvm, unsigned long state);
1530 int kvm_arch_post_init_vm(struct kvm *kvm);
1531 void kvm_arch_pre_destroy_vm(struct kvm *kvm);
1532 void kvm_arch_create_vm_debugfs(struct kvm *kvm);
1539 static inline struct kvm *kvm_arch_alloc_vm(void)
1541 return kzalloc(sizeof(struct kvm), GFP_KERNEL_ACCOUNT);
1545 static inline void __kvm_arch_free_vm(struct kvm *kvm)
1547 kvfree(kvm);
1551 static inline void kvm_arch_free_vm(struct kvm *kvm)
1553 __kvm_arch_free_vm(kvm);
1558 static inline int kvm_arch_flush_remote_tlbs(struct kvm *kvm)
1563 int kvm_arch_flush_remote_tlbs(struct kvm *kvm);
1567 static inline int kvm_arch_flush_remote_tlbs_range(struct kvm *kvm,
1573 int kvm_arch_flush_remote_tlbs_range(struct kvm *kvm, gfn_t gfn, u64 nr_pages);
1577 void kvm_arch_register_noncoherent_dma(struct kvm *kvm);
1578 void kvm_arch_unregister_noncoherent_dma(struct kvm *kvm);
1579 bool kvm_arch_has_noncoherent_dma(struct kvm *kvm);
1581 static inline void kvm_arch_register_noncoherent_dma(struct kvm *kvm)
1585 static inline void kvm_arch_unregister_noncoherent_dma(struct kvm *kvm)
1589 static inline bool kvm_arch_has_noncoherent_dma(struct kvm *kvm)
1595 void kvm_arch_start_assignment(struct kvm *kvm);
1596 void kvm_arch_end_assignment(struct kvm *kvm);
1597 bool kvm_arch_has_assigned_device(struct kvm *kvm);
1599 static inline void kvm_arch_start_assignment(struct kvm *kvm)
1603 static inline void kvm_arch_end_assignment(struct kvm *kvm)
1607 static __always_inline bool kvm_arch_has_assigned_device(struct kvm *kvm)
1642 bool kvm_arch_intc_initialized(struct kvm *kvm);
1644 static inline bool kvm_arch_intc_initialized(struct kvm *kvm)
1660 int kvm_arch_init_vm(struct kvm *kvm, unsigned long type);
1661 void kvm_arch_destroy_vm(struct kvm *kvm);
1662 void kvm_arch_sync_events(struct kvm *kvm);
1675 int kvm_irq_map_gsi(struct kvm *kvm,
1677 int kvm_irq_map_chip_pin(struct kvm *kvm, unsigned irqchip, unsigned pin);
1679 int kvm_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level,
1681 int kvm_set_msi(struct kvm_kernel_irq_routing_entry *irq_entry, struct kvm *kvm,
1684 struct kvm *kvm, int irq_source_id,
1686 bool kvm_irq_has_notifier(struct kvm *kvm, unsigned irqchip, unsigned pin);
1687 void kvm_notify_acked_gsi(struct kvm *kvm, int gsi);
1688 void kvm_notify_acked_irq(struct kvm *kvm, unsigned irqchip, unsigned pin);
1689 void kvm_register_irq_ack_notifier(struct kvm *kvm,
1691 void kvm_unregister_irq_ack_notifier(struct kvm *kvm,
1693 int kvm_request_irq_source_id(struct kvm *kvm);
1694 void kvm_free_irq_source_id(struct kvm *kvm, int irq_source_id);
1695 bool kvm_arch_irqfd_allowed(struct kvm *kvm, struct kvm_irqfd *args);
1785 static inline int memslot_id(struct kvm *kvm, gfn_t gfn)
1787 return gfn_to_memslot(kvm, gfn)->id;
1813 static inline bool kvm_is_gpa_in_memslot(struct kvm *kvm, gpa_t gpa)
1815 unsigned long hva = gfn_to_hva(kvm, gpa_to_gfn(gpa));
1827 mark_page_dirty_in_slot(gpc->kvm, gpc->memslot, gpa_to_gfn(gpc->gpa));
1836 struct kvm *kvm;
2015 static inline int mmu_invalidate_retry(struct kvm *kvm, unsigned long mmu_seq)
2017 if (unlikely(kvm->mmu_invalidate_in_progress))
2028 * than under kvm->mmu_lock, for scalability, so can't rely on
2029 * kvm->mmu_lock to keep things ordered.
2032 if (kvm->mmu_invalidate_seq != mmu_seq)
2037 static inline int mmu_invalidate_retry_gfn(struct kvm *kvm,
2041 lockdep_assert_held(&kvm->mmu_lock);
2048 if (unlikely(kvm->mmu_invalidate_in_progress)) {
2053 if (WARN_ON_ONCE(kvm->mmu_invalidate_range_start == INVALID_GPA ||
2054 kvm->mmu_invalidate_range_end == INVALID_GPA))
2057 if (gfn >= kvm->mmu_invalidate_range_start &&
2058 gfn < kvm->mmu_invalidate_range_end)
2062 if (kvm->mmu_invalidate_seq != mmu_seq)
2073 static inline bool mmu_invalidate_retry_gfn_unsafe(struct kvm *kvm,
2085 if (unlikely(READ_ONCE(kvm->mmu_invalidate_in_progress)) &&
2086 gfn >= kvm->mmu_invalidate_range_start &&
2087 gfn < kvm->mmu_invalidate_range_end)
2090 return READ_ONCE(kvm->mmu_invalidate_seq) != mmu_seq;
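Sketch of the fault-vs-invalidation retry protocol these helpers implement (see Documentation/virt/kvm/locking.rst): snapshot the sequence count, translate the gfn while sleeping is still allowed, then recheck under mmu_lock before installing the mapping. mmu_lock is shown as a rwlock as on x86/arm64; some architectures use a spinlock.

    mmu_seq = kvm->mmu_invalidate_seq;
    smp_rmb();                              /* order the read against the pfn lookup */

    pfn = gfn_to_pfn(kvm, gfn);             /* may sleep / fault pages in */

    write_lock(&kvm->mmu_lock);
    if (mmu_invalidate_retry(kvm, mmu_seq))
            goto out_unlock;                /* raced with invalidation: retry the fault */
    /* ... install pfn in the stage-2/shadow page tables ... */
    out_unlock:
    write_unlock(&kvm->mmu_lock);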
2098 bool kvm_arch_can_set_irq_routing(struct kvm *kvm);
2099 int kvm_set_irq_routing(struct kvm *kvm,
2103 int kvm_set_routing_entry(struct kvm *kvm,
2106 void kvm_free_irq_routing(struct kvm *kvm);
2110 static inline void kvm_free_irq_routing(struct kvm *kvm) {}
2114 int kvm_send_userspace_msi(struct kvm *kvm, struct kvm_msi *msi);
2116 void kvm_eventfd_init(struct kvm *kvm);
2117 int kvm_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args);
2120 int kvm_irqfd(struct kvm *kvm, struct kvm_irqfd *args);
2121 void kvm_irqfd_release(struct kvm *kvm);
2122 bool kvm_notify_irqfd_resampler(struct kvm *kvm,
2125 void kvm_irq_routing_update(struct kvm *);
2127 static inline int kvm_irqfd(struct kvm *kvm, struct kvm_irqfd *args)
2132 static inline void kvm_irqfd_release(struct kvm *kvm) {}
2134 static inline bool kvm_notify_irqfd_resampler(struct kvm *kvm,
2142 void kvm_arch_irq_routing_update(struct kvm *kvm);
2209 struct kvm *kvm;
2219 * create is called holding kvm->lock and any operations not suitable
2227 * outside of holding kvm->lock.
2236 * held by a vcpu or other kvm component that gets destroyed
2246 * the VM. kvm->lock is held.
2305 int kvm_arch_update_irqfd_routing(struct kvm *kvm, unsigned int host_irq,
2347 void kvm_arch_guest_memory_reclaimed(struct kvm *kvm);
2358 typedef int (*kvm_vm_thread_fn_t)(struct kvm *kvm, uintptr_t data);
2360 int kvm_vm_create_worker_thread(struct kvm *kvm, kvm_vm_thread_fn_t thread_fn,
2392 /* Max number of entries allowed for each kvm dirty ring */
2411 static inline unsigned long kvm_get_memory_attributes(struct kvm *kvm, gfn_t gfn)
2413 return xa_to_value(xa_load(&kvm->mem_attr_array, gfn));
2416 bool kvm_range_has_memory_attributes(struct kvm *kvm, gfn_t start, gfn_t end,
2418 bool kvm_arch_pre_set_memory_attributes(struct kvm *kvm,
2420 bool kvm_arch_post_set_memory_attributes(struct kvm *kvm,
2423 static inline bool kvm_mem_is_private(struct kvm *kvm, gfn_t gfn)
2426 kvm_get_memory_attributes(kvm, gfn) & KVM_MEMORY_ATTRIBUTE_PRIVATE;
2429 static inline bool kvm_mem_is_private(struct kvm *kvm, gfn_t gfn)
2436 int kvm_gmem_get_pfn(struct kvm *kvm, struct kvm_memory_slot *slot,
2439 static inline int kvm_gmem_get_pfn(struct kvm *kvm,
2443 KVM_BUG_ON(1, kvm);
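A hedged sketch of a private-memory fault path tying these together (assumes CONFIG_KVM_PRIVATE_MEM and a guest_memfd-backed slot; the error handling and use of max_order are illustrative): check the attributes xarray, then resolve the pfn from guest_memfd rather than the userspace hva.

    if (kvm_mem_is_private(kvm, gfn)) {
            kvm_pfn_t pfn;
            int max_order;

            if (kvm_gmem_get_pfn(kvm, slot, gfn, &pfn, &max_order))
                    return -EFAULT;
            /* ... map @pfn as private in the arch page tables ... */
    }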