Lines matching refs:memslot

60  * Bit 63 of the memslot generation number is an "update in-progress flag",
63 * mark cached memslot data, e.g. MMIO accesses, as potentially being stale,
74 * memslot update is in-progress, and to prevent cache hits *after* updating
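For orientation, a minimal sketch of how a consumer uses bit 63 (this is not the in-tree MMIO cache code; GEN_UPDATE_IN_PROGRESS and cached_lookup are illustrative names): a cache entry filled while the flag was set can never equal a settled generation, so mid-update entries are naturally discarded.

#define GEN_UPDATE_IN_PROGRESS	(1ULL << 63)	/* illustrative name for bit 63 */

struct cached_lookup {
	u64 generation;			/* memslot generation at fill time */
	/* ... cached translation data ... */
};

static bool cache_is_stale(const struct cached_lookup *c, u64 cur_gen)
{
	/*
	 * Stale if the generation has moved on, or if the entry was
	 * created while an update was in progress (bit 63 set at fill).
	 */
	return c->generation != cur_gen ||
	       (c->generation & GEN_UPDATE_IN_PROGRESS);
}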
393 * The most recently used memslot by this vCPU and the slots generation
396 * thousands of years, even assuming 1M memslot operations per second.
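For scale: with bit 63 reserved as the in-progress flag, the counter still has 63 usable bits, and 2^63 increments at 10^6 memslot operations per second is about 9.2 x 10^12 seconds, on the order of 290,000 years, so "thousands of years" is a conservative bound.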
569 * Since at idle each memslot belongs to two memslot sets, it has to contain
572 * Two memslot sets (one active and one inactive) are necessary so the VM
573 * continues to run on one memslot set while the other is being modified.
575 * These two memslot sets normally point to the same set of memslots.
576 * They can, however, be desynchronized when performing a memslot management
577 * operation by replacing the memslot to be modified by its copy.
578 * After the operation is complete, both memslot sets once again point to
579 * the same, common set of memslot data.
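A sketch of the publish-and-drain step behind that description (the helper name is illustrative; the fields and SRCU calls are the ones this header uses): the prepared inactive set is published with an RCU pointer swap, then readers of the old active set are quiesced before it is modified in turn.

static void publish_memslot_set(struct kvm *kvm, int as_id,
				struct kvm_memslots *inactive)
{
	/* New lookups now resolve against the freshly prepared set. */
	rcu_assign_pointer(kvm->memslots[as_id], inactive);

	/*
	 * Wait out SRCU readers still walking the old active set; once
	 * they drain it becomes the inactive set and is safe to modify.
	 */
	synchronize_srcu_expedited(&kvm->srcu);
}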
615 static inline unsigned long kvm_dirty_bitmap_bytes(struct kvm_memory_slot *memslot)
617 return ALIGN(memslot->npages, BITS_PER_LONG) / 8;
620 static inline unsigned long *kvm_second_dirty_bitmap(struct kvm_memory_slot *memslot)
622 unsigned long len = kvm_dirty_bitmap_bytes(memslot);
624 return memslot->dirty_bitmap + len / sizeof(*memslot->dirty_bitmap);
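These two helpers imply a single back-to-back allocation: the second bitmap lives immediately after the first, len bytes in. A sketch of the matching allocation (helper name illustrative):

static int alloc_dirty_bitmaps(struct kvm_memory_slot *memslot)
{
	unsigned long bytes = kvm_dirty_bitmap_bytes(memslot);

	/* One zeroed allocation covering both bitmaps. */
	memslot->dirty_bitmap = kvzalloc(bytes * 2, GFP_KERNEL_ACCOUNT);

	return memslot->dirty_bitmap ? 0 : -ENOMEM;
}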
726 * The mapping table from slot id to memslot.
731 * always result in higher memory usage (even for lower memslot counts).
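A lookup against that table walks the bucket for the slot id, in the style of the header's id_to_memslot() (sketched here under an illustrative name):

static struct kvm_memory_slot *slot_by_id(struct kvm_memslots *slots, int id)
{
	struct kvm_memory_slot *slot;
	int idx = slots->node_idx;

	hash_for_each_possible(slots->id_hash, slot, id_node[idx], id) {
		if (slot->id == id)
			return slot;
	}

	return NULL;
}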
756 /* The two memslot sets - active and inactive (per address space) */
758 /* The current active memslot set for each address space */
1053 #define kvm_for_each_memslot(memslot, bkt, slots) \
1054 hash_for_each(slots->id_hash, bkt, memslot, id_node[slots->node_idx]) \
1055 if (WARN_ON_ONCE(!memslot->npages)) { \
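The line match cuts the macro short; its body ends with an empty "{ } else" arm, so the caller's block runs only for slots that pass the WARN. Typical usage, with bkt as the hash-bucket cursor:

struct kvm_memory_slot *memslot;
int bkt;

kvm_for_each_memslot(memslot, bkt, slots) {
	/* visit every non-empty slot in this set */
	pr_info("slot %d: base_gfn 0x%llx, %lu pages\n",
		memslot->id, memslot->base_gfn, memslot->npages);
}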
1160 /* Iterate over each memslot at least partially intersecting [start, end) range */
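Assumed usage of the range iterator this comment introduces (kvm_for_each_memslot_in_gfn_range over a struct kvm_memslot_iter, whose slot field holds the current memslot):

struct kvm_memslot_iter iter;

kvm_for_each_memslot_in_gfn_range(&iter, slots, start, end) {
	struct kvm_memory_slot *slot = iter.slot;

	/* handle the part of [start, end) overlapping this slot */
}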
1298 void mark_page_dirty_in_slot(struct kvm *kvm, const struct kvm_memory_slot *memslot, gfn_t gfn);
1381 * still hold a read lock on kvm->srcu for the memslot checks.
1438 const struct kvm_memory_slot *memslot);
1465 void kvm_arch_sync_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot);
1470 int *is_dirty, struct kvm_memory_slot **memslot);
1698 * Returns a pointer to the memslot if it contains gfn.
1714 * Returns a pointer to the memslot that contains gfn. Otherwise returns NULL.
1716 * With "approx" set returns the memslot also when the address falls
1824 if (!gpc->memslot)
1827 mark_page_dirty_in_slot(gpc->kvm, gpc->memslot, gpa_to_gfn(gpc->gpa));
2288 static inline bool kvm_is_visible_memslot(struct kvm_memory_slot *memslot)
2290 return (memslot && memslot->id < KVM_USER_MEM_SLOTS &&
2291 !(memslot->flags & KVM_MEMSLOT_INVALID));
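A sketch of the typical use, mirroring the shape of kvm_is_visible_gfn(): resolve a gfn and report whether it lands in a valid, userspace-visible slot (ids at or above KVM_USER_MEM_SLOTS are KVM-internal).

static bool gfn_is_visible(struct kvm *kvm, gfn_t gfn)
{
	struct kvm_memory_slot *memslot = gfn_to_memslot(kvm, gfn);

	return kvm_is_visible_memslot(memslot);
}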