Lines Matching refs:end

354 	 * and smp_mb in walk_shadow_page_lockless_begin/end.
552 u64 end;
606 if (WARN_ON_ONCE(range->end <= range->start))
621 range->start, range->end - 1) {
626 hva_end = min_t(unsigned long, range->end,
643 gfn_range.end = hva_to_gfn_memslot(hva_end + PAGE_SIZE - 1, slot);
672 unsigned long end,
679 .end = end,
692 unsigned long end,
698 .end = end,
712 * surrounded by invalidate_range_{start,end}(), which is currently
736 * .change_pte() must be surrounded by .invalidate_range_{start,end}().
766 void kvm_mmu_invalidate_range_add(struct kvm *kvm, gfn_t start, gfn_t end)
774 kvm->mmu_invalidate_range_end = end;
788 max(kvm->mmu_invalidate_range_end, end);
794 kvm_mmu_invalidate_range_add(kvm, range->start, range->end);
804 .end = range->end,
811 trace_kvm_unmap_hva_range(range->start, range->end);
835 gfn_to_pfn_cache_invalidate_start(kvm, range->start, range->end);
868 * Assert that at least one range was added between start() and end().
880 .end = range->end,
908 unsigned long end)
910 trace_kvm_age_hva(start, end);
912 return kvm_handle_hva_range(mn, start, end, KVM_MMU_NOTIFIER_NO_ARG,
919 unsigned long end)
921 trace_kvm_age_hva(start, end);
936 return kvm_handle_hva_range_no_flush(mn, start, end, kvm_age_gfn);
1372 * notifier between a start() and end(), then there shouldn't be any
1997 gfn_t start, gfn_t end)
2001 kvm_for_each_memslot_in_gfn_range(&iter, slots, start, end) {
2435 * Returns true if _all_ gfns in the range [@start, @end) have attributes
2438 bool kvm_range_has_memory_attributes(struct kvm *kvm, gfn_t start, gfn_t end,
2449 has_attrs = !xas_find(&xas, end - 1);
2454 for (index = start; index < end; index++) {
2495 kvm_for_each_memslot_in_gfn_range(&iter, slots, range->start, range->end) {
2500 gfn_range.end = min(range->end, slot->base_gfn + slot->npages);
2501 if (gfn_range.start >= gfn_range.end)
2531 * least one range between begin() and end(), e.g. allows KVM to detect
2536 kvm_mmu_invalidate_range_add(kvm, range->start, range->end);
2541 /* Set @attributes for the gfn range [@start, @end). */
2542 static int kvm_vm_set_mem_attributes(struct kvm *kvm, gfn_t start, gfn_t end,
2547 .end = end,
2555 .end = end,
2570 if (kvm_range_has_memory_attributes(kvm, start, end, attributes))
2577 for (i = start; i < end; i++) {
2585 for (i = start; i < end; i++) {
2601 gfn_t start, end;
2614 end = (attrs->address + attrs->size) >> PAGE_SHIFT;
2623 return kvm_vm_set_mem_attributes(kvm, start, end, attrs->attributes);
3794 ktime_t end, bool success)
3797 u64 poll_ns = ktime_to_ns(ktime_sub(end, start));
3992 * Set at the beginning and cleared at the end of interception/PLE handler.
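A minimal sketch (not KVM's actual code) of the range-union bookkeeping suggested by the kvm_mmu_invalidate_range_add() matches at 766, 774 and 788: a tracked invalidation window is widened to cover each new [start, end) range. The struct and function names below are hypothetical stand-ins, not KVM symbols.

	#include <stdbool.h>

	typedef unsigned long long gfn_t;	/* stand-in for KVM's gfn_t */

	struct invalidate_window {
		bool active;
		gfn_t start;
		gfn_t end;	/* exclusive */
	};

	/* Widen the tracked window to cover [start, end), mirroring the
	 * min()/max() updates visible in the matched lines for
	 * kvm->mmu_invalidate_range_{start,end}. */
	static void invalidate_window_add(struct invalidate_window *w,
					  gfn_t start, gfn_t end)
	{
		if (!w->active) {
			w->active = true;
			w->start = start;
			w->end = end;
			return;
		}
		if (start < w->start)
			w->start = start;
		if (end > w->end)
			w->end = end;
	}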