Lines Matching defs:write
74 * No need to split the huge page now, just set the write-protect pte bit
75 * Defer splitting the huge page until the next write fault
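
Lines 74-75 describe the huge-page strategy under dirty logging: the huge mapping is not split eagerly; only its write bit is cleared, and the split is deferred to the next write fault. A minimal user-space sketch of the idea follows; the bit positions and the helper name are invented for illustration, not the LoongArch encodings:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical bit layout; the real values live in the arch headers. */
#define PTE_VALID  (1ULL << 0)
#define PTE_WRITE  (1ULL << 1)
#define PTE_HUGE   (1ULL << 2)

typedef uint64_t pte_t;

/* Write-protect a mapping without splitting it: just drop the write
 * bit.  The huge page stays intact; a later write fault does the
 * actual split. */
static pte_t pte_wrprotect(pte_t pte)
{
	return pte & ~PTE_WRITE;
}

int main(void)
{
	pte_t huge = PTE_VALID | PTE_WRITE | PTE_HUGE;

	huge = pte_wrprotect(huge);
	printf("huge bit kept: %d, write bit cleared: %d\n",
	       !!(huge & PTE_HUGE), !(huge & PTE_WRITE));
	return 0;
}

Deferring the split keeps the common case cheap: a huge mapping that is never written again stays a huge mapping.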
351 * kvm_arch_mmu_enable_log_dirty_pt_masked() - write protect dirty pages
356 * slot to be write protected
358 * Walks the bits set in mask and write-protects the associated PTEs. Caller must
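
The kernel-doc at lines 351-358 describes kvm_arch_mmu_enable_log_dirty_pt_masked(): for each bit set in the caller's mask, the corresponding pte in the slot is write-protected. A sketch of that walk, using a stand-in PTE_WRITE bit and __builtin_ctzll for the find-lowest-set-bit step:

#include <stdint.h>
#include <stdio.h>

#define PTE_WRITE (1ULL << 1)   /* invented bit position */

typedef uint64_t pte_t;

/* Clear the write bit of every pte whose index is set in @mask,
 * mirroring the walk described for
 * kvm_arch_mmu_enable_log_dirty_pt_masked(). */
static void wrprotect_masked(pte_t *ptes, uint64_t mask)
{
	while (mask) {
		int i = __builtin_ctzll(mask);  /* lowest set bit */

		ptes[i] &= ~PTE_WRITE;
		mask &= mask - 1;               /* clear that bit */
	}
}

int main(void)
{
	pte_t ptes[8];

	for (int i = 0; i < 8; i++)
		ptes[i] = PTE_WRITE;

	wrprotect_masked(ptes, 0xA5);           /* bits 0, 2, 5, 7 */
	for (int i = 0; i < 8; i++)
		printf("pte[%d] writable=%d\n", i, !!(ptes[i] & PTE_WRITE));
	return 0;
}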
449 * If dirty page logging is enabled, write protect all pages in the slot
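
Line 449 is the complementary slot-wide case: when dirty page logging is first enabled, every page in the slot is write-protected up front so the first write to each page faults and gets logged. A toy version, again with an invented bit layout:

#include <stddef.h>
#include <stdint.h>

#define PTE_WRITE (1ULL << 1)   /* invented bit position */

typedef uint64_t pte_t;

/* On enabling dirty logging for a slot, write-protect every pte so
 * each page's first write traps into the fault handler. */
static void slot_wrprotect(pte_t *ptes, size_t npages)
{
	for (size_t i = 0; i < npages; i++)
		ptes[i] &= ~PTE_WRITE;
}

int main(void)
{
	pte_t ptes[4] = { PTE_WRITE, PTE_WRITE, 0, PTE_WRITE };

	slot_wrprotect(ptes, 4);
	return 0;
}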
514 /* Fill new pte if write protected or page migrated */
520 * _PAGE_WRITE is left for map_page_fast to set on the next write fault
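
Lines 514-520 are from the pte-refill path: the comment's point is that a refilled pte can stay read-only, because map_page_fast grants write permission again at the next write fault. A hypothetical illustration of installing such a pte (names and bit values invented):

#include <stdint.h>
#include <stdio.h>

#define PTE_VALID (1ULL << 0)   /* invented bit positions */
#define PTE_WRITE (1ULL << 1)

typedef uint64_t pte_t;

/* Refill a write-protected or migrated pte read-only: write permission
 * is deliberately left for the next write fault to restore. */
static pte_t fill_pte_readonly(uint64_t pfn_bits)
{
	return (pfn_bits | PTE_VALID) & ~PTE_WRITE;
}

int main(void)
{
	printf("writable=%d\n", !!(fill_pte_readonly(0x1000) & PTE_WRITE));
	return 0;
}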
556 * @write: Whether the fault was due to a write.
564 * -EFAULT on failure due to absent GPA mapping or write to
567 static int kvm_map_page_fast(struct kvm_vcpu *vcpu, unsigned long gpa, bool write)
591 if (write && !kvm_pte_dirty(new)) {
599 * Do not set write permission when dirty logging is
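
Lines 556-599 cover kvm_map_page_fast(), the path that resolves a fault when a pte already exists, without falling back to the slow route. Below is a condensed model of its write handling; it omits the huge-page case noted at line 599, where write permission is withheld under dirty logging, and all bit values are made up:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PTE_VALID (1ULL << 0)   /* invented bit positions */
#define PTE_WRITE (1ULL << 1)
#define PTE_DIRTY (1ULL << 3)

typedef uint64_t pte_t;

/* Fast-path model: succeed only when the existing pte already permits
 * the access, so no allocation or slot lookup is needed.  Returns 0 on
 * success and -1 when the slow path must run. */
static int map_page_fast(pte_t *ptep, bool write)
{
	pte_t new = *ptep;

	if (!(new & PTE_VALID))
		return -1;                  /* not mapped yet */

	if (write && !(new & PTE_DIRTY)) {
		if (!(new & PTE_WRITE))
			return -1;          /* write-protected pte */
		new |= PTE_DIRTY;           /* first write: record dirty */
	}

	*ptep = new;
	return 0;
}

int main(void)
{
	pte_t pte = PTE_VALID | PTE_WRITE;

	printf("fast=%d dirty=%d\n", map_page_fast(&pte, true),
	       !!(pte & PTE_DIRTY));
	return 0;
}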
638 unsigned long hva, bool write)
643 if (kvm_slot_dirty_track_enabled(memslot) && write)
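
Lines 638-643 are from fault_supports_huge_mapping(): a write fault on a slot with dirty tracking enabled must not be backed by a huge mapping, because dirty state is logged at base-page granularity. A reduced sketch of that predicate; the real function also validates hva alignment, which is left out here, and the slot type is a stand-in:

#include <stdbool.h>
#include <stdio.h>

struct memslot {
	bool dirty_track_enabled;   /* stands in for kvm_slot_dirty_track_enabled() */
};

/* A write under dirty tracking must map at base-page granularity,
 * since dirty state is recorded per base page. */
static bool fault_supports_huge_mapping(const struct memslot *slot, bool write)
{
	return !(slot->dirty_track_enabled && write);
}

int main(void)
{
	struct memslot slot = { .dirty_track_enabled = true };

	printf("huge ok: %d\n", fault_supports_huge_mapping(&slot, true));
	return 0;
}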
785 * @write: Whether the fault was due to a write.
796 * -EFAULT if there is no memory region at @gpa or a write was
800 static int kvm_map_page(struct kvm_vcpu *vcpu, unsigned long gpa, bool write)
814 err = kvm_map_page_fast(vcpu, gpa, write);
820 if (kvm_is_error_hva(hva) || (write && !writeable)) {
850 pfn = gfn_to_pfn_prot(kvm, gfn, write, &writeable);
887 if (write)
893 if (!fault_supports_huge_mapping(memslot, hva, write)) {
915 } else if (kvm_pte_huge(*ptep) && write)
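
Lines 785-915 are the slow path, kvm_map_page(): it first retries the fast path (814), resolves the hva and rejects a write to a read-only region (820), pins the pfn via gfn_to_pfn_prot (850), marks the gfn dirty on a write (887), and drops to a base-page mapping when a huge one is not supported (893, 915). kvm_handle_mm_fault() at lines 934-938 is the thin entry point wrapping this. A self-contained skeleton of the early checks; gfn_to_hva and is_error_hva below are toy stand-ins, not the KVM API:

#include <errno.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Toy stand-in: resolve a guest frame to a host virtual address and
 * report whether the backing region is writable. */
static uint64_t gfn_to_hva(uint64_t gfn, bool *writeable)
{
	*writeable = true;          /* pretend the slot is writable */
	return gfn << 12;           /* fake host virtual address */
}

static bool is_error_hva(uint64_t hva)
{
	return hva == (uint64_t)-1;
}

/* Slow-path skeleton: resolve the host mapping, reject a write to a
 * read-only region with -EFAULT, then log the dirty gfn on a write. */
static int map_page(uint64_t gpa, bool write)
{
	bool writeable;
	uint64_t gfn = gpa >> 12;
	uint64_t hva = gfn_to_hva(gfn, &writeable);

	if (is_error_hva(hva) || (write && !writeable))
		return -EFAULT;

	if (write)
		printf("mark gfn %#llx dirty\n", (unsigned long long)gfn);
	return 0;
}

int main(void)
{
	return map_page(0x4000, true) ? 1 : 0;
}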
934 int kvm_handle_mm_fault(struct kvm_vcpu *vcpu, unsigned long gpa, bool write)
938 ret = kvm_map_page(vcpu, gpa, write);