Lines Matching refs:ghc

3489 struct gfn_to_hva_cache *ghc,
3498 /* Update ghc->generation before performing any error checks. */
3499 ghc->generation = slots->generation;
3502 ghc->hva = KVM_HVA_ERR_BAD;
3511 ghc->memslot = __gfn_to_memslot(slots, start_gfn);
3512 ghc->hva = gfn_to_hva_many(ghc->memslot, start_gfn,
3514 if (kvm_is_error_hva(ghc->hva))
3520 ghc->hva += offset;
3522 ghc->memslot = NULL;
3524 ghc->gpa = gpa;
3525 ghc->len = len;
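
The matches at 3489-3525 are the body of __kvm_gfn_to_hva_cache_init() in virt/kvm/kvm_main.c: it stamps the cache with the current memslot generation, resolves the gpa/len window to a host virtual address, and clears ghc->memslot when the window crosses a page boundary so later accesses fall back to the slow path. The ghc-> fields written above belong to the cache structure; a sketch for orientation, with field names taken from the accesses in the listing and comments mine:

struct gfn_to_hva_cache {
	u64 generation;			 /* memslot generation at (re)init */
	gpa_t gpa;			 /* guest-physical base of the window */
	unsigned long hva;		 /* cached host virtual address */
	unsigned long len;		 /* length of the cached window */
	struct kvm_memory_slot *memslot; /* NULL: cross-page, use slow path */
};
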
3529 int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
3533 return __kvm_gfn_to_hva_cache_init(slots, ghc, gpa, len);
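
kvm_gfn_to_hva_cache_init() (3529-3533) is the public entry point: it snapshots kvm_memslots(kvm) and delegates to the helper above. A hedged usage sketch follows; the record layout, the gpa source, and the vcpu->arch.rec_cache field are all illustrative, not taken from the listing:

/*
 * Hypothetical caller: the guest registers the guest-physical address
 * of a small shared record, e.g. through an MSR write.  Resolving the
 * translation once up front lets the cached accessors below skip the
 * full gfn lookup on every access.
 */
struct shared_rec {
	u64 seq;
	u64 flags;
};

if (kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.rec_cache,
			      gpa, sizeof(struct shared_rec)))
	return 1;	/* unreachable GPA: reject the registration */
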
3537 int kvm_write_guest_offset_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
3543 gpa_t gpa = ghc->gpa + offset;
3545 if (WARN_ON_ONCE(len + offset > ghc->len))
3548 if (slots->generation != ghc->generation) {
3549 if (__kvm_gfn_to_hva_cache_init(slots, ghc, ghc->gpa, ghc->len))
3553 if (kvm_is_error_hva(ghc->hva))
3556 if (unlikely(!ghc->memslot))
3559 r = __copy_to_user((void __user *)ghc->hva + offset, data, len);
3562 mark_page_dirty_in_slot(kvm, ghc->memslot, gpa >> PAGE_SHIFT);
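
The write path (3537-3562) shows the cache discipline in full: bound the access against ghc->len, re-run __kvm_gfn_to_hva_cache_init() if the memslot generation has moved, divert to the uncached slow path when ghc->memslot is NULL, and otherwise __copy_to_user() straight to the cached hva and mark the page dirty. A matching hedged call, updating one field of the hypothetical record cached above:

u64 flags = 1;

/*
 * offsetof()/sizeof() keep offset + len inside the ghc->len bound
 * that the WARN_ON_ONCE at 3545 enforces.
 */
if (kvm_write_guest_offset_cached(vcpu->kvm, &vcpu->arch.rec_cache,
				  &flags,
				  offsetof(struct shared_rec, flags),
				  sizeof(flags)))
	return -EFAULT;	/* memslots changed and re-init failed */
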
3568 int kvm_write_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
3571 return kvm_write_guest_offset_cached(kvm, ghc, data, 0, len);
3575 int kvm_read_guest_offset_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
3581 gpa_t gpa = ghc->gpa + offset;
3583 if (WARN_ON_ONCE(len + offset > ghc->len))
3586 if (slots->generation != ghc->generation) {
3587 if (__kvm_gfn_to_hva_cache_init(slots, ghc, ghc->gpa, ghc->len))
3591 if (kvm_is_error_hva(ghc->hva))
3594 if (unlikely(!ghc->memslot))
3597 r = __copy_from_user(data, (void __user *)ghc->hva + offset, len);
3605 int kvm_read_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
3608 return kvm_read_guest_offset_cached(kvm, ghc, data, 0, len);
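
kvm_read_guest_offset_cached() (3575-3597) mirrors the write path minus the mark_page_dirty_in_slot() step, and kvm_write_guest_cached()/kvm_read_guest_cached() (3568, 3605) are offset-0 conveniences over the two workhorses. Reading the whole hypothetical record back through the same cache, again as a sketch:

struct shared_rec rec;

if (kvm_read_guest_cached(vcpu->kvm, &vcpu->arch.rec_cache,
			  &rec, sizeof(rec)))
	return -EFAULT;	/* cache stale and could not be re-initialized */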