Lines matching defs:gpc (struct gfn_to_pfn_cache code in KVM's virt/kvm/pfncache.c)

28 	struct gfn_to_pfn_cache *gpc;
31 list_for_each_entry(gpc, &kvm->gpc_list, list) {
32 read_lock_irq(&gpc->lock);
35 if (gpc->valid && !is_error_noslot_pfn(gpc->pfn) &&
36 gpc->uhva >= start && gpc->uhva < end) {
37 read_unlock_irq(&gpc->lock);
47 write_lock_irq(&gpc->lock);
48 if (gpc->valid && !is_error_noslot_pfn(gpc->pfn) &&
49 gpc->uhva >= start && gpc->uhva < end)
50 gpc->valid = false;
51 write_unlock_irq(&gpc->lock);
55 read_unlock_irq(&gpc->lock);
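
The fragments above come from the mmu_notifier invalidation path, which walks every cache on kvm->gpc_list and invalidates those whose host virtual address falls inside the range being invalidated. Below is a condensed sketch of that walk, paraphrased from the matched lines rather than copied verbatim from the kernel: the read lock is enough for the cheap range check, but clearing gpc->valid needs the write lock, and the check must be repeated after the lock upgrade because the cache may have been refreshed or deactivated while no lock was held. Whatever lock protects kvm->gpc_list in the real caller is not among the matched lines and is omitted here; the sketch assumes pfncache.c's internal context.

/*
 * Condensed sketch of the invalidation walk implied by the fragments
 * above (a paraphrase, not the verbatim kernel function).
 */
static void invalidate_range_sketch(struct kvm *kvm, unsigned long start,
                                    unsigned long end)
{
        struct gfn_to_pfn_cache *gpc;

        list_for_each_entry(gpc, &kvm->gpc_list, list) {
                read_lock_irq(&gpc->lock);

                if (gpc->valid && !is_error_noslot_pfn(gpc->pfn) &&
                    gpc->uhva >= start && gpc->uhva < end) {
                        read_unlock_irq(&gpc->lock);

                        /* Re-check under the write lock before clearing. */
                        write_lock_irq(&gpc->lock);
                        if (gpc->valid && !is_error_noslot_pfn(gpc->pfn) &&
                            gpc->uhva >= start && gpc->uhva < end)
                                gpc->valid = false;
                        write_unlock_irq(&gpc->lock);
                } else {
                        read_unlock_irq(&gpc->lock);
                }
        }
}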
73 bool kvm_gpc_check(struct gfn_to_pfn_cache *gpc, unsigned long len)
75 struct kvm_memslots *slots = kvm_memslots(gpc->kvm);
77 if (!gpc->active)
84 if (!kvm_is_error_gpa(gpc->gpa) && gpc->generation != slots->generation)
87 if (kvm_is_error_hva(gpc->uhva))
90 if (!kvm_gpc_is_valid_len(gpc->gpa, gpc->uhva, len))
93 if (!gpc->valid)
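
kvm_gpc_check() only reports whether the cache is currently usable for a read or write of the given length: the cache must be active and valid, its memslot generation must still match when it is bound by gpa, and the uhva and length must be sane. The sketch below shows the usual consumer pattern, modeled on how other KVM code drives this API rather than taken from the matched lines: check under the read lock and, if the cache is stale, drop the lock, refresh, and retry. The destination buffer and length are placeholders.

#include <linux/kvm_host.h>

/*
 * Sketch of the check/refresh retry loop a consumer typically uses.
 * kvm_gpc_check() must be called with gpc->lock held.
 */
static int read_through_gpc_sketch(struct gfn_to_pfn_cache *gpc,
                                   void *dst, unsigned long len)
{
        unsigned long flags;

        read_lock_irqsave(&gpc->lock, flags);
        while (!kvm_gpc_check(gpc, len)) {
                read_unlock_irqrestore(&gpc->lock, flags);

                /* Refresh can sleep, so it runs without the lock held. */
                if (kvm_gpc_refresh(gpc, len))
                        return -EFAULT;

                read_lock_irqsave(&gpc->lock, flags);
        }

        memcpy(dst, gpc->khva, len);
        read_unlock_irqrestore(&gpc->lock, flags);
        return 0;
}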
137 * is not protected by gpc->lock. It is guaranteed to
138 * be elevated before the mmu_notifier acquires gpc->lock, and
155 static kvm_pfn_t hva_to_pfn_retry(struct gfn_to_pfn_cache *gpc)
158 void *old_khva = (void *)PAGE_ALIGN_DOWN((uintptr_t)gpc->khva);
163 lockdep_assert_held(&gpc->refresh_lock);
165 lockdep_assert_held_write(&gpc->lock);
168 * Invalidate the cache prior to dropping gpc->lock, the gpa=>uhva
172 gpc->valid = false;
175 mmu_seq = gpc->kvm->mmu_invalidate_seq;
178 write_unlock_irq(&gpc->lock);
201 new_pfn = hva_to_pfn(gpc->uhva, false, false, NULL, true, NULL);
208 * too must be done outside of gpc->lock!
210 if (new_pfn == gpc->pfn)
220 write_lock_irq(&gpc->lock);
226 WARN_ON_ONCE(gpc->valid);
227 } while (mmu_notifier_retry_cache(gpc->kvm, mmu_seq));
229 gpc->valid = true;
230 gpc->pfn = new_pfn;
231 gpc->khva = new_khva + offset_in_page(gpc->uhva);
243 write_lock_irq(&gpc->lock);
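
hva_to_pfn_retry() is where the cache races against mmu_notifier invalidations: the cache is marked invalid before the write lock is dropped, mmu_invalidate_seq is snapshotted, the hva is resolved to a pfn with the lock released (the lookup can fault pages in and sleep), and the whole thing is redone if an invalidation ran in the unlocked window. The sketch below restates only that loop, built from the matched lines; mapping the new pfn into new_khva and the full error handling are omitted, and it assumes pfncache.c's internal helpers (hva_to_pfn, mmu_notifier_retry_cache) with the signatures shown above.

/*
 * Condensed sketch of the seq-based retry loop. Called with
 * gpc->refresh_lock held and gpc->lock held for write; the write lock
 * is held again on return, on both success and error paths.
 */
static kvm_pfn_t hva_to_pfn_retry_sketch(struct gfn_to_pfn_cache *gpc)
{
        kvm_pfn_t new_pfn;
        unsigned long mmu_seq;

        /* Invalidate before dropping the lock so readers cannot race. */
        gpc->valid = false;

        do {
                mmu_seq = gpc->kvm->mmu_invalidate_seq;
                smp_rmb();

                /* hva_to_pfn() can sleep, so it runs without gpc->lock. */
                write_unlock_irq(&gpc->lock);

                new_pfn = hva_to_pfn(gpc->uhva, false, false, NULL, true, NULL);
                if (is_error_noslot_pfn(new_pfn))
                        break;

                write_lock_irq(&gpc->lock);
                WARN_ON_ONCE(gpc->valid);

                /* Redo the lookup if an invalidation raced while unlocked. */
        } while (mmu_notifier_retry_cache(gpc->kvm, mmu_seq));

        if (is_error_noslot_pfn(new_pfn)) {
                /* Error path re-takes the lock the caller expects held. */
                write_lock_irq(&gpc->lock);
                return new_pfn;
        }

        gpc->valid = true;
        gpc->pfn = new_pfn;
        return new_pfn;
}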
248 static int __kvm_gpc_refresh(struct gfn_to_pfn_cache *gpc, gpa_t gpa, unsigned long uhva)
262 lockdep_assert_held(&gpc->refresh_lock);
264 write_lock_irq(&gpc->lock);
266 if (!gpc->active) {
271 old_pfn = gpc->pfn;
272 old_khva = (void *)PAGE_ALIGN_DOWN((uintptr_t)gpc->khva);
273 old_uhva = PAGE_ALIGN_DOWN(gpc->uhva);
278 gpc->gpa = INVALID_GPA;
279 gpc->memslot = NULL;
280 gpc->uhva = PAGE_ALIGN_DOWN(uhva);
282 if (gpc->uhva != old_uhva)
285 struct kvm_memslots *slots = kvm_memslots(gpc->kvm);
289 if (gpc->gpa != gpa || gpc->generation != slots->generation ||
290 kvm_is_error_hva(gpc->uhva)) {
293 gpc->gpa = gpa;
294 gpc->generation = slots->generation;
295 gpc->memslot = __gfn_to_memslot(slots, gfn);
296 gpc->uhva = gfn_to_hva_memslot(gpc->memslot, gfn);
298 if (kvm_is_error_hva(gpc->uhva)) {
307 if (gpc->uhva != old_uhva)
310 gpc->uhva = old_uhva;
315 gpc->uhva += page_offset;
321 if (!gpc->valid || hva_change) {
322 ret = hva_to_pfn_retry(gpc);
326 * But do update gpc->khva because the offset within the page
329 gpc->khva = old_khva + page_offset;
341 gpc->valid = false;
342 gpc->pfn = KVM_PFN_ERR_FAULT;
343 gpc->khva = NULL;
347 unmap_old = (old_pfn != gpc->pfn);
350 write_unlock_irq(&gpc->lock);
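
The tail of __kvm_gpc_refresh() captures the old pfn and khva and decides whether the old mapping became stale while gpc->lock is still held, but it only tears the mapping down after the lock is dropped: the unmap path can sleep, and gpc->lock is taken with IRQs disabled. A minimal sketch of that ordering follows, assuming pfncache.c's internal context; gpc_unmap_placeholder() is a hypothetical stand-in for the file's real unmap helper, which is not among the matched lines.

/* Hypothetical stand-in for pfncache.c's real unmap helper. */
static void gpc_unmap_placeholder(kvm_pfn_t pfn, void *khva);

/*
 * Sketch of the refresh tail: decide under the lock, unmap after it.
 * Called with gpc->lock held for write; released here.
 */
static void refresh_tail_sketch(struct gfn_to_pfn_cache *gpc,
                                kvm_pfn_t old_pfn, void *old_khva)
{
        bool unmap_old = (old_pfn != gpc->pfn);

        write_unlock_irq(&gpc->lock);

        if (unmap_old)
                gpc_unmap_placeholder(old_pfn, old_khva);
}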
358 int kvm_gpc_refresh(struct gfn_to_pfn_cache *gpc, unsigned long len)
362 guard(mutex)(&gpc->refresh_lock);
364 if (!kvm_gpc_is_valid_len(gpc->gpa, gpc->uhva, len))
372 uhva = kvm_is_error_gpa(gpc->gpa) ? gpc->uhva : KVM_HVA_ERR_BAD;
374 return __kvm_gpc_refresh(gpc, gpc->gpa, uhva);
377 void kvm_gpc_init(struct gfn_to_pfn_cache *gpc, struct kvm *kvm)
379 rwlock_init(&gpc->lock);
380 mutex_init(&gpc->refresh_lock);
382 gpc->kvm = kvm;
383 gpc->pfn = KVM_PFN_ERR_FAULT;
384 gpc->gpa = INVALID_GPA;
385 gpc->uhva = KVM_HVA_ERR_BAD;
386 gpc->active = gpc->valid = false;
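
kvm_gpc_init() only initializes the locks and loads the error sentinels (KVM_PFN_ERR_FAULT, INVALID_GPA, KVM_HVA_ERR_BAD); no guest memory is touched and nothing is mapped. That makes it safe to call unconditionally at setup time, with activation deferred until the guest actually supplies an address. The sketch below illustrates that idiom; struct my_vcpu_extra is a hypothetical container, not a KVM structure.

#include <linux/kvm_host.h>

/* Hypothetical per-vCPU container that embeds a pfn cache. */
struct my_vcpu_extra {
        struct gfn_to_pfn_cache info_cache;
};

static void my_vcpu_create_sketch(struct kvm *kvm, struct my_vcpu_extra *x)
{
        /* No mapping exists yet: pfn/gpa/uhva hold their error values. */
        kvm_gpc_init(&x->info_cache, kvm);
}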
389 static int __kvm_gpc_activate(struct gfn_to_pfn_cache *gpc, gpa_t gpa, unsigned long uhva,
392 struct kvm *kvm = gpc->kvm;
397 guard(mutex)(&gpc->refresh_lock);
399 if (!gpc->active) {
400 if (KVM_BUG_ON(gpc->valid, kvm))
404 list_add(&gpc->list, &kvm->gpc_list);
412 write_lock_irq(&gpc->lock);
413 gpc->active = true;
414 write_unlock_irq(&gpc->lock);
416 return __kvm_gpc_refresh(gpc, gpa, uhva);
419 int kvm_gpc_activate(struct gfn_to_pfn_cache *gpc, gpa_t gpa, unsigned long len)
428 return __kvm_gpc_activate(gpc, gpa, KVM_HVA_ERR_BAD, len);
431 int kvm_gpc_activate_hva(struct gfn_to_pfn_cache *gpc, unsigned long uhva, unsigned long len)
433 return __kvm_gpc_activate(gpc, INVALID_GPA, uhva, len);
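
As the two wrappers above show, a cache can be activated in one of two flavors: bound to a guest physical address (kvm_gpc_activate, which passes KVM_HVA_ERR_BAD so the uhva is derived from the memslots), or bound directly to a host virtual address (kvm_gpc_activate_hva, which passes INVALID_GPA and bypasses the memslots). The sketch below shows how a caller would invoke each; both caches are assumed to have been kvm_gpc_init()'d already, and the addresses and sizeof(u64) length are placeholders.

#include <linux/kvm_host.h>

/*
 * Sketch of the two activation flavors. A gpa-bound cache is
 * re-resolved whenever the memslots change; an hva-bound cache is not.
 */
static int activate_both_flavors_sketch(struct gfn_to_pfn_cache *by_gpa,
                                        struct gfn_to_pfn_cache *by_hva,
                                        gpa_t gpa, unsigned long uhva)
{
        int r;

        /* Bound by guest physical address (uhva derived from memslots). */
        r = kvm_gpc_activate(by_gpa, gpa, sizeof(u64));
        if (r)
                return r;

        /* Bound directly by host virtual address (gpa stays INVALID_GPA). */
        return kvm_gpc_activate_hva(by_hva, uhva, sizeof(u64));
}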
436 void kvm_gpc_deactivate(struct gfn_to_pfn_cache *gpc)
438 struct kvm *kvm = gpc->kvm;
442 guard(mutex)(&gpc->refresh_lock);
444 if (gpc->active) {
448 * until gpc->lock is dropped and refresh is guaranteed to fail.
450 write_lock_irq(&gpc->lock);
451 gpc->active = false;
452 gpc->valid = false;
460 old_khva = gpc->khva - offset_in_page(gpc->khva);
461 gpc->khva = NULL;
463 old_pfn = gpc->pfn;
464 gpc->pfn = KVM_PFN_ERR_FAULT;
465 write_unlock_irq(&gpc->lock);
468 list_del(&gpc->list);
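
Deactivation is the mirror of activation: active and valid are cleared under the write lock so a concurrent refresh is guaranteed to fail, the pfn and khva are reset to their error values, and only after the lock is dropped is the old mapping torn down and the cache removed from kvm->gpc_list; calling it on a cache that was never activated is a no-op. Tying the whole listing together, here is an end-to-end lifecycle sketch. The caller owns the cache (typically embedded in a vCPU or VM structure), gpa and the u64 payload are placeholders, and a real user would loop on check/refresh as in the earlier sketch instead of reading only once.

#include <linux/kvm_host.h>

/*
 * Lifecycle sketch: init, activate, read through the kernel mapping
 * under the read lock, then deactivate.
 */
static int gpc_lifecycle_sketch(struct kvm *kvm,
                                struct gfn_to_pfn_cache *gpc, gpa_t gpa)
{
        unsigned long flags;
        u64 val = 0;
        int r;

        kvm_gpc_init(gpc, kvm);

        r = kvm_gpc_activate(gpc, gpa, sizeof(val));
        if (r)
                return r;

        read_lock_irqsave(&gpc->lock, flags);
        if (kvm_gpc_check(gpc, sizeof(val)))
                memcpy(&val, gpc->khva, sizeof(val));
        read_unlock_irqrestore(&gpc->lock, flags);

        /* Unmaps, resets the fields, and drops the cache from gpc_list. */
        kvm_gpc_deactivate(gpc);
        return 0;
}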