/linux-master/arch/x86/kvm/

mmu.h
    284  static inline gfn_t gfn_to_index(gfn_t gfn, gfn_t base_gfn, int level)  [argument]
    288      (base_gfn >> KVM_HPAGE_GFN_SHIFT(level));
    295      return gfn_to_index(slot->base_gfn + npages - 1,
    296          slot->base_gfn, level) + 1;
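The gfn_to_index() hits above reduce a gfn to an index relative to the slot's base_gfn at a given page-table level, and the call at line 295 turns the slot's last gfn into a count of level-sized pages. A minimal sketch of the same arithmetic, with simplified stand-in types and a stand-in HPAGE_GFN_SHIFT in place of the kernel's KVM_HPAGE_GFN_SHIFT:

    typedef unsigned long long gfn_t;

    /* 9 gfn bits per paging level: 4K at level 1, 2M at level 2, 1G at level 3. */
    #define HPAGE_GFN_SHIFT(level)  (((level) - 1) * 9)

    static inline gfn_t gfn_to_index(gfn_t gfn, gfn_t base_gfn, int level)
    {
            /* Index of the level-sized page containing gfn, counted from base_gfn. */
            return (gfn >> HPAGE_GFN_SHIFT(level)) -
                   (base_gfn >> HPAGE_GFN_SHIFT(level));
    }

    /* Number of level-sized pages needed to cover a slot of npages 4K frames. */
    static inline unsigned long lpages_in_slot(gfn_t base_gfn,
                                               unsigned long npages, int level)
    {
            return gfn_to_index(base_gfn + npages - 1, base_gfn, level) + 1;
    }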
/linux-master/arch/riscv/kvm/

mmu.c
    337  phys_addr_t start = memslot->base_gfn << PAGE_SHIFT;
    338  phys_addr_t end = (memslot->base_gfn + memslot->npages) << PAGE_SHIFT;
    398  phys_addr_t base_gfn = slot->base_gfn + gfn_offset;  [local]
    399  phys_addr_t start = (base_gfn + __ffs(mask)) << PAGE_SHIFT;
    400  phys_addr_t end = (base_gfn + __fls(mask) + 1) << PAGE_SHIFT;
    425  gpa_t gpa = slot->base_gfn << PAGE_SHIFT;
    465  if ((new->base_gfn + new->npages) >=
    472  base_gpa = new->base_gfn << PAGE_SHIFT;
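Most of these hits shift base_gfn left by PAGE_SHIFT to convert the memslot's gfn range into a guest-physical address range. A small sketch of that conversion, using stand-in types rather than the kernel's:

    #include <stdint.h>

    #define PAGE_SHIFT  12
    typedef uint64_t gfn_t;
    typedef uint64_t phys_addr_t;

    struct memslot {
            gfn_t base_gfn;      /* first guest frame number the slot maps */
            uint64_t npages;     /* number of 4K pages in the slot */
    };

    /* Guest-physical range [start, end) covered by the slot. */
    static void slot_gpa_range(const struct memslot *memslot,
                               phys_addr_t *start, phys_addr_t *end)
    {
            *start = memslot->base_gfn << PAGE_SHIFT;
            *end = (memslot->base_gfn + memslot->npages) << PAGE_SHIFT;
    }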
/linux-master/arch/x86/kvm/mmu/

page_track.c
    80   index = gfn_to_index(gfn, slot->base_gfn, PG_LEVEL_4K);
    147  index = gfn_to_index(gfn, slot->base_gfn, PG_LEVEL_4K);
    308  n->track_remove_region(slot->base_gfn, slot->npages, n);
tdp_mmu.c
    346   gfn_t base_gfn = sp->gfn;  [local]
    355   gfn_t gfn = base_gfn + i * KVM_PAGES_PER_HPAGE(level);
    1359  spte_set |= wrprot_gfn_range(kvm, root, slot->base_gfn,
    1360      slot->base_gfn + slot->npages, min_level);
    1607  spte_set |= clear_dirty_gfn_range(kvm, root, slot->base_gfn,
    1608      slot->base_gfn + slot->npages);
    1683  gfn_t start = slot->base_gfn;
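Lines 346 and 355 show how the TDP MMU derives the gfn mapped by each entry of a page-table page: the page's base_gfn plus the entry index times the number of 4K frames an entry at that level spans. The slot-wide calls at 1359 and 1607 simply pass [base_gfn, base_gfn + npages). A sketch of the per-entry stride, using x86's 512-entries-per-level layout as an assumption and a stand-in for KVM_PAGES_PER_HPAGE():

    typedef unsigned long long gfn_t;

    #define ENTRIES_PER_PAGE        512                     /* 9 index bits per level */
    #define PAGES_PER_HPAGE(level)  (1ULL << (((level) - 1) * 9))

    static void walk_child_gfns(gfn_t base_gfn, int level)
    {
            for (int i = 0; i < ENTRIES_PER_PAGE; i++) {
                    /* gfn mapped by entry i of a table whose entries sit at 'level' */
                    gfn_t gfn = base_gfn + (gfn_t)i * PAGES_PER_HPAGE(level);

                    (void)gfn;      /* ... process the child entry for 'gfn' ... */
            }
    }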
paging_tmpl.h
    634  gfn_t base_gfn = fault->gfn;  [local]
    636  WARN_ON_ONCE(gw->gfn != base_gfn);
    731  base_gfn = gfn_round_for_level(fault->gfn, it.level);
    737  sp = kvm_mmu_get_child_sp(vcpu, it.sptep, base_gfn,
    752      base_gfn, fault->pfn, fault);
mmu.c
    790   idx = gfn_to_index(gfn, slot->base_gfn, level);
    1091  idx = gfn_to_index(gfn, slot->base_gfn, level);
    1315      slot->base_gfn + gfn_offset, mask, true);
    1321  rmap_head = gfn_to_rmap(slot->base_gfn + gfn_offset + __ffs(mask),
    1348      slot->base_gfn + gfn_offset, mask, false);
    1354  rmap_head = gfn_to_rmap(slot->base_gfn + gfn_offset + __ffs(mask),
    1382  * The gfn_offset is guaranteed to be aligned to 64, but the base_gfn
    1387  gfn_t start = slot->base_gfn + gfn_offset + __ffs(mask);
    1388  gfn_t end = slot->base_gfn + gfn_offset + __fls(mask);
    3242  gfn_t base_gfn  [local]  [all...]
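The dirty-logging hits (1315-1388) all decode the same calling convention: the caller hands over a 64-page chunk as (gfn_offset, mask), where gfn_offset is relative to slot->base_gfn and bit i of mask marks page base_gfn + gfn_offset + i. A sketch of the first/last-dirty-gfn arithmetic, with GCC builtins standing in for __ffs()/__fls() and stand-in types (mask must be non-zero):

    #include <stdint.h>

    typedef uint64_t gfn_t;

    struct memslot {
            gfn_t base_gfn;
            uint64_t npages;
    };

    /* Lowest gfn whose bit is set in the 64-page dirty mask. */
    static gfn_t first_dirty_gfn(const struct memslot *slot,
                                 gfn_t gfn_offset, uint64_t mask)
    {
            return slot->base_gfn + gfn_offset + __builtin_ctzll(mask);
    }

    /* Highest gfn whose bit is set in the 64-page dirty mask. */
    static gfn_t last_dirty_gfn(const struct memslot *slot,
                                gfn_t gfn_offset, uint64_t mask)
    {
            return slot->base_gfn + gfn_offset + (63 - __builtin_clzll(mask));
    }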
/linux-master/arch/loongarch/kvm/

mmu.c
    365  gfn_t base_gfn = slot->base_gfn + gfn_offset;  [local]
    366  gfn_t start = base_gfn + __ffs(mask);
    367  gfn_t end = base_gfn + __fls(mask) + 1;
    372  ctx.gfn = base_gfn;
    391  if ((new->base_gfn + new->npages) > (kvm->arch.gpa_size >> PAGE_SHIFT))
    396  gpa_start = new->base_gfn << PAGE_SHIFT;
    415  * memslot->base_gfn << PAGE_SIZE:
    462  needs_flush = kvm_mkclean_gpa_pt(kvm, new->base_gfn,
    463      new->base_gfn  [all...]
/linux-master/arch/powerpc/kvm/

trace_hv.h
    305  __field(u64, base_gfn)
    317  __entry->base_gfn = memslot ? memslot->base_gfn : -1UL;
    325  __entry->base_gfn, __entry->slot_flags)
book3s_64_mmu_hv.c
    592   if (gfn_base < memslot->base_gfn)
    706   rmap = &memslot->arch.rmap[gfn_base - memslot->base_gfn];
    828   rmapp = &memslot->arch.rmap[gfn - memslot->base_gfn];
    879   gfn = memslot->base_gfn;
    908   rmapp = &memslot->arch.rmap[gfn - memslot->base_gfn];
    979   rmapp = &memslot->arch.rmap[gfn - memslot->base_gfn];
    1124  if (gfn < memslot->base_gfn ||
    1125      gfn >= memslot->base_gfn + memslot->npages)
    1130  __set_bit_le(gfn - memslot->base_gfn, map);
    1205  set_bit_le(gfn - memslot->base_gfn, memslo  [all...]
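These powerpc HV hits index per-slot arrays (the arch rmap and the dirty bitmap) by the gfn's offset from base_gfn, after checking that the gfn actually falls inside the slot. A minimal sketch of that convention, with a stand-in slot struct in place of memslot->arch.rmap:

    #include <stdint.h>
    #include <stddef.h>

    typedef uint64_t gfn_t;

    struct memslot {
            gfn_t base_gfn;
            uint64_t npages;
            unsigned long *rmap;    /* one reverse-map entry per page in the slot */
    };

    static unsigned long *gfn_to_rmap_entry(struct memslot *memslot, gfn_t gfn)
    {
            /* Bounds check mirrors the test at lines 1124-1125. */
            if (gfn < memslot->base_gfn ||
                gfn >= memslot->base_gfn + memslot->npages)
                    return NULL;

            return &memslot->rmap[gfn - memslot->base_gfn];
    }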
book3s_hv_uvmem.c
    261  p->base_pfn = slot->base_gfn;
    279  if (p->base_pfn == slot->base_gfn) {
    394  unsigned long gfn = memslot->base_gfn;
    450      memslot->base_gfn << PAGE_SHIFT,
    624  gfn = slot->base_gfn;
    797  unsigned long gfn = memslot->base_gfn;
book3s_64_mmu_radix.c
    1069  rmapp = &memslot->arch.rmap[gfn - memslot->base_gfn];
    1101  unsigned long gfn = memslot->base_gfn + pagenum;
    1144  rmapp = &memslot->arch.rmap[gfn - memslot->base_gfn];
    1192  gpa = memslot->base_gfn << PAGE_SHIFT;
book3s_hv_rm_mmu.c
    104  gfn -= memslot->base_gfn;
    142  rmap = real_vmalloc_addr(&memslot->arch.rmap[gfn - memslot->base_gfn]);
    242  slot_fn = gfn - memslot->base_gfn;
e500_mmu_host.c
    381  slot_start = pfn - (gfn - slot->base_gfn);
book3s_hv_nested.c
    1038  gfn = (gpa >> PAGE_SHIFT) - memslot->base_gfn;
    1673  rmapp = &memslot->arch.rmap[gfn - memslot->base_gfn];
book3s_pr.c
    1889  ga = memslot->base_gfn << PAGE_SHIFT;
/linux-master/arch/arm64/kvm/

mmu.c
    339   phys_addr_t addr = memslot->base_gfn << PAGE_SHIFT;
    947   phys_addr_t addr = memslot->base_gfn << PAGE_SHIFT;
    1137  start = memslot->base_gfn << PAGE_SHIFT;
    1138  end = (memslot->base_gfn + memslot->npages) << PAGE_SHIFT;
    1166  start = memslot->base_gfn << PAGE_SHIFT;
    1167  end = (memslot->base_gfn + memslot->npages) << PAGE_SHIFT;
    1189  phys_addr_t base_gfn = slot->base_gfn + gfn_offset;  [local]
    1190  phys_addr_t start = (base_gfn + __ffs(mask)) << PAGE_SHIFT;
    1191  phys_addr_t end = (base_gfn  [all...]
/linux-master/virt/kvm/

guest_memfd.c
    63   .start = slot->base_gfn + max(pgoff, start) - pgoff,
    64   .end = slot->base_gfn + min(pgoff + slot->npages, end) - pgoff,
    488  pgoff_t index = gfn - slot->base_gfn + slot->gmem.pgoff;
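guest_memfd binds the slot's gfn range to a range of file pages beginning at the slot's gmem.pgoff, so a gfn and its backing file index differ by a constant, which is what line 488 and the range clamping at lines 63-64 exploit. A sketch of the two directions of that mapping, with a flattened pgoff field standing in for slot->gmem.pgoff:

    #include <stdint.h>

    typedef uint64_t gfn_t;
    typedef uint64_t pgoff_t;

    struct memslot {
            gfn_t base_gfn;
            uint64_t npages;
            pgoff_t pgoff;          /* file page offset backing base_gfn */
    };

    /* File index of the page backing 'gfn' (mirrors line 488). */
    static pgoff_t gfn_to_file_index(const struct memslot *slot, gfn_t gfn)
    {
            return gfn - slot->base_gfn + slot->pgoff;
    }

    /* Guest frame backed by file page 'index'. */
    static gfn_t file_index_to_gfn(const struct memslot *slot, pgoff_t index)
    {
            return slot->base_gfn + index - slot->pgoff;
    }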
kvm_main.c
    391   kvm_flush_remote_tlbs_range(kvm, memslot->base_gfn, memslot->npages);
    1503  if (slot->base_gfn < tmp->base_gfn)
    1505  else if (slot->base_gfn > tmp->base_gfn)
    1527  WARN_ON_ONCE(old->base_gfn != new->base_gfn);
    1586  if (old && old->base_gfn == new->base_gfn) {
    1804  dest->base_gfn
    2025  gfn_t base_gfn;  [local]  [all...]
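The comparisons at lines 1503-1505 exist because the common code keeps memslots ordered by base_gfn, so the slot covering a given gfn can be found by binary search. A sketch of that lookup over a plain sorted, non-overlapping array, standing in for the kernel's gfn-ordered memslot structures:

    #include <stdint.h>
    #include <stddef.h>

    typedef uint64_t gfn_t;

    struct memslot {
            gfn_t base_gfn;
            uint64_t npages;
    };

    /* slots[] is sorted by base_gfn and slots never overlap. */
    static const struct memslot *find_slot(const struct memslot *slots,
                                           size_t nslots, gfn_t gfn)
    {
            size_t lo = 0, hi = nslots;

            while (lo < hi) {
                    size_t mid = lo + (hi - lo) / 2;

                    if (gfn < slots[mid].base_gfn)
                            hi = mid;
                    else if (gfn >= slots[mid].base_gfn + slots[mid].npages)
                            lo = mid + 1;
                    else
                            return &slots[mid];     /* gfn falls inside this slot */
            }
            return NULL;    /* no slot maps this gfn */
    }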
/linux-master/arch/mips/kvm/

mmu.c
    418  gfn_t base_gfn = slot->base_gfn + gfn_offset;  [local]
    419  gfn_t start = base_gfn + __ffs(mask);
    420  gfn_t end = base_gfn + __fls(mask);
mips.c
    200  kvm_mips_flush_gpa_pt(kvm, slot->base_gfn,
    201      slot->base_gfn + slot->npages - 1);
    235  needs_flush = kvm_mips_mkclean_gpa_pt(kvm, new->base_gfn,
    236      new->base_gfn + new->npages - 1);
/linux-master/include/linux/

kvm_host.h
    588   gfn_t base_gfn;  [member in struct kvm_memory_slot]
    1105  if (start < slot->base_gfn) {
    1143  if (iter->slot->base_gfn + iter->slot->npages <= start)
    1157  return iter->slot->base_gfn < end;
    1707  if (gfn >= slot->base_gfn && gfn < slot->base_gfn + slot->npages)
    1730  if (gfn >= slot->base_gfn) {
    1731      if (gfn < slot->base_gfn + slot->npages)
    1780  unsigned long offset = gfn - slot->base_gfn;
    1795  return slot->base_gfn  [all...]
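The kvm_host.h hits show the two basic operations every user of base_gfn builds on: testing whether a gfn lies inside the slot (line 1707) and turning it into a page offset within the slot (line 1780). A sketch of both, with stand-in types; the hva computation at the end is an illustration assuming the offset is scaled by page size and added to the slot's userspace mapping, which the hits above do not show:

    #include <stdint.h>
    #include <stdbool.h>

    #define PAGE_SHIFT  12
    typedef uint64_t gfn_t;

    struct memslot {
            gfn_t base_gfn;
            uint64_t npages;
            uint64_t userspace_addr;        /* host VA of the slot's first page (assumed) */
    };

    static bool gfn_in_slot(const struct memslot *slot, gfn_t gfn)
    {
            return gfn >= slot->base_gfn && gfn < slot->base_gfn + slot->npages;
    }

    static uint64_t gfn_to_hva_slot(const struct memslot *slot, gfn_t gfn)
    {
            uint64_t offset = gfn - slot->base_gfn; /* page offset within the slot */

            return slot->userspace_addr + (offset << PAGE_SHIFT);
    }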
/linux-master/arch/s390/kvm/

pv.c
    261  while (slot && slot->base_gfn < pages_2g) {
    262  len = min_t(u64, slot->npages, pages_2g - slot->base_gfn) * PAGE_SIZE;
    265  slot = gfn_to_memslot(kvm, slot->base_gfn + slot->npages);
kvm-s390.c
    678   cur_gfn = memslot->base_gfn;
    679   last_gfn = memslot->base_gfn + memslot->npages;
    2224  unsigned long ofs = cur_gfn - ms->base_gfn;
    2227  if (ms->base_gfn + ms->npages <= cur_gfn) {
    2237  if (cur_gfn < ms->base_gfn)
    2245  return ms->base_gfn + ofs;
    2272  if (test_and_clear_bit(cur_gfn - ms->base_gfn, kvm_second_dirty_bitmap(ms)))
    2290  if (cur_gfn - ms->base_gfn >= ms->npages) {
    5772  if ((new->base_gfn * PAGE_SIZE) + size > kvm->arch.mem_limit)
    5804  rc = gmap_unmap_segment(kvm->arch.gmap, old->base_gfn * PAGE_SIZ  [all...]
kvm-s390.h
    250  return ms->base_gfn + ms->npages;
/linux-master/arch/powerpc/include/asm/

kvm_book3s_64.h
    495  return !(memslot->base_gfn & mask) && !(memslot->npages & mask);
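The book3s_64 hit checks whether a memslot can be backed by a larger page size: both its starting gfn and its length must be multiples of that page size's gfn count, i.e. neither may have bits set under the mask. A sketch of the same test, with the mask derived from an assumed pages-per-large-page count:

    #include <stdint.h>
    #include <stdbool.h>

    typedef uint64_t gfn_t;

    struct memslot {
            gfn_t base_gfn;
            uint64_t npages;
    };

    static bool slot_aligned_for_pagesize(const struct memslot *memslot,
                                          uint64_t pages_per_large_page)
    {
            uint64_t mask = pages_per_large_page - 1;  /* e.g. 512 - 1 for 2M on 4K */

            return !(memslot->base_gfn & mask) && !(memslot->npages & mask);
    }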