/linux-master/arch/arm64/kvm/
guest.c
  1100: length -= PAGE_SIZE;
|
mmu.c
  87:   * of blocks into PAGE_SIZE PTEs. It assumes the range is already
  340:  phys_addr_t end = addr + PAGE_SIZE * memslot->npages;
  536:  start = ALIGN_DOWN(__pa(from), PAGE_SIZE);
  538:  for (cur = start; cur < end; cur += PAGE_SIZE) {
  556:  start = ALIGN_DOWN(__pa(from), PAGE_SIZE);
  558:  for (cur = start; cur < end; cur += PAGE_SIZE) {
  590:  for (virt_addr = start; virt_addr < end; virt_addr += PAGE_SIZE) {
  594:  err = __create_hyp_mappings(virt_addr, PAGE_SIZE, phys_addr,
  647:  * The allocated size is always a multiple of PAGE_SIZE.
  702:  size = PAGE_SIZE * [all...]

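A minimal sketch of the page-granular mapping walk quoted above from mmu.c lines 536-594, assuming a flat address space and a hypothetical map_one_page() helper standing in for __create_hyp_mappings(): align the start down to a page boundary, then map one page per iteration until the end is reached.

#include <stdint.h>

#define PAGE_SIZE 4096UL

/* assumed helper, not the kernel's API */
int map_one_page(uintptr_t virt, uintptr_t phys);

int map_range(uintptr_t from, uintptr_t to, uintptr_t phys)
{
	uintptr_t start = from & ~(PAGE_SIZE - 1);	/* ALIGN_DOWN(from, PAGE_SIZE) */
	uintptr_t cur;
	int err;

	for (cur = start; cur < to; cur += PAGE_SIZE) {
		err = map_one_page(cur, phys + (cur - start));
		if (err)
			return err;
	}
	return 0;
}
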
nested.c
  79:  switch (PAGE_SIZE) {
  96:  switch (PAGE_SIZE) {
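
nested.c switches directly on PAGE_SIZE (lines 79 and 96); since PAGE_SIZE is a compile-time constant on arm64 (4K, 16K or 64K), the switch folds away at build time. A sketch of the idiom, using the VTCR_EL2.TG0 granule encodings as an illustrative payload; the actual case bodies in nested.c differ.

#define SZ_4K  0x00001000UL
#define SZ_16K 0x00004000UL
#define SZ_64K 0x00010000UL

static unsigned long granule_tg0(unsigned long page_size)
{
	switch (page_size) {
	case SZ_4K:
		return 0;	/* TG0 = 0b00: 4KiB granule */
	case SZ_64K:
		return 1;	/* TG0 = 0b01: 64KiB granule */
	case SZ_16K:
		return 2;	/* TG0 = 0b10: 16KiB granule */
	default:
		return 0;	/* unreachable for a valid arm64 build */
	}
}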
|
pkvm.c
  85:  * this is unmapped from the host stage-2, and fallback to PAGE_SIZE.
  91:  hyp_mem_base = memblock_phys_alloc(hyp_mem_size, PAGE_SIZE);
|
reset.c
  287:  if (!kvm_lpa2_is_enabled() && PAGE_SIZE != SZ_64K)
  291:   * Check with ARMv8.5-GTG that our PAGE_SIZE is supported at
  296:   kvm_err("PAGE_SIZE not supported at Stage-2, giving up\n");
  299:   kvm_debug("PAGE_SIZE supported at Stage-2 (default)\n");
  302:   kvm_debug("PAGE_SIZE supported at Stage-2 (advertised)\n");
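
The three reset.c messages (lines 296-302) come from the ARMv8.5-GTG probe: the per-granule stage-2 fields of ID_AA64MMFR0_EL1 read 0 when stage-2 support follows the stage-1 field, 1 when the granule is unusable at stage-2, and 2 when support is explicitly advertised. A hedged sketch of the three-way outcome, with read_tgran2() as an assumed accessor for the field matching the build's PAGE_SIZE:

#include <errno.h>

enum { TGRAN2_DEFAULT = 0, TGRAN2_NONE = 1, TGRAN2_SUPPORTED = 2 };

unsigned int read_tgran2(void);	/* assumed ID-register field read */

static int check_stage2_granule(void)
{
	switch (read_tgran2()) {
	case TGRAN2_NONE:
		return -EINVAL;	/* "PAGE_SIZE not supported at Stage-2" */
	case TGRAN2_DEFAULT:
		return 0;	/* "supported at Stage-2 (default)" */
	default:
		return 0;	/* "supported at Stage-2 (advertised)" */
	}
}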
|
stacktrace.c
  53:  unsigned long high = low + PAGE_SIZE;
  64:  unsigned long high = low + PAGE_SIZE;
|
/linux-master/arch/arm64/kvm/hyp/nvhe/
ffa.c
  189:  if (npages != (KVM_FFA_MBOX_NR_PAGES * PAGE_SIZE) / FFA_PAGE_SIZE) {
  305:  if (__pkvm_host_share_ffa(pfn, sz / PAGE_SIZE))
  325:  if (__pkvm_host_unshare_ffa(pfn, sz / PAGE_SIZE))
  371:  if (fraglen > KVM_FFA_MBOX_NR_PAGES * PAGE_SIZE)
  436:  fraglen > KVM_FFA_MBOX_NR_PAGES * PAGE_SIZE) {
  544:  fraglen > KVM_FFA_MBOX_NR_PAGES * PAGE_SIZE)) {
  755:  if (min_rxtx_sz > PAGE_SIZE)
  759:  pages += KVM_FFA_MBOX_NR_PAGES * PAGE_SIZE;
  761:  pages += KVM_FFA_MBOX_NR_PAGES * PAGE_SIZE;
  765:  .len = PAGE_SIZE * [all...]

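The check quoted at ffa.c line 189 converts between the two page units in play: the FF-A ABI counts buffers in FFA_PAGE_SIZE (4KiB) units regardless of the kernel's own PAGE_SIZE. A sketch with illustrative constants (a 64K-page kernel and a one-page mailbox), not the kernel's definitions:

#define FFA_PAGE_SIZE         4096UL
#define PAGE_SIZE             65536UL
#define KVM_FFA_MBOX_NR_PAGES 1UL

/* one 64K kernel page must be presented to FF-A as sixteen 4K pages */
static int check_mbox_npages(unsigned long npages)
{
	if (npages != (KVM_FFA_MBOX_NR_PAGES * PAGE_SIZE) / FFA_PAGE_SIZE)
		return -1;	/* reject: caller mapped the wrong size */
	return 0;
}
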
hyp.lds.S
  24:  . = ALIGN(PAGE_SIZE);
|
mem_protect.c
  70:   * The size of concatenated PGDs is always a power of two of PAGE_SIZE,
  74:   WARN_ON(size != (PAGE_SIZE << get_order(size)));
  174:  WARN_ON(size != (PAGE_SIZE << get_order(size)));
  186:  hyp_put_page(&current_vm->pool, addr + (i * PAGE_SIZE));
  202:  memset(addr, 0, PAGE_SIZE);
  635:  u64 size = tx->nr_pages * PAGE_SIZE;
  645:  u64 size = tx->nr_pages * PAGE_SIZE;
  655:  u64 size = tx->nr_pages * PAGE_SIZE;
  665:  u64 size = tx->nr_pages * PAGE_SIZE;
  676:  u64 size = tx->nr_pages * PAGE_SIZE; [all...]

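The invariant checked at mem_protect.c lines 74 and 174 holds exactly when size is a power-of-two number of pages, since get_order() returns the smallest order whose block covers size. A self-contained sketch, assuming 4K pages and a local get_order() equivalent in spirit to the kernel's:

#include <stdbool.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

static unsigned int get_order(unsigned long size)
{
	unsigned int order = 0;

	while ((PAGE_SIZE << order) < size)
		order++;
	return order;
}

static bool is_power_of_two_pages(unsigned long size)
{
	return size == (PAGE_SIZE << get_order(size));
}
/* is_power_of_two_pages(8192) == true; is_power_of_two_pages(12288) == false */
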
mm.c
  56:   /* The allocated size is always a multiple of PAGE_SIZE */
  125:  for (virt_addr = start; virt_addr < end; virt_addr += PAGE_SIZE) {
  129:  err = kvm_pgtable_hyp_map(&pkvm_pgtable, virt_addr, PAGE_SIZE,
  156:  start = ALIGN_DOWN((u64)hyp_phys_to_page(start), PAGE_SIZE);
  302:  return kvm_pgtable_walk(&pkvm_pgtable, addr, PAGE_SIZE, &walker);
  311:  ret = pkvm_alloc_private_va_range(PAGE_SIZE, &addr);
  315:  ret = kvm_pgtable_hyp_map(&pkvm_pgtable, addr, PAGE_SIZE,
  333:  start = ALIGN_DOWN(start, PAGE_SIZE);
  336:  end = ALIGN(end, PAGE_SIZE);
  366:  size = PAGE_SIZE * [all...]

page_alloc.c
  39:   addr ^= (PAGE_SIZE << order);
  100:  memset(hyp_page_to_virt(p), 0, PAGE_SIZE << p->order);
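
The XOR at page_alloc.c line 39 is the classic buddy-allocator step: two blocks of order n are buddies iff their addresses differ only in bit (PAGE_SHIFT + n), so XOR with the block size flips between a block and its buddy in either direction. A minimal sketch:

#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

static unsigned long buddy_of(unsigned long addr, unsigned int order)
{
	return addr ^ (PAGE_SIZE << order);
}

int main(void)
{
	/* order-0 buddies at 0x1000 and 0x0; applying twice is the identity */
	printf("0x%lx\n", buddy_of(0x1000, 0));			/* 0x0 */
	printf("0x%lx\n", buddy_of(buddy_of(0x1000, 0), 0));	/* 0x1000 */
	return 0;
}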
|
pkvm.c
  586:  for (void *start = addr; start < addr + size; start += PAGE_SIZE)
|
setup.c
  198:  return host_stage2_set_owner_locked(phys, PAGE_SIZE, PKVM_ID_HYP);
  209:  return host_stage2_idmap_locked(phys, PAGE_SIZE, prot);
|
stacktrace.c
  31:  stacktrace_info->stack_base = (unsigned long)(params->stack_hyp_va - PAGE_SIZE);
  57:  unsigned long low = high - PAGE_SIZE;
|
tlb.c
  149:   * the same level, assume the worst case as PAGE_SIZE
  151:  stride = PAGE_SIZE;
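
The stride choice at tlb.c lines 149-151: a ranged TLB invalidation walks the address range in steps of the mapping granularity, and when the level (hence the block size) is unknown, the only safe step is the smallest one, PAGE_SIZE. A simplified sketch with invalidate_page() as a hypothetical stand-in for the per-entry TLBI; the kernel issues range TLBIs rather than this literal loop.

#define PAGE_SIZE 4096UL

void invalidate_page(unsigned long addr);	/* assumed TLBI wrapper */

static void flush_range_worst_case(unsigned long start, unsigned long end)
{
	unsigned long stride = PAGE_SIZE;	/* unknown level: assume the worst */

	for (unsigned long addr = start; addr < end; addr += stride)
		invalidate_page(addr);
}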
|
/linux-master/arch/arm64/kvm/hyp/
pgtable.c
  328:   .start = ALIGN_DOWN(addr, PAGE_SIZE),
  329:   .addr = ALIGN_DOWN(addr, PAGE_SIZE),
  372:   ret = kvm_pgtable_walk(pgt, ALIGN_DOWN(addr, PAGE_SIZE),
  373:                          PAGE_SIZE, &walker);
  494:   .phys = ALIGN_DOWN(phys, PAGE_SIZE),
  1085:  .phys = ALIGN_DOWN(phys, PAGE_SIZE),
  1562:  pgd_sz = kvm_pgd_pages(ia_bits, start_level) * PAGE_SIZE;
  1585:  return kvm_pgd_pages(ia_bits, start_level) * PAGE_SIZE;
  1614:  pgd_sz = kvm_pgd_pages(pgt->ia_bits, pgt->start_level) * PAGE_SIZE;
|
/linux-master/arch/arm64/kvm/hyp/vhe/
tlb.c
  164:   * the same level, assume the worst case as PAGE_SIZE
  166:  stride = PAGE_SIZE;
|
/linux-master/arch/arm64/lib/
clear_page.S
  26:  tst x0, #(PAGE_SIZE - 1)
  35:  tst x0, #(PAGE_SIZE - 1)
|
copy_page.S
  33:  tst x0, #(PAGE_SIZE - 1)
|
mte.S
  34:   tst x0, #(PAGE_SIZE - 1)
  55:   tst x0, #(PAGE_SIZE - 1)
  60:   tst x0, #(PAGE_SIZE - 1)
  78:   tst x2, #(PAGE_SIZE - 1)
  152:  tst x0, #(PAGE_SIZE - 1)
  173:  tst x0, #(PAGE_SIZE - 1)
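
The `tst xN, #(PAGE_SIZE - 1)` recurring in clear_page.S, copy_page.S and mte.S is the standard alignment test: for a power-of-two PAGE_SIZE, the low bits are the offset within a page, so a nonzero mask result means the pointer is not page-aligned. The loops branch while the result is nonzero, i.e. until the advancing pointer reaches the next page boundary. The C equivalent:

#include <stdbool.h>
#include <stdint.h>

#define PAGE_SIZE 4096UL

static bool page_aligned(uintptr_t addr)
{
	return (addr & (PAGE_SIZE - 1)) == 0;	/* tst addr, #(PAGE_SIZE - 1) */
}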
|
/linux-master/arch/arm64/mm/
contpte.c
  41:   unsigned long last_addr = addr + PAGE_SIZE * (nr - 1);
  61:   for (i = 0; i < CONT_PTES; i++, ptep++, addr += PAGE_SIZE) {
  71:   __flush_tlb_range(&vma, start_addr, addr, PAGE_SIZE, true, 3);
  113:  folio_start = addr - (page - &folio->page) * PAGE_SIZE;
  114:  folio_end = folio_start + folio_nr_pages(folio) * PAGE_SIZE;
  318:  for (i = 0; i < CONT_PTES; i++, ptep++, addr += PAGE_SIZE)
  339:  PAGE_SIZE, true, 3);
  395:  for (i = 0; i < CONT_PTES; i++, ptep++, addr += PAGE_SIZE)
  400:  PAGE_SIZE, true, 3);
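
The folio-bounds arithmetic at contpte.c lines 113-114: given a virtual address and the page's index within its folio, the folio's virtual start is found by stepping back index * PAGE_SIZE bytes, and its end by adding the folio size. A sketch where page_index_in_folio stands in for the pointer difference `page - &folio->page`:

#define PAGE_SIZE 4096UL

static unsigned long folio_virt_start(unsigned long addr,
				      unsigned long page_index_in_folio)
{
	return addr - page_index_in_folio * PAGE_SIZE;
}

static unsigned long folio_virt_end(unsigned long start,
				    unsigned long folio_nr_pages)
{
	return start + folio_nr_pages * PAGE_SIZE;
}
/* e.g. addr 0x5000 mapping page 2 of a 4-page folio: start 0x3000, end 0x7000 */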
|
fault.c
  157:  mm == &init_mm ? "swapper" : "user", PAGE_SIZE / SZ_1K,
  397:  } else if (addr < PAGE_SIZE) {
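
The comparison at fault.c line 397 is a well-known heuristic: a faulting address inside the first page almost certainly comes from dereferencing a NULL pointer plus a small field offset, so the fault report can say so instead of printing a raw address. A sketch; the strings are illustrative, not the kernel's exact messages:

#define PAGE_SIZE 4096UL

static const char *describe_fault(unsigned long addr)
{
	if (addr < PAGE_SIZE)
		return "NULL pointer dereference";
	return "paging request at unmapped address";
}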
|
fixmap.c
  130:  flush_tlb_kernel_range(addr, addr+PAGE_SIZE);
  152:  dt_phys_base = round_down(dt_phys, PAGE_SIZE);
  153:  offset = dt_phys % PAGE_SIZE;
  157:  create_mapping_noalloc(dt_phys_base, dt_virt_base, PAGE_SIZE, prot);
  166:  if (offset + *size > PAGE_SIZE) {
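
The base/offset split at fixmap.c lines 152-166: the device-tree blob may start anywhere inside a page, so the mapping is created from the page-aligned base and the caller's pointer is rebuilt as base + offset; if the blob spills past the first page (offset + *size > PAGE_SIZE), more pages must be mapped. A sketch of the split, assuming a flat physical address:

#include <stdint.h>

#define PAGE_SIZE 4096UL

struct dt_window {
	uintptr_t phys_base;	/* page-aligned physical base to map */
	uintptr_t offset;	/* where the blob starts within that page */
};

static struct dt_window dt_split(uintptr_t dt_phys)
{
	struct dt_window w = {
		.phys_base = dt_phys & ~(PAGE_SIZE - 1),	/* round_down */
		.offset    = dt_phys % PAGE_SIZE,
	};
	return w;
}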
|
hugetlbpage.c
  104:  *pgsize = PAGE_SIZE;
  136:  *pgsize = PAGE_SIZE;
|
init.c
  404:  if (PAGE_SIZE >= 16384 && get_num_physpages() <= 128) {
|