Searched refs:PMD_SIZE (Results 1 - 25 of 135) sorted by last modified time


/linux-master/arch/x86/mm/
fault.c  272 addr += PMD_SIZE) {
mem_encrypt_amd.c  169 vaddr += PMD_SIZE;
170 paddr += PMD_SIZE;
171 size = (size <= PMD_SIZE) ? 0 : size - PMD_SIZE;
ident_map.c  11 for (; addr < end; addr += PMD_SIZE) {
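
The fault.c and ident_map.c hits above all use the same idiom: walk an address range one PMD at a time. Below is a minimal userspace sketch of that arithmetic, assuming the x86-64 default of PMD_SHIFT = 21 (2 MiB entries with 4 KiB pages); the kernel derives these constants from its pgtable headers and actually touches page-table entries in the loop body.

    /* Sketch only: re-creates PMD_SIZE/PMD_MASK with assumed x86-64 values. */
    #include <stdio.h>

    #define PMD_SHIFT 21
    #define PMD_SIZE  (1UL << PMD_SHIFT)
    #define PMD_MASK  (~(PMD_SIZE - 1))

    int main(void)
    {
        unsigned long start = 0x40000000UL;            /* hypothetical range */
        unsigned long end   = start + 5 * PMD_SIZE;
        unsigned long addr;

        /* One iteration per 2 MiB region, mirroring
         * "for (; addr < end; addr += PMD_SIZE)" in ident_map.c. */
        for (addr = start & PMD_MASK; addr < end; addr += PMD_SIZE)
            printf("would handle the PMD covering %#lx\n", addr);

        return 0;
    }
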
/linux-master/arch/x86/virt/svm/
sev.c  168 if (IS_ALIGNED(pa, PMD_SIZE))
187 pa = ALIGN_DOWN(pa, PMD_SIZE);
188 if (e820__mapped_any(pa, pa + PMD_SIZE, E820_TYPE_RAM)) {
190 e820__range_update(pa, PMD_SIZE, E820_TYPE_RAM, E820_TYPE_RESERVED);
191 e820__range_update_table(e820_table_kexec, pa, PMD_SIZE, E820_TYPE_RAM, E820_TYPE_RESERVED);
192 e820__range_update_table(e820_table_firmware, pa, PMD_SIZE, E820_TYPE_RAM, E820_TYPE_RESERVED);
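
The sev.c hits above show the standard alignment helpers applied to PMD_SIZE: test with IS_ALIGNED(), snap down with ALIGN_DOWN(). Here is a small userspace sketch of that pattern, with the kernel macros re-created locally and a hypothetical address; PMD_SIZE = 2 MiB is an assumption (x86-64, 4 KiB pages).

    /* Sketch only: IS_ALIGNED/ALIGN_DOWN live in include/linux/align.h in the kernel. */
    #include <stdio.h>

    #define PMD_SIZE         (1UL << 21)         /* assumed x86-64 value */
    #define IS_ALIGNED(x, a) (((x) & ((a) - 1)) == 0)
    #define ALIGN_DOWN(x, a) ((x) & ~((a) - 1))

    int main(void)
    {
        unsigned long pa = 0x40123000UL;          /* hypothetical physical address */

        if (!IS_ALIGNED(pa, PMD_SIZE))
            pa = ALIGN_DOWN(pa, PMD_SIZE);        /* snap to the 2 MiB boundary below */

        printf("aligned pa = %#lx\n", pa);
        return 0;
    }
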
/linux-master/kernel/bpf/
core.c  892 /* PMD_SIZE is not available in some special config, e.g. ARCH=arm with
895 #ifdef PMD_SIZE
896 /* PMD_SIZE is really big for some archs. It doesn't make sense to
898 * 2MiB * num_possible_nodes(). On most architectures PMD_SIZE will be
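
The core.c comment above notes that PMD_SIZE may simply not exist (e.g. ARCH=arm with !CONFIG_MMU), so its use is guarded with #ifdef. Below is a sketch of that compile-time fallback pattern; PACK_CHUNK_SIZE is a made-up name for illustration, not the kernel's constant, and the sizes are assumptions.

    /* Sketch only: use PMD_SIZE when the arch provides it, else fall back to PAGE_SIZE. */
    #include <stdio.h>

    #define PAGE_SIZE (1UL << 12)
    /* #define PMD_SIZE (1UL << 21) */            /* absent on some !MMU configs */

    #ifdef PMD_SIZE
    #define PACK_CHUNK_SIZE PMD_SIZE              /* hypothetical name */
    #else
    #define PACK_CHUNK_SIZE PAGE_SIZE
    #endif

    int main(void)
    {
        printf("allocation chunk: %lu bytes\n", PACK_CHUNK_SIZE);
        return 0;
    }
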
/linux-master/arch/s390/mm/
hugetlbpage.c  137 size = PMD_SIZE;
206 else if (sz == PMD_SIZE)
248 if (MACHINE_HAS_EDAT1 && size == PMD_SIZE)
gmap.c  356 offset = (offset & (PTRS_PER_PMD - 1)) * PMD_SIZE;
414 if ((to | len) & (PMD_SIZE - 1))
421 for (off = 0; off < len; off += PMD_SIZE)
446 if ((from | to | len) & (PMD_SIZE - 1))
454 for (off = 0; off < len; off += PMD_SIZE) {
720 gaddr = (gaddr + PMD_SIZE) & PMD_MASK) {
738 size = min(to - gaddr, PMD_SIZE - (gaddr & ~PMD_MASK));
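
The gmap.c checks above use a compact trick: OR together every value that must be segment-aligned and test the combined result against (PMD_SIZE - 1) once. A userspace sketch of that test follows; PMD_SIZE is set to 1 MiB here to match s390's segment size, but treat the value as an assumption.

    /* Sketch only: one mask test covers source, destination and length. */
    #include <stdbool.h>
    #include <stdio.h>

    #define PMD_SIZE (1UL << 20)                  /* assumed: 1 MiB s390 segment */

    static bool pmd_aligned_range(unsigned long from, unsigned long to,
                                  unsigned long len)
    {
        return ((from | to | len) & (PMD_SIZE - 1)) == 0;
    }

    int main(void)
    {
        printf("%d\n", pmd_aligned_range(0x10000000UL, 0x20000000UL, 0x100000UL));
        return 0;
    }
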
/linux-master/arch/x86/kernel/
sev-shared.c  1100 unsigned long vaddr_end = vaddr + PMD_SIZE;
sev.c  831 if (use_large_entry && IS_ALIGNED(vaddr, PMD_SIZE) &&
832 (vaddr_end - vaddr) >= PMD_SIZE) {
834 vaddr += PMD_SIZE;
head64.c  112 for (; vaddr < vaddr_end; vaddr += PMD_SIZE) {
233 for (i = 0; i < DIV_ROUND_UP(_end - _text, PMD_SIZE); i++) {
236 pmd[idx % PTRS_PER_PMD] = pmd_entry + i * PMD_SIZE;
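
head64.c line 233 above sizes its loop with DIV_ROUND_UP(_end - _text, PMD_SIZE), i.e. the number of 2 MiB PMD entries needed to cover the kernel image. A sketch of that arithmetic, with DIV_ROUND_UP re-created and a hypothetical image size; PMD_SIZE = 2 MiB is an assumption.

    /* Sketch only: count the PMD entries covering a region of a given size. */
    #include <stdio.h>

    #define PMD_SIZE           (1UL << 21)        /* assumed x86-64 value */
    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    int main(void)
    {
        unsigned long image_bytes = 23UL * 1024 * 1024 + 4096;  /* hypothetical _end - _text */
        unsigned long i, nr = DIV_ROUND_UP(image_bytes, PMD_SIZE);

        for (i = 0; i < nr; i++)
            printf("PMD entry %lu maps image offset %#lx\n", i, i * PMD_SIZE);

        return 0;
    }
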
/linux-master/arch/riscv/mm/
init.c  210 * Make sure we align the reservation on PMD_SIZE since we will
215 vmlinux_end = (vmlinux_end + PMD_SIZE - 1) & PMD_MASK;
461 if (sz == PMD_SIZE) {
680 !(pa & (PMD_SIZE - 1)) && !(va & (PMD_SIZE - 1)) && size >= PMD_SIZE)
681 return PMD_SIZE;
801 PMD_SIZE, PAGE_KERNEL_EXEC);
803 set_satp_mode_pmd + PMD_SIZE,
804 set_satp_mode_pmd + PMD_SIZE,
[all...]
tlbflush.c  190 else if (stride_size >= PMD_SIZE)
191 stride_size = PMD_SIZE;
212 start, end - start, PMD_SIZE);
pgtable.c  119 flush_tlb_kernel_range(addr, addr + PMD_SIZE);
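
init.c line 215 above rounds vmlinux_end up to the next PMD boundary with the open-coded form (x + PMD_SIZE - 1) & PMD_MASK, equivalent to ALIGN(x, PMD_SIZE). Below is a sketch of that round-up; the address and the 2 MiB PMD_SIZE are assumptions.

    /* Sketch only: round an end address up to the next PMD boundary. */
    #include <stdio.h>

    #define PMD_SIZE (1UL << 21)                  /* assumed value */
    #define PMD_MASK (~(PMD_SIZE - 1))

    int main(void)
    {
        unsigned long vmlinux_end  = 0x80a12345UL;                      /* hypothetical */
        unsigned long reserved_end = (vmlinux_end + PMD_SIZE - 1) & PMD_MASK;

        printf("%#lx rounds up to %#lx\n", vmlinux_end, reserved_end);
        return 0;
    }
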
/linux-master/arch/riscv/include/asm/
pgtable.h  95 #define MAX_FDT_SIZE PMD_SIZE
97 #define FIXADDR_SIZE (PMD_SIZE + FIX_FDT_SIZE)
/linux-master/mm/
hugetlb.c  7501 BUG_ON(sz != PMD_SIZE);
7568 else if (hp_size == PMD_SIZE)
7569 return PUD_SIZE - PMD_SIZE;
7580 if (huge_page_size(h) == PMD_SIZE)
7581 return PUD_SIZE - PMD_SIZE;
huge_memory.c  860 ret = __thp_get_unmapped_area(filp, addr, len, off, flags, PMD_SIZE);
2013 flush_pmd_tlb_range(vma, old_addr, old_addr + PMD_SIZE);
vmalloc.c  136 if ((end - addr) != PMD_SIZE)
139 if (!IS_ALIGNED(addr, PMD_SIZE))
142 if (!IS_ALIGNED(phys_addr, PMD_SIZE))
3768 if (arch_vmap_pmd_supported(prot) && size_per_node >= PMD_SIZE)
3928 * If @size is greater than or equal to PMD_SIZE, allow using
memory.c  325 * by PMD_SIZE below? no, end can't go down to 0 there.
335 addr += PMD_SIZE;
345 end -= PMD_SIZE;
394 while (next && next->vm_start <= vma->vm_end + PMD_SIZE
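
The hugetlb.c hits above return PUD_SIZE - PMD_SIZE for PMD-sized huge pages; that difference is exactly the mask of the PMD-index bits inside one PUD (511 * PMD_SIZE in the common layout), which is what the caller uses as a "last page" mask. A sketch of the arithmetic, assuming the usual 4 KiB-page shifts (PMD_SHIFT 21, PUD_SHIFT 30).

    /* Sketch only: show what PUD_SIZE - PMD_SIZE evaluates to. */
    #include <stdio.h>

    #define PMD_SIZE (1UL << 21)                  /* assumed */
    #define PUD_SIZE (1UL << 30)                  /* assumed */

    int main(void)
    {
        unsigned long mask = PUD_SIZE - PMD_SIZE;

        printf("PUD_SIZE - PMD_SIZE = %#lx, equals 511 * PMD_SIZE: %d\n",
               mask, mask == 511 * PMD_SIZE);
        return 0;
    }
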
/linux-master/arch/arc/mm/
tlb.c  624 BUILD_BUG_ON(!IS_ALIGNED((CONFIG_ARC_KVADDR_SIZE << 20), PMD_SIZE));
630 BUILD_BUG_ON(!IS_ALIGNED(STACK_TOP, PMD_SIZE));
/linux-master/arch/x86/kvm/mmu/
mmu.c  1396 if (ALIGN(start << PAGE_SHIFT, PMD_SIZE) !=
1397 ALIGN(end << PAGE_SHIFT, PMD_SIZE))
/linux-master/arch/arm64/mm/
hugetlbpage.c  60 case PMD_SIZE:
109 *pgsize = PMD_SIZE;
128 case PMD_SIZE:
132 *pgsize = PMD_SIZE;
296 } else if (sz == PMD_SIZE) {
340 if (!(sz == PMD_SIZE || sz == CONT_PMD_SIZE) &&
363 case PMD_SIZE:
364 return PUD_SIZE - PMD_SIZE;
366 return PMD_SIZE - CONT_PTE_SIZE;
383 } else if (pagesize != PUD_SIZE && pagesize != PMD_SIZE) {
[all...]
/linux-master/arch/arm64/kvm/
mmu.c  96 n += DIV_ROUND_UP(range, PMD_SIZE);
1278 * the stage2 PFN and IPA accordingly. Only PMD_SIZE THPs are currently
1295 if (fault_supports_stage2_huge_mapping(memslot, hva, PMD_SIZE)) {
1301 if (sz < PMD_SIZE)
1308 return PMD_SIZE;
1336 if ((hva & (PMD_SIZE - 1)) == (pa & (PMD_SIZE - 1)) &&
1337 ALIGN_DOWN(hva, PMD_SIZE) >= vma->vm_start &&
1338 ALIGN(hva, PMD_SIZE) <= vma->vm_end)
1455 if (fault_supports_stage2_huge_mapping(memslot, hva, PMD_SIZE))
[all...]
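
mmu.c lines 1336-1338 above decide whether a stage-2 block mapping is usable: the userspace VA and the backing PA must share the same offset within a PMD, and the whole PMD-sized window must fit inside the VMA. A userspace sketch of that predicate follows; the addresses are hypothetical, PMD_SIZE = 2 MiB is an assumption, and the helper name is made up.

    /* Sketch only: congruence + containment check for a PMD block mapping. */
    #include <stdbool.h>
    #include <stdio.h>

    #define PMD_SIZE         (1UL << 21)
    #define ALIGN(x, a)      (((x) + (a) - 1) & ~((a) - 1))
    #define ALIGN_DOWN(x, a) ((x) & ~((a) - 1))

    static bool can_use_pmd_block(unsigned long hva, unsigned long pa,
                                  unsigned long vm_start, unsigned long vm_end)
    {
        return (hva & (PMD_SIZE - 1)) == (pa & (PMD_SIZE - 1)) &&
               ALIGN_DOWN(hva, PMD_SIZE) >= vm_start &&
               ALIGN(hva, PMD_SIZE) <= vm_end;
    }

    int main(void)
    {
        printf("%d\n", can_use_pmd_block(0x40200000UL, 0x80200000UL,
                                         0x40000000UL, 0x41000000UL));
        return 0;
    }
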
/linux-master/arch/loongarch/include/asm/
pgtable.h  29 #define PMD_SIZE (1UL << PMD_SHIFT)
30 #define PMD_MASK (~(PMD_SIZE-1))
34 #define PMD_SIZE (1UL << PMD_SHIFT)
35 #define PMD_MASK (~(PMD_SIZE-1))
96 min(PTRS_PER_PGD * PTRS_PER_PUD * PTRS_PER_PMD * PTRS_PER_PTE * PAGE_SIZE, (1UL << cpu_vabits)) - PMD_SIZE - VMEMMAP_SIZE - KFENCE_AREA_SIZE)
100 min(PTRS_PER_PGD * PTRS_PER_PUD * PTRS_PER_PMD * PTRS_PER_PTE * PAGE_SIZE, (1UL << cpu_vabits) / 2) - PMD_SIZE - VMEMMAP_SIZE - KFENCE_AREA_SIZE)
103 #define vmemmap ((struct page *)((VMALLOC_END + PMD_SIZE) & PMD_MASK))
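
The pgtable.h hits above show the canonical derivation: PMD_SIZE is 1 << PMD_SHIFT and PMD_MASK clears the in-PMD offset bits, so "addr & PMD_MASK" gives the start of the PMD region containing addr (the vmemmap definition on line 103 uses the same masking to land on a PMD boundary). A sketch of the derivation; PMD_SHIFT = 25 (32 MiB PMDs, i.e. 16 KiB pages with 8-byte entries) is only an assumed example value.

    /* Sketch only: derive PMD_SIZE/PMD_MASK from an assumed PMD_SHIFT. */
    #include <stdio.h>

    #define PMD_SHIFT 25                          /* assumed example value */
    #define PMD_SIZE  (1UL << PMD_SHIFT)
    #define PMD_MASK  (~(PMD_SIZE - 1))

    int main(void)
    {
        unsigned long addr = 0x12345678UL;        /* hypothetical */

        printf("PMD_SIZE = %lu MiB\n", PMD_SIZE >> 20);
        printf("%#lx lies in the PMD starting at %#lx\n", addr, addr & PMD_MASK);
        return 0;
    }
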
/linux-master/arch/sparc/mm/
srmmu.c  709 if (start > (0xffffffffUL - PMD_SIZE))
711 start = (start + PMD_SIZE) & PMD_MASK;
744 if (start > (0xffffffffUL - PMD_SIZE))
746 start = (start + PMD_SIZE) & PMD_MASK;
801 if (srmmu_probe(addr + PMD_SIZE) == probed)
829 start += PMD_SIZE;
/linux-master/arch/powerpc/mm/book3s64/
radix_pgtable.c  100 if (map_page_size == PMD_SIZE) {
163 if (map_page_size == PMD_SIZE) {
323 } else if (IS_ALIGNED(addr, PMD_SIZE) && gap >= PMD_SIZE &&
325 mapping_size = PMD_SIZE;
676 unsigned long start = ALIGN_DOWN(addr, PMD_SIZE);
678 return !vmemmap_populated(start, PMD_SIZE);
771 if (IS_ALIGNED(addr, PMD_SIZE) &&
772 IS_ALIGNED(next, PMD_SIZE)) {
774 free_vmemmap_pages(pmd_page(*pmd), altmap, get_order(PMD_SIZE));
[all...]

