Searched refs:page (Results 226 - 250 of 3148) sorted by last modified time


/linux-master/arch/powerpc/mm/
mem.c
216 * everything else. GFP_DMA32 page allocations automatically fall back to
227 * paging_init() sets up the page tables - in fact we've already done this.
276 * book3s is limited to 16 page sizes due to encoding this in
306 struct page *page = pfn_to_page(pfn); local
308 free_highmem_page(page);
389 * is valid. The argument is a physical page number.
fault.c
40 #include <asm/page.h>
97 * We don't try to fetch the pkey from page table because reading
98 * page table without locking doesn't guarantee stable pte value.
106 * 2. T1 : set AMR to deny access to pkey=4, touches, page
171 * Kernel page fault interrupted by SIGKILL. We have no reason to
181 * made us unable to handle the page fault gracefully.
205 pr_crit_ratelimited("kernel tried to execute %s page (%lx) - exploit attempt? (uid: %d)\n",
220 pr_crit_ratelimited("Kernel attempted to %s user page (%lx) - exploit attempt? (uid: %d)\n",
243 * page. Only called for current mm, hence foreign == 0
335 * removing the hash page table
[all...]
/linux-master/arch/powerpc/mm/book3s64/
pgtable.c
42 * This is called when relaxing access to a hugepage. It's also called in the page
150 * lookup in page tables with local interrupts disabled. For huge pages
152 * pmd_t we want to prevent transit from pmd pointing to page table
153 * to pmd pointing to huge page (and back) while interrupts are disabled.
154 * We clear pmd to possibly replace it with page table pointer in
189 * this PMD pte entry to a regular level 0 PTE by a parallel page fault.
208 * this PMD pte entry to a regular level 0 PTE by a parallel page fault.
249 pmd_t mk_pmd(struct page *page, pgprot_t pgprot) argument
251 return pfn_pmd(page_to_pfn(page), pgprot);
[all...]
/linux-master/arch/powerpc/kernel/
iommu.c
347 void *page, unsigned int npages,
366 (unsigned long)page &
571 /* Calculate next page pointer for contiguous check */
694 * Reserve page 0 so it will not be used for any mappings.
695 * This avoids buggy drivers that consider page 0 to be invalid
850 * comprises a page address and offset into that page. The dma_addr_t
851 * returned will point to the same byte within the page as was passed in.
854 struct page *page, unsigned long offset,
346 iommu_alloc(struct device *dev, struct iommu_table *tbl, void *page, unsigned int npages, enum dma_data_direction direction, unsigned long mask, unsigned int align_order, unsigned long attrs) argument
853 iommu_map_page(struct device *dev, struct iommu_table *tbl, struct page *page, unsigned long offset, size_t size, unsigned long mask, enum dma_data_direction direction, unsigned long attrs) argument
919 struct page *page; local
[all...]
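
The iommu_map_page() comment quoted above says a dma_addr_t is composed of a page address plus an offset into that page, so the returned handle points at the same byte within the page as the CPU address passed in. A minimal runnable sketch of that arithmetic, with made-up values (cpu_addr and iova_base are purely illustrative, not kernel symbols):

#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)
#define PAGE_MASK  (~(PAGE_SIZE - 1))

int main(void)
{
	/* Made-up inputs: a CPU address, and the page-aligned IOVA an
	 * IOMMU allocator might pick for the page containing it. */
	uint64_t cpu_addr  = 0x700000012abcULL;
	uint64_t iova_base = 0x80000000ULL;

	/* Keep the byte offset within the page across the mapping. */
	uint64_t dma_addr = iova_base | (cpu_addr & ~PAGE_MASK);

	/* Both addresses now point at the same byte within their page. */
	printf("cpu offset %#llx, dma offset %#llx\n",
	       (unsigned long long)(cpu_addr & ~PAGE_MASK),
	       (unsigned long long)(dma_addr & ~PAGE_MASK));
	return 0;
}
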
fadump.c
31 #include <asm/page.h>
779 struct page *page; local
787 page = virt_to_page(vaddr);
789 mark_page_reserved(page + i);
dma-iommu.c
76 * to the dma address (mapping) of the first page.
96 * comprises a page address and offset into that page. The dma_addr_t
97 * returned will point to the same byte within the page as was passed in.
99 static dma_addr_t dma_iommu_map_page(struct device *dev, struct page *page, argument
104 return iommu_map_page(dev, get_iommu_table_base(dev), page, offset,
144 /* We support DMA to/from any memory page via the iommu */
/linux-master/arch/powerpc/include/asm/
mmu.h
24 /* Radix page table supported and enabled */
144 #include <asm/page.h>
345 /* The kernel use the constants below to index in the page sizes array.
349 * A non supported page size has a "shift" field set to 0
351 * Any new page size being implemented can get a new entry in here. Whether
352 * the kernel will use it or not is a different matter though. The actual page
/linux-master/arch/powerpc/include/asm/book3s/64/
pgtable.h
14 * Common bits between hash and Radix page table
24 #define _PAGE_DIRTY 0x00080 /* C: page changed */
25 #define _PAGE_ACCESSED 0x00100 /* R: page referenced */
79 * This is different from Radix page table limitation above and
82 * for hash linux page table specific bits.
90 #define _PAGE_SPECIAL _RPAGE_SW2 /* software: special page */
91 #define _PAGE_DEVMAP _RPAGE_SW1 /* software: ZONE_DEVICE page */
149 * page table defines
160 /* pmd table use page table fragments */
164 * Because of use of pte fragments and THP, size of page table
[all...]
/linux-master/arch/parisc/include/asm/
page.h
18 #define clear_page(page) clear_page_asm((void *)(page))
21 struct page;
24 void clear_page_asm(void *page);
26 #define clear_user_page(vto, vaddr, page) clear_page_asm(vto)
27 void copy_user_highpage(struct page *to, struct page *from, unsigned long vaddr,
88 typedef struct page *pgtable_t;
135 /* The size of the gateway page (we leave lots of room for expansion) */
171 #define page_to_phys(page) (page_to_pfn(page) << PAGE_SHIFT)
[all...]
/linux-master/arch/mips/mm/
tlb-r4k.c
211 void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page) argument
220 page &= (PAGE_MASK << 1);
226 write_c0_entryhi(page);
229 write_c0_entryhi(page | cpu_asid(cpu, vma->vm_mm));
259 void local_flush_tlb_one(unsigned long page) argument
267 page &= (PAGE_MASK << 1);
268 write_c0_entryhi(page);
291 * for the R4k "end of page" hardware bug and does the needy.
328 /* this could be a huge page */
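
In local_flush_tlb_page() above, the address is masked with (PAGE_MASK << 1) because one R4k TLB entry maps an even/odd pair of pages: EntryHi carries the VPN2 (the page-pair number) in its high bits and the ASID in its low bits. A runnable model of that masking, with made-up values and the classic 8-bit R4k ASID width assumed:

#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)
#define PAGE_MASK  (~(PAGE_SIZE - 1))
#define ASID_MASK  0xffUL	/* assumption: classic 8-bit R4k ASID */

int main(void)
{
	unsigned long page = 0x426fabcUL;	/* made-up faulting address */
	unsigned long asid = 0x5fUL;		/* made-up ASID */

	/* Drop one bit more than PAGE_MASK: one TLB entry covers an
	 * even/odd page pair, and this yields the VPN2 field. */
	page &= (PAGE_MASK << 1);

	/* What write_c0_entryhi(page | cpu_asid(...)) composes. */
	printf("VPN2 %#lx, EntryHi %#lx\n", page, page | (asid & ASID_MASK));
	return 0;
}
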
dma-noncoherent.c
47 void arch_dma_prep_coherent(struct page *page, size_t size) argument
49 dma_cache_wback_inv((unsigned long)page_address(page), size);
98 struct page *page = pfn_to_page(paddr >> PAGE_SHIFT); local
106 if (PageHighMem(page)) {
111 addr = kmap_atomic(page);
119 page++;
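
The fragment above shows the usual shape of cache maintenance over a buffer that may include highmem: pages with no permanent kernel mapping get mapped one at a time with kmap_atomic() around the cache operation, while lowmem pages go straight through page_address(). A kernel-style sketch of that loop, not compilable on its own (sync_one() is a hypothetical callback, not a real kernel API):

/* Kernel-style sketch, assuming <linux/highmem.h> and <linux/mm.h>;
 * sync_one() is a hypothetical cache-maintenance callback. */
static void sync_pages_sketch(struct page *page, size_t size,
			      void (*sync_one)(void *addr, size_t len))
{
	while (size) {
		size_t len = size < PAGE_SIZE ? size : PAGE_SIZE;

		if (PageHighMem(page)) {
			/* No permanent mapping: map one page at a time. */
			void *addr = kmap_atomic(page);

			sync_one(addr, len);
			kunmap_atomic(addr);
		} else {
			sync_one(page_address(page), len);
		}
		page++;
		size -= len;
	}
}
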
/linux-master/arch/mips/jazz/
jazzdma.c
8 * on failure of vdma_alloc() one could leave page #0 unused
67 * Allocate 32k of memory for DMA page tables. This needs to be page
494 struct page *page; local
501 page = alloc_pages(gfp, get_order(size));
502 if (!page)
504 ret = page_address(page);
509 arch_dma_prep_coherent(page, size);
513 __free_pages(page, get_order(size));
524 jazz_dma_map_page(struct device *dev, struct page *page, unsigned long offset, size_t size, enum dma_data_direction dir, unsigned long attrs) argument
[all...]
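
jazz_dma_alloc() above follows the common pattern for coherent DMA buffers: allocate whole pages with alloc_pages(), take the kernel address via page_address(), write back and invalidate caches with arch_dma_prep_coherent(), and release with __free_pages() on a later failure. A kernel-style sketch of just that skeleton (not compilable standalone; the Jazz-specific vdma step is omitted):

/* Kernel-style sketch of the skeleton visible in the snippet above;
 * assumes <linux/gfp.h>. */
static void *coherent_alloc_sketch(size_t size, gfp_t gfp)
{
	struct page *page = alloc_pages(gfp, get_order(size));
	void *ret;

	if (!page)
		return NULL;

	ret = page_address(page);		/* lowmem kernel address */
	arch_dma_prep_coherent(page, size);	/* write back + invalidate */

	/* Any later failure would unwind with
	 *	__free_pages(page, get_order(size));
	 * as the snippet's error path does. */
	return ret;
}
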
/linux-master/arch/mips/include/asm/
pgtable-64.h
16 #include <asm/page.h>
29 * Each address space has 2 4K pages as its page directory, giving 1024
31 * single 4K page, giving 512 (== PTRS_PER_PMD) 8 byte pointers to page
32 * tables. Each page table is also a single 4K page, giving 512 (==
43 /* PGDIR_SHIFT determines what a third-level page table entry can map */
48 /* PMD_SHIFT determines the size of the area a second-level page table can map */
69 * For 4kB page size we use a 3 level page tree
[all...]
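
The layout described above pins down the level shifts: with 4K pages and 8-byte pointers a table holds 512 entries (9 bits), so a PMD entry maps 2^(12+9) = 2 MB and a PGD entry maps 2^(12+9+9) = 1 GB; the 1024 PGD entries in the two-page page directory then cover 40 bits of virtual address space. A runnable check of that arithmetic:

#include <stdio.h>

int main(void)
{
	int page_shift  = 12;	/* 4 KB pages */
	int table_shift = 9;	/* 512 eight-byte pointers per 4K table */

	int pmd_shift   = page_shift + table_shift;	/* 21 */
	int pgdir_shift = pmd_shift + table_shift;	/* 30 */

	printf("PMD entry maps %lu MB, PGD entry maps %lu GB\n",
	       (1UL << pmd_shift) >> 20, (1UL << pgdir_shift) >> 30);
	/* 1024 PGD entries (two 4K pages) * 1 GB = 2^40 bytes of VA. */
	return 0;
}
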
pgtable-32.h
13 #include <asm/page.h>
26 * Regarding 32-bit MIPS huge page support (and the tradeoff it entails):
28 * We use the same huge page sizes as 64-bit MIPS. Assuming a 4KB page size,
30 * 4MB virtual address region (pointing to a 4KB PTE page of 1,024 32-bit pte_t
31 * pointers, each pointing to a 4KB physical page). The problem is that 4MB,
32 * spanning both halves of a TLB EntryLo0,1 pair, requires 2MB hardware page
35 * pointers a PTE page holds, making its last half go to waste. Correspondingly,
36 * we double the number of PGD pages. Overall, page table memory overhead
49 * should be used only for entries matching the actual page table
[all...]
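
The tradeoff sketched above is easy to quantify: with 4 KB pages, a 4 KB PTE page of 1,024 32-bit pte_t pointers spans 4 MB, but aligning PGD entries to the 2 MB huge page size (one EntryLo0/1 pair) halves that span, so only 512 of the 1,024 slots get used and twice as many PGD pages are needed. A runnable rendering of those numbers:

#include <stdio.h>

int main(void)
{
	unsigned long pte_page  = 4096;			/* one PTE page */
	unsigned long pte_size  = 4;			/* 32-bit pte_t */
	unsigned long slots     = pte_page / pte_size;	/* 1024 */
	unsigned long full_span = slots * 4096;		/* 4 MB */
	unsigned long huge_span = 2UL << 20;		/* 2 MB cap */

	printf("PTE page could span %lu MB; capped at %lu MB\n",
	       full_span >> 20, huge_span >> 20);
	printf("slots used: %lu of %lu (half wasted), PGD pages doubled\n",
	       huge_span / 4096, slots);
	return 0;
}
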
/linux-master/arch/arm64/mm/
mteswap.c
23 int mte_save_tags(struct page *page) argument
27 if (!page_mte_tagged(page))
34 mte_save_page_tags(page_address(page), tag_storage);
36 /* lookup the swap entry.val from the page */
37 ret = xa_store(&mte_pages, page_swap_entry(page).val, tag_storage,
50 void mte_restore_tags(swp_entry_t entry, struct page *page) argument
57 if (try_page_mte_tagging(page)) {
58 mte_restore_page_tags(page_address(page), tags);
71 __mte_invalidate_tags(struct page *page) argument
[all...]
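
mte_save_tags() above stashes a page's tag buffer in the mte_pages xarray, keyed by the value of the page's swap entry; restoring is the mirror-image lookup. A kernel-style sketch of that restore side (not compilable standalone; xa_load() is the standard XArray counterpart to the xa_store() shown, and treating the looked-up pointer as the tag buffer is an assumption):

/* Kernel-style sketch, assuming <linux/xarray.h> and the mte_pages
 * xarray above, which is keyed by swp_entry_t.val. */
static void restore_tags_sketch(swp_entry_t entry, struct page *page)
{
	void *tags = xa_load(&mte_pages, entry.val);	/* saved by xa_store() */

	if (!tags)
		return;		/* no tags were ever saved for this entry */

	/* Restore only once per page; mirrors the snippet's guard. */
	if (try_page_mte_tagging(page))
		mte_restore_page_tags(page_address(page), tags);
}
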
fault.c
21 #include <linux/page-flags.h>
131 * Dump out the page tables associated with 'addr' in the currently active mm.
660 * this page fault.
817 { do_bad, SIGKILL, SI_KERNEL, "page domain fault" },
918 * Used during anonymous page fault handling.
926 * If the page is mapped with PROT_MTE, initialise the tags at the
927 * point of allocation and page zeroing as this is usually faster than
936 void tag_clear_highpage(struct page *page) argument
938 /* Newly allocated page, shouldn't have been tagged yet */
[all...]
dma-mapping.c
34 void arch_dma_prep_coherent(struct page *page, size_t size) argument
36 unsigned long start = (unsigned long)page_address(page);
contpte.c
15 * dynamically adding/removing the contig bit can cause page faults.
101 struct page *page; local
111 page = pte_page(pte);
112 folio = page_folio(page);
113 folio_start = addr - (page - &folio->page) * PAGE_SIZE;
306 * access/dirty per folio, not per page. And since we only create a
352 * the tlb. Until the flush, the page may or may not be wrprotected.
374 * folio, not per page
[all...]
/linux-master/arch/arm64/kernel/vdso32/
Makefile
100 VDSO_LDFLAGS += -z max-page-size=4096 -z common-page-size=4096
/linux-master/arch/arm64/include/asm/
H A Dpgtable.h28 #define VMALLOC_END (VMEMMAP_START + VMEMMAP_UNUSED_NPAGES * sizeof(struct page) - SZ_8M)
31 #define vmemmap ((struct page *)VMEMMAP_START - (memstart_addr >> PAGE_SHIFT))
54 * use broadcast TLB invalidation instructions, therefore a spurious page
61 * ZERO_PAGE is a global shared page that is always zero: used
72 * page table entry, taking care of 52-bit addresses.
332 * the page fault mechanism. Checking the dirty status of a pte becomes:
584 #define mk_pmd(page,prot) pfn_pmd(page_to_pfn(page),prot)
732 /* Find an entry in the third-level page table. */
745 * Conversion functions: convert a page and protection to a page entry,
[all...]
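
The vmemmap definition quoted above (line 31) biases the struct page array base by memstart_addr >> PAGE_SHIFT, so pfn_to_page() and page_to_pfn() collapse to plain array arithmetic: page = vmemmap + pfn, and pfn = page - vmemmap. A userspace model of that indexing with made-up values (note the biased base is formally out-of-bounds pointer arithmetic in strict C; the kernel relies on it working in practice):

#include <stdio.h>
#include <stdint.h>

struct page { char pad[64]; };	/* stand-in for the kernel's struct page */

int main(void)
{
	/* Made-up values: RAM starts at pfn 0x80000, 16 pages modeled. */
	uint64_t memstart_pfn = 0x80000;
	static struct page backing[16];

	/* Mirror vmemmap = (struct page *)VMEMMAP_START -
	 * (memstart_addr >> PAGE_SHIFT): bias the base so indexing by
	 * an absolute pfn lands inside the backing array. */
	struct page *vmemmap = backing - memstart_pfn;

	uint64_t pfn = memstart_pfn + 5;
	struct page *page = vmemmap + pfn;	/* pfn_to_page(pfn) */

	printf("index in backing: %td\n", page - backing);	/* 5 */
	printf("page_to_pfn: %#llx\n",
	       (unsigned long long)(page - vmemmap));		/* pfn */
	return 0;
}
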
hugetlb.h
14 #include <asm/page.h>
/linux-master/arch/arm/vdso/
Makefile
19 -z max-page-size=4096 -shared $(ldflags-y) \
/linux-master/arch/arm/mm/
fault.c
15 #include <linux/page-flags.h>
38 * This is useful to dump out the page tables associated with
148 * Oops. The kernel tried to access some page that wasn't present.
194 pr_err("%s: unhandled page fault (%d) at 0x%08lx, code 0x%03x\n",
419 * successfully fix up this page fault.
449 * We enter here because the first level page table doesn't contain
455 * If the init_task's first level page tables contains the relevant
461 * from the master page table, nothing more.
511 * On ARM one Linux PGD entry contains two hardware entries (see page
dma-mapping.c
28 #include <asm/page.h>
55 struct page *page; member in struct:arm_dma_free_args
64 struct page **ret_page);
107 static void __dma_clear_buffer(struct page *page, size_t size, int coherent_flag) argument
113 if (PageHighMem(page)) {
114 phys_addr_t base = __pfn_to_phys(page_to_pfn(page));
117 void *ptr = kmap_atomic(page);
122 page
145 struct page *page, *p, *e; local
166 __dma_free_buffer(struct page *page, size_t size) argument
204 struct page *page; local
312 struct page *page = virt_to_page((void *)addr); local
319 __dma_remap(struct page *page, size_t size, pgprot_t prot) argument
332 struct page *page; local
398 struct page *page; local
426 __free_from_contiguous(struct device *dev, struct page *page, void *cpu_addr, size_t size, bool want_vaddr) argument
449 struct page *page; local
538 struct page *page = NULL; local
609 struct page *page = phys_to_page(dma_to_phys(dev, handle)); local
627 dma_cache_maint_page(struct page *page, unsigned long offset, size_t size, enum dma_data_direction dir, void (*op)(const void *, size_t, int)) argument
679 __dma_page_cpu_to_dev(struct page *page, unsigned long off, size_t size, enum dma_data_direction dir) argument
695 __dma_page_dev_to_cpu(struct page *page, unsigned long off, size_t size, enum dma_data_direction dir) argument
869 struct page *page; local
1020 struct page *page; local
1044 struct page *page; local
1368 arm_iommu_map_page(struct device *dev, struct page *page, unsigned long offset, size_t size, enum dma_data_direction dir, unsigned long attrs) argument
1410 struct page *page; local
1488 struct page *page; local
1503 struct page *page; local
[all...]
/linux-master/arch/arm/lib/
uaccess_with_memcpy.c
20 #include <asm/page.h>
50 * A pmd can be bad if it refers to a HugeTLB or THP page.
55 * Lock the page table for the destination and check
230 struct page *src_page, *dst_page;
247 /* warm up the src page dcache */

Completed in 262 milliseconds
