Searched refs:page (Results 276 - 300 of 3144) sorted by relevance


/linux-master/arch/powerpc/mm/
pageattr.c
14 #include <asm/page.h>
26 * Updates the attributes of a page atomically.
29 * attributes of a page currently being executed or accessed.
89 * On hash, the linear mapping is not in the Linux page table so
108 void __kernel_map_pages(struct page *page, int numpages, int enable) argument
111 unsigned long addr = (unsigned long)page_address(page);
113 if (PageHighMem(page))
117 err = hash__kernel_map_pages(page, numpages, enable);
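Read together, these fragments give the shape of the whole helper: highmem pages are skipped outright, and hash-MMU kernels take a dedicated path because their linear mapping does not live in the Linux page table. A hedged reconstruction of that flow; the set_memory_p()/set_memory_np() calls on the non-hash branch are an assumption, not quoted from the file:

/* Sketch reconstructed from the numbered fragments above; not the full file. */
void __kernel_map_pages(struct page *page, int numpages, int enable)
{
	int err;
	unsigned long addr = (unsigned long)page_address(page);

	if (PageHighMem(page))
		return;

	if (IS_ENABLED(CONFIG_PPC_BOOK3S_64) && !radix_enabled())
		err = hash__kernel_map_pages(page, numpages, enable);
	else if (enable)
		err = set_memory_p(addr, numpages);	/* assumed helper */
	else
		err = set_memory_np(addr, numpages);	/* assumed helper */

	if (err)
		panic("%s: changing page protections failed\n", __func__);
}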
/linux-master/drivers/gpu/drm/nouveau/nvkm/engine/dma/
usergf119.c
76 u32 kind, page; local
94 "create gf100 dma vers %d page %d kind %02x\n",
95 args->v0.version, args->v0.page, args->v0.kind);
97 page = args->v0.page;
102 page = GF119_DMA_V0_PAGE_SP;
105 page = GF119_DMA_V0_PAGE_LP;
110 if (page > 1)
112 dmaobj->flags0 = (kind << 20) | (page << 6);
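Line 112 is a plain bit-pack: the kind byte lands at bit 20 and the page-size selector (small or large, hence the page > 1 rejection on line 110) at bit 6. A minimal stand-alone illustration, with example values only:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t kind = 0xfe;	/* example stand-in for args->v0.kind */
	uint32_t page = 1;	/* GF119_DMA_V0_PAGE_LP, per the snippet */
	uint32_t flags0 = (kind << 20) | (page << 6);

	printf("flags0 = 0x%08x\n", flags0);	/* prints 0x0fe00040 */
	return 0;
}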
/linux-master/include/xen/
xen-front-pgdir-shbuf.h
4 * Xen frontend/backend page directory based shared buffer
26 * of grant references for the page directory and the pages
30 * of grant references for the page directory itself as grant
39 * Number of pages for the shared buffer itself (excluding the page
47 struct page **pages;
64 struct page **pages;
/linux-master/arch/powerpc/include/asm/
async_tx.h
13 struct page **dst_lst, int dst_cnt, struct page **src_lst,
/linux-master/drivers/gpu/drm/nouveau/
nouveau_dmem.h
42 unsigned long nouveau_dmem_page_addr(struct page *page);
/linux-master/include/linux/mlx5/
rsc_dump.h
50 struct page *page, int *size);
/linux-master/drivers/net/ethernet/mellanox/mlx5/core/diag/
rsc_dump.h
26 struct page *page, int *size);
/linux-master/kernel/dma/
dummy.c
14 static dma_addr_t dma_dummy_map_page(struct device *dev, struct page *page, argument
/linux-master/arch/alpha/include/asm/
asm-prototypes.h
5 #include <asm/page.h>
/linux-master/fs/freevxfs/
vxfs_extern.h
45 extern struct page * vxfs_get_page(struct address_space *, u_long);
46 extern void vxfs_put_page(struct page *);
/linux-master/drivers/xen/
xlate_mmu.c
40 #include <xen/page.h>
48 static void xen_for_each_gfn(struct page **pages, unsigned nr_gfn,
52 struct page *page; local
57 page = pages[i / XEN_PFN_PER_PAGE];
58 xen_pfn = page_to_xen_pfn(page);
71 struct page **pages;
99 struct page *page = info->pages[info->index++]; local
100 pte_t pte = pte_mkspecial(pfn_pte(page_to_pfn(page), inf
275 struct page *page = r->pages[r->i]; local
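The division on line 57 is the interesting part: when the kernel page size exceeds Xen's (say 64 KiB pages over 4 KiB frames), one struct page backs XEN_PFN_PER_PAGE Xen frames. A hedged sketch of the iterator, with the callback type simplified from the original:

/* Pick a new backing page every XEN_PFN_PER_PAGE frames, then hand
 * consecutive frame numbers to the callback. */
static void xen_for_each_gfn(struct page **pages, unsigned int nr_gfn,
			     void (*fn)(unsigned long gfn, void *data),
			     void *data)
{
	unsigned long xen_pfn = 0;
	struct page *page;
	unsigned int i;

	for (i = 0; i < nr_gfn; i++) {
		if ((i % XEN_PFN_PER_PAGE) == 0) {
			page = pages[i / XEN_PFN_PER_PAGE];
			xen_pfn = page_to_xen_pfn(page);
		}
		fn(pfn_to_gfn(xen_pfn++), data);
	}
}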
/linux-master/fs/squashfs/
page_actor.c
17 * page cache.
62 /* Implementation of page_actor for decompressing directly into page cache. */
71 (actor->next_index != actor->page[actor->next_page]->index)) {
80 actor->last_page = actor->page[actor->next_page];
81 return actor->pageaddr = kmap_local_page(actor->page[actor->next_page++]);
106 struct page **page, int pages, int length)
124 actor->page = page;
128 actor->next_index = page[
105 squashfs_page_actor_init_special(struct squashfs_sb_info *msblk, struct page **page, int pages, int length) argument
file.c
365 void squashfs_fill_page(struct page *page, struct squashfs_cache_entry *buffer, int offset, int avail) argument
370 pageaddr = kmap_atomic(page);
375 flush_dcache_page(page);
377 SetPageUptodate(page);
379 SetPageError(page);
382 /* Copy data into page cache */
383 void squashfs_copy_cache(struct page *page, struct squashfs_cache_entry *buffer, argument
386 struct inode *inode = page
422 squashfs_readpage_fragment(struct page *page, int expected) argument
442 squashfs_readpage_sparse(struct page *page, int expected) argument
450 struct page *page = &folio->page; local
500 squashfs_readahead_fragment(struct page **page, unsigned int pages, unsigned int expected) argument
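Lines 365-379 outline squashfs_fill_page(): map the page, copy what the cache entry holds, zero the tail, then publish the result through the page flags. A hedged reconstruction; squashfs_copy_data() is the copy helper assumed to do the actual transfer:

void squashfs_fill_page(struct page *page, struct squashfs_cache_entry *buffer,
			int offset, int avail)
{
	int copied;
	void *pageaddr;

	pageaddr = kmap_atomic(page);
	copied = squashfs_copy_data(pageaddr, buffer, offset, avail);
	memset(pageaddr + copied, 0, PAGE_SIZE - copied);	/* zero the tail */
	kunmap_atomic(pageaddr);

	flush_dcache_page(page);
	if (copied == avail)
		SetPageUptodate(page);
	else
		SetPageError(page);
}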
/linux-master/arch/arm64/kernel/
elfcore.c
23 /* Derived from dump_user_range(); start/end must be page-aligned */
32 struct page *page = get_dump_page(addr); local
36 * page table entry that would otherwise have been filled with
37 * the zero page. Skip the equivalent tag dump which would
40 if (!page) {
49 if (!page_mte_tagged(page)) {
50 put_page(page);
58 put_page(page);
64 mte_save_page_tags(page_address(page), tag
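The loop these fragments come from (derived from dump_user_range(), per the comment on line 23) walks a page-aligned user range and saves MTE tags only where they exist; a NULL from get_dump_page() means the PTE would have been the zero page, which never carries tags. A hedged sketch, with the dump-output step elided:

/* 'start', 'end' and the 'tags' scratch buffer are assumed to be set up
 * by the caller; both bounds must be page-aligned. */
unsigned long addr;

for (addr = start; addr < end; addr += PAGE_SIZE) {
	struct page *page = get_dump_page(addr);

	if (!page)			/* zero page: nothing to save */
		continue;

	if (!page_mte_tagged(page)) {	/* mapped but never tagged */
		put_page(page);
		continue;
	}

	mte_save_page_tags(page_address(page), tags);
	put_page(page);
	/* ...write the 'tags' buffer into the core dump here... */
}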
hibernate.c
177 * Copies length bytes, starting at src_start into a new page,
182 * overwriting the kernel text. This function generates a new set of page
186 * page system.
196 void *page = (void *)get_safe_page(GFP_ATOMIC); local
201 if (!page)
204 memcpy(page, src_start, length);
205 caches_clean_inval_pou((unsigned long)page, (unsigned long)page + length);
206 rc = trans_pgd_idmap_page(&trans_info, &trans_ttbr0, &t0sz, page);
211 *phys_dst_addr = virt_to_phys(page);
220 save_tags(struct page *page, unsigned long pfn) argument
268 struct page *page = pfn_to_online_page(pfn); local
300 struct page *page = pfn_to_online_page(pfn); local
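Ordered, lines 196-211 are a complete recipe for relocating executable code safely: allocate a page the restore path will not overwrite, copy the code in, clean the caches to the point of unification so instruction fetch sees the new bytes, then idmap the page and report its physical address. A condensed sketch using the identifiers from the fragments above:

/* Inside create_safe_exec_page(), roughly: */
void *page = (void *)get_safe_page(GFP_ATOMIC);

if (!page)
	return -ENOMEM;

memcpy(page, src_start, length);
caches_clean_inval_pou((unsigned long)page, (unsigned long)page + length);

rc = trans_pgd_idmap_page(&trans_info, &trans_ttbr0, &t0sz, page);
if (rc)
	return rc;

*phys_dst_addr = virt_to_phys(page);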
/linux-master/mm/
page_reporting.c
38 MODULE_PARM_DESC(page_reporting_order, "Set page reporting order");
59 /* request page reporting */
86 /* notify prdev of free page reporting request */
112 * free lists/areas. We assume at least one page is populated.
115 struct page *page = sg_page(sg); local
116 int mt = get_pageblock_migratetype(page);
119 __putback_isolated_page(page, order, mt);
126 * If page was not comingled with another page w
153 struct page *page, *next; local
/linux-master/include/net/page_pool/
types.h
43 struct page *cache[PP_ALLOC_CACHE_SIZE];
47 * struct page_pool_params - page pool parameters
74 void (*init_callback)(struct page *page, void *arg);
101 * @cached: recycling placed page in the page pool cache
102 * @cache_full: page pool cache was full
103 * @ring: page placed into the ptr ring
104 * @ring_full: page released from page poo
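The stat names map directly onto the recycling fast path: a returned page first tries the small per-pool array cache (cached/cache_full), then the ptr ring (ring/ring_full). A hedged sketch of a driver creating and using a pool; field values are examples and 'dev' is assumed to be the driver's struct device:

#include <net/page_pool/helpers.h>

struct page_pool_params pp = {
	.order		= 0,			/* single pages */
	.pool_size	= 256,			/* ptr ring capacity */
	.nid		= NUMA_NO_NODE,
	.dev		= dev,			/* for DMA mapping, if used */
	.dma_dir	= DMA_FROM_DEVICE,
};
struct page_pool *pool = page_pool_create(&pp);
struct page *page;

if (IS_ERR(pool))
	return PTR_ERR(pool);

page = page_pool_alloc_pages(pool, GFP_ATOMIC);	/* e.g. rx refill */
/* ...hand the page to hardware, then recycle it... */
page_pool_put_full_page(pool, page, false);
page_pool_destroy(pool);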
/linux-master/drivers/gpu/drm/v3d/
v3d_mmu.c
8 * a single level of page tables for the V3D's 4GB address space to
12 * Because the 4MB of contiguous memory for page tables is precious,
89 u32 page = bo->node.start; local
102 v3d->pt[page++] = pte + i;
105 WARN_ON_ONCE(page - bo->node.start !=
116 u32 page; local
118 for (page = bo->node.start; page < bo->node.start + npages; page++)
119 v3d->pt[page]
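The "precious 4MB" remark is straight arithmetic: a single-level table covering 4 GB at 4 KB pages needs 2^20 entries, and at 4 bytes per PTE that is exactly 4 MB. Map and unmap then reduce to indexing one flat array, as in this hedged sketch (pte_for_page() is a hypothetical helper standing in for the PTE computation on line 102):

/* 4 GiB >> 12 = 1M PTEs; 1M entries * 4 bytes = 4 MiB of tables. */
#define V3D_PTE_COUNT	(1UL << (32 - 12))

/* Map: one PTE per 4 KiB page, starting at the BO's node offset. */
for (i = 0; i < npages; i++)
	v3d->pt[bo->node.start + i] = pte_for_page(bo, i);

/* Unmap: clear the same range (mirrors lines 118-119). */
for (page = bo->node.start; page < bo->node.start + npages; page++)
	v3d->pt[page] = 0;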
/linux-master/arch/s390/kernel/
uv.c
88 * Requests the Ultravisor to pin the page in the shared state. This will
89 * cause an intercept when the guest attempts to unshare the pinned page.
106 * Requests the Ultravisor to destroy a guest page and make it
107 * accessible to the host. The destroy clears the page instead of
110 * @paddr: Absolute host address of page to be destroyed
123 * page. Let us emulate the newer variant (no-op).
133 * The caller must already hold a reference to the page
137 struct page *page = phys_to_page(paddr); local
140 get_page(page);
172 struct page *page = phys_to_page(paddr); local
189 expected_page_refs(struct page *page) argument
204 make_page_secure(struct page *page, struct uv_cb_header *uvcb) argument
280 struct page *page; local
386 struct page *page; local
434 arch_make_page_accessible(struct page *page) argument
/linux-master/fs/ceph/
addr.c
30 * The page->private field is used to reference a struct
31 * ceph_snap_context for _every_ dirty page. This indicates which
32 * snapshot the page was logically dirtied in, and thus which snap
37 * i_wrbuffer_ref == i_wrbuffer_ref_head == the dirty page count.
57 * Invalidate and so forth must take care to ensure the dirty page
69 static inline struct ceph_snap_context *page_snap_context(struct page *page) argument
71 if (PagePrivate(page))
72 return (void *)page->private;
77 * Dirty a page
501 ceph_set_page_fscache(struct page *page) argument
523 ceph_set_page_fscache(struct page *page) argument
603 get_writepages_data_length(struct inode *inode, struct page *page, u64 start) argument
641 writepage_nounlock(struct page *page, struct writeback_control *wbc) argument
790 ceph_writepage(struct page *page, struct writeback_control *wbc) argument
828 struct page *page; local
1020 struct page *page; local
1239 struct page *page = ceph_fscrypt_pagecache_page(pages[i]); local
1364 struct page *page; local
1426 ceph_find_incompatible(struct page *page) argument
1645 struct page *page; local
1690 struct page *page = vmf->page; local
1791 struct page *page; local
/linux-master/tools/perf/
builtin-help.c
125 static void exec_woman_emacs(const char *path, const char *page) argument
133 if (asprintf(&man_page, "(woman \"%s\")", page) > 0) {
141 static void exec_man_konqueror(const char *path, const char *page) argument
164 if (asprintf(&man_page, "man:%s(1)", page) > 0) {
172 static void exec_man_man(const char *path, const char *page) argument
176 execlp(path, "man", page, NULL);
180 static void exec_man_cmd(const char *cmd, const char *page) argument
184 if (asprintf(&shell_cmd, "%s %s", cmd, page) > 0) {
337 * system-wide paths after ours to find the manual page. If
347 static void exec_viewer(const char *name, const char *page) argument
366 const char *page = cmd_to_page(perf_cmd); local
383 const char *page = cmd_to_page(perf_cmd); local
389 get_html_page_path(char **page_path, const char *page) argument
419 const char *page = cmd_to_page(perf_cmd); local
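All four viewers follow one pattern: format a command line with asprintf(), then replace the process via exec. A stand-alone userspace sketch of the shell-command variant (error handling simplified; the 'man' invocation in main() is just an example):

#define _GNU_SOURCE
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

static void exec_man_cmd(const char *cmd, const char *page)
{
	char *shell_cmd;

	if (asprintf(&shell_cmd, "%s %s", cmd, page) > 0)
		execl("/bin/sh", "sh", "-c", shell_cmd, (char *)NULL);

	/* Only reached if asprintf or exec failed. */
	perror("exec_man_cmd");
	exit(1);
}

int main(void)
{
	exec_man_cmd("man", "perf-record");
	return 0;
}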
/linux-master/arch/sh/mm/
cache.c
60 void copy_to_user_page(struct vm_area_struct *vma, struct page *page, argument
64 struct folio *folio = page_folio(page);
68 void *vto = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
78 flush_cache_page(vma, vaddr, page_to_pfn(page));
81 void copy_from_user_page(struct vm_area_struct *vma, struct page *page, argument
85 struct folio *folio = page_folio(page);
87 if (boot_cpu_data.dcache.n_aliases && page_mapcount(page) &&
89 void *vfrom = kmap_coherent(page, vadd
128 clear_user_highpage(struct page *page, unsigned long vaddr) argument
158 __flush_anon_page(struct page *page, unsigned long vmaddr) argument
243 flush_icache_pages(struct vm_area_struct *vma, struct page *page, unsigned int nr) argument
/linux-master/kernel/module/
decompress.c
19 struct page **new_pages;
34 static struct page *module_get_next_page(struct load_info *info)
36 struct page *page; local
45 page = alloc_page(GFP_KERNEL | __GFP_HIGHMEM);
46 if (!page)
49 info->pages[info->used_pages++] = page;
50 return page;
115 struct page *page local
174 struct page *page = module_get_next_page(info); local
258 struct page *page = module_get_next_page(info); local
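module_get_next_page() is a grow-and-append helper: each chunk of decompressed module data gets a fresh highmem-capable page recorded in info->pages. A hedged reconstruction, with the array-growth step (the new_pages krealloc path from line 19) summarized in a comment:

static struct page *module_get_next_page(struct load_info *info)
{
	struct page *page;

	/* ...grow info->pages via the new_pages krealloc path when full... */

	page = alloc_page(GFP_KERNEL | __GFP_HIGHMEM);
	if (!page)
		return ERR_PTR(-ENOMEM);

	info->pages[info->used_pages++] = page;
	return page;
}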
/linux-master/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/
vmm.c
41 const struct nvkm_vmm_page *page)
49 const struct nvkm_vmm_desc *pair = page[-1].desc;
58 pgt->page = page ? page->shift : 0;
73 const struct nvkm_vmm_page *page; member in struct:nvkm_vmm_iter
200 const struct nvkm_vmm_desc *pair = it->page[-1].desc;
299 const struct nvkm_vmm_desc *pair = it->page[-1].desc;
489 pgt = nvkm_vmm_pt_new(desc, NVKM_VMM_PDE_SPARSED(pgt), it->page);
501 nvkm_vmm_iter(struct nvkm_vmm *vmm, const struct nvkm_vmm_page *page, argument
40 nvkm_vmm_pt_new(const struct nvkm_vmm_desc *desc, bool sparse, const struct nvkm_vmm_page *page) argument
602 nvkm_vmm_ptes_sparse_put(struct nvkm_vmm *vmm, const struct nvkm_vmm_page *page, u64 addr, u64 size) argument
612 nvkm_vmm_ptes_sparse_get(struct nvkm_vmm *vmm, const struct nvkm_vmm_page *page, u64 addr, u64 size) argument
632 const struct nvkm_vmm_page *page = vmm->func->page; local
680 nvkm_vmm_ptes_unmap(struct nvkm_vmm *vmm, const struct nvkm_vmm_page *page, u64 addr, u64 size, bool sparse, bool pfn) argument
694 nvkm_vmm_ptes_map(struct nvkm_vmm *vmm, const struct nvkm_vmm_page *page, u64 addr, u64 size, struct nvkm_vmm_map *map, nvkm_vmm_pte_func func) argument
705 nvkm_vmm_ptes_put_locked(struct nvkm_vmm *vmm, const struct nvkm_vmm_page *page, u64 addr, u64 size) argument
713 nvkm_vmm_ptes_put(struct nvkm_vmm *vmm, const struct nvkm_vmm_page *page, u64 addr, u64 size) argument
722 nvkm_vmm_ptes_get(struct nvkm_vmm *vmm, const struct nvkm_vmm_page *page, u64 addr, u64 size) argument
741 __nvkm_vmm_ptes_unmap_put(struct nvkm_vmm *vmm, const struct nvkm_vmm_page *page, u64 addr, u64 size, bool sparse, bool pfn) argument
753 nvkm_vmm_ptes_unmap_put(struct nvkm_vmm *vmm, const struct nvkm_vmm_page *page, u64 addr, u64 size, bool sparse, bool pfn) argument
765 __nvkm_vmm_ptes_get_map(struct nvkm_vmm *vmm, const struct nvkm_vmm_page *page, u64 addr, u64 size, struct nvkm_vmm_map *map, nvkm_vmm_pte_func func) argument
780 nvkm_vmm_ptes_get_map(struct nvkm_vmm *vmm, const struct nvkm_vmm_page *page, u64 addr, u64 size, struct nvkm_vmm_map *map, nvkm_vmm_pte_func func) argument
1049 const struct nvkm_vmm_page *page = vmm->func->page; local
1096 const struct nvkm_vmm_page *page = func->page; local
1215 nvkm_vmm_pfn_split_merge(struct nvkm_vmm *vmm, struct nvkm_vma *vma, u64 addr, u64 size, u8 page, bool map) argument
1276 const struct nvkm_vmm_page *page = vmm->func->page; local
1416 const struct nvkm_vmm_page *page = &vmm->func->page[vma->refd]; local
1613 const struct nvkm_vmm_page *page = vmm->func->page; local
1708 const struct nvkm_vmm_page *page = &vmm->func->page[NVKM_VMA_PAGE_NONE]; local
1843 nvkm_vmm_get(struct nvkm_vmm *vmm, u8 page, u64 size, struct nvkm_vma **pvma) argument
1856 const struct nvkm_vmm_page *page = &vmm->func->page[refd]; local
1864 const struct nvkm_vmm_page *page = vmm->func->page; local
1872 const struct nvkm_vmm_page *page = vmm->func->page; local
1926 const struct nvkm_vmm_page *page = vmm->func->page; local
/linux-master/drivers/dma-buf/heaps/
system_heap.c
52 * by reducing TLB pressure and time spent updating page tables.
204 struct page *page = sg_page_iter_page(&piter); local
206 ret = remap_pfn_range(vma, addr, page_to_pfn(page), PAGE_SIZE,
221 struct page **pages = vmalloc(sizeof(struct page *) * npages);
222 struct page **tmp = pages;
293 struct page *page = sg_page(sg); local
295 __free_pages(page, compound_orde
317 struct page *page; local
347 struct page *page, *tmp_page; local
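The mmap path at line 204 walks the buffer's scatterlist one page at a time and installs each PFN at the next user address, while the vmap path at line 221 instead gathers every struct page into a vmalloc'd array for vmap(). A hedged sketch of the mmap loop ('table' is the buffer's sg_table, assumed in scope):

unsigned long addr = vma->vm_start;
struct sg_page_iter piter;
int ret;

for_each_sgtable_page(table, &piter, vma->vm_pgoff) {
	struct page *page = sg_page_iter_page(&piter);

	ret = remap_pfn_range(vma, addr, page_to_pfn(page), PAGE_SIZE,
			      vma->vm_page_prot);
	if (ret)
		return ret;

	addr += PAGE_SIZE;
	if (addr >= vma->vm_end)
		return 0;
}
return 0;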

Completed in 188 milliseconds
