Searched refs:page (Results 276 - 300 of 3141) sorted by relevance


/linux-master/drivers/net/ethernet/mellanox/mlx5/core/diag/
rsc_dump.h 26 struct page *page, int *size);
/linux-master/kernel/dma/
dummy.c 14 static dma_addr_t dma_dummy_map_page(struct device *dev, struct page *page, argument
contiguous.c 30 * inaccessible to page system even if device drivers don't use it.
40 #include <asm/page.h>
304 struct page *dma_alloc_from_contiguous(struct device *dev, size_t count,
323 bool dma_release_from_contiguous(struct device *dev, struct page *pages,
329 static struct page *cma_alloc_aligned(struct cma *cma, size_t size, gfp_t gfp)
346 * Note that it bypass one-page size of allocations from the per-numa and
347 * global area as the addresses within one page are always contiguous, so
351 struct page *dma_alloc_contiguous(struct device *dev, size_t size, gfp_t gfp)
368 struct page *page; local
401 dma_free_contiguous(struct device *dev, struct page *page, size_t size) argument
[all...]
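
The contiguous.c hits above are the CMA-backed DMA allocator; the comment at contiguous.c lines 346-347 notes that single-page requests bypass the per-numa and global CMA areas. As a rough, hedged sketch of how dma_alloc_contiguous()/dma_free_contiguous() pair up (the helper name example_use_contig and the 1 MiB size are invented, and this API normally serves dma-mapping internals rather than ordinary drivers):

    #include <linux/dma-map-ops.h>
    #include <linux/errno.h>
    #include <linux/gfp.h>
    #include <linux/mm.h>
    #include <linux/sizes.h>
    #include <linux/string.h>

    /* Hypothetical: grab, touch, and release one contiguous 1 MiB buffer. */
    static int example_use_contig(struct device *dev)
    {
        size_t size = SZ_1M;
        struct page *page;

        /* Falls back to plain alloc_pages() when no CMA area is available. */
        page = dma_alloc_contiguous(dev, size, GFP_KERNEL);
        if (!page)
            return -ENOMEM;

        memset(page_address(page), 0, size);    /* assumes a lowmem page */

        dma_free_contiguous(dev, page, size);
        return 0;
    }
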
/linux-master/arch/alpha/include/asm/
asm-prototypes.h 5 #include <asm/page.h>
/linux-master/fs/freevxfs/
vxfs_extern.h 45 extern struct page * vxfs_get_page(struct address_space *, u_long);
46 extern void vxfs_put_page(struct page *);
/linux-master/drivers/xen/
xlate_mmu.c 40 #include <xen/page.h>
48 static void xen_for_each_gfn(struct page **pages, unsigned nr_gfn,
52 struct page *page; local
57 page = pages[i / XEN_PFN_PER_PAGE];
58 xen_pfn = page_to_xen_pfn(page);
71 struct page **pages;
99 struct page *page = info->pages[info->index++]; local
100 pte_t pte = pte_mkspecial(pfn_pte(page_to_pfn(page), inf
275 struct page *page = r->pages[r->i]; local
[all...]
/linux-master/fs/squashfs/
page_actor.c 17 * page cache.
62 /* Implementation of page_actor for decompressing directly into page cache. */
71 (actor->next_index != actor->page[actor->next_page]->index)) {
80 actor->last_page = actor->page[actor->next_page];
81 return actor->pageaddr = kmap_local_page(actor->page[actor->next_page++]);
106 struct page **page, int pages, int length)
124 actor->page = page;
128 actor->next_index = page[
105 squashfs_page_actor_init_special(struct squashfs_sb_info *msblk, struct page **page, int pages, int length) argument
[all...]
file.c 365 void squashfs_fill_page(struct page *page, struct squashfs_cache_entry *buffer, int offset, int avail) argument
370 pageaddr = kmap_atomic(page);
375 flush_dcache_page(page);
377 SetPageUptodate(page);
380 /* Copy data into page cache */
381 void squashfs_copy_cache(struct page *page, struct squashfs_cache_entry *buffer, argument
384 struct inode *inode = page->mapping->host;
387 int start_index = page
420 squashfs_readpage_fragment(struct page *page, int expected) argument
440 squashfs_readpage_sparse(struct page *page, int expected) argument
448 struct page *page = &folio->page; local
496 squashfs_readahead_fragment(struct page **page, unsigned int pages, unsigned int expected) argument
[all...]
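
squashfs_fill_page() above copies decompressed data into a page-cache page and marks it up to date. A generic, hedged sketch of the same copy-and-finish pattern (using kmap_local_page() instead of the kmap_atomic() shown in the hit; example_fill_page is an invented name):

    #include <linux/highmem.h>
    #include <linux/pagemap.h>
    #include <linux/string.h>

    /* Hypothetical: copy 'avail' bytes into 'page', zero the rest. */
    static void example_fill_page(struct page *page, const void *src, int avail)
    {
        void *pageaddr = kmap_local_page(page);

        memcpy(pageaddr, src, avail);
        memset(pageaddr + avail, 0, PAGE_SIZE - avail);
        kunmap_local(pageaddr);

        flush_dcache_page(page);
        SetPageUptodate(page);
    }
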
/linux-master/arch/arm64/kernel/
elfcore.c 23 /* Derived from dump_user_range(); start/end must be page-aligned */
32 struct page *page = get_dump_page(addr); local
36 * page table entry that would otherwise have been filled with
37 * the zero page. Skip the equivalent tag dump which would
40 if (!page) {
49 if (!page_mte_tagged(page)) {
50 put_page(page);
58 put_page(page);
64 mte_save_page_tags(page_address(page), tag
[all...]
hibernate.c 177 * Copies length bytes, starting at src_start into an new page,
182 * overwriting the kernel text. This function generates a new set of page
186 * page system.
196 void *page = (void *)get_safe_page(GFP_ATOMIC); local
201 if (!page)
204 memcpy(page, src_start, length);
205 caches_clean_inval_pou((unsigned long)page, (unsigned long)page + length);
206 rc = trans_pgd_idmap_page(&trans_info, &trans_ttbr0, &t0sz, page);
211 *phys_dst_addr = virt_to_phys(page);
220 save_tags(struct page *page, unsigned long pfn) argument
268 struct page *page = pfn_to_online_page(pfn); local
300 struct page *page = pfn_to_online_page(pfn); local
[all...]
/linux-master/mm/
page_reporting.c 38 MODULE_PARM_DESC(page_reporting_order, "Set page reporting order");
59 /* request page reporting */
86 /* notify prdev of free page reporting request */
112 * free lists/areas. We assume at least one page is populated.
115 struct page *page = sg_page(sg); local
116 int mt = get_pageblock_migratetype(page);
119 __putback_isolated_page(page, order, mt);
126 * If page was not comingled with another page w
153 struct page *page, *next; local
[all...]
compaction.c 6 * this heavily depends upon page migration to do all the real heavy
20 #include <linux/page-isolation.h>
85 struct page *page, *next; local
89 list_for_each_entry_safe(page, next, &freepages[order], lru) {
92 list_del(&page->lru);
96 post_alloc_hook(page, order, __GFP_MOVABLE);
98 split_page(page, order);
101 list_add(&page->lru, &tmp_list);
102 page
115 struct page *page, *next; local
135 PageMovable(struct page *page) argument
150 __SetPageMovable(struct page *page, const struct movable_operations *mops) argument
158 __ClearPageMovable(struct page *page) argument
239 isolation_suitable(struct compact_control *cc, struct page *page) argument
313 pageblock_skip_persistent(struct page *page) argument
330 struct page *page = pfn_to_online_page(pfn); local
470 test_and_set_skip(struct compact_control *cc, struct page *page) argument
507 update_pageblock_skip(struct compact_control *cc, struct page *page, unsigned long pfn) argument
521 isolation_suitable(struct compact_control *cc, struct page *page) argument
527 pageblock_skip_persistent(struct page *page) argument
532 update_pageblock_skip(struct compact_control *cc, struct page *page, unsigned long pfn) argument
541 test_and_set_skip(struct compact_control *cc, struct page *page) argument
615 struct page *page; local
900 struct page *page = NULL, *valid_page = NULL; local
1416 suitable_migration_source(struct compact_control *cc, struct page *page) argument
1436 suitable_migration_target(struct compact_control *cc, struct page *page) argument
1518 struct page *page; local
1568 struct page *page = NULL; local
1737 struct page *page; local
1915 struct page *page = &dst->page; local
2106 struct page *page; local
[all...]
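
The compaction hits above take high-order pages off the free lists and split them to order-0 with split_page(). Outside compaction internals (post_alloc_hook() is not available elsewhere), the same split can be sketched with an assumed order-2 allocation and an invented helper name:

    #include <linux/errno.h>
    #include <linux/gfp.h>
    #include <linux/mm.h>

    /* Hypothetical: allocate an order-2 block and hand back 4 order-0 pages. */
    static int example_split_order2(struct page *pages_out[4])
    {
        struct page *page = alloc_pages(GFP_KERNEL, 2);
        int i;

        if (!page)
            return -ENOMEM;

        split_page(page, 2);    /* now 4 independently freeable order-0 pages */
        for (i = 0; i < 4; i++)
            pages_out[i] = page + i;
        return 0;
    }

Each resulting page would later be released on its own with __free_page().
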
/linux-master/drivers/gpu/drm/v3d/
v3d_mmu.c 8 * a single level of page tables for the V3D's 4GB address space to
12 * Because the 4MB of contiguous memory for page tables is precious,
89 u32 page = bo->node.start; local
102 v3d->pt[page++] = pte + i;
105 WARN_ON_ONCE(page - bo->node.start !=
116 u32 page; local
118 for (page = bo->node.start; page < bo->node.start + npages; page++)
119 v3d->pt[page]
[all...]
/linux-master/arch/xtensa/include/asm/
cacheflush.h 14 #include <asm/page.h>
89 * Pages can get remapped. Because this might change the 'color' of that page,
131 static inline void flush_dcache_page(struct page *page) argument
133 flush_dcache_folio(page_folio(page));
158 extern void copy_to_user_page(struct vm_area_struct*, struct page*,
160 extern void copy_from_user_page(struct vm_area_struct*, struct page*,
167 #define copy_to_user_page(vma, page, vaddr, dst, src, len) \
174 #define copy_from_user_page(vma, page, vaddr, dst, src, len) \
/linux-master/include/net/page_pool/
types.h 43 struct page *cache[PP_ALLOC_CACHE_SIZE];
47 * struct page_pool_params - page pool parameters
76 void (*init_callback)(struct page *page, void *arg);
103 * @cached: recycling placed page in the page pool cache
104 * @cache_full: page pool cache was full
105 * @ring: page placed into the ptr ring
106 * @ring_full: page released from page poo
[all...]
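
types.h above declares the page_pool parameter and statistics structures. A hedged setup sketch, assuming the post-6.6 header split (net/page_pool/helpers.h) and invented values (example_create_pool, a 256-entry ring, NUMA_NO_NODE):

    #include <linux/dma-direction.h>
    #include <linux/err.h>
    #include <linux/gfp.h>
    #include <linux/numa.h>
    #include <net/page_pool/helpers.h>

    /* Hypothetical driver setup: a small pool of order-0 RX pages. */
    static struct page_pool *example_create_pool(struct device *dev)
    {
        struct page_pool_params pp = {
            .order     = 0,
            .pool_size = 256,
            .nid       = NUMA_NO_NODE,
            .dev       = dev,
            .dma_dir   = DMA_FROM_DEVICE,
        };
        struct page_pool *pool = page_pool_create(&pp);

        if (!IS_ERR(pool)) {
            struct page *page = page_pool_alloc_pages(pool, GFP_KERNEL);

            /* Recycle straight back into the pool's cache/ring. */
            if (page)
                page_pool_put_full_page(pool, page, false);
        }
        return pool;    /* tear down later with page_pool_destroy() */
    }
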
/linux-master/tools/perf/
builtin-help.c 125 static void exec_woman_emacs(const char *path, const char *page) argument
133 if (asprintf(&man_page, "(woman \"%s\")", page) > 0) {
141 static void exec_man_konqueror(const char *path, const char *page) argument
164 if (asprintf(&man_page, "man:%s(1)", page) > 0) {
172 static void exec_man_man(const char *path, const char *page) argument
176 execlp(path, "man", page, NULL);
180 static void exec_man_cmd(const char *cmd, const char *page) argument
184 if (asprintf(&shell_cmd, "%s %s", cmd, page) > 0) {
337 * system-wide paths after ours to find the manual page. If
347 static void exec_viewer(const char *name, const char *page) argument
366 const char *page = cmd_to_page(perf_cmd); local
383 const char *page = cmd_to_page(perf_cmd); local
389 get_html_page_path(char **page_path, const char *page) argument
419 const char *page = cmd_to_page(perf_cmd); local
[all...]
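
The builtin-help.c hits build a command string with asprintf() and exec a man-page viewer. A compressed, hedged rendition of the exec_man_cmd() idea in plain userspace C (example_exec_viewer is an invented name, error handling elided):

    #define _GNU_SOURCE
    #include <stdio.h>
    #include <stdlib.h>
    #include <unistd.h>

    /* Hypothetical: run "<cmd> <page>" through the shell, e.g. "man perf-top". */
    static void example_exec_viewer(const char *cmd, const char *page)
    {
        char *shell_cmd;

        if (asprintf(&shell_cmd, "%s %s", cmd, page) > 0) {
            execl("/bin/sh", "sh", "-c", shell_cmd, (char *)NULL);
            free(shell_cmd);    /* only reached if execl() failed */
        }
    }
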
/linux-master/kernel/module/
decompress.c 19 struct page **new_pages;
34 static struct page *module_get_next_page(struct load_info *info)
36 struct page *page; local
45 page = alloc_page(GFP_KERNEL | __GFP_HIGHMEM);
46 if (!page)
49 info->pages[info->used_pages++] = page;
50 return page;
115 struct page *page local
174 struct page *page = module_get_next_page(info); local
258 struct page *page = module_get_next_page(info); local
[all...]
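
module_get_next_page() above allocates highmem pages one at a time and records them in a growing array on the load_info. A simplified, hedged sketch of that allocate-and-record step (a fixed-size array stands in for the reallocated one in the real file; struct example_load_info and EXAMPLE_MAX_PAGES are invented):

    #include <linux/gfp.h>

    #define EXAMPLE_MAX_PAGES 64    /* assumed bound, not from the source */

    struct example_load_info {
        struct page *pages[EXAMPLE_MAX_PAGES];
        unsigned int used_pages;
    };

    static struct page *example_get_next_page(struct example_load_info *info)
    {
        struct page *page;

        if (info->used_pages >= EXAMPLE_MAX_PAGES)
            return NULL;

        page = alloc_page(GFP_KERNEL | __GFP_HIGHMEM);
        if (!page)
            return NULL;

        info->pages[info->used_pages++] = page;
        return page;
    }
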
/linux-master/include/linux/
gfp.h 144 * There is only one page-allocator function, and two main namespaces to
145 * it. The alloc_page*() variants return 'struct page *' and as such
146 * can allocate highmem pages, the *get*page*() variants return
147 * virtual kernel addresses to the allocated page(s).
199 static inline void arch_free_page(struct page *page, int order) { } argument
202 static inline void arch_alloc_page(struct page *page, int order) { } argument
205 struct page *__alloc_pages_noprof(gfp_t gfp, unsigned int order, int preferred_nid,
216 struct page **page_arra
[all...]
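
The gfp.h comment above distinguishes the alloc_page*() family, which returns struct page * and may hand out highmem, from the *get*page*() family, which returns directly usable kernel virtual addresses. A hedged side-by-side sketch (example_two_namespaces is an invented name):

    #include <linux/gfp.h>
    #include <linux/mm.h>

    static void example_two_namespaces(void)
    {
        /* struct page * variant: may be highmem, map it before touching. */
        struct page *page = alloc_pages(GFP_HIGHUSER, 0);

        /* Address variant: always a kernel virtual address, so highmem
         * gfp flags are not allowed here. */
        unsigned long addr = __get_free_pages(GFP_KERNEL, 1);

        if (page)
            __free_pages(page, 0);
        if (addr)
            free_pages(addr, 1);
    }
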
/linux-master/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/
vmm.c 41 const struct nvkm_vmm_page *page)
49 const struct nvkm_vmm_desc *pair = page[-1].desc;
58 pgt->page = page ? page->shift : 0;
73 const struct nvkm_vmm_page *page; member in struct:nvkm_vmm_iter
200 const struct nvkm_vmm_desc *pair = it->page[-1].desc;
299 const struct nvkm_vmm_desc *pair = it->page[-1].desc;
489 pgt = nvkm_vmm_pt_new(desc, NVKM_VMM_PDE_SPARSED(pgt), it->page);
501 nvkm_vmm_iter(struct nvkm_vmm *vmm, const struct nvkm_vmm_page *page, argument
40 nvkm_vmm_pt_new(const struct nvkm_vmm_desc *desc, bool sparse, const struct nvkm_vmm_page *page) argument
602 nvkm_vmm_ptes_sparse_put(struct nvkm_vmm *vmm, const struct nvkm_vmm_page *page, u64 addr, u64 size) argument
612 nvkm_vmm_ptes_sparse_get(struct nvkm_vmm *vmm, const struct nvkm_vmm_page *page, u64 addr, u64 size) argument
632 const struct nvkm_vmm_page *page = vmm->func->page; local
680 nvkm_vmm_ptes_unmap(struct nvkm_vmm *vmm, const struct nvkm_vmm_page *page, u64 addr, u64 size, bool sparse, bool pfn) argument
694 nvkm_vmm_ptes_map(struct nvkm_vmm *vmm, const struct nvkm_vmm_page *page, u64 addr, u64 size, struct nvkm_vmm_map *map, nvkm_vmm_pte_func func) argument
705 nvkm_vmm_ptes_put_locked(struct nvkm_vmm *vmm, const struct nvkm_vmm_page *page, u64 addr, u64 size) argument
713 nvkm_vmm_ptes_put(struct nvkm_vmm *vmm, const struct nvkm_vmm_page *page, u64 addr, u64 size) argument
722 nvkm_vmm_ptes_get(struct nvkm_vmm *vmm, const struct nvkm_vmm_page *page, u64 addr, u64 size) argument
741 __nvkm_vmm_ptes_unmap_put(struct nvkm_vmm *vmm, const struct nvkm_vmm_page *page, u64 addr, u64 size, bool sparse, bool pfn) argument
753 nvkm_vmm_ptes_unmap_put(struct nvkm_vmm *vmm, const struct nvkm_vmm_page *page, u64 addr, u64 size, bool sparse, bool pfn) argument
765 __nvkm_vmm_ptes_get_map(struct nvkm_vmm *vmm, const struct nvkm_vmm_page *page, u64 addr, u64 size, struct nvkm_vmm_map *map, nvkm_vmm_pte_func func) argument
780 nvkm_vmm_ptes_get_map(struct nvkm_vmm *vmm, const struct nvkm_vmm_page *page, u64 addr, u64 size, struct nvkm_vmm_map *map, nvkm_vmm_pte_func func) argument
1049 const struct nvkm_vmm_page *page = vmm->func->page; local
1096 const struct nvkm_vmm_page *page = func->page; local
1215 nvkm_vmm_pfn_split_merge(struct nvkm_vmm *vmm, struct nvkm_vma *vma, u64 addr, u64 size, u8 page, bool map) argument
1276 const struct nvkm_vmm_page *page = vmm->func->page; local
1416 const struct nvkm_vmm_page *page = &vmm->func->page[vma->refd]; local
1613 const struct nvkm_vmm_page *page = vmm->func->page; local
1708 const struct nvkm_vmm_page *page = &vmm->func->page[NVKM_VMA_PAGE_NONE]; local
1843 nvkm_vmm_get(struct nvkm_vmm *vmm, u8 page, u64 size, struct nvkm_vma **pvma) argument
1856 const struct nvkm_vmm_page *page = &vmm->func->page[refd]; local
1864 const struct nvkm_vmm_page *page = vmm->func->page; local
1872 const struct nvkm_vmm_page *page = vmm->func->page; local
1926 const struct nvkm_vmm_page *page = vmm->func->page; local
[all...]
/linux-master/drivers/dma-buf/heaps/
system_heap.c 52 * by reducing TLB pressure and time spent updating page tables.
204 struct page *page = sg_page_iter_page(&piter); local
206 ret = remap_pfn_range(vma, addr, page_to_pfn(page), PAGE_SIZE,
221 struct page **pages = vmalloc(sizeof(struct page *) * npages);
222 struct page **tmp = pages;
293 struct page *page = sg_page(sg); local
295 __free_pages(page, compound_orde
317 struct page *page; local
347 struct page *page, *tmp_page; local
[all...]
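
The system_heap.c hits gather every page of an sg_table into a temporary array and vmap() it to get one contiguous kernel mapping. A hedged sketch of that walk (example_vmap_sgt is an invented name, error paths trimmed):

    #include <linux/scatterlist.h>
    #include <linux/vmalloc.h>

    /* Hypothetical: build a contiguous kernel mapping over an sg_table. */
    static void *example_vmap_sgt(struct sg_table *sgt, unsigned int npages)
    {
        struct page **pages = vmalloc(sizeof(struct page *) * npages);
        struct page **tmp = pages;
        struct sg_page_iter piter;
        void *vaddr;

        if (!pages)
            return NULL;

        for_each_sgtable_page(sgt, &piter, 0)
            *tmp++ = sg_page_iter_page(&piter);

        vaddr = vmap(pages, npages, VM_MAP, PAGE_KERNEL);
        vfree(pages);       /* vmap() no longer needs the temporary array */
        return vaddr;       /* undo later with vunmap(vaddr) */
    }
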
/linux-master/drivers/mtd/devices/
block2mtd.c 50 static struct page *page_read(struct address_space *mapping, pgoff_t index)
59 struct page *page; local
60 pgoff_t index = to >> PAGE_SHIFT; // page index
66 page = page_read(mapping, index);
67 if (IS_ERR(page))
68 return PTR_ERR(page);
70 max = page_address(page) + PAGE_SIZE;
71 for (p=page_address(page); p<max; p++)
73 lock_page(page);
109 struct page *page; local
142 struct page *page; local
[all...]
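
page_read() above is a thin wrapper over the page cache, and the erase path scans each page for non-0xff bytes. A hedged sketch of reading one page of the backing mapping and checking it (example_page_is_erased is an invented name):

    #include <linux/err.h>
    #include <linux/mm.h>
    #include <linux/pagemap.h>

    /* Hypothetical: return true if page 'index' of 'mapping' is all 0xff. */
    static bool example_page_is_erased(struct address_space *mapping, pgoff_t index)
    {
        struct page *page = read_mapping_page(mapping, index, NULL);
        u32 *p, *max;
        bool erased = true;

        if (IS_ERR(page))
            return false;

        max = page_address(page) + PAGE_SIZE;
        for (p = page_address(page); p < max; p++) {
            if (*p != ~0U) {
                erased = false;
                break;
            }
        }

        put_page(page);
        return erased;
    }
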
/linux-master/include/trace/events/
kmem.h 138 TP_PROTO(struct page *page, unsigned int order),
140 TP_ARGS(page, order),
148 __entry->pfn = page_to_pfn(page);
152 TP_printk("page=%p pfn=0x%lx order=%d",
160 TP_PROTO(struct page *page),
162 TP_ARGS(page),
169 __entry->pfn = page_to_pfn(page);
172 TP_printk("page
[all...]
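
kmem.h above uses the standard TP_PROTO/TP_ARGS/TP_STRUCT__entry/TP_printk layout for page events. A minimal hedged skeleton in the same shape (the event name example_page_event is invented, and the surrounding TRACE_SYSTEM/CREATE_TRACE_POINTS boilerplate of a trace header is omitted):

    TRACE_EVENT(example_page_event,

        TP_PROTO(struct page *page, unsigned int order),

        TP_ARGS(page, order),

        TP_STRUCT__entry(
            __field(unsigned long, pfn)
            __field(unsigned int, order)
        ),

        TP_fast_assign(
            __entry->pfn   = page_to_pfn(page);
            __entry->order = order;
        ),

        TP_printk("page=%p pfn=0x%lx order=%u",
                  pfn_to_page(__entry->pfn), __entry->pfn, __entry->order)
    );
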
/linux-master/fs/reiserfs/
ioctl.c 150 int reiserfs_commit_write(struct file *f, struct page *page,
161 struct page *page; local
193 * we unpack by finding the page with the tail, and calling
194 * __reiserfs_write_begin on that page. This will force a
199 page = grab_cache_page(mapping, index);
201 if (!page) {
204 retval = __reiserfs_write_begin(page, write_from, 0);
208 /* conversion can change page content
[all...]
/linux-master/arch/sh/mm/
cache.c 60 void copy_to_user_page(struct vm_area_struct *vma, struct page *page, argument
64 struct folio *folio = page_folio(page);
68 void *vto = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
78 flush_cache_page(vma, vaddr, page_to_pfn(page));
81 void copy_from_user_page(struct vm_area_struct *vma, struct page *page, argument
85 struct folio *folio = page_folio(page);
89 void *vfrom = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
99 void copy_user_highpage(struct page *t
128 clear_user_highpage(struct page *page, unsigned long vaddr) argument
158 __flush_anon_page(struct page *page, unsigned long vmaddr) argument
243 flush_icache_pages(struct vm_area_struct *vma, struct page *page, unsigned int nr) argument
[all...]
/linux-master/kernel/
kexec_core.c 44 #include <asm/page.h>
61 * others it is still a simple predictable page table to setup.
74 * page of memory is necessary, but some architectures require more.
87 * destination page in its final resting place (if it happens
92 * - allocating a page table with the control code buffer identity
104 static struct page *kimage_alloc_page(struct kimage *image,
121 * Since the kernel does everything in page size chunks ensure
122 * the destination addresses are page aligned. Too many
125 * simply because addresses are changed to page size
267 static struct page *kimage_alloc_page
294 kimage_free_pages(struct page *page) argument
310 struct page *page, *next; local
479 struct page *page; local
507 kimage_add_page(struct kimage *image, unsigned long page) argument
540 struct page *page; local
594 kimage_dst_used(struct kimage *image, unsigned long page) argument
635 struct page *page; local
731 struct page *page; local
802 struct page *page; local
[all...]

Completed in 392 milliseconds
