Searched refs:page (Results 126 - 150 of 3148) sorted by last modified time


/linux-master/include/linux/
dma-direct.h
141 struct page *dma_direct_alloc_pages(struct device *dev, size_t size,
144 struct page *page, dma_addr_t dma_addr,
buffer_head.h
45 struct page;
52 * within a page, and of course as the unit of I/O through the
56 * a page (via a page_mapping) and for wrapping bio submission
61 struct buffer_head *b_this_page;/* circular list of page's buffers */
63 struct page *b_page; /* the page this bh is mapped to */
69 char *b_data; /* pointer to data within the page */
78 spinlock_t b_uptodate_lock; /* Used by the first bh in a page, to
80 * buffers in the page */
179 /* If we *know* page
[all...]
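
The buffer_head.h comments above describe each page's buffers as a circular list linked through b_this_page, with b_data pointing at the buffer's data inside the page. A minimal sketch of the standard traversal idiom, assuming a locked page that is known to have buffers attached (the helper name is ours, not the kernel's):

#include <linux/buffer_head.h>

/* Sketch: visit every buffer_head attached to a locked page.
 * b_this_page forms a circular list, so walk until we come
 * back around to the head buffer. */
static void walk_page_buffers_demo(struct page *page)
{
	struct buffer_head *head = page_buffers(page);
	struct buffer_head *bh = head;

	do {
		/* bh->b_data points at this buffer's data in the page */
		bh = bh->b_this_page;
	} while (bh != head);
}
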
bpf.h
2254 unsigned long nr_pages, struct page **page_array);
/linux-master/include/asm-generic/
pgalloc.h
11 * __pte_alloc_one_kernel - allocate memory for a PTE-level kernel page table
15 * anything beyond simple page allocation.
32 * pte_alloc_one_kernel - allocate memory for a PTE-level kernel page table
45 * pte_free_kernel - free PTE-level kernel page table memory
47 * @pte: pointer to the memory containing the page table
55 * __pte_alloc_one - allocate memory for a PTE-level user page table
59 * Allocate memory for a page table and ptdesc and runs pagetable_pte_ctor().
62 * anything beyond simple page allocation or must have custom GFP flags.
64 * Return: `struct page` referencing the ptdesc or %NULL on error
84 * pte_alloc_one - allocate a page fo
[all...]
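
The pgalloc.h kerneldoc above pairs pte_alloc_one_kernel() with pte_free_kernel() and notes that the generic helpers do nothing beyond simple page allocation. A hedged usage sketch of that pairing (the demo function and its error handling are illustrative):

#include <asm/pgalloc.h>

/* Sketch: allocate a PTE-level kernel page table, then free it.
 * Architectures needing custom GFP flags or extra setup override
 * the generic helpers, per the kerneldoc above. */
static int pte_table_demo(struct mm_struct *mm)
{
	pte_t *pte = pte_alloc_one_kernel(mm);

	if (!pte)
		return -ENOMEM;
	/* ... populate entries under the appropriate page table lock ... */
	pte_free_kernel(mm, pte);
	return 0;
}
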
io.h
10 #include <asm/page.h> /* I/O is all done through memory accesses */
/linux-master/fs/smb/client/
inode.c
834 void *page = alloc_dentry_path(); local
838 free_dentry_path(page);
852 path = build_path_from_dentry(dentry, page);
888 free_dentry_path(page);
1751 void *page; local
1774 page = alloc_dentry_path();
1783 full_path = build_path_from_dentry(dentry, page);
1866 free_dentry_path(page);
2040 void *page; local
2055 page
2118 void *page = alloc_dentry_path(); local
2538 void *page; local
2722 struct page *page; local
2843 void *page = alloc_dentry_path(); local
2999 void *page = alloc_dentry_path(); local
[all...]
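
The inode.c hits above repeat one pattern: alloc_dentry_path(), build_path_from_dentry() into that page, then free_dentry_path(). A hedged sketch of the pairing (cifs-internal context: these helpers come from cifsproto.h; error handling is condensed):

#include <linux/err.h>

/* Sketch of the alloc/build/free pattern seen in the cifs hits above.
 * build_path_from_dentry() returns an ERR_PTR on failure. */
static int dentry_path_demo(struct dentry *dentry)
{
	void *page = alloc_dentry_path();
	const char *full_path;
	int rc = 0;

	full_path = build_path_from_dentry(dentry, page);
	if (IS_ERR(full_path)) {
		rc = PTR_ERR(full_path);
		goto out;
	}
	/* ... use full_path ... */
out:
	free_dentry_path(page);
	return rc;
}
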
/linux-master/fs/proc/
task_mmu.c
372 * page is divided by the number of processes sharing it. So if a
380 * A shift of 12 before division means (assuming 4K page size):
443 static void smaps_account(struct mem_size_stats *mss, struct page *page, argument
447 struct folio *folio = page_folio(page);
448 int i, nr = compound ? compound_nr(page) : 1;
453 * of the compound page.
472 * differ page-by-page.
474 * refcount == 1 guarantees the page i
533 struct page *page = NULL; local
580 struct page *page = NULL; local
1086 struct page *page; local
1412 struct page *page = NULL; local
1477 struct page *page = NULL; local
1789 struct page *page; local
1851 struct page *page; local
2557 gather_stats(struct page *page, struct numa_maps *md, int pte_dirty, unsigned long nr_pages) argument
2588 struct page *page; local
2613 struct page *page; local
2646 struct page *page; local
2663 struct page *page = can_gather_numa_stats(ptent, vma, addr); local
2679 struct page *page; local
[all...]
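
The task_mmu.c comment above is the PSS rule: a shared page's size is divided by the number of processes mapping it, with a shift of 12 applied before the division so the fractions are not lost to integer truncation. A standalone demonstration of that fixed-point arithmetic (PSS_SHIFT and the 4K page size come from the comment; the kernel's version lives in smaps_account()):

#include <stdio.h>
#include <stdint.h>

#define PAGE_SIZE 4096ULL
#define PSS_SHIFT 12	/* shift before division, per the comment */

int main(void)
{
	uint64_t pss = 0;

	/* one page shared by 3 processes: each is charged 1/3 page */
	pss += (PAGE_SIZE << PSS_SHIFT) / 3;
	/* one private page: charged in full */
	pss += PAGE_SIZE << PSS_SHIFT;

	/* /proc/<pid>/smaps reports the value shifted back down */
	printf("Pss: %llu bytes\n", (unsigned long long)(pss >> PSS_SHIFT));
	return 0;
}
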
page.c
17 #include <linux/kernel-page-flags.h>
40 /* /proc/kpagecount - an array exposing page counts
43 * physical page count.
50 struct page *ppage;
99 /* /proc/kpageflags - an array exposing page flags
102 * physical page flags.
110 u64 stable_page_flags(const struct page *page) argument
120 * it differentiates a memory hole from a page with no flags
122 if (!page)
253 struct page *page = pfn_to_online_page(pfn); local
[all...]
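
The page.c comments describe /proc/kpagecount and /proc/kpageflags as flat arrays of u64 values indexed by page frame number. A hedged userspace reader (needs root; each entry is 8 bytes, so a PFN's entry sits at file offset pfn * 8):

#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>
#include <unistd.h>
#include <fcntl.h>

/* Read one physical page's map count from /proc/kpagecount. */
int main(int argc, char **argv)
{
	uint64_t pfn = argc > 1 ? strtoull(argv[1], NULL, 0) : 0;
	int fd = open("/proc/kpagecount", O_RDONLY);
	uint64_t count;

	if (fd < 0 || pread(fd, &count, sizeof(count), pfn * 8) != sizeof(count)) {
		perror("kpagecount");
		return 1;
	}
	printf("pfn %llu: mapcount %llu\n",
	       (unsigned long long)pfn, (unsigned long long)count);
	return 0;
}
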
meminfo.c
21 #include <asm/page.h>
/linux-master/fs/ocfs2/cluster/
tcp.c
18 * Handlers for unsolicited messages are registered. Each socket has a page
21 * page. This page is destroyed after the handler call, so it can't be
84 mlog(ML_SOCKET, "[sc %p refs %d sock %p node %u page %p " \
414 struct page *page = NULL; local
417 page = alloc_page(GFP_NOFS);
419 if (sc == NULL || page == NULL)
443 sc->sc_page = page;
446 page
[all...]
/linux-master/fs/nfsd/
vfs.c
977 struct page *page = buf->page; // may be a compound one local
979 struct page *last_page;
981 last_page = page + (offset + sd->len - 1) / PAGE_SIZE;
982 for (page += offset / PAGE_SIZE; page <= last_page; page++) {
984 * Skip page replacement when extending the contents of the
985 * current page
1091 struct page *page; local
[all...]
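
The vfs.c hit shows the index math for walking a byte range's pages: start at offset / PAGE_SIZE and stop at (offset + len - 1) / PAGE_SIZE. A tiny standalone check of that arithmetic (4K pages assumed):

#include <stdio.h>

#define PAGE_SIZE 4096UL

int main(void)
{
	/* 5000 bytes starting 3000 bytes into page 0 spill into page 1 */
	unsigned long offset = 3000, len = 5000;
	unsigned long first = offset / PAGE_SIZE;
	unsigned long last = (offset + len - 1) / PAGE_SIZE;

	printf("pages %lu..%lu\n", first, last);	/* pages 0..1 */
	return 0;
}
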
nfs4xdr.c
138 * buffer might end on a page boundary.
2140 * is 64k). Since there is no kvec- or page-based interface to xattrs,
2151 struct page **pages = xdr->pages;
4374 * page length; reset it so as not to confuse
5410 * We're done, with a length that wasn't page
/linux-master/fs/hugetlbfs/
inode.c
87 * Mask used when checking the page offset value passed in via system
121 * page based offset in vm_pgoff could be sufficiently large to
131 /* must be huge page aligned */
271 * Someone wants to read @bytes from a HWPOISON hugetlb @page from @offset.
277 static size_t adjust_range_hwpoison(struct page *page, size_t offset, size_t bytes) argument
283 page = nth_page(page, offset / PAGE_SIZE);
286 if (is_raw_hwpoison_page_in_hugepage(page))
297 page
395 hugetlbfs_write_end(struct file *file, struct address_space *mapping, loff_t pos, unsigned len, unsigned copied, struct page *page, void *fsdata) argument
416 hugetlb_vma_maps_page(struct vm_area_struct *vma, unsigned long addr, struct page *page) argument
474 struct page *page = &folio->page; local
[all...]
/linux-master/fs/f2fs/
data.c
51 bool f2fs_is_cp_guaranteed(struct page *page) argument
53 struct address_space *mapping = page->mapping;
69 page_private_gcing(page))
74 static enum count_type __read_io_type(struct page *page) argument
76 struct address_space *mapping = page_file_mapping(page);
133 * things for each compressed page here: call f2fs_end_read_compressed_page()
136 * release the bio's reference to the decompress_io_ctx of the page's cluster.
145 struct page *pag local
194 struct page *page = bv->bv_page; local
248 struct page *page = bv->bv_page; local
338 struct page *page = bvec->bv_page; local
544 __has_merged_page(struct bio *bio, struct inode *inode, struct page *page, nid_t ino) argument
637 __submit_merged_write_cond(struct f2fs_sb_info *sbi, struct inode *inode, struct page *page, nid_t ino, enum page_type type, bool force) argument
667 f2fs_submit_merged_write_cond(struct f2fs_sb_info *sbi, struct inode *inode, struct page *page, nid_t ino, enum page_type type) argument
688 struct page *page = fio->encrypted_page ? local
752 add_bio_entry(struct f2fs_sb_info *sbi, struct bio *bio, struct page *page, enum temp_type temp) argument
776 add_ipu_page(struct f2fs_io_info *fio, struct bio **bio, struct page *page) argument
824 f2fs_submit_merged_ipu_write(struct f2fs_sb_info *sbi, struct bio **bio, struct page *page) argument
885 struct page *page = fio->encrypted_page ? local
1083 f2fs_submit_page_read(struct inode *inode, struct page *page, block_t blkaddr, blk_opf_t op_flags, bool for_write) argument
1206 struct page *page; local
1281 struct page *page; local
1312 struct page *page; local
1340 struct page *page; local
1801 struct page *page; local
2045 f2fs_read_single_page(struct inode *inode, struct page *page, unsigned nr_pages, struct f2fs_map_blocks *map, struct bio **bio_ret, sector_t *last_block_in_bio, bool is_readahead) argument
2177 struct page *page = cc->rpages[i]; local
2250 struct page *page = dic->cpages[i]; local
2326 f2fs_mpage_readpages(struct inode *inode, struct readahead_control *rac, struct page *page) argument
2439 struct page *page = &folio->page; local
2477 struct page *mpage, *page; local
2616 struct page *page = fio->page; local
2732 f2fs_write_single_data_page(struct page *page, int *submitted, struct bio **bio, sector_t *last_block, struct writeback_control *wbc, enum iostat_type io_type, int compr_blocks, bool allow_balance) argument
2905 f2fs_write_data_page(struct page *page, struct writeback_control *wbc) argument
3039 struct page *page = pages[i]; local
3340 prepare_write_begin(struct f2fs_sb_info *sbi, struct page *page, loff_t pos, unsigned len, block_t *blk_addr, bool *node_changed) argument
3480 prepare_atomic_write_begin(struct f2fs_sb_info *sbi, struct page *page, loff_t pos, unsigned int len, block_t *blk_addr, bool *node_changed, bool *use_cow) argument
3528 struct page *page = NULL; local
3654 f2fs_write_end(struct file *file, struct address_space *mapping, loff_t pos, unsigned len, unsigned copied, struct page *page, void *fsdata) argument
3858 struct page *page; local
4083 f2fs_clear_page_cache_dirty_tag(struct page *page) argument
[all...]
/linux-master/fs/ext4/
page-io.c
3 * linux/fs/ext4/page-io.c
143 fscrypt_free_bounce_page(&io_folio->page);
322 /* BIO completion function for page writeback */
424 wbc_account_cgroup_owner(io->io_wbc, &folio->page, bh->b_size);
448 * in multiples of the page size. For a file that is not a multiple of
449 * the page size, the remaining memory is zeroed when mapped, and
505 * into a bounce page. For simplicity, just encrypt until the last
513 struct page *bounce_page;
516 * Since bounce page allocation uses a mempool, we can only use
518 * first page o
[all...]
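
The page-io.c comment explains that encrypted writeback copies data into bounce pages drawn from a mempool, which bounds memory use while still guaranteeing forward progress. A hedged sketch of that pattern using the generic mempool page-pool helpers (pool size and names here are illustrative, not ext4's):

#include <linux/init.h>
#include <linux/mempool.h>
#include <linux/gfp.h>

static mempool_t *bounce_pool;	/* illustrative, not ext4's actual pool */

static int __init bounce_pool_init(void)
{
	/* pre-allocate 32 order-0 pages so allocation can always
	 * make progress, even under memory pressure */
	bounce_pool = mempool_create_page_pool(32, 0);
	return bounce_pool ? 0 : -ENOMEM;
}

static struct page *grab_bounce_page(void)
{
	return mempool_alloc(bounce_pool, GFP_NOFS);
}

static void release_bounce_page(struct page *page)
{
	mempool_free(page, bounce_pool);
}
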
move_extent.c
169 /* Force page buffers uptodate w/o dropping page's lock */
232 * move_extent_per_page - Move extent data per page
236 * @orig_page_offset: page index on original file
237 * @donor_page_offset: page index on donor file
313 * hold page's lock, if it is still the case data copy is not
392 block_commit_write(&folio[0]->page, from, from + replaced_size);
inode.c
979 * dirty so that writeback code knows about this page (and inode) contains
1002 * the bit before releasing a page lock and thus writeback cannot
1119 struct page **pagep, void **fsdata)
1164 * The same as page allocation, we prealloc buffer heads before
1198 ret = __block_write_begin(&folio->page, pos, len,
1201 ret = __block_write_begin(&folio->page, pos, len, ext4_get_block);
1244 *pagep = &folio->page;
1272 struct page *page, void *fsdata)
1274 struct folio *folio = page_folio(page);
1269 ext4_write_end(struct file *file, struct address_space *mapping, loff_t pos, unsigned len, unsigned copied, struct page *page, void *fsdata) argument
1373 ext4_journalled_write_end(struct file *file, struct address_space *mapping, loff_t pos, unsigned len, unsigned copied, struct page *page, void *fsdata) argument
3001 ext4_da_write_end(struct file *file, struct address_space *mapping, loff_t pos, unsigned len, unsigned copied, struct page *page, void *fsdata) argument
3836 struct page *page; local
[all...]
ext4.h
3557 struct page **pagep);
3563 struct page **pagep,
3743 /* page-io.c */
3754 int ext4_bio_write_folio(struct ext4_io_submit *io, struct folio *page,
/linux-master/fs/
exec.c
14 * "current->executable", and page faults do the actual loading. Clean.
199 static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
202 struct page *page; local
229 &page, NULL);
237 return page;
240 static void put_arg_page(struct page *page) argument
242 put_page(page);
250 struct page *pag
249 flush_arg_page(struct linux_binprm *bprm, unsigned long pos, struct page *page) argument
323 struct page *page; local
336 put_arg_page(struct page *page) argument
356 flush_arg_page(struct linux_binprm *bprm, unsigned long pos, struct page *page) argument
593 struct page *page; local
649 struct page *page; local
1744 struct page *page; local
[all...]
dax.c
33 /* We choose 4096 entries - same as per-zone page wait tables */
37 /* The 'colour' (ie low bits) within a PMD of a page offset. */
56 * and two more to tell us if the entry is a zero page or an empty entry that
121 * DAX page cache entry locking
200 * Look up entry in page cache, wait for it to become unlocked if it
323 static inline bool dax_page_is_shared(struct page *page) argument
325 return page->mapping == PAGE_MAPPING_DAX_SHARED;
329 * Set the page->mapping with PAGE_MAPPING_DAX_SHARED flag, increase the
332 static inline void dax_page_share_get(struct page *pag argument
346 dax_page_share_put(struct page *page) argument
367 struct page *page = pfn_to_page(pfn); local
388 struct page *page = pfn_to_page(pfn); local
407 struct page *page = pfn_to_page(pfn); local
490 dax_lock_mapping_entry(struct address_space *mapping, pgoff_t index, struct page **page) argument
682 struct page *page = NULL; local
[all...]
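
The dax.c comment defines a page offset's "colour" as its low bits within a PMD: with 2 MiB PMDs and 4 KiB pages, the low 9 bits of the index. A standalone check of that arithmetic (the mask mirrors dax.c's PG_PMD_COLOUR idea; the sizes assume x86-64 defaults):

#include <stdio.h>

#define PAGE_SHIFT 12					/* 4 KiB pages */
#define PMD_SIZE   (2UL << 20)				/* 2 MiB PMDs */
#define PMD_COLOUR ((PMD_SIZE >> PAGE_SHIFT) - 1)	/* 0x1ff */

int main(void)
{
	unsigned long index = 0x12345;

	/* position within the 512-page PMD, and the PMD-aligned base */
	printf("colour: 0x%lx\n", index & PMD_COLOUR);
	printf("pmd-aligned index: 0x%lx\n", index & ~PMD_COLOUR);
	return 0;
}
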
buffer.c
170 buffer_io_error(bh, ", lost sync page write");
181 * But it's the page lock which protects the buffers. To get around this,
186 * may be quite high. This code could TryLock the page, and if that
223 /* we might be here because some of the buffers on this page are
260 buffer_io_error(bh, ", async page read");
267 * decide that the page is now completely done.
391 buffer_io_error(bh, ", lost async page write");
420 * If a page's buffers are under async readin (end_buffer_async_read
425 * the page. So the absence of BH_Async_Read tells end_buffer_async_read()
428 * The page come
963 alloc_page_buffers(struct page *page, unsigned long size, bool retry) argument
2174 __block_write_begin(struct page *page, loff_t pos, unsigned len, get_block_t *get_block) argument
2229 struct page *page; local
2248 block_write_end(struct file *file, struct address_space *mapping, loff_t pos, unsigned len, unsigned copied, struct page *page, void *fsdata) argument
2282 generic_write_end(struct file *file, struct address_space *mapping, loff_t pos, unsigned len, unsigned copied, struct page *page, void *fsdata) argument
2473 struct page *page; local
2499 struct page *page; local
2594 block_commit_write(struct page *page, unsigned from, unsigned to) argument
[all...]
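
The buffer.c comments describe async read completion: each finishing buffer clears its async-read flag under the first buffer's b_uptodate_lock, and only the buffer that finds no async reads still outstanding may mark the whole page up to date. A simplified hedged sketch of that walk (the real logic, including the per-buffer uptodate check, is end_buffer_async_read()):

#include <linux/buffer_head.h>

/* Sketch: returns true only for the last buffer to complete, which
 * is then responsible for marking the page up to date. */
static bool read_completion_demo(struct buffer_head *bh)
{
	struct buffer_head *first = page_buffers(bh->b_page);
	struct buffer_head *tmp = bh;
	unsigned long flags;
	bool last = true;

	spin_lock_irqsave(&first->b_uptodate_lock, flags);
	clear_buffer_async_read(bh);
	do {
		if (buffer_async_read(tmp)) {	/* someone still reading */
			last = false;
			break;
		}
		tmp = tmp->b_this_page;
	} while (tmp != bh);
	spin_unlock_irqrestore(&first->b_uptodate_lock, flags);
	return last;
}
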
/linux-master/drivers/xen/
swiotlb-xen.c
18 * translate the page frame numbers (PFN) to machine frame numbers (MFN)
34 #include <xen/page.h>
142 /* Align the allocation to the Xen page size */
194 static dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page, argument
199 phys_addr_t map, phys = page_to_phys(page) + offset;
pvcalls-back.c
325 void *page; local
338 ret = xenbus_map_ring_valloc(fedata->dev, &ref, 1, &page);
341 map->ring = page;
351 (1 << map->ring_order), &page);
354 map->bytes = page;
1061 err = xenbus_printf(xbt, dev->nodename, "max-page-order", "%u",
1064 pr_warn("%s write out 'max-page-order' failed\n", __func__);
grant-dma-ops.c
145 static struct page *xen_grant_dma_alloc_pages(struct device *dev, size_t size,
160 struct page *vaddr, dma_addr_t dma_handle,
166 static dma_addr_t xen_grant_dma_map_page(struct device *dev, struct page *page, argument
193 pfn_to_gfn(page_to_xen_pfn(page) + i + pfn_offset),
/linux-master/drivers/virtio/
virtio_mem.c
187 * GiB of memory in one 4 KiB page.
199 * in one 4 KiB page.
274 static void virtio_mem_online_page_cb(struct page *page, unsigned int order);
1124 struct page *page = pfn_to_page(pfn); local
1126 __SetPageOffline(page);
1128 SetPageDirty(page);
1130 ClearPageReserved(page);
1144 struct page *pag local
1170 struct page *page = pfn_to_page(pfn + i); local
1240 struct page *page; local
1275 virtio_mem_online_page(struct virtio_mem *vm, struct page *page, unsigned int order) argument
1334 virtio_mem_online_page_cb(struct page *page, unsigned int order) argument
2141 struct page *page; local
2224 struct page *page; local
[all...]

Completed in 328 milliseconds
