Lines Matching defs:pages

109 	pgoff_t nr_falloced;	/* how many new pages have been fallocated */
191 * pages are allocated, in order to allow large sparse files.
195 static inline int shmem_acct_blocks(unsigned long flags, long pages)
201 pages * VM_ACCT(PAGE_SIZE));
204 static inline void shmem_unacct_blocks(unsigned long flags, long pages)
207 vm_unacct_memory(pages * VM_ACCT(PAGE_SIZE));
210 static int shmem_inode_acct_blocks(struct inode *inode, long pages)
216 if (shmem_acct_blocks(info->flags, pages))
222 sbinfo->max_blocks, pages))
225 err = dquot_alloc_block_nodirty(inode, pages);
227 percpu_counter_sub(&sbinfo->used_blocks, pages);
231 err = dquot_alloc_block_nodirty(inode, pages);
239 shmem_unacct_blocks(info->flags, pages);
243 static void shmem_inode_unacct_blocks(struct inode *inode, long pages)
249 dquot_free_block_nodirty(inode, pages);
252 percpu_counter_sub(&sbinfo->used_blocks, pages);
253 shmem_unacct_blocks(info->flags, pages);
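
The accounting helpers at lines 195-253 above charge a new shmem allocation in layers: the VM commit charge (shmem_acct_blocks), the superblock-wide used_blocks counter when the mount was given max_blocks, and then the quota (dquot_alloc_block_nodirty), undoing the earlier layers in reverse order when a later one fails. The sketch below is a self-contained userspace model of that charge/unwind ordering only; the charge_*/uncharge_* helpers and their limits are made up for illustration and are not kernel APIs.

#include <stdio.h>
#include <stdbool.h>

/* Made-up stand-ins for the three layers seen above: shmem_acct_blocks(),
 * the sbinfo->used_blocks counter, and dquot_alloc_block_nodirty().
 * Each may refuse the charge. */
static bool charge_vm_commit(long pages)   { return pages <= 1024; }
static bool charge_used_blocks(long pages) { return pages <= 512; }
static bool charge_quota(long pages)       { return pages <= 256; }

static void uncharge_used_blocks(long pages) { (void)pages; }
static void uncharge_vm_commit(long pages)   { (void)pages; }

/* Mirrors the ordering implied by shmem_inode_acct_blocks(): charge each
 * layer in turn, and on failure undo the layers that already succeeded,
 * in reverse order. */
static int toy_inode_acct_blocks(long pages)
{
	if (!charge_vm_commit(pages))
		return -1;
	if (!charge_used_blocks(pages))
		goto unacct_commit;
	if (!charge_quota(pages))
		goto unacct_used;
	return 0;

unacct_used:
	uncharge_used_blocks(pages);
unacct_commit:
	uncharge_vm_commit(pages);
	return -1;
}

int main(void)
{
	printf("charge 100: %s\n", toy_inode_acct_blocks(100) ? "failed" : "ok");
	printf("charge 600: %s\n", toy_inode_acct_blocks(600) ? "failed" : "ok");
	return 0;
}

In the real code the used_blocks step only applies when the mount sets max_blocks, and a failed quota charge is what triggers the percpu_counter_sub() seen at line 227 before shmem_unacct_blocks() at line 239 drops the commit charge.
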
415 * @alloced: the change in number of pages allocated to inode
416 * @swapped: the change in number of pages swapped from inode
419 * undirtied hole pages behind our back.
452 bool shmem_charge(struct inode *inode, long pages)
456 if (shmem_inode_acct_blocks(inode, pages))
461 mapping->nrpages += pages;
464 shmem_recalc_inode(inode, pages, 0);
468 void shmem_uncharge(struct inode *inode, long pages)
470 /* pages argument is currently unused: keep it to help debugging */
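
Lines 415-470 describe the bookkeeping that shmem_recalc_inode(), shmem_charge() and shmem_uncharge() maintain: the inode's alloced and swapped counts against the mapping's nrpages, with anything alloced beyond what is resident or swapped treated as hole pages freed behind shmem's back. Below is a toy userspace model of that reconciliation, with simplified counters and a hypothetical toy_recalc() helper; it only shows the arithmetic, not the locking or the real structures.

#include <stdio.h>

/* Simplified per-inode counters, modelled on the ones that
 * shmem_recalc_inode() reconciles. */
struct toy_inode {
	long alloced;	/* pages accounted to the inode */
	long swapped;	/* pages currently written out to swap */
	long nrpages;	/* pages currently present in the page cache */
};

/* Pages still in the cache or out on swap are in use; any excess of
 * alloced over that was freed behind our back (undirtied hole pages)
 * and its block reservation can be dropped. */
static long toy_recalc(struct toy_inode *ino)
{
	long freed = ino->alloced - ino->swapped - ino->nrpages;

	if (freed > 0)
		ino->alloced -= freed;
	return freed > 0 ? freed : 0;
}

int main(void)
{
	struct toy_inode ino = { .alloced = 10, .swapped = 2, .nrpages = 6 };
	long freed = toy_recalc(&ino);	/* 10 accounted, 2 + 6 in use */

	printf("freed %ld blocks, alloced now %ld\n", freed, ino.alloced);
	return 0;
}

shmem_charge() at lines 452-464 is the inverse movement: reserve blocks first, then bump mapping->nrpages and pass the delta to shmem_recalc_inode(inode, pages, 0).
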
511 * disables huge pages for the mount;
513 * enables huge pages for the mount;
515 * only allocate huge pages if the page will be fully within i_size,
518 * only allocate huge pages if requested with fadvise()/madvise();
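
The mount options documented at lines 511-518 only set a policy: with huge=advise a mapping gets huge pages only after asking via fadvise()/madvise(), and huge=within_size also respects those hints. The program below is a small userspace illustration of making that request on a shmem object: it creates a memfd, maps it, and calls madvise(MADV_HUGEPAGE). Whether PMD-sized pages are actually used still depends on the kernel's shmem huge settings (the tmpfs mount option, or typically /sys/kernel/mm/transparent_hugepage/shmem_enabled for memfds); the sizes and names here are just for the demo.

#define _GNU_SOURCE
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

#define LEN (4UL << 20)		/* 4 MiB: room for at least one PMD-sized page */

int main(void)
{
	int fd = memfd_create("thp-demo", 0);
	if (fd < 0) {
		perror("memfd_create");
		return 1;
	}
	if (ftruncate(fd, LEN) < 0) {
		perror("ftruncate");
		return 1;
	}

	void *p = mmap(NULL, LEN, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (p == MAP_FAILED) {
		perror("mmap");
		return 1;
	}

	/* Ask for huge pages on this mapping; with the shmem huge policy
	 * set to "advise" this is what makes shmem try a PMD-sized folio. */
	if (madvise(p, LEN, MADV_HUGEPAGE) < 0)
		perror("madvise(MADV_HUGEPAGE)");

	memset(p, 0, LEN);	/* fault the pages in */
	printf("mapped %lu bytes at %p\n", LEN, p);

	munmap(p, LEN);
	close(fd);
	return 0;
}
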
847 * Determine (in bytes) how many of the shmem object's pages mapped by the
880 * Determine (in bytes) how many of the shmem object's pages mapped by the
913 * SysV IPC SHM_UNLOCK restore Unevictable pages to their evictable lists.
961 * Remove range of pages and swap entries from page cache, and free them.
1090 * that we pick up the new sub pages.
1199 /* unmap again to remove racily COWed private pages */
1312 * Move the swapped pages for an inode to page cache. Returns the count
1313 * of pages swapped in, or the error in case of failure.
1447 * "force", drivers/gpu/drm/i915/gem/i915_gem_shmem.c gets huge pages,
1641 long pages;
1648 pages = HPAGE_PMD_NR;
1667 pages = 1;
1680 index + pages - 1, XA_PRESENT)) {
1693 error = shmem_inode_acct_blocks(inode, pages);
1703 * And do a shmem_recalc_inode() to account for freed pages:
1707 freed = pages + info->alloced - info->swapped -
1714 error = shmem_inode_acct_blocks(inode, pages);
1721 shmem_recalc_inode(inode, pages, 0);
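
Lines 1641-1721 are from the folio allocation path: a PMD-sized attempt charges HPAGE_PMD_NR blocks while an order-0 attempt charges one, a huge attempt is abandoned if any entry already occupies the aligned slot index .. index + pages - 1 (the XA_PRESENT probe above), and a failed block charge is retried once after reconciling pages freed behind shmem's back. The snippet below only illustrates the aligned-slot geometry of that conflict check, assuming the common x86-64 values (4 KiB base pages, 2 MiB PMD, so 512 indices per slot); the constant name TOY_HPAGE_PMD_NR is made up.

#include <stdio.h>

/* Assumed geometry: 4 KiB base pages and a 2 MiB PMD, so a PMD-sized
 * folio spans 512 page-cache indices. */
#define TOY_HPAGE_PMD_NR 512UL

int main(void)
{
	unsigned long index = 1234;	/* index of the faulting page */
	unsigned long pages = TOY_HPAGE_PMD_NR;
	unsigned long start = index & ~(pages - 1);	/* aligned slot start */
	unsigned long end = start + pages - 1;

	/* A huge attempt must find the whole slot [start, end] free of
	 * existing entries; otherwise shmem falls back to a single page
	 * and charges only one block. */
	printf("index %lu -> huge slot [%lu, %lu], charge %lu blocks\n",
	       index, start, end, pages);
	printf("fallback: charge 1 block for index %lu\n", index);
	return 0;
}
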
1740 * NUMA mempolicy, and applied also to anonymous pages in do_swap_page();
2182 * faulting pages into the hole while it's being punched. Although
3010 * i_size must be checked after we know the pages are Uptodate.
3119 /* No need to unmap again: hole-punching leaves COWed pages */
3158 * info->fallocend is only relevant when huge pages might be
3160 * pages when FALLOC_FL_KEEP_SIZE committed beyond i_size.
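
Lines 2182-3160 touch the hole-punch and fallocate paths: faults racing with a punch, and FALLOC_FL_KEEP_SIZE preallocation that may extend beyond i_size, which is what info->fallocend tracks. The program below is a plain userspace demonstration of both operations against a tmpfs file; the path /dev/shm/fallocate-demo is only an example location that is usually tmpfs.

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <sys/stat.h>
#include <unistd.h>
#include <linux/falloc.h>

int main(void)
{
	/* Example path only: /dev/shm is usually a tmpfs mount. */
	const char *path = "/dev/shm/fallocate-demo";
	int fd = open(path, O_RDWR | O_CREAT | O_TRUNC, 0600);
	struct stat st;

	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* Preallocate 1 MiB beyond i_size without changing the size:
	 * the case info->fallocend has to remember. */
	if (fallocate(fd, FALLOC_FL_KEEP_SIZE, 0, 1 << 20) < 0)
		perror("fallocate(KEEP_SIZE)");

	/* Give the file a visible size, then punch a hole in the middle. */
	if (ftruncate(fd, 1 << 20) < 0)
		perror("ftruncate");
	if (fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
		      256 << 10, 256 << 10) < 0)
		perror("fallocate(PUNCH_HOLE)");

	if (fstat(fd, &st) == 0)
		printf("size %lld, blocks %lld\n",
		       (long long)st.st_size, (long long)st.st_blocks);

	close(fd);
	unlink(path);
	return 0;
}
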
4946 * suit tmpfs, since it may have pages in swapcache, and needs to find those