Lines matching defs:inode (Linux mm/shmem.c; each hit is prefixed with its source line number)

94 /* Pretend that one inode + its dentry occupy this much memory */
102 * inode->i_private (with i_rwsem making sure that it has only one user at
103 * a time): we would prefer not to enlarge the shmem inode just for that.
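The comment at lines 102-103 records a space-saving trick: shmem_fallocate() publishes a pointer to an on-stack record through inode->i_private (serialized by i_rwsem, with i_lock guarding the pointer itself, visible at lines 3111-3113 and 3225-3227 below) instead of growing every shmem inode for a rarely-used field. A minimal userspace model of that handoff, assuming a single pthread mutex stands in for the kernel locking; all names here are illustrative, not kernel APIs:

#include <pthread.h>
#include <stdio.h>

struct falloc_range { long start; long next; };

struct fake_inode {
	pthread_mutex_t i_lock;	/* stands in for inode->i_lock */
	void *i_private;	/* NULL unless a fallocate is in flight */
};

static void fallocate_like(struct fake_inode *inode, long start, long end)
{
	struct falloc_range r = { .start = start, .next = start };

	pthread_mutex_lock(&inode->i_lock);
	inode->i_private = &r;		/* publish the on-stack record */
	pthread_mutex_unlock(&inode->i_lock);

	while (r.next < end)		/* "allocate" each page */
		r.next++;

	pthread_mutex_lock(&inode->i_lock);
	inode->i_private = NULL;	/* withdraw before the frame dies */
	pthread_mutex_unlock(&inode->i_lock);
}

int main(void)
{
	struct fake_inode inode = { PTHREAD_MUTEX_INITIALIZER, NULL };

	fallocate_like(&inode, 0, 8);
	printf("i_private after return: %p\n", inode.i_private);
	return 0;
}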
149 static int shmem_swapin_folio(struct inode *inode, pgoff_t index,
210 static int shmem_inode_acct_blocks(struct inode *inode, long pages)
212 struct shmem_inode_info *info = SHMEM_I(inode);
213 struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
225 err = dquot_alloc_block_nodirty(inode, pages);
231 err = dquot_alloc_block_nodirty(inode, pages);
243 static void shmem_inode_unacct_blocks(struct inode *inode, long pages)
245 struct shmem_inode_info *info = SHMEM_I(inode);
246 struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
249 dquot_free_block_nodirty(inode, pages);
320 static struct dquot __rcu **shmem_get_dquots(struct inode *inode)
322 return SHMEM_I(inode)->i_dquot;
327 * shmem_reserve_inode() performs bookkeeping to reserve a shmem inode, and
328 * produces a novel ino for the newly allocated inode.
331 * each dentry. However, in that case, no new inode number is needed since that
332 * internally draws from another pool of inode numbers (currently global
361 pr_warn("%s: inode number overflow on device %d, consider using inode64 mount option\n",
378 * We don't need to worry about inode{32,64} since SB_KERNMOUNT
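Lines 327-332, 361, and 378 together describe shmem's inode-number policy: a fresh ino per reservation, hard links drawing from get_next_ino() instead, a warning when a 32-bit mount overflows, and SB_KERNMOUNT exempt from the inode{32,64} choice. A standalone sketch of such an allocator, assuming (as a guess at the compatibility behaviour, not a statement of the kernel's exact policy) that overflow without inode64 wraps back to 1 after warning:

#include <stdint.h>
#include <stdio.h>

struct sb_info {
	uint64_t next_ino;
	int full_inums;		/* 1 == inode64 mount option */
};

static uint64_t reserve_ino(struct sb_info *sb)
{
	uint64_t ino = sb->next_ino++;

	if (!sb->full_inums && ino > UINT32_MAX) {
		fprintf(stderr,
			"inode number overflow, consider inode64\n");
		sb->next_ino = 1;	/* emulate 32-bit wraparound */
		ino = sb->next_ino++;
	}
	return ino;
}

int main(void)
{
	struct sb_info sb = { .next_ino = (uint64_t)UINT32_MAX + 1 };

	printf("ino=%llu\n", (unsigned long long)reserve_ino(&sb));
	return 0;
}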
413 * shmem_recalc_inode - recalculate the block usage of an inode
414 * @inode: inode to recalc
415 * @alloced: the change in number of pages allocated to inode
416 * @swapped: the change in number of pages swapped from inode
421 * But normally info->alloced == inode->i_mapping->nrpages + info->swapped
422 * So mm freed is info->alloced - (inode->i_mapping->nrpages + info->swapped)
424 static void shmem_recalc_inode(struct inode *inode, long alloced, long swapped)
426 struct shmem_inode_info *info = SHMEM_I(inode);
433 READ_ONCE(inode->i_mapping->nrpages);
449 shmem_inode_unacct_blocks(inode, freed);
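The kernel-doc at lines 413-422 states the accounting identity: normally info->alloced == inode->i_mapping->nrpages + info->swapped, so any positive remainder after applying the deltas is memory freed behind shmem's back, which line 449 then unaccounts. A toy arithmetic check with made-up counts:

#include <stdio.h>

int main(void)
{
	/* hypothetical counters for one inode */
	long alloced = 10, swapped = 2, nrpages = 7;

	long freed = alloced - (nrpages + swapped);	/* 10 - 9 = 1 */
	if (freed > 0)
		printf("unaccount %ld page(s)\n", freed);
	return 0;
}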
452 bool shmem_charge(struct inode *inode, long pages)
454 struct address_space *mapping = inode->i_mapping;
456 if (shmem_inode_acct_blocks(inode, pages))
464 shmem_recalc_inode(inode, pages, 0);
468 void shmem_uncharge(struct inode *inode, long pages)
473 shmem_recalc_inode(inode, 0, 0);
544 bool shmem_is_huge(struct inode *inode, pgoff_t index, bool shmem_huge_force,
549 if (!S_ISREG(inode->i_mode))
558 switch (SHMEM_SB(inode->i_sb)->huge) {
563 i_size = round_up(i_size_read(inode), PAGE_SIZE);
623 struct inode *inode;
636 /* pin the inode */
637 inode = igrab(&info->vfs_inode);
639 /* inode is about to be evicted */
640 if (!inode) {
646 if (round_up(inode->i_size, PAGE_SIZE) ==
647 round_up(inode->i_size, HPAGE_PMD_SIZE)) {
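The condition at lines 646-647 asks whether i_size rounded up to a page already lands on the same boundary as when rounded up to a huge page: if so, splitting the final huge folio would free no pages past EOF, so the shrinker skips this inode. Worked arithmetic, assuming 4 KiB pages and a 2 MiB PMD size:

#include <stdio.h>

#define ROUND_UP(x, a)	(((x) + (a) - 1) / (a) * (a))
#define PAGE_SIZE	4096UL
#define HPAGE_PMD_SIZE	(512 * PAGE_SIZE)	/* 2 MiB, x86-64 assumption */

int main(void)
{
	unsigned long sizes[] = { 5000, (2UL << 20) - 100, 2UL << 20 };

	for (int i = 0; i < 3; i++) {
		unsigned long p = ROUND_UP(sizes[i], PAGE_SIZE);
		unsigned long h = ROUND_UP(sizes[i], HPAGE_PMD_SIZE);

		printf("i_size=%7lu page-rounded=%7lu huge-rounded=%7lu %s\n",
		       sizes[i], p, h,
		       p == h ? "nothing to gain" : "worth splitting");
	}
	return 0;
}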
662 inode = &info->vfs_inode;
664 iput(inode);
672 inode = &info->vfs_inode;
677 index = (inode->i_size & HPAGE_PMD_MASK) >> PAGE_SHIFT;
678 folio = filemap_get_folio(inode->i_mapping, index);
689 * Move the inode on the list back to shrinklist if we failed
704 /* If split failed move the inode on the list back to shrinklist */
714 * Make sure the inode is either on the global list or deleted
716 * in another thread once we put the inode (then the local list
724 iput(inode);
851 * as long as the inode doesn't go away and racy results are not a problem.
884 * as long as the inode doesn't go away and racy results are not a problem.
888 struct inode *inode = file_inode(vma->vm_file);
889 struct shmem_inode_info *info = SHMEM_I(inode);
890 struct address_space *mapping = inode->i_mapping;
904 if (!vma->vm_pgoff && vma->vm_end - vma->vm_start >= inode->i_size)
932 static struct folio *shmem_get_partial_folio(struct inode *inode, pgoff_t index)
940 folio = filemap_get_entry(inode->i_mapping, index);
945 if (folio->mapping == inode->i_mapping)
956 shmem_get_folio(inode, index, &folio, SGP_READ);
964 static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
967 struct address_space *mapping = inode->i_mapping;
968 struct shmem_inode_info *info = SHMEM_I(inode);
1019 folio = shmem_get_partial_folio(inode, lstart >> PAGE_SHIFT);
1034 folio = shmem_get_partial_folio(inode, lend >> PAGE_SHIFT);
1109 shmem_recalc_inode(inode, 0, -nr_swaps_freed);
1112 void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend)
1114 shmem_undo_range(inode, lstart, lend, false);
1115 inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
1116 inode_inc_iversion(inode);
1124 struct inode *inode = path->dentry->d_inode;
1125 struct shmem_inode_info *info = SHMEM_I(inode);
1127 if (info->alloced - info->swapped != inode->i_mapping->nrpages)
1128 shmem_recalc_inode(inode, 0, 0);
1139 generic_fillattr(idmap, request_mask, inode, stat);
1141 if (shmem_is_huge(inode, 0, false, NULL, 0))
1156 struct inode *inode = d_inode(dentry);
1157 struct shmem_inode_info *info = SHMEM_I(inode);
1167 if ((inode->i_mode ^ attr->ia_mode) & 0111) {
1172 if (S_ISREG(inode->i_mode) && (attr->ia_valid & ATTR_SIZE)) {
1173 loff_t oldsize = inode->i_size;
1182 error = shmem_reacct_size(SHMEM_I(inode)->flags,
1186 i_size_write(inode, newsize);
1194 unmap_mapping_range(inode->i_mapping,
1197 shmem_truncate_range(inode,
1201 unmap_mapping_range(inode->i_mapping,
1206 if (is_quota_modification(idmap, inode, attr)) {
1207 error = dquot_initialize(inode);
1213 if (i_uid_needs_update(idmap, attr, inode) ||
1214 i_gid_needs_update(idmap, attr, inode)) {
1215 error = dquot_transfer(idmap, inode, attr);
1220 setattr_copy(idmap, inode, attr);
1222 error = posix_acl_chmod(idmap, dentry, inode->i_mode);
1224 inode_set_ctime_current(inode);
1226 inode_set_mtime_to_ts(inode, inode_get_ctime(inode));
1227 inode_inc_iversion(inode);
1232 static void shmem_evict_inode(struct inode *inode)
1234 struct shmem_inode_info *info = SHMEM_I(inode);
1235 struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
1238 if (shmem_mapping(inode->i_mapping)) {
1239 shmem_unacct_size(info->flags, inode->i_size);
1240 inode->i_size = 0;
1241 mapping_set_exiting(inode->i_mapping);
1242 shmem_truncate_range(inode, 0, (loff_t)-1);
1252 /* Wait while shmem_unuse() is scanning this inode... */
1264 shmem_free_inode(inode->i_sb, freed);
1265 WARN_ON(inode->i_blocks);
1266 clear_inode(inode);
1268 dquot_free_inode(inode);
1269 dquot_drop(inode);
1312 * Move the swapped pages for an inode to page cache. Returns the count
1315 static int shmem_unuse_swap_entries(struct inode *inode,
1321 struct address_space *mapping = inode->i_mapping;
1328 error = shmem_swapin_folio(inode, indices[i], &folio, SGP_CACHE,
1343 * If swap found in inode, free it and move page from swapcache to filecache.
1345 static int shmem_unuse_inode(struct inode *inode, unsigned int type)
1347 struct address_space *mapping = inode->i_mapping;
1361 ret = shmem_unuse_swap_entries(inode, &fbatch, indices);
1391 * Drop the swaplist mutex while searching the inode for swap;
1393 * remove placeholder inode from swaplist, nor let it be freed
1423 struct inode *inode = mapping->host;
1424 struct shmem_inode_info *info = SHMEM_I(inode);
1425 struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
1473 if (inode->i_private) {
1475 spin_lock(&inode->i_lock);
1476 shmem_falloc = inode->i_private;
1484 spin_unlock(&inode->i_lock);
1498 * Add inode to shmem_unuse()'s list of swapped-out inodes,
1501 * the inode from eviction. But don't unlock the mutex until
1503 * prune a !swapped inode from the swaplist under this mutex.
1512 shmem_recalc_inode(inode, 0, 1);
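The comment at lines 1498-1503 describes the swaplist protocol: the inode is added to shmem_unuse()'s list of swapped-out inodes, and the mutex is not dropped until the swapped count is raised (the recalc at line 1512), so a concurrent pruner holding the same mutex never sees a transiently !swapped entry. An interpretive userspace model of that ordering, illustrative only:

#include <pthread.h>
#include <stdio.h>

struct node { long swapped; struct node *next; };

static pthread_mutex_t swaplist_mutex = PTHREAD_MUTEX_INITIALIZER;
static struct node *swaplist;

static void add_and_account(struct node *n)
{
	pthread_mutex_lock(&swaplist_mutex);
	n->next = swaplist;
	swaplist = n;
	n->swapped++;		/* made non-prunable before unlock */
	pthread_mutex_unlock(&swaplist_mutex);
}

static void prune_unswapped(void)
{
	pthread_mutex_lock(&swaplist_mutex);
	for (struct node **p = &swaplist; *p; )
		if (!(*p)->swapped)
			*p = (*p)->next;	/* drop !swapped entries */
		else
			p = &(*p)->next;
	pthread_mutex_unlock(&swaplist_mutex);
}

int main(void)
{
	struct node a = { 0, NULL };

	add_and_account(&a);
	prune_unswapped();
	printf("list %s\n", swaplist == &a ? "kept a" : "pruned a");
	return 0;
}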
1635 struct inode *inode, pgoff_t index,
1638 struct address_space *mapping = inode->i_mapping;
1639 struct shmem_inode_info *info = SHMEM_I(inode);
1693 error = shmem_inode_acct_blocks(inode, pages);
1695 struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
1713 shmem_inode_unacct_blocks(inode, freed);
1714 error = shmem_inode_acct_blocks(inode, pages);
1721 shmem_recalc_inode(inode, pages, 0);
1817 static void shmem_set_folio_swapin_error(struct inode *inode, pgoff_t index,
1820 struct address_space *mapping = inode->i_mapping;
1834 * Don't treat swapin error folio as alloced. Otherwise inode->i_blocks
1835 * won't be 0 when inode is released and thus trigger WARN_ON(i_blocks)
1838 shmem_recalc_inode(inode, -1, -1);
1848 static int shmem_swapin_folio(struct inode *inode, pgoff_t index,
1853 struct address_space *mapping = inode->i_mapping;
1854 struct shmem_inode_info *info = SHMEM_I(inode);
1923 shmem_recalc_inode(inode, 0, -1);
1939 shmem_set_folio_swapin_error(inode, index, folio, swap);
1959 static int shmem_get_folio_gfp(struct inode *inode, pgoff_t index,
1969 if (WARN_ON_ONCE(!shmem_mapping(inode->i_mapping)))
1976 ((loff_t)index << PAGE_SHIFT) >= i_size_read(inode))
1982 folio = filemap_get_entry(inode->i_mapping, index);
1991 error = shmem_swapin_folio(inode, index, &folio,
2004 if (unlikely(folio->mapping != inode->i_mapping)) {
2039 if (shmem_is_huge(inode, index, false, fault_mm,
2046 inode, index, fault_mm, true);
2055 folio = shmem_alloc_and_add_folio(gfp, inode, index, fault_mm, false);
2067 DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE) <
2069 struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
2070 struct shmem_inode_info *info = SHMEM_I(inode);
2112 ((loff_t)index << PAGE_SHIFT) >= i_size_read(inode)) {
2126 shmem_recalc_inode(inode, 0, 0);
2136 * @inode: inode to search
2141 * Looks up the page cache entry at @inode & @index. If a folio is
2157 int shmem_get_folio(struct inode *inode, pgoff_t index, struct folio **foliop,
2160 return shmem_get_folio_gfp(inode, index, foliop, sgp,
2161 mapping_gfp_mask(inode->i_mapping), NULL, NULL);
2193 * and bloating every shmem inode for this unlikely case would be sad.
2195 static vm_fault_t shmem_falloc_wait(struct vm_fault *vmf, struct inode *inode)
2201 spin_lock(&inode->i_lock);
2202 shmem_falloc = inode->i_private;
2215 spin_unlock(&inode->i_lock);
2225 spin_lock(&inode->i_lock);
2228 spin_unlock(&inode->i_lock);
2238 struct inode *inode = file_inode(vmf->vma->vm_file);
2239 gfp_t gfp = mapping_gfp_mask(inode->i_mapping);
2248 if (unlikely(inode->i_private)) {
2249 ret = shmem_falloc_wait(vmf, inode);
2255 err = shmem_get_folio_gfp(inode, vmf->pgoff, &folio, SGP_CACHE,
2358 struct inode *inode = file_inode(vma->vm_file);
2359 return mpol_set_shared_policy(&SHMEM_I(inode)->policy, vma, mpol);
2365 struct inode *inode = file_inode(vma->vm_file);
2369 * Bias interleave by inode number to distribute better across nodes;
2374 *ilx = inode->i_ino;
2376 return mpol_shared_policy_lookup(&SHMEM_I(inode)->policy, index);
2384 /* Bias interleave by inode number to distribute better across nodes */
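The comments at lines 2369 and 2384 explain why *ilx is seeded with inode->i_ino (line 2374): with pure index % nr_nodes interleaving, page 0 of every small file would land on node 0, while adding the inode number staggers each file's starting node. A toy demonstration with made-up numbers:

#include <stdio.h>

int main(void)
{
	const unsigned long nr_nodes = 4;
	unsigned long inos[] = { 100, 101, 102, 103 };	/* hypothetical */

	for (int f = 0; f < 4; f++)
		printf("ino %lu: page 0 on node %lu (unbiased: node 0)\n",
		       inos[f], (inos[f] + 0 /* index */) % nr_nodes);
	return 0;
}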
2401 struct inode *inode = file_inode(file);
2402 struct shmem_inode_info *info = SHMEM_I(inode);
2411 if (!user_shm_lock(inode->i_size, ucounts))
2417 user_shm_unlock(inode->i_size, ucounts);
2429 struct inode *inode = file_inode(file);
2430 struct shmem_inode_info *info = SHMEM_I(inode);
2442 if (inode->i_nlink)
2449 static int shmem_file_open(struct inode *inode, struct file *file)
2452 return generic_file_open(inode, file);
2456 static int shmem_initxattrs(struct inode *, const struct xattr *, void *);
2462 static void shmem_set_inode_flags(struct inode *inode, unsigned int fsflags)
2475 inode_set_flags(inode, i_flags, S_NOATIME | S_APPEND | S_IMMUTABLE);
2478 static void shmem_set_inode_flags(struct inode *inode, unsigned int fsflags)
2484 static struct offset_ctx *shmem_get_offset_ctx(struct inode *inode)
2486 return &SHMEM_I(inode)->dir_offsets;
2489 static struct inode *__shmem_get_inode(struct mnt_idmap *idmap,
2491 struct inode *dir, umode_t mode,
2494 struct inode *inode;
2504 inode = new_inode(sb);
2505 if (!inode) {
2510 inode->i_ino = ino;
2511 inode_init_owner(idmap, inode, dir, mode);
2512 inode->i_blocks = 0;
2513 simple_inode_init_ts(inode);
2514 inode->i_generation = get_random_u32();
2515 info = SHMEM_I(inode);
2516 memset(info, 0, (char *)inode - (char *)info);
2521 info->i_crtime = inode_get_mtime(inode);
2525 shmem_set_inode_flags(inode, info->fsflags);
2529 cache_no_acl(inode);
2531 mapping_set_unevictable(inode->i_mapping);
2532 mapping_set_large_folios(inode->i_mapping);
2536 inode->i_op = &shmem_special_inode_operations;
2537 init_special_inode(inode, mode, dev);
2540 inode->i_mapping->a_ops = &shmem_aops;
2541 inode->i_op = &shmem_inode_operations;
2542 inode->i_fop = &shmem_file_operations;
2547 inc_nlink(inode);
2549 inode->i_size = 2 * BOGO_DIRENT_SIZE;
2550 inode->i_op = &shmem_dir_inode_operations;
2551 inode->i_fop = &simple_offset_dir_operations;
2552 simple_offset_init(shmem_get_offset_ctx(inode));
2563 lockdep_annotate_inode_mutex_key(inode);
2564 return inode;
2568 static struct inode *shmem_get_inode(struct mnt_idmap *idmap,
2569 struct super_block *sb, struct inode *dir,
2573 struct inode *inode;
2575 inode = __shmem_get_inode(idmap, sb, dir, mode, dev, flags);
2576 if (IS_ERR(inode))
2577 return inode;
2579 err = dquot_initialize(inode);
2583 err = dquot_alloc_inode(inode);
2585 dquot_drop(inode);
2588 return inode;
2591 inode->i_flags |= S_NOQUOTA;
2592 iput(inode);
2596 static inline struct inode *shmem_get_inode(struct mnt_idmap *idmap,
2597 struct super_block *sb, struct inode *dir,
2612 struct inode *inode = file_inode(dst_vma->vm_file);
2613 struct shmem_inode_info *info = SHMEM_I(inode);
2614 struct address_space *mapping = inode->i_mapping;
2622 if (shmem_inode_acct_blocks(inode, 1)) {
2690 max_off = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
2706 shmem_recalc_inode(inode, 1, 0);
2715 shmem_inode_unacct_blocks(inode, 1);
2729 struct inode *inode = mapping->host;
2730 struct shmem_inode_info *info = SHMEM_I(inode);
2740 if ((info->seals & F_SEAL_GROW) && pos + len > inode->i_size)
2744 ret = shmem_get_folio(inode, index, &folio, SGP_WRITE);
2765 struct inode *inode = mapping->host;
2767 if (pos + copied > inode->i_size)
2768 i_size_write(inode, pos + copied);
2788 struct inode *inode = file_inode(file);
2789 struct address_space *mapping = inode->i_mapping;
2804 loff_t i_size = i_size_read(inode);
2815 error = shmem_get_folio(inode, index, &folio, SGP_READ);
2837 i_size = i_size_read(inode);
2907 struct inode *inode = file->f_mapping->host;
2910 inode_lock(inode);
2922 inode_unlock(inode);
2975 struct inode *inode = file_inode(in);
2976 struct address_space *mapping = inode->i_mapping;
2988 if (*ppos >= i_size_read(inode))
2991 error = shmem_get_folio(inode, *ppos / PAGE_SIZE, &folio,
3017 isize = i_size_read(inode);
3064 struct inode *inode = mapping->host;
3068 MAX_LFS_FILESIZE, i_size_read(inode));
3072 inode_lock(inode);
3074 offset = mapping_seek_hole_data(mapping, offset, inode->i_size, whence);
3077 inode_unlock(inode);
3084 struct inode *inode = file_inode(file);
3085 struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
3086 struct shmem_inode_info *info = SHMEM_I(inode);
3094 inode_lock(inode);
3111 spin_lock(&inode->i_lock);
3112 inode->i_private = &shmem_falloc;
3113 spin_unlock(&inode->i_lock);
3118 shmem_truncate_range(inode, offset, offset + len - 1);
3121 spin_lock(&inode->i_lock);
3122 inode->i_private = NULL;
3125 spin_unlock(&inode->i_lock);
3131 error = inode_newsize_ok(inode, offset + len);
3135 if ((info->seals & F_SEAL_GROW) && offset + len > inode->i_size) {
3153 spin_lock(&inode->i_lock);
3154 inode->i_private = &shmem_falloc;
3155 spin_unlock(&inode->i_lock);
3178 error = shmem_get_folio(inode, index, &folio,
3184 shmem_undo_range(inode,
3222 if (!(mode & FALLOC_FL_KEEP_SIZE) && offset + len > inode->i_size)
3223 i_size_write(inode, offset + len);
3225 spin_lock(&inode->i_lock);
3226 inode->i_private = NULL;
3227 spin_unlock(&inode->i_lock);
3231 inode_unlock(inode);
3260 * File creation. Allocate an inode, and we're done..
3263 shmem_mknod(struct mnt_idmap *idmap, struct inode *dir,
3266 struct inode *inode;
3269 inode = shmem_get_inode(idmap, dir->i_sb, dir, mode, dev, VM_NORESERVE);
3270 if (IS_ERR(inode))
3271 return PTR_ERR(inode);
3273 error = simple_acl_create(dir, inode);
3276 error = security_inode_init_security(inode, dir, &dentry->d_name,
3288 d_instantiate(dentry, inode);
3293 iput(inode);
3298 shmem_tmpfile(struct mnt_idmap *idmap, struct inode *dir,
3301 struct inode *inode;
3304 inode = shmem_get_inode(idmap, dir->i_sb, dir, mode, 0, VM_NORESERVE);
3305 if (IS_ERR(inode)) {
3306 error = PTR_ERR(inode);
3309 error = security_inode_init_security(inode, dir, NULL,
3313 error = simple_acl_create(dir, inode);
3316 d_tmpfile(file, inode);
3321 iput(inode);
3325 static int shmem_mkdir(struct mnt_idmap *idmap, struct inode *dir,
3337 static int shmem_create(struct mnt_idmap *idmap, struct inode *dir,
3346 static int shmem_link(struct dentry *old_dentry, struct inode *dir,
3349 struct inode *inode = d_inode(old_dentry);
3359 if (inode->i_nlink) {
3360 ret = shmem_reserve_inode(inode->i_sb, NULL);
3367 if (inode->i_nlink)
3368 shmem_free_inode(inode->i_sb, 0);
3374 inode_set_ctime_to_ts(dir, inode_set_ctime_current(inode)));
3376 inc_nlink(inode);
3377 ihold(inode); /* New dentry reference */
3379 d_instantiate(dentry, inode);
3384 static int shmem_unlink(struct inode *dir, struct dentry *dentry)
3386 struct inode *inode = d_inode(dentry);
3388 if (inode->i_nlink > 1 && !S_ISDIR(inode->i_mode))
3389 shmem_free_inode(inode->i_sb, 0);
3395 inode_set_ctime_to_ts(dir, inode_set_ctime_current(inode)));
3397 drop_nlink(inode);
3402 static int shmem_rmdir(struct inode *dir, struct dentry *dentry)
3413 struct inode *old_dir, struct dentry *old_dentry)
3446 struct inode *old_dir, struct dentry *old_dentry,
3447 struct inode *new_dir, struct dentry *new_dentry,
3450 struct inode *inode = d_inode(old_dentry);
3451 int they_are_dirs = S_ISDIR(inode->i_mode);
3494 static int shmem_symlink(struct mnt_idmap *idmap, struct inode *dir,
3499 struct inode *inode;
3506 inode = shmem_get_inode(idmap, dir->i_sb, dir, S_IFLNK | 0777, 0,
3508 if (IS_ERR(inode))
3509 return PTR_ERR(inode);
3511 error = security_inode_init_security(inode, dir, &dentry->d_name,
3520 inode->i_size = len-1;
3522 inode->i_link = kmemdup(symname, len, GFP_KERNEL);
3523 if (!inode->i_link) {
3527 inode->i_op = &shmem_short_symlink_operations;
3529 inode_nohighmem(inode);
3530 inode->i_mapping->a_ops = &shmem_aops;
3531 error = shmem_get_folio(inode, 0, &folio, SGP_WRITE);
3534 inode->i_op = &shmem_symlink_inode_operations;
3544 d_instantiate(dentry, inode);
3551 iput(inode);
3561 static const char *shmem_get_link(struct dentry *dentry, struct inode *inode,
3568 folio = filemap_get_folio(inode->i_mapping, 0);
3577 error = shmem_get_folio(inode, 0, &folio, SGP_READ);
3607 struct inode *inode = d_inode(dentry);
3608 struct shmem_inode_info *info = SHMEM_I(inode);
3618 shmem_set_inode_flags(inode, info->fsflags);
3619 inode_set_ctime_current(inode);
3620 inode_inc_iversion(inode);
3625 * Superblocks without xattr inode operations may get some security.* xattr
3634 static int shmem_initxattrs(struct inode *inode,
3637 struct shmem_inode_info *info = SHMEM_I(inode);
3638 struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
3696 struct dentry *unused, struct inode *inode,
3699 struct shmem_inode_info *info = SHMEM_I(inode);
3707 struct dentry *unused, struct inode *inode,
3711 struct shmem_inode_info *info = SHMEM_I(inode);
3712 struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
3737 inode_set_ctime_current(inode);
3738 inode_inc_iversion(inode);
3803 static int shmem_match(struct inode *ino, void *vfh)
3811 /* Find any alias of inode, but prefer a hashed alias */
3812 static struct dentry *shmem_find_alias(struct inode *inode)
3814 struct dentry *alias = d_find_alias(inode);
3816 return alias ?: d_find_any_alias(inode);
3822 struct inode *inode;
3832 inode = ilookup5(sb, (unsigned long)(inum + fid->raw[0]),
3834 if (inode) {
3835 dentry = shmem_find_alias(inode);
3836 iput(inode);
3842 static int shmem_encode_fh(struct inode *inode, __u32 *fh, int *len,
3843 struct inode *parent)
3850 if (inode_unhashed(inode)) {
3858 if (inode_unhashed(inode))
3859 __insert_inode_hash(inode,
3860 inode->i_ino + inode->i_generation);
3864 fh[0] = inode->i_generation;
3865 fh[1] = inode->i_ino;
3866 fh[2] = ((__u64)inode->i_ino) >> 32;
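Lines 3864-3866 show the exported file-handle layout: fh[0] carries i_generation while fh[1] and fh[2] split the 64-bit i_ino into low and high halves (the inode is also hashed by i_ino + i_generation for later lookup, lines 3858-3860). A standalone pack/unpack sketch of that layout:

#include <inttypes.h>
#include <stdio.h>

int main(void)
{
	uint64_t i_ino = 0x0123456789abcdefULL;	/* hypothetical */
	uint32_t fh[3] = {
		42,				/* i_generation */
		(uint32_t)i_ino,		/* low 32 bits */
		(uint32_t)(i_ino >> 32),	/* high 32 bits */
	};
	uint64_t back = ((uint64_t)fh[2] << 32) | fh[1];

	printf("ino back: 0x%" PRIx64 " gen: %u\n", back, fh[0]);
	return 0;
}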
4081 "User quota inode hardlimit too large.");
4090 "Group quota inode hardlimit too large.");
4264 * Showing inode{64,32} might be useful even if it's the system default,
4284 seq_printf(seq, ",inode%d", (sbinfo->full_inums ? 64 : 32));
4335 struct inode *inode;
4422 inode = shmem_get_inode(&nop_mnt_idmap, sb, NULL,
4424 if (IS_ERR(inode)) {
4425 error = PTR_ERR(inode);
4428 inode->i_uid = sbinfo->uid;
4429 inode->i_gid = sbinfo->gid;
4430 sb->s_root = d_make_root(inode);
4467 static struct inode *shmem_alloc_inode(struct super_block *sb)
4476 static void shmem_free_in_core_inode(struct inode *inode)
4478 if (S_ISLNK(inode->i_mode))
4479 kfree(inode->i_link);
4480 kmem_cache_free(shmem_inode_cachep, SHMEM_I(inode));
4483 static void shmem_destroy_inode(struct inode *inode)
4485 if (S_ISREG(inode->i_mode))
4486 mpol_free_shared_policy(&SHMEM_I(inode)->policy);
4487 if (S_ISDIR(inode->i_mode))
4488 simple_offset_destroy(shmem_get_offset_ctx(inode));
4808 void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend)
4810 truncate_inode_pages_range(inode->i_mapping, lstart, lend);
4820 static inline struct inode *shmem_get_inode(struct mnt_idmap *idmap,
4821 struct super_block *sb, struct inode *dir,
4824 struct inode *inode = ramfs_get_inode(sb, dir, mode, dev);
4825 return inode ? inode : ERR_PTR(-ENOSPC);
4835 struct inode *inode;
4850 inode = shmem_get_inode(&nop_mnt_idmap, mnt->mnt_sb, NULL,
4852 if (IS_ERR(inode)) {
4854 return ERR_CAST(inode);
4856 inode->i_flags |= i_flags;
4857 inode->i_size = size;
4858 clear_nlink(inode); /* It is unlinked */
4859 res = ERR_PTR(ramfs_nommu_expand_for_mapping(inode, size));
4861 res = alloc_file_pseudo(inode, mnt, name, O_RDWR,
4864 iput(inode);
4871 * underlying inode. So users of this interface must do LSM checks at a
4873 * checks are provided at the key or shm level rather than the inode.
4956 struct inode *inode = mapping->host;
4960 error = shmem_get_folio_gfp(inode, index, &folio, SGP_CACHE,