Lines matching refs:sb — Linux kernel, fs/super.c (the leading number on each line is the source line number)

42 static int thaw_super_locked(struct super_block *sb, enum freeze_holder who);
53 static inline void __super_lock(struct super_block *sb, bool excl)
56 down_write(&sb->s_umount);
58 down_read(&sb->s_umount);
61 static inline void super_unlock(struct super_block *sb, bool excl)
64 up_write(&sb->s_umount);
66 up_read(&sb->s_umount);
69 static inline void __super_lock_excl(struct super_block *sb)
71 __super_lock(sb, true);
74 static inline void super_unlock_excl(struct super_block *sb)
76 super_unlock(sb, true);
79 static inline void super_unlock_shared(struct super_block *sb)
81 super_unlock(sb, false);
84 static bool super_flags(const struct super_block *sb, unsigned int flags)
90 return smp_load_acquire(&sb->s_flags) & flags;
95 * @sb: superblock to wait for
103 * The caller must have acquired a temporary reference on @sb->s_count.
109 static __must_check bool super_lock(struct super_block *sb, bool excl)
111 lockdep_assert_not_held(&sb->s_umount);
114 wait_var_event(&sb->s_flags, super_flags(sb, SB_BORN | SB_DYING));
117 if (super_flags(sb, SB_DYING))
120 __super_lock(sb, excl);
124 * @sb->s_root is NULL and @sb->s_active is 0. No one needs to
127 if (sb->s_flags & SB_DYING) {
128 super_unlock(sb, excl);
132 WARN_ON_ONCE(!(sb->s_flags & SB_BORN));
136 /* wait and try to acquire read-side of @sb->s_umount */
137 static inline bool super_lock_shared(struct super_block *sb)
139 return super_lock(sb, false);
142 /* wait and try to acquire write-side of @sb->s_umount */
143 static inline bool super_lock_excl(struct super_block *sb)
145 return super_lock(sb, true);
150 static void super_wake(struct super_block *sb, unsigned int flag)
160 smp_store_release(&sb->s_flags, sb->s_flags | flag);
167 wake_up_var(&sb->s_flags);
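The fragments above pair smp_store_release() in super_wake() with smp_load_acquire() in super_flags(), so a waiter that observes SB_BORN or SB_DYING also observes everything published before the flag was set, and super_lock() rechecks SB_DYING after taking s_umount because the state can change while it sleeps. A minimal userspace sketch of that handshake, with hypothetical names (the kernel uses wake_up_var()/wait_var_event() rather than a condvar):

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>

#define SB_BORN  0x1u
#define SB_DYING 0x2u

static _Atomic unsigned int flags;
static pthread_rwlock_t umount = PTHREAD_RWLOCK_INITIALIZER;
static pthread_mutex_t flag_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t flag_cv = PTHREAD_COND_INITIALIZER;

/* waker side: release-publish the flag, then wake all waiters */
static void super_wake_analogue(unsigned int flag)
{
        pthread_mutex_lock(&flag_lock);
        atomic_fetch_or_explicit(&flags, flag, memory_order_release);
        pthread_cond_broadcast(&flag_cv);
        pthread_mutex_unlock(&flag_lock);
}

/* waiter side: sleep until the superblock is born or dying */
static void wait_born_or_dying(void)
{
        pthread_mutex_lock(&flag_lock);
        while (!(atomic_load_explicit(&flags, memory_order_acquire) &
                 (SB_BORN | SB_DYING)))
                pthread_cond_wait(&flag_cv, &flag_lock);
        pthread_mutex_unlock(&flag_lock);
}

/* super_lock() analogue: wait, pre-check, lock, then recheck under lock */
static bool super_lock_analogue(bool excl)
{
        wait_born_or_dying();

        if (atomic_load_explicit(&flags, memory_order_acquire) & SB_DYING)
                return false;           /* dying: don't bother locking */

        if (excl)
                pthread_rwlock_wrlock(&umount);
        else
                pthread_rwlock_rdlock(&umount);

        /* SB_DYING may have been set while we slept on the rwlock */
        if (atomic_load(&flags) & SB_DYING) {
                pthread_rwlock_unlock(&umount);
                return false;
        }
        return true;
}

The recheck under the lock is the point: the lock-free pre-check is only an optimization, and correctness comes from rereading the flag once s_umount is held.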
171 * One thing we have to be careful of with a per-sb shrinker is that we don't
180 struct super_block *sb;
187 sb = shrink->private_data;
196 if (!super_trylock_shared(sb))
199 if (sb->s_op->nr_cached_objects)
200 fs_objects = sb->s_op->nr_cached_objects(sb, sc);
202 inodes = list_lru_shrink_count(&sb->s_inode_lru, sc);
203 dentries = list_lru_shrink_count(&sb->s_dentry_lru, sc);
221 freed = prune_dcache_sb(sb, sc);
223 freed += prune_icache_sb(sb, sc);
227 freed += sb->s_op->free_cached_objects(sb, sc);
230 super_unlock_shared(sb);
237 struct super_block *sb;
240 sb = shrink->private_data;
256 if (!(sb->s_flags & SB_BORN))
260 if (sb->s_op && sb->s_op->nr_cached_objects)
261 total_objects = sb->s_op->nr_cached_objects(sb, sc);
263 total_objects += list_lru_shrink_count(&sb->s_dentry_lru, sc);
264 total_objects += list_lru_shrink_count(&sb->s_inode_lru, sc);
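The per-sb shrinker fragments above use super_trylock_shared() rather than a blocking lock: memory reclaim must never sleep on s_umount, or it can deadlock against an unmount path that already holds the lock and is itself allocating memory. A minimal userspace sketch of that rule, with hypothetical names:

#include <pthread.h>
#include <stddef.h>

struct cache {
        pthread_rwlock_t umount_lock;   /* analogue of sb->s_umount */
        pthread_mutex_t lru_lock;       /* analogue of the LRU list locks */
        size_t nr_objects;
};

static size_t cache_scan(struct cache *c, size_t nr_to_scan)
{
        size_t freed = 0;

        /* like super_trylock_shared(): back off rather than block */
        if (pthread_rwlock_tryrdlock(&c->umount_lock) != 0)
                return 0;       /* the kernel returns SHRINK_STOP here */

        pthread_mutex_lock(&c->lru_lock);
        while (nr_to_scan-- && c->nr_objects) {
                c->nr_objects--;        /* prune one cached object */
                freed++;
        }
        pthread_mutex_unlock(&c->lru_lock);

        pthread_rwlock_unlock(&c->umount_lock);
        return freed;
}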
331 * When it cannot find a suitable sb, it allocates a new
378 "sb-%s", type->name);
416 * @sb: superblock in question
421 void put_super(struct super_block *sb)
424 __put_super(sb);
428 static void kill_super_notify(struct super_block *sb)
430 lockdep_assert_not_held(&sb->s_umount);
433 if (sb->s_flags & SB_DEAD)
444 hlist_del_init(&sb->s_instances);
449 * We don't need @sb->s_umount here as every concurrent caller
453 super_wake(sb, SB_DEAD);
513 * @sb: superblock to acquire
518 * sb->kill() and be marked as SB_DEAD.
523 static bool grab_super(struct super_block *sb)
527 sb->s_count++;
529 locked = super_lock_excl(sb);
531 if (atomic_inc_not_zero(&sb->s_active)) {
532 put_super(sb);
535 super_unlock_excl(sb);
537 wait_var_event(&sb->s_flags, super_flags(sb, SB_DEAD));
538 put_super(sb);
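grab_super() above relies on atomic_inc_not_zero(): an active reference may only be taken while s_active is still non-zero; once it has hit zero the superblock is on its way out and the caller instead waits for the SB_DEAD notification. A minimal sketch of the inc-not-zero primitive in C11 atomics (hypothetical name):

#include <stdatomic.h>
#include <stdbool.h>

static bool inc_not_zero(_Atomic int *active)
{
        int old = atomic_load(active);

        while (old != 0) {
                /* on failure `old` is reloaded, so just retry */
                if (atomic_compare_exchange_weak(active, &old, old + 1))
                        return true;    /* we hold an active reference */
        }
        return false;   /* teardown already started; wait for SB_DEAD */
}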
544 * @sb: reference we are trying to grab
559 bool super_trylock_shared(struct super_block *sb)
561 if (down_read_trylock(&sb->s_umount)) {
562 if (!(sb->s_flags & SB_DYING) && sb->s_root &&
563 (sb->s_flags & SB_BORN))
565 super_unlock_shared(sb);
573 * @sb: superblock to retire
587 void retire_super(struct super_block *sb)
589 WARN_ON(!sb->s_bdev);
590 __super_lock_excl(sb);
591 if (sb->s_iflags & SB_I_PERSB_BDI) {
592 bdi_unregister(sb->s_bdi);
593 sb->s_iflags &= ~SB_I_PERSB_BDI;
595 sb->s_iflags |= SB_I_RETIRED;
596 super_unlock_excl(sb);
602 * @sb: superblock to kill
614 void generic_shutdown_super(struct super_block *sb)
616 const struct super_operations *sop = sb->s_op;
618 if (sb->s_root) {
619 shrink_dcache_for_umount(sb);
620 sync_filesystem(sb);
621 sb->s_flags &= ~SB_ACTIVE;
626 evict_inodes(sb);
632 fsnotify_sb_delete(sb);
633 security_sb_delete(sb);
635 if (sb->s_dio_done_wq) {
636 destroy_workqueue(sb->s_dio_done_wq);
637 sb->s_dio_done_wq = NULL;
641 sop->put_super(sb);
647 fscrypt_destroy_keyring(sb);
649 if (CHECK_DATA_CORRUPTION(!list_empty(&sb->s_inodes),
651 sb->s_id, sb->s_type->name)) {
659 spin_lock(&sb->s_inode_list_lock);
660 list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
665 spin_unlock(&sb->s_inode_list_lock);
675 * sget{_fc}() until we passed sb->kill_sb().
677 super_wake(sb, SB_DYING);
678 super_unlock_excl(sb);
679 if (sb->s_bdi != &noop_backing_dev_info) {
680 if (sb->s_iflags & SB_I_PERSB_BDI)
681 bdi_unregister(sb->s_bdi);
682 bdi_put(sb->s_bdi);
683 sb->s_bdi = &noop_backing_dev_info;
863 void drop_super(struct super_block *sb)
865 super_unlock_shared(sb);
866 put_super(sb);
871 void drop_super_exclusive(struct super_block *sb)
873 super_unlock_excl(sb);
874 put_super(sb);
880 struct super_block *sb, *p = NULL;
883 list_for_each_entry(sb, &super_blocks, s_list) {
884 if (super_flags(sb, SB_DYING))
886 sb->s_count++;
889 f(sb);
894 p = sb;
910 struct super_block *sb, *p = NULL;
913 list_for_each_entry(sb, &super_blocks, s_list) {
916 sb->s_count++;
919 locked = super_lock_shared(sb);
921 if (sb->s_root)
922 f(sb, arg);
923 super_unlock_shared(sb);
929 p = sb;
948 struct super_block *sb, *p = NULL;
951 hlist_for_each_entry(sb, &type->fs_supers, s_instances) {
954 sb->s_count++;
957 locked = super_lock_shared(sb);
959 if (sb->s_root)
960 f(sb, arg);
961 super_unlock_shared(sb);
967 p = sb;
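The iterate_supers*() loops above all use the same "deferred put" discipline: bump s_count under the list lock, drop the lock to call the callback, then retake the lock and release the reference on the *previous* superblock (the `p = sb` bookkeeping), so the current node stays pinned while list linkage is traversed. A minimal userspace sketch, with hypothetical names and freeing-at-zero omitted:

#include <pthread.h>
#include <stddef.h>

struct node {
        struct node *next;
        int refs;
};

static void iterate(struct node *head, void (*f)(struct node *),
                    pthread_mutex_t *list_lock)
{
        struct node *n, *prev = NULL;

        pthread_mutex_lock(list_lock);
        for (n = head; n; n = n->next) {
                n->refs++;                      /* pin current node */
                pthread_mutex_unlock(list_lock);

                f(n);                           /* no list lock held here */

                pthread_mutex_lock(list_lock);
                if (prev)
                        prev->refs--;           /* safe: list lock held */
                prev = n;
        }
        if (prev)
                prev->refs--;
        pthread_mutex_unlock(list_lock);
}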
978 struct super_block *sb;
981 list_for_each_entry(sb, &super_blocks, s_list) {
982 if (sb->s_dev == dev) {
985 sb->s_count++;
988 locked = super_lock(sb, excl);
990 if (sb->s_root)
991 return sb;
992 super_unlock(sb, excl);
996 __put_super(sb);
1012 struct super_block *sb = fc->root->d_sb;
1020 if (sb->s_writers.frozen != SB_UNFROZEN)
1023 retval = security_sb_remount(sb, fc->security);
1029 if (!(fc->sb_flags & SB_RDONLY) && sb->s_bdev &&
1030 bdev_read_only(sb->s_bdev))
1033 remount_rw = !(fc->sb_flags & SB_RDONLY) && sb_rdonly(sb);
1034 remount_ro = (fc->sb_flags & SB_RDONLY) && !sb_rdonly(sb);
1038 if (!hlist_empty(&sb->s_pins)) {
1039 super_unlock_excl(sb);
1040 group_pin_kill(&sb->s_pins);
1041 __super_lock_excl(sb);
1042 if (!sb->s_root)
1044 if (sb->s_writers.frozen != SB_UNFROZEN)
1046 remount_ro = !sb_rdonly(sb);
1049 shrink_dcache_sb(sb);
1051 /* If we are reconfiguring to RDONLY and current sb is read/write,
1056 sb_start_ro_state_change(sb);
1058 retval = sb_prepare_remount_readonly(sb);
1067 sb_start_ro_state_change(sb);
1077 sb->s_type->name, retval);
1081 WRITE_ONCE(sb->s_flags, ((sb->s_flags & ~fc->sb_flags_mask) |
1083 sb_end_ro_state_change(sb);
1093 if (remount_ro && sb->s_bdev)
1094 invalidate_bdev(sb->s_bdev);
1098 sb_end_ro_state_change(sb);
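reconfigure_super() above derives the transition direction by comparing the requested flags with the current read-only state, then brackets the flag update with sb_start_ro_state_change()/sb_end_ro_state_change(). A minimal sketch of just the direction test; the struct and names are illustrative only:

#include <stdbool.h>

#define SB_RDONLY 0x1u

struct transition {
        bool to_rw;     /* remount_rw in the fragments above */
        bool to_ro;     /* remount_ro */
};

static struct transition remount_direction(unsigned int requested,
                                           unsigned int current_flags)
{
        bool want_ro = requested & SB_RDONLY;
        bool is_ro = current_flags & SB_RDONLY;

        return (struct transition){
                .to_rw = !want_ro && is_ro,
                .to_ro = want_ro && !is_ro,
        };
}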
1102 static void do_emergency_remount_callback(struct super_block *sb)
1104 bool locked = super_lock_excl(sb);
1106 if (locked && sb->s_root && sb->s_bdev && !sb_rdonly(sb)) {
1109 fc = fs_context_for_reconfigure(sb->s_root,
1118 super_unlock_excl(sb);
1139 static void do_thaw_all_callback(struct super_block *sb)
1141 bool locked = super_lock_excl(sb);
1143 if (locked && sb->s_root) {
1145 while (sb->s_bdev && !bdev_thaw(sb->s_bdev))
1146 pr_warn("Emergency Thaw on %pg\n", sb->s_bdev);
1147 thaw_super_locked(sb, FREEZE_HOLDER_USERSPACE);
1151 super_unlock_excl(sb);
1222 void kill_anon_super(struct super_block *sb)
1224 dev_t dev = sb->s_dev;
1225 generic_shutdown_super(sb);
1226 kill_super_notify(sb);
1231 void kill_litter_super(struct super_block *sb)
1233 if (sb->s_root)
1234 d_genocide(sb->s_root);
1235 kill_anon_super(sb);
1239 int set_anon_super_fc(struct super_block *sb, struct fs_context *fc)
1241 return set_anon_super(sb, NULL);
1245 static int test_keyed_super(struct super_block *sb, struct fs_context *fc)
1247 return sb->s_fs_info == fc->s_fs_info;
1257 int (*fill_super)(struct super_block *sb,
1260 struct super_block *sb;
1263 sb = sget_fc(fc, test, set_anon_super_fc);
1264 if (IS_ERR(sb))
1265 return PTR_ERR(sb);
1267 if (!sb->s_root) {
1268 err = fill_super(sb, fc);
1272 sb->s_flags |= SB_ACTIVE;
1275 fc->root = dget(sb->s_root);
1279 deactivate_locked_super(sb);
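The sget_fc()/fill_super flow above is a find-or-create with once-only initialization: the lookup returns the superblock locked, and only the caller that finds it uninitialized (no s_root yet) runs fill_super() and sets SB_ACTIVE; everyone else reuses the existing instance. A minimal userspace sketch of that shape, all names hypothetical:

#include <pthread.h>
#include <stdbool.h>
#include <stdlib.h>

struct sb_like {
        pthread_mutex_t lock;   /* stands in for s_umount, held on return */
        bool active;            /* stands in for SB_ACTIVE / s_root */
};

/* find-or-create; returns the object locked, like sget_fc() */
static struct sb_like *sget_analogue(struct sb_like **slot,
                                     pthread_mutex_t *table_lock)
{
        struct sb_like *sb;

        pthread_mutex_lock(table_lock);
        if (!*slot) {
                sb = calloc(1, sizeof(*sb));
                if (!sb) {
                        pthread_mutex_unlock(table_lock);
                        return NULL;
                }
                pthread_mutex_init(&sb->lock, NULL);
                *slot = sb;
        }
        sb = *slot;
        pthread_mutex_lock(&sb->lock);          /* returned locked */
        pthread_mutex_unlock(table_lock);
        return sb;
}

/* caller side, mirroring the `if (!sb->s_root) fill_super(...)` branch */
static int get_tree_analogue(struct sb_like *sb, int (*fill)(struct sb_like *))
{
        int err = 0;

        if (!sb->active) {
                err = fill(sb);
                if (!err)
                        sb->active = true;
        }
        pthread_mutex_unlock(&sb->lock);
        return err;
}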
1284 int (*fill_super)(struct super_block *sb,
1292 int (*fill_super)(struct super_block *sb,
1300 int (*fill_super)(struct super_block *sb,
1364 struct super_block *sb = bdev->bd_holder;
1368 lockdep_assert_not_held(&sb->s_umount);
1371 /* Make sure sb doesn't go away from under us */
1373 sb->s_count++;
1378 locked = super_lock(sb, excl);
1384 put_super(sb);
1389 if (!sb->s_root || !(sb->s_flags & SB_ACTIVE)) {
1390 super_unlock(sb, excl);
1394 return sb;
1399 struct super_block *sb;
1401 sb = bdev_super_lock(bdev, false);
1402 if (!sb)
1406 sync_filesystem(sb);
1407 shrink_dcache_sb(sb);
1408 invalidate_inodes(sb);
1409 if (sb->s_op->shutdown)
1410 sb->s_op->shutdown(sb);
1412 super_unlock_shared(sb);
1417 struct super_block *sb;
1419 sb = bdev_super_lock(bdev, false);
1420 if (!sb)
1423 sync_filesystem(sb);
1424 super_unlock_shared(sb);
1430 struct super_block *sb;
1432 sb = bdev_super_lock(bdev, true);
1433 if (sb) {
1434 active = atomic_inc_not_zero(&sb->s_active);
1435 super_unlock_excl(sb);
1439 return sb;
1459 struct super_block *sb;
1464 sb = get_bdev_super(bdev);
1465 if (!sb)
1468 if (sb->s_op->freeze_super)
1469 error = sb->s_op->freeze_super(sb,
1472 error = freeze_super(sb,
1476 deactivate_super(sb);
1499 struct super_block *sb;
1504 sb = get_bdev_super(bdev);
1505 if (WARN_ON_ONCE(!sb))
1508 if (sb->s_op->thaw_super)
1509 error = sb->s_op->thaw_super(sb,
1512 error = thaw_super(sb,
1514 deactivate_super(sb);
1526 int setup_bdev_super(struct super_block *sb, int sb_flags,
1533 bdev_file = bdev_file_open_by_dev(sb->s_dev, mode, sb, &fs_holder_ops);
1562 sb->s_bdev_file = bdev_file;
1563 sb->s_bdev = bdev;
1564 sb->s_bdi = bdi_get(bdev->bd_disk->bdi);
1566 sb->s_iflags |= SB_I_STABLE_WRITES;
1569 snprintf(sb->s_id, sizeof(sb->s_id), "%pg", bdev);
1570 shrinker_debugfs_rename(sb->s_shrink, "sb-%s:%s", sb->s_type->name,
1571 sb->s_id);
1572 sb_set_blocksize(sb, block_size(bdev));
1671 void kill_block_super(struct super_block *sb)
1673 struct block_device *bdev = sb->s_bdev;
1675 generic_shutdown_super(sb);
1678 bdev_fput(sb->s_bdev_file);
1770 struct super_block *sb;
1792 sb = fc->root->d_sb;
1793 WARN_ON(!sb->s_bdi);
1802 super_wake(sb, SB_BORN);
1804 error = security_sb_set_mnt_opts(sb, fc->security, 0, NULL);
1816 WARN((sb->s_maxbytes < 0), "%s set sb->s_maxbytes to "
1817 "negative value (%lld)\n", fc->fs_type->name, sb->s_maxbytes);
1827 int super_setup_bdi_name(struct super_block *sb, char *fmt, ...)
1844 WARN_ON(sb->s_bdi != &noop_backing_dev_info);
1845 sb->s_bdi = bdi;
1846 sb->s_iflags |= SB_I_PERSB_BDI;
1856 int super_setup_bdi(struct super_block *sb)
1860 return super_setup_bdi_name(sb, "%.28s-%ld", sb->s_type->name,
1867 * @sb: the super for which we wait
1873 static void sb_wait_write(struct super_block *sb, int level)
1875 percpu_down_write(sb->s_writers.rw_sem + level-1);
1882 static void lockdep_sb_freeze_release(struct super_block *sb)
1887 percpu_rwsem_release(sb->s_writers.rw_sem + level, 0, _THIS_IP_);
1891 * Tell lockdep we are holding these locks before we call ->unfreeze_fs(sb).
1893 static void lockdep_sb_freeze_acquire(struct super_block *sb)
1898 percpu_rwsem_acquire(sb->s_writers.rw_sem + level, 0, _THIS_IP_);
1901 static void sb_freeze_unlock(struct super_block *sb, int level)
1904 percpu_up_write(sb->s_writers.rw_sem + level);
1907 static int wait_for_partially_frozen(struct super_block *sb)
1912 unsigned short old = sb->s_writers.frozen;
1914 up_write(&sb->s_umount);
1915 ret = wait_var_event_killable(&sb->s_writers.frozen,
1916 sb->s_writers.frozen != old);
1917 down_write(&sb->s_umount);
1919 sb->s_writers.frozen != SB_UNFROZEN &&
1920 sb->s_writers.frozen != SB_FREEZE_COMPLETE);
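wait_for_partially_frozen() above shows the classic "unlock, wait, relock, recheck" loop: s_umount is dropped before sleeping on s_writers.frozen and retaken afterwards, and the loop only exits once the state is fully unfrozen or fully frozen. A minimal userspace sketch, names hypothetical; writers are assumed to update `state` under both locks before broadcasting:

#include <pthread.h>

enum fstate { UNFROZEN, PARTIAL, COMPLETE };

static enum fstate state;
static pthread_mutex_t umount = PTHREAD_MUTEX_INITIALIZER;  /* outer lock */
static pthread_mutex_t state_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t state_cv = PTHREAD_COND_INITIALIZER;

/* called with `umount` held; returns with it held again */
static void wait_partially_frozen(void)
{
        do {
                enum fstate old = state;

                pthread_mutex_unlock(&umount);
                pthread_mutex_lock(&state_lock);
                while (state == old)
                        pthread_cond_wait(&state_cv, &state_lock);
                pthread_mutex_unlock(&state_lock);
                pthread_mutex_lock(&umount);
        } while (state != UNFROZEN && state != COMPLETE);
}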
1928 static inline int freeze_inc(struct super_block *sb, enum freeze_holder who)
1934 ++sb->s_writers.freeze_kcount;
1936 ++sb->s_writers.freeze_ucount;
1937 return sb->s_writers.freeze_kcount + sb->s_writers.freeze_ucount;
1940 static inline int freeze_dec(struct super_block *sb, enum freeze_holder who)
1945 if ((who & FREEZE_HOLDER_KERNEL) && sb->s_writers.freeze_kcount)
1946 --sb->s_writers.freeze_kcount;
1947 if ((who & FREEZE_HOLDER_USERSPACE) && sb->s_writers.freeze_ucount)
1948 --sb->s_writers.freeze_ucount;
1949 return sb->s_writers.freeze_kcount + sb->s_writers.freeze_ucount;
1952 static inline bool may_freeze(struct super_block *sb, enum freeze_holder who)
1959 sb->s_writers.freeze_kcount == 0;
1962 sb->s_writers.freeze_ucount == 0;
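freeze_inc()/freeze_dec()/may_freeze() above keep separate counters for kernel-initiated and userspace-initiated freezes; the sum tells the caller whether any freeze is still outstanding. A condensed sketch of that two-counter model (struct and names illustrative):

enum freeze_holder { HOLDER_KERNEL = 1, HOLDER_USERSPACE = 2 };

struct freeze_counts { int kcount, ucount; };

static int freeze_inc_sketch(struct freeze_counts *c, enum freeze_holder who)
{
        if (who & HOLDER_KERNEL)
                c->kcount++;
        if (who & HOLDER_USERSPACE)
                c->ucount++;
        return c->kcount + c->ucount;   /* total outstanding freezes */
}

static int freeze_dec_sketch(struct freeze_counts *c, enum freeze_holder who)
{
        if ((who & HOLDER_KERNEL) && c->kcount)
                c->kcount--;
        if ((who & HOLDER_USERSPACE) && c->ucount)
                c->ucount--;
        return c->kcount + c->ucount;   /* 0 means fully thawed */
}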
1968 * @sb: the super to lock
1993 * During this function, sb->s_writers.frozen goes through these values:
2016 * sb->s_writers.frozen is protected by sb->s_umount.
2021 int freeze_super(struct super_block *sb, enum freeze_holder who)
2025 if (!super_lock_excl(sb)) {
2029 atomic_inc(&sb->s_active);
2032 if (sb->s_writers.frozen == SB_FREEZE_COMPLETE) {
2033 if (may_freeze(sb, who))
2034 ret = !!WARN_ON_ONCE(freeze_inc(sb, who) == 1);
2038 deactivate_locked_super(sb);
2042 if (sb->s_writers.frozen != SB_UNFROZEN) {
2043 ret = wait_for_partially_frozen(sb);
2045 deactivate_locked_super(sb);
2052 if (sb_rdonly(sb)) {
2054 WARN_ON_ONCE(freeze_inc(sb, who) > 1);
2055 sb->s_writers.frozen = SB_FREEZE_COMPLETE;
2056 wake_up_var(&sb->s_writers.frozen);
2057 super_unlock_excl(sb);
2061 sb->s_writers.frozen = SB_FREEZE_WRITE;
2063 super_unlock_excl(sb);
2064 sb_wait_write(sb, SB_FREEZE_WRITE);
2065 __super_lock_excl(sb);
2068 sb->s_writers.frozen = SB_FREEZE_PAGEFAULT;
2069 sb_wait_write(sb, SB_FREEZE_PAGEFAULT);
2072 ret = sync_filesystem(sb);
2074 sb->s_writers.frozen = SB_UNFROZEN;
2075 sb_freeze_unlock(sb, SB_FREEZE_PAGEFAULT);
2076 wake_up_var(&sb->s_writers.frozen);
2077 deactivate_locked_super(sb);
2082 sb->s_writers.frozen = SB_FREEZE_FS;
2083 sb_wait_write(sb, SB_FREEZE_FS);
2085 if (sb->s_op->freeze_fs) {
2086 ret = sb->s_op->freeze_fs(sb);
2090 sb->s_writers.frozen = SB_UNFROZEN;
2091 sb_freeze_unlock(sb, SB_FREEZE_FS);
2092 wake_up_var(&sb->s_writers.frozen);
2093 deactivate_locked_super(sb);
2101 WARN_ON_ONCE(freeze_inc(sb, who) > 1);
2102 sb->s_writers.frozen = SB_FREEZE_COMPLETE;
2103 wake_up_var(&sb->s_writers.frozen);
2104 lockdep_sb_freeze_release(sb);
2105 super_unlock_excl(sb);
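freeze_super() above walks the state machine SB_UNFROZEN → SB_FREEZE_WRITE → SB_FREEZE_PAGEFAULT → SB_FREEZE_FS → SB_FREEZE_COMPLETE, taking the write side of one per-level percpu rwsem at each step so new writers of that class block while in-flight ones drain (with sync_filesystem() and ->freeze_fs() between levels). A minimal userspace sketch of just the lock staging, using plain rwlocks in place of percpu rwsems:

#include <pthread.h>

enum { LVL_WRITE, LVL_PAGEFAULT, LVL_FS, LVL_MAX };

static pthread_rwlock_t freeze_lvl[LVL_MAX] = {
        PTHREAD_RWLOCK_INITIALIZER,
        PTHREAD_RWLOCK_INITIALIZER,
        PTHREAD_RWLOCK_INITIALIZER,
};

/* writers hold the read side of their level while a write is in flight */
static void freeze_all_levels(void)
{
        for (int lvl = LVL_WRITE; lvl < LVL_MAX; lvl++)
                pthread_rwlock_wrlock(&freeze_lvl[lvl]);
        /* frozen: every writer class is now blocked */
}

static void thaw_all_levels(void)
{
        for (int lvl = LVL_MAX - 1; lvl >= 0; lvl--)
                pthread_rwlock_unlock(&freeze_lvl[lvl]);
}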
2116 static int thaw_super_locked(struct super_block *sb, enum freeze_holder who)
2120 if (sb->s_writers.frozen != SB_FREEZE_COMPLETE)
2127 if (freeze_dec(sb, who))
2130 if (sb_rdonly(sb)) {
2131 sb->s_writers.frozen = SB_UNFROZEN;
2132 wake_up_var(&sb->s_writers.frozen);
2136 lockdep_sb_freeze_acquire(sb);
2138 if (sb->s_op->unfreeze_fs) {
2139 error = sb->s_op->unfreeze_fs(sb);
2142 freeze_inc(sb, who);
2143 lockdep_sb_freeze_release(sb);
2148 sb->s_writers.frozen = SB_UNFROZEN;
2149 wake_up_var(&sb->s_writers.frozen);
2150 sb_freeze_unlock(sb, SB_FREEZE_FS);
2152 deactivate_locked_super(sb);
2156 super_unlock_excl(sb);
2162 * @sb: the super to thaw
2177 int thaw_super(struct super_block *sb, enum freeze_holder who)
2179 if (!super_lock_excl(sb)) {
2183 return thaw_super_locked(sb, who);
2193 int sb_init_dio_done_wq(struct super_block *sb)
2198 sb->s_id);
2204 old = cmpxchg(&sb->s_dio_done_wq, NULL, wq);
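The cmpxchg in sb_init_dio_done_wq() is the standard once-only install: allocate the resource, publish it with a single compare-exchange, and destroy your copy if another thread won the race. A minimal sketch in C11 atomics, names hypothetical:

#include <stdatomic.h>
#include <stdlib.h>

struct wq { int dummy; };

static struct wq *_Atomic done_wq;

static int init_done_wq(void)
{
        struct wq *wq = calloc(1, sizeof(*wq));
        struct wq *expected = NULL;

        if (!wq)
                return -1;
        if (!atomic_compare_exchange_strong(&done_wq, &expected, wq))
                free(wq);       /* lost the race; keep the winner's */
        return 0;
}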