Lines matching defs:sbi in fs/f2fs/segment.c

171 bool f2fs_need_SSR(struct f2fs_sb_info *sbi)
173 int node_secs = get_blocktype_secs(sbi, F2FS_DIRTY_NODES);
174 int dent_secs = get_blocktype_secs(sbi, F2FS_DIRTY_DENTS);
175 int imeta_secs = get_blocktype_secs(sbi, F2FS_DIRTY_IMETA);
177 if (f2fs_lfs_mode(sbi))
179 if (sbi->gc_mode == GC_URGENT_HIGH)
181 if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED)))
184 return free_sections(sbi) <= (node_secs + 2 * dent_secs + imeta_secs +
185 SM_I(sbi)->min_ssr_sections + reserved_sections(sbi));
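The matches above cover all of f2fs_need_SSR() except the three early-return values; a reconstruction, with those returns filled in as they appear in mainline (LFS mode never falls back to SSR; urgent GC and a disabled checkpoint always do):

bool f2fs_need_SSR(struct f2fs_sb_info *sbi)
{
    int node_secs = get_blocktype_secs(sbi, F2FS_DIRTY_NODES);
    int dent_secs = get_blocktype_secs(sbi, F2FS_DIRTY_DENTS);
    int imeta_secs = get_blocktype_secs(sbi, F2FS_DIRTY_IMETA);

    if (f2fs_lfs_mode(sbi))
        return false;
    if (sbi->gc_mode == GC_URGENT_HIGH)
        return true;
    if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED)))
        return true;

    /* use SSR once free sections barely cover the dirty metadata */
    return free_sections(sbi) <= (node_secs + 2 * dent_secs + imeta_secs +
            SM_I(sbi)->min_ssr_sections + reserved_sections(sbi));
}
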
217 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
233 err = f2fs_get_node_info(sbi, dn.nid, &ni, false);
243 dec_valid_block_count(sbi, inode, 1);
244 f2fs_invalidate_blocks(sbi, dn.data_blkaddr);
247 f2fs_replace_block(sbi, &dn, dn.data_blkaddr,
253 err = inc_valid_block_count(sbi, inode, &count, true);
261 dec_valid_block_count(sbi, F2FS_I(inode)->cow_inode, count);
263 f2fs_replace_block(sbi, &dn, dn.data_blkaddr, new_addr,
300 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
335 } else if (!f2fs_is_valid_blkaddr(sbi, blkaddr,
365 sbi->revoked_atomic_block += fi->atomic_write_cnt;
367 sbi->committed_atomic_block += fi->atomic_write_cnt;
378 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
387 f2fs_lock_op(sbi);
391 f2fs_unlock_op(sbi);
401 void f2fs_balance_fs(struct f2fs_sb_info *sbi, bool need)
403 if (f2fs_cp_error(sbi))
406 if (time_to_inject(sbi, FAULT_CHECKPOINT))
407 f2fs_stop_checkpoint(sbi, false, STOP_CP_REASON_FAULT_INJECT);
410 if (need && excess_cached_nats(sbi))
411 f2fs_balance_fs_bg(sbi, false);
413 if (!f2fs_is_checkpoint_ready(sbi))
420 if (has_enough_free_secs(sbi, 0, 0))
423 if (test_opt(sbi, GC_MERGE) && sbi->gc_thread &&
424 sbi->gc_thread->f2fs_gc_task) {
427 prepare_to_wait(&sbi->gc_thread->fggc_wq, &wait,
429 wake_up(&sbi->gc_thread->gc_wait_queue_head);
431 finish_wait(&sbi->gc_thread->fggc_wq, &wait);
440 f2fs_down_write(&sbi->gc_lock);
441 stat_inc_gc_call_count(sbi, FOREGROUND);
442 f2fs_gc(sbi, &gc_control);
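Stitched together, the matches from 401 to 442 give the whole foreground balance path; a reconstruction follows. The gc_control initializer and the TASK_UNINTERRUPTIBLE wait flag are filled in from memory of the mainline source and should be read as assumptions:

void f2fs_balance_fs(struct f2fs_sb_info *sbi, bool need)
{
    if (f2fs_cp_error(sbi))
        return;

    if (time_to_inject(sbi, FAULT_CHECKPOINT))
        f2fs_stop_checkpoint(sbi, false, STOP_CP_REASON_FAULT_INJECT);

    /* take the cheap background path first if the NAT cache is bloated */
    if (need && excess_cached_nats(sbi))
        f2fs_balance_fs_bg(sbi, false);

    if (!f2fs_is_checkpoint_ready(sbi))
        return;

    /* nothing to do while free sections are plentiful */
    if (has_enough_free_secs(sbi, 0, 0))
        return;

    if (test_opt(sbi, GC_MERGE) && sbi->gc_thread &&
            sbi->gc_thread->f2fs_gc_task) {
        DEFINE_WAIT(wait);

        /* hand foreground GC to the GC thread and sleep until it is done */
        prepare_to_wait(&sbi->gc_thread->fggc_wq, &wait,
                    TASK_UNINTERRUPTIBLE);
        wake_up(&sbi->gc_thread->gc_wait_queue_head);
        io_schedule();
        finish_wait(&sbi->gc_thread->fggc_wq, &wait);
    } else {
        struct f2fs_gc_control gc_control = {
            /* assumed field values */
            .victim_segno = NULL_SEGNO,
            .init_gc_type = BG_GC,
            .no_bg_gc = true,
            .should_migrate_blocks = false,
            .err_gc_skipped = false,
            .nr_free_secs = 1 };

        /* otherwise run one round of foreground GC ourselves */
        f2fs_down_write(&sbi->gc_lock);
        stat_inc_gc_call_count(sbi, FOREGROUND);
        f2fs_gc(sbi, &gc_control);
    }
}
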
446 static inline bool excess_dirty_threshold(struct f2fs_sb_info *sbi)
448 int factor = f2fs_rwsem_is_locked(&sbi->cp_rwsem) ? 3 : 2;
449 unsigned int dents = get_pages(sbi, F2FS_DIRTY_DENTS);
450 unsigned int qdata = get_pages(sbi, F2FS_DIRTY_QDATA);
451 unsigned int nodes = get_pages(sbi, F2FS_DIRTY_NODES);
452 unsigned int meta = get_pages(sbi, F2FS_DIRTY_META);
453 unsigned int imeta = get_pages(sbi, F2FS_DIRTY_IMETA);
455 SEGS_TO_BLKS(sbi, (factor * DEFAULT_DIRTY_THRESHOLD));
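The comparison at the end of excess_dirty_threshold() does not reference sbi, so it is not matched above. Presumably (per mainline) each dirty-page class is checked against the scaled threshold, plus one combined check; the threshold computation from lines 454-455 is restated for context:

    unsigned int threshold =
            SEGS_TO_BLKS(sbi, (factor * DEFAULT_DIRTY_THRESHOLD));
    unsigned int global_threshold = threshold * 3 / 2;

    if (dents >= threshold || qdata >= threshold ||
            nodes >= threshold || meta >= threshold ||
            imeta >= threshold)
        return true;
    /* no single class is over, but the sum may still be */
    return dents + qdata + nodes + meta + imeta >= global_threshold;
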
465 void f2fs_balance_fs_bg(struct f2fs_sb_info *sbi, bool from_bg)
467 if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
471 if (!f2fs_available_free_memory(sbi, READ_EXTENT_CACHE))
472 f2fs_shrink_read_extent_tree(sbi,
476 if (!f2fs_available_free_memory(sbi, AGE_EXTENT_CACHE))
477 f2fs_shrink_age_extent_tree(sbi,
481 if (!f2fs_available_free_memory(sbi, NAT_ENTRIES))
482 f2fs_try_to_free_nats(sbi, NAT_ENTRY_PER_BLOCK);
484 if (!f2fs_available_free_memory(sbi, FREE_NIDS))
485 f2fs_try_to_free_nids(sbi, MAX_FREE_NIDS);
487 f2fs_build_free_nids(sbi, false, false);
489 if (excess_dirty_nats(sbi) || excess_dirty_threshold(sbi) ||
490 excess_prefree_segs(sbi) || !f2fs_space_for_roll_forward(sbi))
494 if (is_inflight_io(sbi, REQ_TIME) ||
495 (!f2fs_time_over(sbi, REQ_TIME) && f2fs_rwsem_is_locked(&sbi->cp_rwsem)))
499 if (f2fs_time_over(sbi, CP_TIME))
503 if (f2fs_available_free_memory(sbi, NAT_ENTRIES) &&
504 f2fs_available_free_memory(sbi, INO_ENTRIES))
508 if (test_opt(sbi, DATA_FLUSH) && from_bg) {
511 mutex_lock(&sbi->flush_lock);
514 f2fs_sync_dirty_inodes(sbi, FILE_INODE, false);
517 mutex_unlock(&sbi->flush_lock);
519 stat_inc_cp_call_count(sbi, BACKGROUND);
520 f2fs_sync_fs(sbi->sb, 1);
523 static int __submit_flush_wait(struct f2fs_sb_info *sbi,
528 trace_f2fs_issue_flush(bdev, test_opt(sbi, NOBARRIER),
529 test_opt(sbi, FLUSH_MERGE), ret);
531 f2fs_update_iostat(sbi, NULL, FS_FLUSH_IO, 0);
535 static int submit_flush_wait(struct f2fs_sb_info *sbi, nid_t ino)
540 if (!f2fs_is_multi_device(sbi))
541 return __submit_flush_wait(sbi, sbi->sb->s_bdev);
543 for (i = 0; i < sbi->s_ndevs; i++) {
544 if (!f2fs_is_dirty_device(sbi, ino, i, FLUSH_INO))
546 ret = __submit_flush_wait(sbi, FDEV(i).bdev);
555 struct f2fs_sb_info *sbi = data;
556 struct flush_cmd_control *fcc = SM_I(sbi)->fcc_info;
571 ret = submit_flush_wait(sbi, cmd->ino);
587 int f2fs_issue_flush(struct f2fs_sb_info *sbi, nid_t ino)
589 struct flush_cmd_control *fcc = SM_I(sbi)->fcc_info;
593 if (test_opt(sbi, NOBARRIER))
596 if (!test_opt(sbi, FLUSH_MERGE)) {
598 ret = submit_flush_wait(sbi, ino);
605 f2fs_is_multi_device(sbi)) {
606 ret = submit_flush_wait(sbi, ino);
641 ret = submit_flush_wait(sbi, ino);
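f2fs_issue_flush() appears three times above because it has three submit paths. A condensed sketch; flush_merge_queue_and_wait() is a hypothetical helper standing in for the llist queueing that the matches elide:

int f2fs_issue_flush(struct f2fs_sb_info *sbi, nid_t ino)
{
    struct flush_cmd_control *fcc = SM_I(sbi)->fcc_info;
    int ret;

    if (test_opt(sbi, NOBARRIER))
        return 0;                       /* flushes disabled outright */

    if (!test_opt(sbi, FLUSH_MERGE)) {
        /* no merging: issue and wait inline */
        atomic_inc(&fcc->queued_flush);
        ret = submit_flush_wait(sbi, ino);
        atomic_dec(&fcc->queued_flush);
        atomic_inc(&fcc->issued_flush);
        return ret;
    }

    /* sole queued flusher, or multi-device: inline is cheaper too */
    if (atomic_inc_return(&fcc->queued_flush) == 1 ||
            f2fs_is_multi_device(sbi)) {
        ret = submit_flush_wait(sbi, ino);
        atomic_dec(&fcc->queued_flush);
        atomic_inc(&fcc->issued_flush);
        return ret;
    }

    /*
     * Otherwise add a flush_cmd to fcc->issue_list, wake the
     * issue_flush_thread, and sleep until it batches every queued
     * command behind a single submit_flush_wait() (the thread's own
     * call is the match at 571).
     */
    return flush_merge_queue_and_wait(fcc, ino);    /* hypothetical */
}
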
658 int f2fs_create_flush_cmd_control(struct f2fs_sb_info *sbi)
660 dev_t dev = sbi->sb->s_bdev->bd_dev;
663 if (SM_I(sbi)->fcc_info) {
664 fcc = SM_I(sbi)->fcc_info;
670 fcc = f2fs_kzalloc(sbi, sizeof(struct flush_cmd_control), GFP_KERNEL);
677 SM_I(sbi)->fcc_info = fcc;
678 if (!test_opt(sbi, FLUSH_MERGE))
682 fcc->f2fs_issue_flush = kthread_run(issue_flush_thread, sbi,
694 void f2fs_destroy_flush_cmd_control(struct f2fs_sb_info *sbi, bool free)
696 struct flush_cmd_control *fcc = SM_I(sbi)->fcc_info;
706 SM_I(sbi)->fcc_info = NULL;
710 int f2fs_flush_device_cache(struct f2fs_sb_info *sbi)
714 if (!f2fs_is_multi_device(sbi))
717 if (test_opt(sbi, NOBARRIER))
720 for (i = 1; i < sbi->s_ndevs; i++) {
723 if (!f2fs_test_bit(i, (char *)&sbi->dirty_device))
727 ret = __submit_flush_wait(sbi, FDEV(i).bdev);
733 f2fs_stop_checkpoint(sbi, false,
738 spin_lock(&sbi->dev_lock);
739 f2fs_clear_bit(i, (char *)&sbi->dirty_device);
740 spin_unlock(&sbi->dev_lock);
746 static void __locate_dirty_segment(struct f2fs_sb_info *sbi, unsigned int segno,
749 struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
752 if (IS_CURSEG(sbi, segno))
759 struct seg_entry *sentry = get_seg_entry(sbi, segno);
763 f2fs_bug_on(sbi, 1);
769 if (__is_large_section(sbi)) {
770 unsigned int secno = GET_SEC_FROM_SEG(sbi, segno);
772 get_valid_blocks(sbi, segno, true);
774 f2fs_bug_on(sbi, unlikely(!valid_blocks ||
775 valid_blocks == CAP_BLKS_PER_SEC(sbi)));
777 if (!IS_CURSEC(sbi, secno))
783 static void __remove_dirty_segment(struct f2fs_sb_info *sbi, unsigned int segno,
786 struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
793 struct seg_entry *sentry = get_seg_entry(sbi, segno);
799 valid_blocks = get_valid_blocks(sbi, segno, true);
801 clear_bit(GET_SEC_FROM_SEG(sbi, segno),
804 clear_bit(segno, SIT_I(sbi)->invalid_segmap);
807 if (__is_large_section(sbi)) {
808 unsigned int secno = GET_SEC_FROM_SEG(sbi, segno);
811 valid_blocks == CAP_BLKS_PER_SEC(sbi)) {
816 if (!IS_CURSEC(sbi, secno))
827 static void locate_dirty_segment(struct f2fs_sb_info *sbi, unsigned int segno)
829 struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
833 if (segno == NULL_SEGNO || IS_CURSEG(sbi, segno))
836 usable_blocks = f2fs_usable_blks_in_seg(sbi, segno);
839 valid_blocks = get_valid_blocks(sbi, segno, false);
840 ckpt_valid_blocks = get_ckpt_valid_blocks(sbi, segno, false);
842 if (valid_blocks == 0 && (!is_sbi_flag_set(sbi, SBI_CP_DISABLED) ||
844 __locate_dirty_segment(sbi, segno, PRE);
845 __remove_dirty_segment(sbi, segno, DIRTY);
847 __locate_dirty_segment(sbi, segno, DIRTY);
850 __remove_dirty_segment(sbi, segno, DIRTY);
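locate_dirty_segment() is the state machine that every allocation and invalidation above funnels into. Reconstructed; the partially-valid branch condition (valid_blocks < usable_blocks) is an assumption based on mainline:

static void locate_dirty_segment(struct f2fs_sb_info *sbi, unsigned int segno)
{
    struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
    unsigned short valid_blocks, ckpt_valid_blocks;
    unsigned int usable_blocks;

    if (segno == NULL_SEGNO || IS_CURSEG(sbi, segno))
        return;

    usable_blocks = f2fs_usable_blks_in_seg(sbi, segno);
    mutex_lock(&dirty_i->seglist_lock);

    valid_blocks = get_valid_blocks(sbi, segno, false);
    ckpt_valid_blocks = get_ckpt_valid_blocks(sbi, segno, false);

    if (valid_blocks == 0 && (!is_sbi_flag_set(sbi, SBI_CP_DISABLED) ||
            ckpt_valid_blocks == usable_blocks)) {
        /* fully invalidated: prefree, reclaimable at next checkpoint */
        __locate_dirty_segment(sbi, segno, PRE);
        __remove_dirty_segment(sbi, segno, DIRTY);
    } else if (valid_blocks < usable_blocks) {
        /* partially valid: a candidate for GC/SSR */
        __locate_dirty_segment(sbi, segno, DIRTY);
    } else {
        /* fully valid: must not stay on the dirty list */
        __remove_dirty_segment(sbi, segno, DIRTY);
    }

    mutex_unlock(&dirty_i->seglist_lock);
}
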
857 void f2fs_dirty_to_prefree(struct f2fs_sb_info *sbi)
859 struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
863 for_each_set_bit(segno, dirty_i->dirty_segmap[DIRTY], MAIN_SEGS(sbi)) {
864 if (get_valid_blocks(sbi, segno, false))
866 if (IS_CURSEG(sbi, segno))
868 __locate_dirty_segment(sbi, segno, PRE);
869 __remove_dirty_segment(sbi, segno, DIRTY);
874 block_t f2fs_get_unusable_blocks(struct f2fs_sb_info *sbi)
877 (overprovision_segments(sbi) - reserved_segments(sbi));
878 block_t ovp_holes = SEGS_TO_BLKS(sbi, ovp_hole_segs);
879 struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
886 for_each_set_bit(segno, dirty_i->dirty_segmap[DIRTY], MAIN_SEGS(sbi)) {
887 se = get_seg_entry(sbi, segno);
889 holes[NODE] += f2fs_usable_blks_in_seg(sbi, segno) -
892 holes[DATA] += f2fs_usable_blks_in_seg(sbi, segno) -
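The tail of f2fs_get_unusable_blocks() takes no sbi argument and is therefore absent above; presumably it nets the larger per-type hole against the overprovision area computed at 877-878 (unusable, holes[], and ovp_holes are the function's locals):

    unusable = max(holes[DATA], holes[NODE]);
    if (unusable > ovp_holes)
        return unusable - ovp_holes;
    return 0;
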
903 int f2fs_disable_cp_again(struct f2fs_sb_info *sbi, block_t unusable)
906 (overprovision_segments(sbi) - reserved_segments(sbi));
908 if (F2FS_OPTION(sbi).unusable_cap_perc == 100)
910 if (unusable > F2FS_OPTION(sbi).unusable_cap)
912 if (is_sbi_flag_set(sbi, SBI_CP_DISABLED_QUICK) &&
913 dirty_segments(sbi) > ovp_hole_segs)
915 if (has_not_enough_free_secs(sbi, 0, 0))
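f2fs_disable_cp_again() then gates checkpoint=disable on that unusable count; a reconstruction, with the return values (0 = allowed, -EAGAIN = retry later) filled in as assumptions:

int f2fs_disable_cp_again(struct f2fs_sb_info *sbi, block_t unusable)
{
    int ovp_hole_segs =
        (overprovision_segments(sbi) - reserved_segments(sbi));

    if (F2FS_OPTION(sbi).unusable_cap_perc == 100)
        return 0;
    if (unusable > F2FS_OPTION(sbi).unusable_cap)
        return -EAGAIN;
    if (is_sbi_flag_set(sbi, SBI_CP_DISABLED_QUICK) &&
            dirty_segments(sbi) > ovp_hole_segs)
        return -EAGAIN;
    if (has_not_enough_free_secs(sbi, 0, 0))
        return -EAGAIN;
    return 0;
}
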
921 static unsigned int get_free_segment(struct f2fs_sb_info *sbi)
923 struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
927 for_each_set_bit(segno, dirty_i->dirty_segmap[DIRTY], MAIN_SEGS(sbi)) {
928 if (get_valid_blocks(sbi, segno, false))
930 if (get_ckpt_valid_blocks(sbi, segno, false))
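get_free_segment() is short enough to reconstruct fully from its matches: it returns the first dirty segment with no valid blocks in either the live or the checkpointed view (the NULL_SEGNO fallback is assumed):

static unsigned int get_free_segment(struct f2fs_sb_info *sbi)
{
    struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
    unsigned int segno;

    mutex_lock(&dirty_i->seglist_lock);
    for_each_set_bit(segno, dirty_i->dirty_segmap[DIRTY], MAIN_SEGS(sbi)) {
        if (get_valid_blocks(sbi, segno, false))
            continue;
        if (get_ckpt_valid_blocks(sbi, segno, false))
            continue;
        mutex_unlock(&dirty_i->seglist_lock);
        return segno;
    }
    mutex_unlock(&dirty_i->seglist_lock);
    return NULL_SEGNO;
}
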
939 static struct discard_cmd *__create_discard_cmd(struct f2fs_sb_info *sbi,
943 struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
947 f2fs_bug_on(sbi, !len);
971 static bool f2fs_check_discard_tree(struct f2fs_sb_info *sbi)
974 struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
987 f2fs_info(sbi, "broken discard_rbtree, "
999 static struct discard_cmd *__lookup_discard_cmd(struct f2fs_sb_info *sbi,
1002 struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
1091 static void __remove_discard_cmd(struct f2fs_sb_info *sbi,
1094 struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
1106 f2fs_bug_on(sbi, dc->ref);
1114 KERN_INFO, sbi->sb->s_id,
1136 static void __check_sit_bitmap(struct f2fs_sb_info *sbi,
1146 segno = GET_SEGNO(sbi, blk);
1147 sentry = get_seg_entry(sbi, segno);
1148 offset = GET_BLKOFF_FROM_SEG0(sbi, blk);
1150 if (end < START_BLOCK(sbi, segno + 1))
1151 size = GET_BLKOFF_FROM_SEG0(sbi, end);
1153 size = BLKS_PER_SEG(sbi);
1156 f2fs_bug_on(sbi, offset != size);
1157 blk = START_BLOCK(sbi, segno + 1);
1162 static void __init_discard_policy(struct f2fs_sb_info *sbi,
1166 struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
1188 if (utilization(sbi) > dcc->discard_urgent_util) {
1209 static void __update_discard_tree_range(struct f2fs_sb_info *sbi,
1214 static void __submit_zone_reset_cmd(struct f2fs_sb_info *sbi,
1219 struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
1239 __check_sit_bitmap(sbi, dc->di.lstart, dc->di.lstart + dc->di.len);
1247 f2fs_update_iostat(sbi, NULL, FS_ZONE_RESET_IO, dc->di.len * F2FS_BLKSIZE);
1252 static int __submit_discard_cmd(struct f2fs_sb_info *sbi,
1259 struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
1269 if (is_sbi_flag_set(sbi, SBI_NEED_FSCK))
1273 if (f2fs_sb_has_blkzoned(sbi) && bdev_is_zoned(bdev)) {
1274 int devi = f2fs_bdev_index(sbi, bdev);
1279 if (f2fs_blkz_is_seq(sbi, devi, dc->di.start)) {
1280 __submit_zone_reset_cmd(sbi, dc, flag,
1312 if (time_to_inject(sbi, FAULT_DISCARD)) {
1329 f2fs_bug_on(sbi, !bio);
1348 __check_sit_bitmap(sbi, lstart, lstart + len);
1357 f2fs_update_iostat(sbi, NULL, FS_DISCARD_IO, len * F2FS_BLKSIZE);
1367 __update_discard_tree_range(sbi, bdev, lstart, start, len);
1372 static void __insert_discard_cmd(struct f2fs_sb_info *sbi,
1376 struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
1398 dc = __create_discard_cmd(sbi, bdev, lstart, start, len);
1410 static void __punch_discard_cmd(struct f2fs_sb_info *sbi,
1413 struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
1418 __remove_discard_cmd(sbi, dc);
1433 __insert_discard_cmd(sbi, dc->bdev, blkaddr + 1,
1446 static void __update_discard_tree_range(struct f2fs_sb_info *sbi,
1450 struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
1515 __remove_discard_cmd(sbi, tdc);
1520 __insert_discard_cmd(sbi, bdev,
1533 static void __queue_zone_reset_cmd(struct f2fs_sb_info *sbi,
1539 mutex_lock(&SM_I(sbi)->dcc_info->cmd_lock);
1540 __insert_discard_cmd(sbi, bdev, lblkstart, blkstart, blklen);
1541 mutex_unlock(&SM_I(sbi)->dcc_info->cmd_lock);
1545 static void __queue_discard_cmd(struct f2fs_sb_info *sbi,
1555 if (f2fs_is_multi_device(sbi)) {
1556 int devi = f2fs_target_device_index(sbi, blkstart);
1560 mutex_lock(&SM_I(sbi)->dcc_info->cmd_lock);
1561 __update_discard_tree_range(sbi, bdev, lblkstart, blkstart, blklen);
1562 mutex_unlock(&SM_I(sbi)->dcc_info->cmd_lock);
1565 static void __issue_discard_cmd_orderly(struct f2fs_sb_info *sbi,
1568 struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
1590 if (dpolicy->io_aware && !is_idle(sbi, DISCARD_TIME)) {
1596 err = __submit_discard_cmd(sbi, dpolicy, dc, issued);
1603 __remove_discard_cmd(sbi, dc);
1617 static unsigned int __wait_all_discard_cmd(struct f2fs_sb_info *sbi,
1620 static int __issue_discard_cmd(struct f2fs_sb_info *sbi,
1623 struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
1631 f2fs_update_time(sbi, UMOUNT_DISCARD_TIMEOUT);
1637 f2fs_time_over(sbi, UMOUNT_DISCARD_TIMEOUT))
1644 __issue_discard_cmd_orderly(sbi, dpolicy, &issued);
1654 f2fs_bug_on(sbi, !f2fs_check_discard_tree(sbi));
1657 f2fs_bug_on(sbi, dc->state != D_PREP);
1660 f2fs_time_over(sbi, UMOUNT_DISCARD_TIMEOUT))
1664 !is_idle(sbi, DISCARD_TIME)) {
1669 __submit_discard_cmd(sbi, dpolicy, dc, &issued);
1683 __wait_all_discard_cmd(sbi, dpolicy);
1693 static bool __drop_discard_cmd(struct f2fs_sb_info *sbi)
1695 struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
1705 f2fs_bug_on(sbi, dc->state != D_PREP);
1706 __remove_discard_cmd(sbi, dc);
1715 void f2fs_drop_discard_cmd(struct f2fs_sb_info *sbi)
1717 __drop_discard_cmd(sbi);
1720 static unsigned int __wait_one_discard_bio(struct f2fs_sb_info *sbi,
1723 struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
1728 f2fs_bug_on(sbi, dc->state != D_DONE);
1733 __remove_discard_cmd(sbi, dc);
1740 static unsigned int __wait_discard_cmd_range(struct f2fs_sb_info *sbi,
1744 struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
1764 __remove_discard_cmd(sbi, iter);
1774 trimmed += __wait_one_discard_bio(sbi, dc);
1781 static unsigned int __wait_all_discard_cmd(struct f2fs_sb_info *sbi,
1788 return __wait_discard_cmd_range(sbi, dpolicy, 0, UINT_MAX);
1791 __init_discard_policy(sbi, &dp, DPOLICY_FSTRIM, MIN_DISCARD_GRANULARITY);
1792 discard_blks = __wait_discard_cmd_range(sbi, &dp, 0, UINT_MAX);
1793 __init_discard_policy(sbi, &dp, DPOLICY_UMOUNT, MIN_DISCARD_GRANULARITY);
1794 discard_blks += __wait_discard_cmd_range(sbi, &dp, 0, UINT_MAX);
1800 static void f2fs_wait_discard_bio(struct f2fs_sb_info *sbi, block_t blkaddr)
1802 struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
1807 dc = __lookup_discard_cmd(sbi, blkaddr);
1809 if (dc && f2fs_sb_has_blkzoned(sbi) && bdev_is_zoned(dc->bdev)) {
1810 int devi = f2fs_bdev_index(sbi, dc->bdev);
1817 if (f2fs_blkz_is_seq(sbi, devi, dc->di.start)) {
1820 __submit_zone_reset_cmd(sbi, dc, REQ_SYNC,
1825 __wait_one_discard_bio(sbi, dc);
1832 __punch_discard_cmd(sbi, dc, blkaddr);
1841 __wait_one_discard_bio(sbi, dc);
1844 void f2fs_stop_discard_thread(struct f2fs_sb_info *sbi)
1846 struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
1858 * @sbi: the f2fs_sb_info data for discard cmd to issue
1864 bool f2fs_issue_discard_timeout(struct f2fs_sb_info *sbi)
1866 struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
1873 __init_discard_policy(sbi, &dpolicy, DPOLICY_UMOUNT,
1875 __issue_discard_cmd(sbi, &dpolicy);
1876 dropped = __drop_discard_cmd(sbi);
1879 __wait_all_discard_cmd(sbi, NULL);
1881 f2fs_bug_on(sbi, atomic_read(&dcc->discard_cmd_cnt));
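Together with the kernel-doc fragment at 1858, the matches give nearly all of f2fs_issue_discard_timeout(); reconstructed, with the early return when no commands are pending treated as an assumption:

bool f2fs_issue_discard_timeout(struct f2fs_sb_info *sbi)
{
    struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
    struct discard_policy dpolicy;
    bool dropped;

    if (!atomic_read(&dcc->discard_cmd_cnt))
        return true;

    __init_discard_policy(sbi, &dpolicy, DPOLICY_UMOUNT,
                    dcc->discard_granularity);
    __issue_discard_cmd(sbi, &dpolicy);
    dropped = __drop_discard_cmd(sbi);

    /* make sure there are no pending discard commands left */
    __wait_all_discard_cmd(sbi, NULL);

    f2fs_bug_on(sbi, atomic_read(&dcc->discard_cmd_cnt));
    return !dropped;
}
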
1887 struct f2fs_sb_info *sbi = data;
1888 struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
1901 if (sbi->gc_mode == GC_URGENT_HIGH ||
1902 !f2fs_available_free_memory(sbi, DISCARD_CACHE))
1903 __init_discard_policy(sbi, &dpolicy, DPOLICY_FORCE,
1906 __init_discard_policy(sbi, &dpolicy, DPOLICY_BG,
1914 __wait_all_discard_cmd(sbi, NULL);
1916 if (f2fs_readonly(sbi->sb))
1920 if (is_sbi_flag_set(sbi, SBI_NEED_FSCK) ||
1926 sb_start_intwrite(sbi->sb);
1928 issued = __issue_discard_cmd(sbi, &dpolicy);
1930 __wait_all_discard_cmd(sbi, &dpolicy);
1933 wait_ms = f2fs_time_to_wait(sbi, DISCARD_TIME);
1942 sb_end_intwrite(sbi->sb);
1949 static int __f2fs_issue_discard_zone(struct f2fs_sb_info *sbi,
1957 if (f2fs_is_multi_device(sbi)) {
1958 devi = f2fs_target_device_index(sbi, blkstart);
1961 f2fs_err(sbi, "Invalid block %x", blkstart);
1968 if (f2fs_blkz_is_seq(sbi, devi, blkstart)) {
1974 f2fs_err(sbi, "(%d) %s: Unaligned zone reset attempted (block %x + %x)",
1975 devi, sbi->s_ndevs ? FDEV(devi).path : "",
1980 if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING))) {
1992 __queue_zone_reset_cmd(sbi, bdev, blkstart, lblkstart, blklen);
1997 __queue_discard_cmd(sbi, bdev, lblkstart, blklen);
2002 static int __issue_discard_async(struct f2fs_sb_info *sbi,
2006 if (f2fs_sb_has_blkzoned(sbi) && bdev_is_zoned(bdev))
2007 return __f2fs_issue_discard_zone(sbi, bdev, blkstart, blklen);
2009 __queue_discard_cmd(sbi, bdev, blkstart, blklen);
2013 static int f2fs_issue_discard(struct f2fs_sb_info *sbi,
2023 bdev = f2fs_target_device(sbi, blkstart, NULL);
2028 f2fs_target_device(sbi, i, NULL);
2031 err = __issue_discard_async(sbi, bdev,
2041 se = get_seg_entry(sbi, GET_SEGNO(sbi, i));
2042 offset = GET_BLKOFF_FROM_SEG0(sbi, i);
2044 if (f2fs_block_unit_discard(sbi) &&
2046 sbi->discard_blks--;
2050 err = __issue_discard_async(sbi, bdev, start, len);
2054 static bool add_discard_addrs(struct f2fs_sb_info *sbi, struct cp_control *cpc,
2058 struct seg_entry *se = get_seg_entry(sbi, cpc->trim_start);
2062 unsigned long *dmap = SIT_I(sbi)->tmp_map;
2066 struct list_head *head = &SM_I(sbi)->dcc_info->entry_list;
2069 if (se->valid_blocks == BLKS_PER_SEG(sbi) ||
2070 !f2fs_hw_support_discard(sbi) ||
2071 !f2fs_block_unit_discard(sbi))
2075 if (!f2fs_realtime_discard_enable(sbi) || !se->valid_blocks ||
2076 SM_I(sbi)->dcc_info->nr_discards >=
2077 SM_I(sbi)->dcc_info->max_discards)
2086 while (force || SM_I(sbi)->dcc_info->nr_discards <=
2087 SM_I(sbi)->dcc_info->max_discards) {
2088 start = __find_rev_next_bit(dmap, BLKS_PER_SEG(sbi), end + 1);
2089 if (start >= BLKS_PER_SEG(sbi))
2093 BLKS_PER_SEG(sbi), start + 1);
2094 if (force && start && end != BLKS_PER_SEG(sbi) &&
2104 de->start_blkaddr = START_BLOCK(sbi, cpc->trim_start);
2111 SM_I(sbi)->dcc_info->nr_discards += end - start;
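The scratch-bitmap (dmap) setup inside add_discard_addrs() never mentions sbi, so it is missing above. Per mainline it presumably diffs the current and checkpointed validity maps, or, under force (FITRIM), takes every block not yet discarded (se, force, and dmap are the function's locals from 2058-2062):

    unsigned long *cur_map = (unsigned long *)se->cur_valid_map;
    unsigned long *ckpt_map = (unsigned long *)se->ckpt_valid_map;
    unsigned long *discard_map = (unsigned long *)se->discard_map;
    int entries = SIT_VBLOCK_MAP_SIZE / sizeof(unsigned long);
    int i;

    /* blocks valid at the last checkpoint but no longer valid now */
    for (i = 0; i < entries; i++)
        dmap[i] = force ? ~ckpt_map[i] & ~discard_map[i] :
                (cur_map[i] ^ ckpt_map[i]) & ckpt_map[i];

The while loop at 2086-2111 then walks runs of set bits in dmap and records each run as a discard_entry.
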
2122 void f2fs_release_discard_addrs(struct f2fs_sb_info *sbi)
2124 struct list_head *head = &(SM_I(sbi)->dcc_info->entry_list);
2135 static void set_prefree_as_free_segments(struct f2fs_sb_info *sbi)
2137 struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
2141 for_each_set_bit(segno, dirty_i->dirty_segmap[PRE], MAIN_SEGS(sbi))
2142 __set_test_and_free(sbi, segno, false);
2146 void f2fs_clear_prefree_segments(struct f2fs_sb_info *sbi,
2149 struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
2152 struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
2157 bool section_alignment = F2FS_OPTION(sbi).discard_unit ==
2160 if (f2fs_lfs_mode(sbi) && __is_large_section(sbi))
2170 start = find_next_bit(prefree_map, MAIN_SEGS(sbi), end + 1);
2171 if (start >= MAIN_SEGS(sbi))
2173 end = find_next_zero_bit(prefree_map, MAIN_SEGS(sbi),
2177 start = rounddown(start, SEGS_PER_SEC(sbi));
2178 end = roundup(end, SEGS_PER_SEC(sbi));
2186 if (!f2fs_realtime_discard_enable(sbi))
2194 if (!f2fs_sb_has_blkzoned(sbi) &&
2195 (!f2fs_lfs_mode(sbi) || !__is_large_section(sbi))) {
2196 f2fs_issue_discard(sbi, START_BLOCK(sbi, start),
2197 SEGS_TO_BLKS(sbi, end - start));
2201 secno = GET_SEC_FROM_SEG(sbi, start);
2202 start_segno = GET_SEG_FROM_SEC(sbi, secno);
2203 if (!IS_CURSEC(sbi, secno) &&
2204 !get_valid_blocks(sbi, start, true))
2205 f2fs_issue_discard(sbi, START_BLOCK(sbi, start_segno),
2206 BLKS_PER_SEC(sbi));
2208 start = start_segno + SEGS_PER_SEC(sbi);
2216 if (!f2fs_block_unit_discard(sbi))
2227 BLKS_PER_SEG(sbi), cur_pos);
2230 if (f2fs_sb_has_blkzoned(sbi) ||
2234 f2fs_issue_discard(sbi, entry->start_blkaddr + cur_pos,
2239 BLKS_PER_SEG(sbi), cur_pos);
2245 if (cur_pos < BLKS_PER_SEG(sbi))
2253 wake_up_discard_thread(sbi, false);
2256 int f2fs_start_discard_thread(struct f2fs_sb_info *sbi)
2258 dev_t dev = sbi->sb->s_bdev->bd_dev;
2259 struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
2262 if (f2fs_sb_has_readonly(sbi)) {
2263 f2fs_info(sbi,
2268 if (!f2fs_realtime_discard_enable(sbi))
2271 dcc->f2fs_issue_discard = kthread_run(issue_discard_thread, sbi,
2281 static int create_discard_cmd_control(struct f2fs_sb_info *sbi)
2286 if (SM_I(sbi)->dcc_info) {
2287 dcc = SM_I(sbi)->dcc_info;
2291 dcc = f2fs_kzalloc(sbi, sizeof(struct discard_cmd_control), GFP_KERNEL);
2299 if (F2FS_OPTION(sbi).discard_unit == DISCARD_UNIT_SEGMENT)
2300 dcc->discard_granularity = BLKS_PER_SEG(sbi);
2301 else if (F2FS_OPTION(sbi).discard_unit == DISCARD_UNIT_SECTION)
2302 dcc->discard_granularity = BLKS_PER_SEC(sbi);
2314 dcc->max_discards = SEGS_TO_BLKS(sbi, MAIN_SEGS(sbi));
2326 SM_I(sbi)->dcc_info = dcc;
2328 err = f2fs_start_discard_thread(sbi);
2331 SM_I(sbi)->dcc_info = NULL;
2337 static void destroy_discard_cmd_control(struct f2fs_sb_info *sbi)
2339 struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
2344 f2fs_stop_discard_thread(sbi);
2350 f2fs_issue_discard_timeout(sbi);
2353 SM_I(sbi)->dcc_info = NULL;
2356 static bool __mark_sit_entry_dirty(struct f2fs_sb_info *sbi, unsigned int segno)
2358 struct sit_info *sit_i = SIT_I(sbi);
2368 static void __set_sit_entry_type(struct f2fs_sb_info *sbi, int type,
2371 struct seg_entry *se = get_seg_entry(sbi, segno);
2375 __mark_sit_entry_dirty(sbi, segno);
2378 static inline unsigned long long get_segment_mtime(struct f2fs_sb_info *sbi,
2381 unsigned int segno = GET_SEGNO(sbi, blkaddr);
2385 return get_seg_entry(sbi, segno)->mtime;
2388 static void update_segment_mtime(struct f2fs_sb_info *sbi, block_t blkaddr,
2392 unsigned int segno = GET_SEGNO(sbi, blkaddr);
2393 unsigned long long ctime = get_mtime(sbi, false);
2399 se = get_seg_entry(sbi, segno);
2407 if (ctime > SIT_I(sbi)->max_mtime)
2408 SIT_I(sbi)->max_mtime = ctime;
2411 static void update_sit_entry(struct f2fs_sb_info *sbi, block_t blkaddr, int del)
2421 segno = GET_SEGNO(sbi, blkaddr);
2425 se = get_seg_entry(sbi, segno);
2427 offset = GET_BLKOFF_FROM_SEG0(sbi, blkaddr);
2429 f2fs_bug_on(sbi, (new_vblocks < 0 ||
2430 (new_vblocks > f2fs_usable_blks_in_seg(sbi, segno))));
2441 f2fs_err(sbi, "Inconsistent error when setting bitmap, blk:%u, old bit:%d",
2443 f2fs_bug_on(sbi, 1);
2447 f2fs_err(sbi, "Bitmap was wrongly set, blk:%u",
2449 f2fs_bug_on(sbi, 1);
2454 if (f2fs_block_unit_discard(sbi) &&
2456 sbi->discard_blks--;
2462 if (!is_sbi_flag_set(sbi, SBI_CP_DISABLED)) {
2472 f2fs_err(sbi, "Inconsistent error when clearing bitmap, blk:%u, old bit:%d",
2474 f2fs_bug_on(sbi, 1);
2478 f2fs_err(sbi, "Bitmap was wrongly cleared, blk:%u",
2480 f2fs_bug_on(sbi, 1);
2483 } else if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) {
2491 spin_lock(&sbi->stat_lock);
2492 sbi->unusable_block_count++;
2493 spin_unlock(&sbi->stat_lock);
2497 if (f2fs_block_unit_discard(sbi) &&
2499 sbi->discard_blks++;
2504 __mark_sit_entry_dirty(sbi, segno);
2507 SIT_I(sbi)->written_valid_blocks += del;
2509 if (__is_large_section(sbi))
2510 get_sec_entry(sbi, segno)->valid_blocks += del;
2513 void f2fs_invalidate_blocks(struct f2fs_sb_info *sbi, block_t addr)
2515 unsigned int segno = GET_SEGNO(sbi, addr);
2516 struct sit_info *sit_i = SIT_I(sbi);
2518 f2fs_bug_on(sbi, addr == NULL_ADDR);
2522 f2fs_invalidate_internal_cache(sbi, addr);
2527 update_segment_mtime(sbi, addr, 0);
2528 update_sit_entry(sbi, addr, -1);
2531 locate_dirty_segment(sbi, segno);
2536 bool f2fs_is_checkpointed_data(struct f2fs_sb_info *sbi, block_t blkaddr)
2538 struct sit_info *sit_i = SIT_I(sbi);
2548 segno = GET_SEGNO(sbi, blkaddr);
2549 se = get_seg_entry(sbi, segno);
2550 offset = GET_BLKOFF_FROM_SEG0(sbi, blkaddr);
2560 static unsigned short f2fs_curseg_valid_blocks(struct f2fs_sb_info *sbi, int type)
2562 struct curseg_info *curseg = CURSEG_I(sbi, type);
2564 if (sbi->ckpt->alloc_type[type] == SSR)
2565 return BLKS_PER_SEG(sbi);
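f2fs_curseg_valid_blocks() reconstructs fully from its two matches; the fallthrough return of curseg->next_blkoff is an assumption:

static unsigned short f2fs_curseg_valid_blocks(struct f2fs_sb_info *sbi, int type)
{
    struct curseg_info *curseg = CURSEG_I(sbi, type);

    /* SSR segments may hold summaries anywhere, so count the whole seg */
    if (sbi->ckpt->alloc_type[type] == SSR)
        return BLKS_PER_SEG(sbi);

    /* LFS: only blocks up to the write pointer carry summaries */
    return curseg->next_blkoff;
}
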
2572 int f2fs_npages_for_summary_flush(struct f2fs_sb_info *sbi, bool for_ra)
2578 if (sbi->ckpt->alloc_type[i] != SSR && for_ra)
2580 le16_to_cpu(F2FS_CKPT(sbi)->cur_data_blkoff[i]);
2582 valid_sum_count += f2fs_curseg_valid_blocks(sbi, i);
2598 struct page *f2fs_get_sum_page(struct f2fs_sb_info *sbi, unsigned int segno)
2600 if (unlikely(f2fs_cp_error(sbi)))
2602 return f2fs_get_meta_page_retry(sbi, GET_SUM_BLOCK(sbi, segno));
2605 void f2fs_update_meta_page(struct f2fs_sb_info *sbi,
2608 struct page *page = f2fs_grab_meta_page(sbi, blk_addr);
2615 static void write_sum_page(struct f2fs_sb_info *sbi,
2618 f2fs_update_meta_page(sbi, (void *)sum_blk, blk_addr);
2621 static void write_current_sum_page(struct f2fs_sb_info *sbi,
2624 struct curseg_info *curseg = CURSEG_I(sbi, type);
2625 struct page *page = f2fs_grab_meta_page(sbi, blk_addr);
2647 static int is_next_segment_free(struct f2fs_sb_info *sbi,
2651 struct free_segmap_info *free_i = FREE_I(sbi);
2653 if (segno < MAIN_SEGS(sbi) && segno % SEGS_PER_SEC(sbi))
2662 static int get_new_segment(struct f2fs_sb_info *sbi,
2665 struct free_segmap_info *free_i = FREE_I(sbi);
2667 unsigned int total_zones = MAIN_SECS(sbi) / sbi->secs_per_zone;
2668 unsigned int hint = GET_SEC_FROM_SEG(sbi, *newseg);
2669 unsigned int old_zoneno = GET_ZONE_FROM_SEG(sbi, *newseg);
2676 if (time_to_inject(sbi, FAULT_NO_SEGMENT)) {
2681 if (!new_sec && ((*newseg + 1) % SEGS_PER_SEC(sbi))) {
2683 GET_SEG_FROM_SEC(sbi, hint + 1), *newseg + 1);
2684 if (segno < GET_SEG_FROM_SEC(sbi, hint + 1))
2692 if (f2fs_sb_has_blkzoned(sbi)) {
2693 segno = pinning ? 0 : max(first_zoned_segno(sbi), *newseg);
2694 hint = GET_SEC_FROM_SEG(sbi, segno);
2698 secno = find_next_zero_bit(free_i->free_secmap, MAIN_SECS(sbi), hint);
2699 if (secno >= MAIN_SECS(sbi)) {
2701 MAIN_SECS(sbi));
2702 if (secno >= MAIN_SECS(sbi)) {
2707 segno = GET_SEG_FROM_SEC(sbi, secno);
2708 zoneno = GET_ZONE_FROM_SEC(sbi, secno);
2713 if (sbi->secs_per_zone == 1)
2718 if (CURSEG_I(sbi, i)->zone == zoneno)
2726 hint = (zoneno + 1) * sbi->secs_per_zone;
2732 f2fs_bug_on(sbi, test_bit(segno, free_i->free_segmap));
2736 !f2fs_valid_pinned_area(sbi, START_BLOCK(sbi, segno))) {
2740 __set_inuse(sbi, segno);
2746 f2fs_stop_checkpoint(sbi, false, STOP_CP_REASON_NO_SEGMENT);
2747 f2fs_bug_on(sbi, 1);
2752 static void reset_curseg(struct f2fs_sb_info *sbi, int type, int modified)
2754 struct curseg_info *curseg = CURSEG_I(sbi, type);
2764 curseg->zone = GET_ZONE_FROM_SEG(sbi, curseg->segno);
2771 sanity_check_seg_type(sbi, seg_type);
2777 __set_sit_entry_type(sbi, seg_type, curseg->segno, modified);
2780 static unsigned int __get_next_segno(struct f2fs_sb_info *sbi, int type)
2782 struct curseg_info *curseg = CURSEG_I(sbi, type);
2785 sanity_check_seg_type(sbi, seg_type);
2786 if (f2fs_need_rand_seg(sbi))
2787 return get_random_u32_below(MAIN_SECS(sbi) * SEGS_PER_SEC(sbi));
2789 if (__is_large_section(sbi))
2796 if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED)))
2802 if (SIT_I(sbi)->last_victim[ALLOC_NEXT])
2803 return SIT_I(sbi)->last_victim[ALLOC_NEXT];
2806 if (F2FS_OPTION(sbi).alloc_mode == ALLOC_MODE_REUSE)
2816 static int new_curseg(struct f2fs_sb_info *sbi, int type, bool new_sec)
2818 struct curseg_info *curseg = CURSEG_I(sbi, type);
2824 write_sum_page(sbi, curseg->sum_blk, GET_SUM_BLOCK(sbi, segno));
2826 segno = __get_next_segno(sbi, type);
2827 ret = get_new_segment(sbi, &segno, new_sec, pinning);
2835 reset_curseg(sbi, type, 1);
2837 if (F2FS_OPTION(sbi).fs_mode == FS_MODE_FRAGMENT_BLK)
2839 get_random_u32_inclusive(1, sbi->max_fragment_chunk);
2843 static int __next_free_blkoff(struct f2fs_sb_info *sbi,
2846 struct seg_entry *se = get_seg_entry(sbi, segno);
2848 unsigned long *target_map = SIT_I(sbi)->tmp_map;
2856 return __find_rev_next_zero_bit(target_map, BLKS_PER_SEG(sbi), start);
2859 static int f2fs_find_next_ssr_block(struct f2fs_sb_info *sbi,
2862 return __next_free_blkoff(sbi, seg->segno, seg->next_blkoff + 1);
2865 bool f2fs_segment_has_free_slot(struct f2fs_sb_info *sbi, int segno)
2867 return __next_free_blkoff(sbi, segno, 0) < BLKS_PER_SEG(sbi);
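__next_free_blkoff() is what lets SSR writes skip still-live blocks: it ORs the current and checkpointed validity bitmaps into the shared tmp_map and scans for the next zero bit. Reconstructed from the fragments at 2843-2856:

static int __next_free_blkoff(struct f2fs_sb_info *sbi,
                unsigned int segno, int start)
{
    struct seg_entry *se = get_seg_entry(sbi, segno);
    int entries = SIT_VBLOCK_MAP_SIZE / sizeof(unsigned long);
    unsigned long *target_map = SIT_I(sbi)->tmp_map;
    unsigned long *ckpt_map = (unsigned long *)se->ckpt_valid_map;
    unsigned long *cur_map = (unsigned long *)se->cur_valid_map;
    int i;

    /* a block is reusable only if free in both views */
    for (i = 0; i < entries; i++)
        target_map[i] = ckpt_map[i] | cur_map[i];

    return __find_rev_next_zero_bit(target_map, BLKS_PER_SEG(sbi), start);
}
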
2874 static int change_curseg(struct f2fs_sb_info *sbi, int type)
2876 struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
2877 struct curseg_info *curseg = CURSEG_I(sbi, type);
2882 write_sum_page(sbi, curseg->sum_blk, GET_SUM_BLOCK(sbi, curseg->segno));
2884 __set_test_and_inuse(sbi, new_segno);
2887 __remove_dirty_segment(sbi, new_segno, PRE);
2888 __remove_dirty_segment(sbi, new_segno, DIRTY);
2891 reset_curseg(sbi, type, 1);
2893 curseg->next_blkoff = __next_free_blkoff(sbi, curseg->segno, 0);
2895 sum_page = f2fs_get_sum_page(sbi, new_segno);
2907 static int get_ssr_segment(struct f2fs_sb_info *sbi, int type,
2910 static int get_atssr_segment(struct f2fs_sb_info *sbi, int type,
2914 struct curseg_info *curseg = CURSEG_I(sbi, type);
2919 if (get_ssr_segment(sbi, type, alloc_mode, age)) {
2920 struct seg_entry *se = get_seg_entry(sbi, curseg->next_segno);
2923 ret = change_curseg(sbi, type);
2927 ret = new_curseg(sbi, type, true);
2929 stat_inc_seg_type(sbi, curseg);
2933 static int __f2fs_init_atgc_curseg(struct f2fs_sb_info *sbi)
2935 struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_ALL_DATA_ATGC);
2938 if (!sbi->am.atgc_enabled)
2941 f2fs_down_read(&SM_I(sbi)->curseg_lock);
2944 down_write(&SIT_I(sbi)->sentry_lock);
2946 ret = get_atssr_segment(sbi, CURSEG_ALL_DATA_ATGC,
2949 up_write(&SIT_I(sbi)->sentry_lock);
2952 f2fs_up_read(&SM_I(sbi)->curseg_lock);
2955 int f2fs_init_inmem_curseg(struct f2fs_sb_info *sbi)
2957 return __f2fs_init_atgc_curseg(sbi);
2960 static void __f2fs_save_inmem_curseg(struct f2fs_sb_info *sbi, int type)
2962 struct curseg_info *curseg = CURSEG_I(sbi, type);
2968 if (get_valid_blocks(sbi, curseg->segno, false)) {
2969 write_sum_page(sbi, curseg->sum_blk,
2970 GET_SUM_BLOCK(sbi, curseg->segno));
2972 mutex_lock(&DIRTY_I(sbi)->seglist_lock);
2973 __set_test_and_free(sbi, curseg->segno, true);
2974 mutex_unlock(&DIRTY_I(sbi)->seglist_lock);
2980 void f2fs_save_inmem_curseg(struct f2fs_sb_info *sbi)
2982 __f2fs_save_inmem_curseg(sbi, CURSEG_COLD_DATA_PINNED);
2984 if (sbi->am.atgc_enabled)
2985 __f2fs_save_inmem_curseg(sbi, CURSEG_ALL_DATA_ATGC);
2988 static void __f2fs_restore_inmem_curseg(struct f2fs_sb_info *sbi, int type)
2990 struct curseg_info *curseg = CURSEG_I(sbi, type);
2995 if (get_valid_blocks(sbi, curseg->segno, false))
2998 mutex_lock(&DIRTY_I(sbi)->seglist_lock);
2999 __set_test_and_inuse(sbi, curseg->segno);
3000 mutex_unlock(&DIRTY_I(sbi)->seglist_lock);
3005 void f2fs_restore_inmem_curseg(struct f2fs_sb_info *sbi)
3007 __f2fs_restore_inmem_curseg(sbi, CURSEG_COLD_DATA_PINNED);
3009 if (sbi->am.atgc_enabled)
3010 __f2fs_restore_inmem_curseg(sbi, CURSEG_ALL_DATA_ATGC);
3013 static int get_ssr_segment(struct f2fs_sb_info *sbi, int type,
3016 struct curseg_info *curseg = CURSEG_I(sbi, type);
3022 sanity_check_seg_type(sbi, seg_type);
3025 if (!f2fs_get_victim(sbi, &segno, BG_GC, seg_type, alloc_mode, age)) {
3052 if (!f2fs_get_victim(sbi, &segno, BG_GC, i, alloc_mode, age)) {
3059 if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) {
3060 segno = get_free_segment(sbi);
3069 static bool need_new_seg(struct f2fs_sb_info *sbi, int type)
3071 struct curseg_info *curseg = CURSEG_I(sbi, type);
3073 if (!is_set_ckpt_flags(sbi, CP_CRC_RECOVERY_FLAG) &&
3077 is_next_segment_free(sbi, curseg, type) &&
3078 likely(!is_sbi_flag_set(sbi, SBI_CP_DISABLED)))
3080 if (!f2fs_need_SSR(sbi) || !get_ssr_segment(sbi, type, SSR, 0))
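need_new_seg() decides between opening a fresh LFS segment and reusing one via SSR; reconstructed, with the CURSEG_WARM_NODE clause on the elided line filled in as an assumption:

static bool need_new_seg(struct f2fs_sb_info *sbi, int type)
{
    struct curseg_info *curseg = CURSEG_I(sbi, type);

    /* without CRC-verified recovery, keep warm node segments linear */
    if (!is_set_ckpt_flags(sbi, CP_CRC_RECOVERY_FLAG) &&
            curseg->seg_type == CURSEG_WARM_NODE)
        return true;
    if (curseg->alloc_type == LFS &&
            is_next_segment_free(sbi, curseg, type) &&
            likely(!is_sbi_flag_set(sbi, SBI_CP_DISABLED)))
        return true;
    if (!f2fs_need_SSR(sbi) || !get_ssr_segment(sbi, type, SSR, 0))
        return true;
    return false;
}
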
3085 int f2fs_allocate_segment_for_resize(struct f2fs_sb_info *sbi, int type,
3088 struct curseg_info *curseg = CURSEG_I(sbi, type);
3092 f2fs_down_read(&SM_I(sbi)->curseg_lock);
3094 down_write(&SIT_I(sbi)->sentry_lock);
3096 segno = CURSEG_I(sbi, type)->segno;
3100 if (f2fs_need_SSR(sbi) && get_ssr_segment(sbi, type, SSR, 0))
3101 ret = change_curseg(sbi, type);
3103 ret = new_curseg(sbi, type, true);
3105 stat_inc_seg_type(sbi, curseg);
3107 locate_dirty_segment(sbi, segno);
3109 up_write(&SIT_I(sbi)->sentry_lock);
3112 f2fs_notice(sbi, "For resize: curseg of type %d: %u ==> %u",
3116 f2fs_up_read(&SM_I(sbi)->curseg_lock);
3120 static int __allocate_new_segment(struct f2fs_sb_info *sbi, int type,
3123 struct curseg_info *curseg = CURSEG_I(sbi, type);
3132 !get_valid_blocks(sbi, curseg->segno, new_sec) &&
3133 !get_ckpt_valid_blocks(sbi, curseg->segno, new_sec))
3138 err = new_curseg(sbi, type, true);
3141 stat_inc_seg_type(sbi, curseg);
3142 locate_dirty_segment(sbi, old_segno);
3146 int f2fs_allocate_new_section(struct f2fs_sb_info *sbi, int type, bool force)
3150 f2fs_down_read(&SM_I(sbi)->curseg_lock);
3151 down_write(&SIT_I(sbi)->sentry_lock);
3152 ret = __allocate_new_segment(sbi, type, true, force);
3153 up_write(&SIT_I(sbi)->sentry_lock);
3154 f2fs_up_read(&SM_I(sbi)->curseg_lock);
3159 int f2fs_allocate_pinning_section(struct f2fs_sb_info *sbi)
3165 f2fs_lock_op(sbi);
3166 err = f2fs_allocate_new_section(sbi, CURSEG_COLD_DATA_PINNED, false);
3167 f2fs_unlock_op(sbi);
3169 if (f2fs_sb_has_blkzoned(sbi) && err == -EAGAIN && gc_required) {
3170 f2fs_down_write(&sbi->gc_lock);
3171 err = f2fs_gc_range(sbi, 0, GET_SEGNO(sbi, FDEV(0).end_blk), true, 1);
3172 f2fs_up_write(&sbi->gc_lock);
3182 int f2fs_allocate_new_segments(struct f2fs_sb_info *sbi)
3187 f2fs_down_read(&SM_I(sbi)->curseg_lock);
3188 down_write(&SIT_I(sbi)->sentry_lock);
3190 err += __allocate_new_segment(sbi, i, false, false);
3191 up_write(&SIT_I(sbi)->sentry_lock);
3192 f2fs_up_read(&SM_I(sbi)->curseg_lock);
3197 bool f2fs_exist_trim_candidates(struct f2fs_sb_info *sbi,
3203 down_write(&SIT_I(sbi)->sentry_lock);
3205 if (add_discard_addrs(sbi, cpc, true)) {
3210 up_write(&SIT_I(sbi)->sentry_lock);
3216 static unsigned int __issue_discard_cmd_range(struct f2fs_sb_info *sbi,
3220 struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
3233 f2fs_bug_on(sbi, !f2fs_check_discard_tree(sbi));
3254 err = __submit_discard_cmd(sbi, dpolicy, dc, &issued);
3260 __remove_discard_cmd(sbi, dc);
3264 trimmed += __wait_all_discard_cmd(sbi, NULL);
3271 __remove_discard_cmd(sbi, dc);
3284 int f2fs_trim_fs(struct f2fs_sb_info *sbi, struct fstrim_range *range)
3294 bool need_align = f2fs_lfs_mode(sbi) && __is_large_section(sbi);
3296 if (start >= MAX_BLKADDR(sbi) || range->len < sbi->blocksize)
3299 if (end < MAIN_BLKADDR(sbi))
3302 if (is_sbi_flag_set(sbi, SBI_NEED_FSCK)) {
3303 f2fs_warn(sbi, "Found FS corruption, run fsck to fix.");
3308 start_segno = (start <= MAIN_BLKADDR(sbi)) ? 0 : GET_SEGNO(sbi, start);
3309 end_segno = (end >= MAX_BLKADDR(sbi)) ? MAIN_SEGS(sbi) - 1 :
3310 GET_SEGNO(sbi, end);
3312 start_segno = rounddown(start_segno, SEGS_PER_SEC(sbi));
3313 end_segno = roundup(end_segno + 1, SEGS_PER_SEC(sbi)) - 1;
3321 if (sbi->discard_blks == 0)
3324 f2fs_down_write(&sbi->gc_lock);
3325 stat_inc_cp_call_count(sbi, TOTAL_CALL);
3326 err = f2fs_write_checkpoint(sbi, &cpc);
3327 f2fs_up_write(&sbi->gc_lock);
3337 if (f2fs_realtime_discard_enable(sbi))
3340 start_block = START_BLOCK(sbi, start_segno);
3341 end_block = START_BLOCK(sbi, end_segno + 1);
3343 __init_discard_policy(sbi, &dpolicy, DPOLICY_FSTRIM, cpc.trim_minlen);
3344 trimmed = __issue_discard_cmd_range(sbi, &dpolicy,
3347 trimmed += __wait_discard_cmd_range(sbi, &dpolicy,
3394 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3400 if (ei.age <= sbi->hot_data_age_threshold)
3402 if (ei.age <= sbi->warm_data_age_threshold)
3419 if (fio->sbi->am.atgc_enabled &&
3421 (fio->sbi->gc_mode != GC_URGENT_HIGH))
3450 switch (F2FS_OPTION(fio->sbi).active_logs) {
3461 f2fs_bug_on(fio->sbi, true);
3473 static void f2fs_randomize_chunk(struct f2fs_sb_info *sbi,
3481 get_random_u32_inclusive(1, sbi->max_fragment_chunk);
3483 get_random_u32_inclusive(1, sbi->max_fragment_hole);
3493 int f2fs_allocate_data_block(struct f2fs_sb_info *sbi, struct page *page,
3498 struct sit_info *sit_i = SIT_I(sbi);
3499 struct curseg_info *curseg = CURSEG_I(sbi, type);
3506 f2fs_down_read(&SM_I(sbi)->curseg_lock);
3517 f2fs_bug_on(sbi, GET_SEGNO(sbi, old_blkaddr) == NULL_SEGNO);
3518 se = get_seg_entry(sbi, GET_SEGNO(sbi, old_blkaddr));
3519 sanity_check_seg_type(sbi, se->type);
3520 f2fs_bug_on(sbi, IS_NODESEG(se->type));
3522 *new_blkaddr = NEXT_FREE_BLKADDR(sbi, curseg);
3524 f2fs_bug_on(sbi, curseg->next_blkoff >= BLKS_PER_SEG(sbi));
3526 f2fs_wait_discard_bio(sbi, *new_blkaddr);
3530 curseg->next_blkoff = f2fs_find_next_ssr_block(sbi, curseg);
3533 if (F2FS_OPTION(sbi).fs_mode == FS_MODE_FRAGMENT_BLK)
3534 f2fs_randomize_chunk(sbi, curseg);
3536 if (curseg->next_blkoff >= f2fs_usable_blks_in_seg(sbi, curseg->segno))
3538 stat_inc_block_count(sbi, curseg);
3541 old_mtime = get_segment_mtime(sbi, old_blkaddr);
3543 update_segment_mtime(sbi, old_blkaddr, 0);
3546 update_segment_mtime(sbi, *new_blkaddr, old_mtime);
3552 update_sit_entry(sbi, *new_blkaddr, 1);
3553 update_sit_entry(sbi, old_blkaddr, -1);
3561 !((curseg->segno + 1) % sbi->segs_per_sec)) {
3567 ret = get_atssr_segment(sbi, type, se->type,
3570 if (need_new_seg(sbi, type))
3571 ret = new_curseg(sbi, type, false);
3573 ret = change_curseg(sbi, type);
3574 stat_inc_seg_type(sbi, curseg);
3587 locate_dirty_segment(sbi, GET_SEGNO(sbi, old_blkaddr));
3588 locate_dirty_segment(sbi, GET_SEGNO(sbi, *new_blkaddr));
3591 atomic64_inc(&sbi->allocated_data_blocks);
3596 fill_node_footer_blkaddr(page, NEXT_FREE_BLKADDR(sbi, curseg));
3598 f2fs_inode_chksum_set(sbi, page);
3606 io = sbi->write_io[fio->type] + fio->temp;
3613 f2fs_up_read(&SM_I(sbi)->curseg_lock);
3619 f2fs_up_read(&SM_I(sbi)->curseg_lock);
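f2fs_allocate_data_block() is the largest cluster above (3493-3619). Below is a condensed outline of its control flow; allocate_data_block_outline() is a hypothetical wrapper, and the error, ATGC, mtime-propagation, and IO-ordering details are elided:

static block_t allocate_data_block_outline(struct f2fs_sb_info *sbi,
            struct curseg_info *curseg, block_t old_blkaddr, int type)
{
    block_t new_blkaddr;

    f2fs_down_read(&SM_I(sbi)->curseg_lock);
    mutex_lock(&curseg->curseg_mutex);
    down_write(&SIT_I(sbi)->sentry_lock);

    /* claim the segment's next free slot, after any inflight discard */
    new_blkaddr = NEXT_FREE_BLKADDR(sbi, curseg);
    f2fs_wait_discard_bio(sbi, new_blkaddr);

    /* advance the write pointer: next hole for SSR, +1 for LFS */
    if (curseg->alloc_type == SSR)
        curseg->next_blkoff = f2fs_find_next_ssr_block(sbi, curseg);
    else
        curseg->next_blkoff++;

    /* account the new block; release the overwritten one */
    update_sit_entry(sbi, new_blkaddr, 1);
    if (GET_SEGNO(sbi, old_blkaddr) != NULL_SEGNO)
        update_sit_entry(sbi, old_blkaddr, -1);

    /* segment exhausted: open a fresh one (LFS) or reuse one (SSR) */
    if (curseg->next_blkoff >= f2fs_usable_blks_in_seg(sbi, curseg->segno)) {
        if (need_new_seg(sbi, type))
            new_curseg(sbi, type, false);
        else
            change_curseg(sbi, type);
    }

    locate_dirty_segment(sbi, GET_SEGNO(sbi, old_blkaddr));
    locate_dirty_segment(sbi, GET_SEGNO(sbi, new_blkaddr));

    up_write(&SIT_I(sbi)->sentry_lock);
    mutex_unlock(&curseg->curseg_mutex);
    f2fs_up_read(&SM_I(sbi)->curseg_lock);

    return new_blkaddr;
}
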
3624 void f2fs_update_device_state(struct f2fs_sb_info *sbi, nid_t ino,
3627 if (!f2fs_is_multi_device(sbi))
3631 unsigned int devidx = f2fs_target_device_index(sbi, blkaddr);
3635 f2fs_set_dirty_device(sbi, ino, devidx, FLUSH_INO);
3638 if (!f2fs_test_bit(devidx, (char *)&sbi->dirty_device)) {
3639 spin_lock(&sbi->dev_lock);
3640 f2fs_set_bit(devidx, (char *)&sbi->dirty_device);
3641 spin_unlock(&sbi->dev_lock);
3654 bool keep_order = (f2fs_lfs_mode(fio->sbi) && type == CURSEG_COLD_DATA);
3657 f2fs_down_read(&fio->sbi->io_order_lock);
3659 if (f2fs_allocate_data_block(fio->sbi, fio->page, fio->old_blkaddr,
3665 if (f2fs_in_warm_node_list(fio->sbi, fio->page))
3666 f2fs_del_fsync_node_entry(fio->sbi, fio->page);
3669 if (GET_SEGNO(fio->sbi, fio->old_blkaddr) != NULL_SEGNO)
3670 f2fs_invalidate_internal_cache(fio->sbi, fio->old_blkaddr);
3675 f2fs_update_device_state(fio->sbi, fio->ino, fio->new_blkaddr, 1);
3678 f2fs_up_read(&fio->sbi->io_order_lock);
3681 void f2fs_do_write_meta_page(struct f2fs_sb_info *sbi, struct page *page,
3685 .sbi = sbi,
3697 if (unlikely(page->index >= MAIN_BLKADDR(sbi)))
3703 stat_inc_meta_count(sbi, page->index);
3704 f2fs_update_iostat(sbi, NULL, io_type, F2FS_BLKSIZE);
3714 f2fs_update_iostat(fio->sbi, NULL, fio->io_type, F2FS_BLKSIZE);
3720 struct f2fs_sb_info *sbi = fio->sbi;
3723 f2fs_bug_on(sbi, dn->data_blkaddr == NULL_ADDR);
3730 f2fs_update_iostat(sbi, dn->inode, fio->io_type, F2FS_BLKSIZE);
3736 struct f2fs_sb_info *sbi = fio->sbi;
3743 segno = GET_SEGNO(sbi, fio->new_blkaddr);
3745 if (!IS_DATASEG(get_seg_entry(sbi, segno)->type)) {
3746 set_sbi_flag(sbi, SBI_NEED_FSCK);
3747 f2fs_warn(sbi, "%s: incorrect segment(%u) type, run fsck to fix.",
3750 f2fs_handle_error(sbi, ERROR_INCONSISTENT_SUM_TYPE);
3754 if (f2fs_cp_error(sbi)) {
3760 f2fs_truncate_meta_inode_pages(sbi, fio->new_blkaddr, 1);
3762 stat_inc_inplace_blocks(fio->sbi);
3764 if (fio->bio && !IS_F2FS_IPU_NOCACHE(sbi))
3769 f2fs_update_device_state(fio->sbi, fio->ino,
3771 f2fs_update_iostat(fio->sbi, fio->page->mapping->host,
3787 static inline int __f2fs_get_curseg(struct f2fs_sb_info *sbi,
3793 if (CURSEG_I(sbi, i)->segno == segno)
3799 void f2fs_do_replace_block(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
3804 struct sit_info *sit_i = SIT_I(sbi);
3812 segno = GET_SEGNO(sbi, new_blkaddr);
3813 se = get_seg_entry(sbi, segno);
3816 f2fs_down_write(&SM_I(sbi)->curseg_lock);
3820 if (se->valid_blocks == 0 && !IS_CURSEG(sbi, segno)) {
3827 if (IS_CURSEG(sbi, segno)) {
3829 type = __f2fs_get_curseg(sbi, segno);
3830 f2fs_bug_on(sbi, type == NO_CHECK_TYPE);
3836 f2fs_bug_on(sbi, !IS_DATASEG(type));
3837 curseg = CURSEG_I(sbi, type);
3849 if (change_curseg(sbi, type))
3853 curseg->next_blkoff = GET_BLKOFF_FROM_SEG0(sbi, new_blkaddr);
3858 update_segment_mtime(sbi, new_blkaddr, 0);
3859 update_sit_entry(sbi, new_blkaddr, 1);
3861 if (GET_SEGNO(sbi, old_blkaddr) != NULL_SEGNO) {
3862 f2fs_invalidate_internal_cache(sbi, old_blkaddr);
3864 update_segment_mtime(sbi, old_blkaddr, 0);
3865 update_sit_entry(sbi, old_blkaddr, -1);
3868 locate_dirty_segment(sbi, GET_SEGNO(sbi, old_blkaddr));
3869 locate_dirty_segment(sbi, GET_SEGNO(sbi, new_blkaddr));
3871 locate_dirty_segment(sbi, old_cursegno);
3876 if (change_curseg(sbi, type))
3886 f2fs_up_write(&SM_I(sbi)->curseg_lock);
3889 void f2fs_replace_block(struct f2fs_sb_info *sbi, struct dnode_of_data *dn,
3898 f2fs_do_replace_block(sbi, &sum, old_addr, new_addr,
3908 struct f2fs_sb_info *sbi = F2FS_P_SB(page);
3911 f2fs_submit_merged_write_cond(sbi, NULL, page, 0, type);
3913 f2fs_submit_merged_ipu_write(sbi, NULL, page);
3916 f2fs_bug_on(sbi, locked && PageWriteback(page));
3925 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3934 cpage = find_lock_page(META_MAPPING(sbi), blkaddr);
3944 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3953 f2fs_truncate_meta_inode_pages(sbi, blkaddr, len);
3956 static int read_compacted_summaries(struct f2fs_sb_info *sbi)
3958 struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
3965 start = start_sum_block(sbi);
3967 page = f2fs_get_meta_page(sbi, start++);
3973 seg_i = CURSEG_I(sbi, CURSEG_HOT_DATA);
3977 seg_i = CURSEG_I(sbi, CURSEG_COLD_DATA);
3986 seg_i = CURSEG_I(sbi, i);
3990 reset_curseg(sbi, i, 0);
3995 blk_off = BLKS_PER_SEG(sbi);
4010 page = f2fs_get_meta_page(sbi, start++);
4021 static int read_normal_summaries(struct f2fs_sb_info *sbi, int type)
4023 struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
4037 if (__exist_node_summaries(sbi))
4038 blk_addr = sum_blk_addr(sbi, NR_CURSEG_PERSIST_TYPE, type);
4040 blk_addr = sum_blk_addr(sbi, NR_CURSEG_DATA_TYPE, type);
4046 if (__exist_node_summaries(sbi))
4047 blk_addr = sum_blk_addr(sbi, NR_CURSEG_NODE_TYPE,
4050 blk_addr = GET_SUM_BLOCK(sbi, segno);
4053 new = f2fs_get_meta_page(sbi, blk_addr);
4059 if (__exist_node_summaries(sbi)) {
4063 for (i = 0; i < BLKS_PER_SEG(sbi); i++, ns++) {
4068 err = f2fs_restore_node_summary(sbi, segno, sum);
4075 curseg = CURSEG_I(sbi, type);
4086 reset_curseg(sbi, type, 0);
4095 static int restore_curseg_summaries(struct f2fs_sb_info *sbi)
4097 struct f2fs_journal *sit_j = CURSEG_I(sbi, CURSEG_COLD_DATA)->journal;
4098 struct f2fs_journal *nat_j = CURSEG_I(sbi, CURSEG_HOT_DATA)->journal;
4102 if (is_set_ckpt_flags(sbi, CP_COMPACT_SUM_FLAG)) {
4103 int npages = f2fs_npages_for_summary_flush(sbi, true);
4106 f2fs_ra_meta_pages(sbi, start_sum_block(sbi), npages,
4110 err = read_compacted_summaries(sbi);
4116 if (__exist_node_summaries(sbi))
4117 f2fs_ra_meta_pages(sbi,
4118 sum_blk_addr(sbi, NR_CURSEG_PERSIST_TYPE, type),
4122 err = read_normal_summaries(sbi, type);
4130 f2fs_err(sbi, "invalid journal entries nats %u sits %u",
4138 static void write_compacted_summaries(struct f2fs_sb_info *sbi, block_t blkaddr)
4147 page = f2fs_grab_meta_page(sbi, blkaddr++);
4152 seg_i = CURSEG_I(sbi, CURSEG_HOT_DATA);
4157 seg_i = CURSEG_I(sbi, CURSEG_COLD_DATA);
4163 seg_i = CURSEG_I(sbi, i);
4164 for (j = 0; j < f2fs_curseg_valid_blocks(sbi, i); j++) {
4166 page = f2fs_grab_meta_page(sbi, blkaddr++);
4190 static void write_normal_summaries(struct f2fs_sb_info *sbi,
4201 write_current_sum_page(sbi, i, blkaddr + (i - type));
4204 void f2fs_write_data_summaries(struct f2fs_sb_info *sbi, block_t start_blk)
4206 if (is_set_ckpt_flags(sbi, CP_COMPACT_SUM_FLAG))
4207 write_compacted_summaries(sbi, start_blk);
4209 write_normal_summaries(sbi, start_blk, CURSEG_HOT_DATA);
4212 void f2fs_write_node_summaries(struct f2fs_sb_info *sbi, block_t start_blk)
4214 write_normal_summaries(sbi, start_blk, CURSEG_HOT_NODE);
4239 static struct page *get_current_sit_page(struct f2fs_sb_info *sbi,
4242 return f2fs_get_meta_page(sbi, current_sit_addr(sbi, segno));
4245 static struct page *get_next_sit_page(struct f2fs_sb_info *sbi,
4248 struct sit_info *sit_i = SIT_I(sbi);
4252 src_off = current_sit_addr(sbi, start);
4253 dst_off = next_sit_addr(sbi, src_off);
4255 page = f2fs_grab_meta_page(sbi, dst_off);
4256 seg_info_to_sit_page(sbi, page, start);
4318 static void add_sits_in_set(struct f2fs_sb_info *sbi)
4320 struct f2fs_sm_info *sm_info = SM_I(sbi);
4322 unsigned long *bitmap = SIT_I(sbi)->dirty_sentries_bitmap;
4325 for_each_set_bit(segno, bitmap, MAIN_SEGS(sbi))
4329 static void remove_sits_in_journal(struct f2fs_sb_info *sbi)
4331 struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_COLD_DATA);
4341 dirtied = __mark_sit_entry_dirty(sbi, segno);
4344 add_sit_entry(segno, &SM_I(sbi)->sit_entry_set);
4354 void f2fs_flush_sit_entries(struct f2fs_sb_info *sbi, struct cp_control *cpc)
4356 struct sit_info *sit_i = SIT_I(sbi);
4358 struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_COLD_DATA);
4361 struct list_head *head = &SM_I(sbi)->sit_entry_set;
4362 bool to_journal = !is_sbi_flag_set(sbi, SBI_IS_RESIZEFS);
4374 add_sits_in_set(sbi);
4383 remove_sits_in_journal(sbi);
4395 (unsigned long)MAIN_SEGS(sbi));
4405 page = get_next_sit_page(sbi, start_segno);
4413 se = get_seg_entry(sbi, segno);
4417 f2fs_bug_on(sbi, 1);
4423 add_discard_addrs(sbi, cpc, false);
4429 f2fs_bug_on(sbi, offset < 0);
4434 check_block_count(sbi, segno,
4440 check_block_count(sbi, segno,
4454 f2fs_bug_on(sbi, ses->entry_cnt);
4458 f2fs_bug_on(sbi, !list_empty(head));
4459 f2fs_bug_on(sbi, sit_i->dirty_sentries);
4465 add_discard_addrs(sbi, cpc, false);
4471 set_prefree_as_free_segments(sbi);
4474 static int build_sit_info(struct f2fs_sb_info *sbi)
4476 struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi);
4481 unsigned int discard_map = f2fs_block_unit_discard(sbi) ? 1 : 0;
4484 sit_i = f2fs_kzalloc(sbi, sizeof(struct sit_info), GFP_KERNEL);
4488 SM_I(sbi)->sit_info = sit_i;
4491 f2fs_kvzalloc(sbi, array_size(sizeof(struct seg_entry),
4492 MAIN_SEGS(sbi)),
4497 main_bitmap_size = f2fs_bitmap_size(MAIN_SEGS(sbi));
4498 sit_i->dirty_sentries_bitmap = f2fs_kvzalloc(sbi, main_bitmap_size,
4504 bitmap_size = MAIN_SEGS(sbi) * SIT_VBLOCK_MAP_SIZE * (3 + discard_map);
4506 bitmap_size = MAIN_SEGS(sbi) * SIT_VBLOCK_MAP_SIZE * (2 + discard_map);
4508 sit_i->bitmap = f2fs_kvzalloc(sbi, bitmap_size, GFP_KERNEL);
4514 for (start = 0; start < MAIN_SEGS(sbi); start++) {
4532 sit_i->tmp_map = f2fs_kzalloc(sbi, SIT_VBLOCK_MAP_SIZE, GFP_KERNEL);
4536 if (__is_large_section(sbi)) {
4538 f2fs_kvzalloc(sbi, array_size(sizeof(struct sec_entry),
4539 MAIN_SECS(sbi)),
4549 sit_bitmap_size = __bitmap_size(sbi, SIT_BITMAP);
4550 src_bitmap = __bitmap_ptr(sbi, SIT_BITMAP);
4562 sit_i->invalid_segmap = f2fs_kvzalloc(sbi,
4569 sit_i->sit_blocks = SEGS_TO_BLKS(sbi, sit_segs);
4574 sit_i->elapsed_time = le64_to_cpu(sbi->ckpt->elapsed_time);
4580 static int build_free_segmap(struct f2fs_sb_info *sbi)
4586 free_i = f2fs_kzalloc(sbi, sizeof(struct free_segmap_info), GFP_KERNEL);
4590 SM_I(sbi)->free_info = free_i;
4592 bitmap_size = f2fs_bitmap_size(MAIN_SEGS(sbi));
4593 free_i->free_segmap = f2fs_kvmalloc(sbi, bitmap_size, GFP_KERNEL);
4597 sec_bitmap_size = f2fs_bitmap_size(MAIN_SECS(sbi));
4598 free_i->free_secmap = f2fs_kvmalloc(sbi, sec_bitmap_size, GFP_KERNEL);
4607 free_i->start_segno = GET_SEGNO_FROM_SEG0(sbi, MAIN_BLKADDR(sbi));
4614 static int build_curseg(struct f2fs_sb_info *sbi)
4619 array = f2fs_kzalloc(sbi, array_size(NR_CURSEG_TYPE,
4624 SM_I(sbi)->curseg_array = array;
4628 array[i].sum_blk = f2fs_kzalloc(sbi, PAGE_SIZE, GFP_KERNEL);
4632 array[i].journal = f2fs_kzalloc(sbi,
4644 return restore_curseg_summaries(sbi);
4647 static int build_sit_entries(struct f2fs_sb_info *sbi)
4649 struct sit_info *sit_i = SIT_I(sbi);
4650 struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_COLD_DATA);
4654 int sit_blk_cnt = SIT_BLK_CNT(sbi);
4661 readed = f2fs_ra_meta_pages(sbi, start_blk, BIO_MAX_VECS,
4667 for (; start < end && start < MAIN_SEGS(sbi); start++) {
4672 page = get_current_sit_page(sbi, start);
4679 err = check_block_count(sbi, start, &sit);
4685 f2fs_err(sbi, "Invalid segment type: %u, segno: %u",
4687 f2fs_handle_error(sbi,
4694 if (!f2fs_block_unit_discard(sbi))
4698 if (is_set_ckpt_flags(sbi, CP_TRIMMED_FLAG)) {
4705 sbi->discard_blks += BLKS_PER_SEG(sbi) -
4708 if (__is_large_section(sbi))
4709 get_sec_entry(sbi, start)->valid_blocks +=
4720 if (start >= MAIN_SEGS(sbi)) {
4721 f2fs_err(sbi, "Wrong journal entry on segno %u",
4724 f2fs_handle_error(sbi, ERROR_CORRUPTED_JOURNAL);
4735 err = check_block_count(sbi, start, &sit);
4741 f2fs_err(sbi, "Invalid segment type: %u, segno: %u",
4744 f2fs_handle_error(sbi, ERROR_INCONSISTENT_SUM_TYPE);
4750 if (f2fs_block_unit_discard(sbi)) {
4751 if (is_set_ckpt_flags(sbi, CP_TRIMMED_FLAG)) {
4756 sbi->discard_blks += old_valid_blocks;
4757 sbi->discard_blks -= se->valid_blocks;
4761 if (__is_large_section(sbi)) {
4762 get_sec_entry(sbi, start)->valid_blocks +=
4764 get_sec_entry(sbi, start)->valid_blocks -=
4773 if (sit_valid_blocks[NODE] != valid_node_count(sbi)) {
4774 f2fs_err(sbi, "SIT is corrupted node# %u vs %u",
4775 sit_valid_blocks[NODE], valid_node_count(sbi));
4776 f2fs_handle_error(sbi, ERROR_INCONSISTENT_NODE_COUNT);
4781 valid_user_blocks(sbi)) {
4782 f2fs_err(sbi, "SIT is corrupted data# %u %u vs %u",
4784 valid_user_blocks(sbi));
4785 f2fs_handle_error(sbi, ERROR_INCONSISTENT_BLOCK_COUNT);
4792 static void init_free_segmap(struct f2fs_sb_info *sbi)
4798 for (start = 0; start < MAIN_SEGS(sbi); start++) {
4799 if (f2fs_usable_blks_in_seg(sbi, start) == 0)
4801 sentry = get_seg_entry(sbi, start);
4803 __set_free(sbi, start);
4805 SIT_I(sbi)->written_valid_blocks +=
4811 struct curseg_info *curseg_t = CURSEG_I(sbi, type);
4813 __set_test_and_inuse(sbi, curseg_t->segno);
4817 static void init_dirty_segmap(struct f2fs_sb_info *sbi)
4819 struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
4820 struct free_segmap_info *free_i = FREE_I(sbi);
4826 segno = find_next_inuse(free_i, MAIN_SEGS(sbi), offset);
4827 if (segno >= MAIN_SEGS(sbi))
4830 valid_blocks = get_valid_blocks(sbi, segno, false);
4831 usable_blks_in_seg = f2fs_usable_blks_in_seg(sbi, segno);
4835 f2fs_bug_on(sbi, 1);
4839 __locate_dirty_segment(sbi, segno, DIRTY);
4843 if (!__is_large_section(sbi))
4847 for (segno = 0; segno < MAIN_SEGS(sbi); segno += SEGS_PER_SEC(sbi)) {
4848 valid_blocks = get_valid_blocks(sbi, segno, true);
4849 secno = GET_SEC_FROM_SEG(sbi, segno);
4851 if (!valid_blocks || valid_blocks == CAP_BLKS_PER_SEC(sbi))
4853 if (IS_CURSEC(sbi, secno))
4860 static int init_victim_secmap(struct f2fs_sb_info *sbi)
4862 struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
4863 unsigned int bitmap_size = f2fs_bitmap_size(MAIN_SECS(sbi));
4865 dirty_i->victim_secmap = f2fs_kvzalloc(sbi, bitmap_size, GFP_KERNEL);
4869 dirty_i->pinned_secmap = f2fs_kvzalloc(sbi, bitmap_size, GFP_KERNEL);
4878 static int build_dirty_segmap(struct f2fs_sb_info *sbi)
4884 dirty_i = f2fs_kzalloc(sbi, sizeof(struct dirty_seglist_info),
4889 SM_I(sbi)->dirty_info = dirty_i;
4892 bitmap_size = f2fs_bitmap_size(MAIN_SEGS(sbi));
4895 dirty_i->dirty_segmap[i] = f2fs_kvzalloc(sbi, bitmap_size,
4901 if (__is_large_section(sbi)) {
4902 bitmap_size = f2fs_bitmap_size(MAIN_SECS(sbi));
4903 dirty_i->dirty_secmap = f2fs_kvzalloc(sbi,
4909 init_dirty_segmap(sbi);
4910 return init_victim_secmap(sbi);
4913 static int sanity_check_curseg(struct f2fs_sb_info *sbi)
4922 struct curseg_info *curseg = CURSEG_I(sbi, i);
4923 struct seg_entry *se = get_seg_entry(sbi, curseg->segno);
4926 if (f2fs_sb_has_readonly(sbi) &&
4930 sanity_check_seg_type(sbi, curseg->seg_type);
4933 f2fs_err(sbi,
4936 f2fs_handle_error(sbi, ERROR_INVALID_CURSEG);
4946 for (blkofs += 1; blkofs < BLKS_PER_SEG(sbi); blkofs++) {
4950 f2fs_err(sbi,
4954 f2fs_handle_error(sbi, ERROR_INVALID_CURSEG);
4973 static int check_zone_write_pointer(struct f2fs_sb_info *sbi,
4979 unsigned int log_sectors_per_block = sbi->log_blocksize - SECTOR_SHIFT;
4987 zone_segno = GET_SEGNO(sbi, zone_block);
4993 if (zone_segno >= MAIN_SEGS(sbi))
4999 valid_block_cnt = get_valid_blocks(sbi, zone_segno, true);
5000 if (IS_CURSEC(sbi, GET_SEC_FROM_SEG(sbi, zone_segno))) {
5001 f2fs_notice(sbi, "Open zones: valid block[0x%x,0x%x] cond[%s]",
5012 f2fs_notice(sbi, "Zone without valid block has non-zero write "
5015 ret = __f2fs_issue_discard_zone(sbi, fdev->bdev, zone_block,
5018 f2fs_err(sbi, "Discard zone failed: %s (errno=%d)",
5030 f2fs_notice(sbi, "Valid blocks are not aligned with write "
5043 f2fs_err(sbi, "Fill up zone failed: %s (errno=%d)",
5046 f2fs_err(sbi, "Finishing zone failed: %s (errno=%d)",
5053 static struct f2fs_dev_info *get_target_zoned_dev(struct f2fs_sb_info *sbi,
5058 for (i = 0; i < sbi->s_ndevs; i++) {
5061 if (sbi->s_ndevs == 1 || (FDEV(i).start_blk <= zone_blkaddr &&
5076 static int fix_curseg_write_pointer(struct f2fs_sb_info *sbi, int type)
5078 struct curseg_info *cs = CURSEG_I(sbi, type);
5083 unsigned int log_sectors_per_block = sbi->log_blocksize - SECTOR_SHIFT;
5087 cs_section = GET_SEC_FROM_SEG(sbi, cs->segno);
5088 cs_zone_block = START_BLOCK(sbi, GET_SEG_FROM_SEC(sbi, cs_section));
5090 zbd = get_target_zoned_dev(sbi, cs_zone_block);
5100 f2fs_err(sbi, "Report zone failed: %s errno=(%d)",
5112 if (is_set_ckpt_flags(sbi, CP_UMOUNT_FLAG)) {
5114 wp_segno = GET_SEGNO(sbi, wp_block);
5115 wp_blkoff = wp_block - START_BLOCK(sbi, wp_segno);
5122 f2fs_notice(sbi, "Unaligned curseg[%d] with write pointer: "
5131 f2fs_allocate_new_section(sbi, type, true);
5132 f2fs_notice(sbi, "Assign new section to curseg[%d]: "
5139 if (check_zone_write_pointer(sbi, zbd, &zone))
5143 cs_section = GET_SEC_FROM_SEG(sbi, cs->segno);
5144 cs_zone_block = START_BLOCK(sbi, GET_SEG_FROM_SEC(sbi, cs_section));
5146 zbd = get_target_zoned_dev(sbi, cs_zone_block);
5155 f2fs_err(sbi, "Report zone failed: %s errno=(%d)",
5164 f2fs_notice(sbi,
5168 err = __f2fs_issue_discard_zone(sbi, zbd->bdev, cs_zone_block,
5171 f2fs_err(sbi, "Discard zone failed: %s (errno=%d)",
5180 int f2fs_fix_curseg_write_pointer(struct f2fs_sb_info *sbi)
5185 ret = fix_curseg_write_pointer(sbi, i);
5194 struct f2fs_sb_info *sbi;
5205 return check_zone_write_pointer(args->sbi, args->fdev, zone);
5208 int f2fs_check_write_pointer(struct f2fs_sb_info *sbi)
5213 for (i = 0; i < sbi->s_ndevs; i++) {
5217 args.sbi = sbi;
5237 struct f2fs_sb_info *sbi, unsigned int segno)
5242 if (!sbi->unusable_blocks_per_sec)
5243 return BLKS_PER_SEG(sbi);
5245 secno = GET_SEC_FROM_SEG(sbi, segno);
5246 seg_start = START_BLOCK(sbi, segno);
5247 sec_start_blkaddr = START_BLOCK(sbi, GET_SEG_FROM_SEC(sbi, secno));
5248 sec_cap_blkaddr = sec_start_blkaddr + CAP_BLKS_PER_SEC(sbi);
5258 if (seg_start + BLKS_PER_SEG(sbi) > sec_cap_blkaddr)
5261 return BLKS_PER_SEG(sbi);
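With zoned devices a segment may straddle the zone capacity limit; the fragments at 5237-5261 reconstruct into f2fs_usable_zone_blks_in_seg() (the name matches the stub and caller at 5274 and 5285):

static inline unsigned int f2fs_usable_zone_blks_in_seg(
            struct f2fs_sb_info *sbi, unsigned int segno)
{
    block_t seg_start, sec_start_blkaddr, sec_cap_blkaddr;
    unsigned int secno;

    if (!sbi->unusable_blocks_per_sec)
        return BLKS_PER_SEG(sbi);

    secno = GET_SEC_FROM_SEG(sbi, segno);
    seg_start = START_BLOCK(sbi, segno);
    sec_start_blkaddr = START_BLOCK(sbi, GET_SEG_FROM_SEC(sbi, secno));
    sec_cap_blkaddr = sec_start_blkaddr + CAP_BLKS_PER_SEC(sbi);

    /* entirely beyond zone capacity: no usable blocks at all */
    if (seg_start >= sec_cap_blkaddr)
        return 0;
    /* straddles capacity: usable only up to the capacity boundary */
    if (seg_start + BLKS_PER_SEG(sbi) > sec_cap_blkaddr)
        return sec_cap_blkaddr - seg_start;
    return BLKS_PER_SEG(sbi);
}
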
5264 int f2fs_fix_curseg_write_pointer(struct f2fs_sb_info *sbi)
5269 int f2fs_check_write_pointer(struct f2fs_sb_info *sbi)
5274 static inline unsigned int f2fs_usable_zone_blks_in_seg(struct f2fs_sb_info *sbi,
5281 unsigned int f2fs_usable_blks_in_seg(struct f2fs_sb_info *sbi,
5284 if (f2fs_sb_has_blkzoned(sbi))
5285 return f2fs_usable_zone_blks_in_seg(sbi, segno);
5287 return BLKS_PER_SEG(sbi);
5290 unsigned int f2fs_usable_segs_in_sec(struct f2fs_sb_info *sbi,
5293 if (f2fs_sb_has_blkzoned(sbi))
5294 return CAP_SEGS_PER_SEC(sbi);
5296 return SEGS_PER_SEC(sbi);
5302 static void init_min_max_mtime(struct f2fs_sb_info *sbi)
5304 struct sit_info *sit_i = SIT_I(sbi);
5311 for (segno = 0; segno < MAIN_SEGS(sbi); segno += SEGS_PER_SEC(sbi)) {
5315 for (i = 0; i < SEGS_PER_SEC(sbi); i++)
5316 mtime += get_seg_entry(sbi, segno + i)->mtime;
5318 mtime = div_u64(mtime, SEGS_PER_SEC(sbi));
5323 sit_i->max_mtime = get_mtime(sbi, false);
5328 int f2fs_build_segment_manager(struct f2fs_sb_info *sbi)
5330 struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi);
5331 struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
5335 sm_info = f2fs_kzalloc(sbi, sizeof(struct f2fs_sm_info), GFP_KERNEL);
5340 sbi->sm_info = sm_info;
5353 if (!f2fs_lfs_mode(sbi))
5357 sm_info->min_seq_blocks = BLKS_PER_SEG(sbi);
5359 sm_info->min_ssr_sections = reserved_sections(sbi);
5365 err = f2fs_create_flush_cmd_control(sbi);
5369 err = create_discard_cmd_control(sbi);
5373 err = build_sit_info(sbi);
5376 err = build_free_segmap(sbi);
5379 err = build_curseg(sbi);
5384 err = build_sit_entries(sbi);
5388 init_free_segmap(sbi);
5389 err = build_dirty_segmap(sbi);
5393 err = sanity_check_curseg(sbi);
5397 init_min_max_mtime(sbi);
5401 static void discard_dirty_segmap(struct f2fs_sb_info *sbi,
5404 struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
5412 static void destroy_victim_secmap(struct f2fs_sb_info *sbi)
5414 struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
5420 static void destroy_dirty_segmap(struct f2fs_sb_info *sbi)
5422 struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
5430 discard_dirty_segmap(sbi, i);
5432 if (__is_large_section(sbi)) {
5438 destroy_victim_secmap(sbi);
5439 SM_I(sbi)->dirty_info = NULL;
5443 static void destroy_curseg(struct f2fs_sb_info *sbi)
5445 struct curseg_info *array = SM_I(sbi)->curseg_array;
5450 SM_I(sbi)->curseg_array = NULL;
5458 static void destroy_free_segmap(struct f2fs_sb_info *sbi)
5460 struct free_segmap_info *free_i = SM_I(sbi)->free_info;
5464 SM_I(sbi)->free_info = NULL;
5470 static void destroy_sit_info(struct f2fs_sb_info *sbi)
5472 struct sit_info *sit_i = SIT_I(sbi);
5485 SM_I(sbi)->sit_info = NULL;
5494 void f2fs_destroy_segment_manager(struct f2fs_sb_info *sbi)
5496 struct f2fs_sm_info *sm_info = SM_I(sbi);
5500 f2fs_destroy_flush_cmd_control(sbi, true);
5501 destroy_discard_cmd_control(sbi);
5502 destroy_dirty_segmap(sbi);
5503 destroy_curseg(sbi);
5504 destroy_free_segmap(sbi);
5505 destroy_sit_info(sbi);
5506 sbi->sm_info = NULL;