// SPDX-License-Identifier: GPL-2.0
/*
 * fs/f2fs/gc.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 */
#include <linux/fs.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/f2fs_fs.h>
#include <linux/kthread.h>
#include <linux/delay.h>
#include <linux/freezer.h>
#include <linux/sched/signal.h>
#include <linux/random.h>
#include <linux/sched/mm.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include "gc.h"
#include "iostat.h"
#include <trace/events/f2fs.h>

static struct kmem_cache *victim_entry_slab;

static unsigned int count_bits(const unsigned long *addr,
				unsigned int offset, unsigned int len);

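/*
 * Background GC thread. It sleeps for an adaptive interval and, on each
 * wakeup, decides whether to run a GC pass: urgent modes shorten the sleep
 * time and take gc_lock unconditionally, a waiter on fggc_wq forces a
 * foreground pass when GC_MERGE is enabled, and an idle check gates ordinary
 * background passes.
 */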
static int gc_thread_func(void *data)
{
	struct f2fs_sb_info *sbi = data;
	struct f2fs_gc_kthread *gc_th = sbi->gc_thread;
	wait_queue_head_t *wq = &sbi->gc_thread->gc_wait_queue_head;
	wait_queue_head_t *fggc_wq = &sbi->gc_thread->fggc_wq;
	unsigned int wait_ms;
	struct f2fs_gc_control gc_control = {
		.victim_segno = NULL_SEGNO,
		.should_migrate_blocks = false,
		.err_gc_skipped = false };

	wait_ms = gc_th->min_sleep_time;

	set_freezable();
	do {
		bool sync_mode, foreground = false;

		wait_event_freezable_timeout(*wq,
				kthread_should_stop() ||
				waitqueue_active(fggc_wq) ||
				gc_th->gc_wake,
				msecs_to_jiffies(wait_ms));

		if (test_opt(sbi, GC_MERGE) && waitqueue_active(fggc_wq))
			foreground = true;

		/* give it a try one time */
		if (gc_th->gc_wake)
			gc_th->gc_wake = false;

		if (f2fs_readonly(sbi->sb)) {
			stat_other_skip_bggc_count(sbi);
			continue;
		}
		if (kthread_should_stop())
			break;

		if (sbi->sb->s_writers.frozen >= SB_FREEZE_WRITE) {
			increase_sleep_time(gc_th, &wait_ms);
			stat_other_skip_bggc_count(sbi);
			continue;
		}

		if (time_to_inject(sbi, FAULT_CHECKPOINT))
			f2fs_stop_checkpoint(sbi, false,
					STOP_CP_REASON_FAULT_INJECT);

		if (!sb_start_write_trylock(sbi->sb)) {
			stat_other_skip_bggc_count(sbi);
			continue;
		}

		/*
		 * [GC triggering condition]
		 * 0. GC is not conducted currently.
		 * 1. There are enough dirty segments.
		 * 2. IO subsystem is idle by checking the # of writeback pages.
		 * 3. IO subsystem is idle by checking the # of requests in
		 *    bdev's request list.
		 *
		 * Note) We have to avoid triggering GCs too frequently,
		 * because some segments may be invalidated soon afterwards
		 * by user updates or deletions. So we wait a while to let
		 * dirty segments accumulate.
		 */
		if (sbi->gc_mode == GC_URGENT_HIGH ||
				sbi->gc_mode == GC_URGENT_MID) {
			wait_ms = gc_th->urgent_sleep_time;
			f2fs_down_write(&sbi->gc_lock);
			goto do_gc;
		}

		if (foreground) {
			f2fs_down_write(&sbi->gc_lock);
			goto do_gc;
		} else if (!f2fs_down_write_trylock(&sbi->gc_lock)) {
			stat_other_skip_bggc_count(sbi);
			goto next;
		}

		if (!is_idle(sbi, GC_TIME)) {
			increase_sleep_time(gc_th, &wait_ms);
			f2fs_up_write(&sbi->gc_lock);
			stat_io_skip_bggc_count(sbi);
			goto next;
		}

		if (has_enough_invalid_blocks(sbi))
			decrease_sleep_time(gc_th, &wait_ms);
		else
			increase_sleep_time(gc_th, &wait_ms);
do_gc:
		stat_inc_gc_call_count(sbi, foreground ?
					FOREGROUND : BACKGROUND);

		sync_mode = F2FS_OPTION(sbi).bggc_mode == BGGC_MODE_SYNC;

		/* foreground GC has been triggered via f2fs_balance_fs() */
		if (foreground)
			sync_mode = false;

		gc_control.init_gc_type = sync_mode ? FG_GC : BG_GC;
		gc_control.no_bg_gc = foreground;
		gc_control.nr_free_secs = foreground ? 1 : 0;

		/* if return value is not zero, no victim was selected */
		if (f2fs_gc(sbi, &gc_control)) {
			/* don't adjust wait_ms for foreground gc */
			if (!foreground)
				wait_ms = gc_th->no_gc_sleep_time;
		} else {
			/* reset wait_ms to default sleep time */
			if (wait_ms == gc_th->no_gc_sleep_time)
				wait_ms = gc_th->min_sleep_time;
		}

		if (foreground)
			wake_up_all(&gc_th->fggc_wq);

		trace_f2fs_background_gc(sbi->sb, wait_ms,
				prefree_segments(sbi), free_segments(sbi));

		/* balancing f2fs's metadata periodically */
		f2fs_balance_fs_bg(sbi, true);
next:
		if (sbi->gc_mode != GC_NORMAL) {
			spin_lock(&sbi->gc_remaining_trials_lock);
			if (sbi->gc_remaining_trials) {
				sbi->gc_remaining_trials--;
				if (!sbi->gc_remaining_trials)
					sbi->gc_mode = GC_NORMAL;
			}
			spin_unlock(&sbi->gc_remaining_trials_lock);
		}
		sb_end_write(sbi->sb);

	} while (!kthread_should_stop());
	return 0;
}

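/*
 * Allocate the GC thread context, initialize its sleep intervals and wait
 * queues, and start the "f2fs_gc-%u:%u" kthread. On failure, the context is
 * freed and the kthread_run() error is returned.
 */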
int f2fs_start_gc_thread(struct f2fs_sb_info *sbi)
{
	struct f2fs_gc_kthread *gc_th;
	dev_t dev = sbi->sb->s_bdev->bd_dev;

	gc_th = f2fs_kmalloc(sbi, sizeof(struct f2fs_gc_kthread), GFP_KERNEL);
	if (!gc_th)
		return -ENOMEM;

	gc_th->urgent_sleep_time = DEF_GC_THREAD_URGENT_SLEEP_TIME;
	gc_th->min_sleep_time = DEF_GC_THREAD_MIN_SLEEP_TIME;
	gc_th->max_sleep_time = DEF_GC_THREAD_MAX_SLEEP_TIME;
	gc_th->no_gc_sleep_time = DEF_GC_THREAD_NOGC_SLEEP_TIME;

	gc_th->gc_wake = false;

	sbi->gc_thread = gc_th;
	init_waitqueue_head(&sbi->gc_thread->gc_wait_queue_head);
	init_waitqueue_head(&sbi->gc_thread->fggc_wq);
	sbi->gc_thread->f2fs_gc_task = kthread_run(gc_thread_func, sbi,
			"f2fs_gc-%u:%u", MAJOR(dev), MINOR(dev));
	if (IS_ERR(gc_th->f2fs_gc_task)) {
		int err = PTR_ERR(gc_th->f2fs_gc_task);

		kfree(gc_th);
		sbi->gc_thread = NULL;
		return err;
	}

	return 0;
}

void f2fs_stop_gc_thread(struct f2fs_sb_info *sbi)
{
	struct f2fs_gc_kthread *gc_th = sbi->gc_thread;

	if (!gc_th)
		return;
	kthread_stop(gc_th->f2fs_gc_task);
	wake_up_all(&gc_th->fggc_wq);
	kfree(gc_th);
	sbi->gc_thread = NULL;
}

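/*
 * Map the GC type to a victim selection algorithm: background GC prefers
 * age-threshold GC (GC_AT) when ATGC is enabled and cost-benefit (GC_CB)
 * otherwise, while foreground GC uses greedy selection. A user-set
 * sbi->gc_mode overrides these defaults.
 */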
static int select_gc_type(struct f2fs_sb_info *sbi, int gc_type)
{
	int gc_mode;

	if (gc_type == BG_GC) {
		if (sbi->am.atgc_enabled)
			gc_mode = GC_AT;
		else
			gc_mode = GC_CB;
	} else {
		gc_mode = GC_GREEDY;
	}

	switch (sbi->gc_mode) {
	case GC_IDLE_CB:
		gc_mode = GC_CB;
		break;
	case GC_IDLE_GREEDY:
	case GC_URGENT_HIGH:
		gc_mode = GC_GREEDY;
		break;
	case GC_IDLE_AT:
		gc_mode = GC_AT;
		break;
	}

	return gc_mode;
}

static void select_policy(struct f2fs_sb_info *sbi, int gc_type,
			int type, struct victim_sel_policy *p)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);

	if (p->alloc_mode == SSR || p->alloc_mode == AT_SSR) {
		p->gc_mode = GC_GREEDY;
		p->dirty_bitmap = dirty_i->dirty_segmap[type];
		p->max_search = dirty_i->nr_dirty[type];
		p->ofs_unit = 1;
	} else {
		p->gc_mode = select_gc_type(sbi, gc_type);
		p->ofs_unit = SEGS_PER_SEC(sbi);
		if (__is_large_section(sbi)) {
			p->dirty_bitmap = dirty_i->dirty_secmap;
			p->max_search = count_bits(p->dirty_bitmap,
						0, MAIN_SECS(sbi));
		} else {
			p->dirty_bitmap = dirty_i->dirty_segmap[DIRTY];
			p->max_search = dirty_i->nr_dirty[DIRTY];
		}
	}

	/*
	 * Adjust the candidate range: all dirty segments should be
	 * selectable for the foreground GC and urgent GC cases.
	 */
	if (gc_type != FG_GC &&
			(sbi->gc_mode != GC_URGENT_HIGH) &&
			(p->gc_mode != GC_AT && p->alloc_mode != AT_SSR) &&
			p->max_search > sbi->max_victim_search)
		p->max_search = sbi->max_victim_search;

	/* let's select beginning hot/small space first. */
	if (f2fs_need_rand_seg(sbi))
		p->offset = get_random_u32_below(MAIN_SECS(sbi) *
						SEGS_PER_SEC(sbi));
	else if (type == CURSEG_HOT_DATA || IS_NODESEG(type))
		p->offset = 0;
	else
		p->offset = SIT_I(sbi)->last_victim[p->gc_mode];
}

static unsigned int get_max_cost(struct f2fs_sb_info *sbi,
				struct victim_sel_policy *p)
{
	/* SSR allocates in a segment unit */
	if (p->alloc_mode == SSR)
		return BLKS_PER_SEG(sbi);
	else if (p->alloc_mode == AT_SSR)
		return UINT_MAX;

	/* LFS */
	if (p->gc_mode == GC_GREEDY)
		return SEGS_TO_BLKS(sbi, 2 * p->ofs_unit);
	else if (p->gc_mode == GC_CB)
		return UINT_MAX;
	else if (p->gc_mode == GC_AT)
		return UINT_MAX;
	else /* No other gc_mode */
		return 0;
}

static unsigned int check_bg_victims(struct f2fs_sb_info *sbi)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	unsigned int secno;

	/*
	 * If the gc_type is FG_GC, we can select victim segments
	 * previously chosen by background GC.
	 * Those segments are guaranteed to have few valid blocks.
	 */
	for_each_set_bit(secno, dirty_i->victim_secmap, MAIN_SECS(sbi)) {
		if (sec_usage_check(sbi, secno))
			continue;
		clear_bit(secno, dirty_i->victim_secmap);
		return GET_SEG_FROM_SEC(sbi, secno);
	}
	return NULL_SEGNO;
}

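/*
 * Cost-benefit cost of a section, following the classic LFS cleaning policy.
 * With utilization u and age both normalized to [0, 100], the benefit of
 * cleaning is proportional to age * (100 - u) (space reclaimed, weighted by
 * how long it is likely to stay free) and the cost to (100 + u) (read the
 * whole section, write back the valid blocks). The ratio is subtracted from
 * UINT_MAX so that a smaller return value denotes a better victim.
 */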
static unsigned int get_cb_cost(struct f2fs_sb_info *sbi, unsigned int segno)
{
	struct sit_info *sit_i = SIT_I(sbi);
	unsigned int secno = GET_SEC_FROM_SEG(sbi, segno);
	unsigned int start = GET_SEG_FROM_SEC(sbi, secno);
	unsigned long long mtime = 0;
	unsigned int vblocks;
	unsigned char age = 0;
	unsigned char u;
	unsigned int i;
	unsigned int usable_segs_per_sec = f2fs_usable_segs_in_sec(sbi, segno);

	for (i = 0; i < usable_segs_per_sec; i++)
		mtime += get_seg_entry(sbi, start + i)->mtime;
	vblocks = get_valid_blocks(sbi, segno, true);

	mtime = div_u64(mtime, usable_segs_per_sec);
	vblocks = div_u64(vblocks, usable_segs_per_sec);

	u = BLKS_TO_SEGS(sbi, vblocks * 100);

	/* Handle the case where the user has changed the system time */
	if (mtime < sit_i->min_mtime)
		sit_i->min_mtime = mtime;
	if (mtime > sit_i->max_mtime)
		sit_i->max_mtime = mtime;
	if (sit_i->max_mtime != sit_i->min_mtime)
		age = 100 - div64_u64(100 * (mtime - sit_i->min_mtime),
				sit_i->max_mtime - sit_i->min_mtime);

	return UINT_MAX - ((100 * (100 - u) * age) / (100 + u));
}

static inline unsigned int get_gc_cost(struct f2fs_sb_info *sbi,
			unsigned int segno, struct victim_sel_policy *p)
{
	if (p->alloc_mode == SSR)
		return get_seg_entry(sbi, segno)->ckpt_valid_blocks;

	/* alloc_mode == LFS */
	if (p->gc_mode == GC_GREEDY)
		return get_valid_blocks(sbi, segno, true);
	else if (p->gc_mode == GC_CB)
		return get_cb_cost(sbi, segno);

	f2fs_bug_on(sbi, 1);
	return 0;
}

static unsigned int count_bits(const unsigned long *addr,
				unsigned int offset, unsigned int len)
{
	unsigned int end = offset + len, sum = 0;

	while (offset < end) {
		if (test_bit(offset++, addr))
			++sum;
	}
	return sum;
}

static bool f2fs_check_victim_tree(struct f2fs_sb_info *sbi,
				struct rb_root_cached *root)
{
#ifdef CONFIG_F2FS_CHECK_FS
	struct rb_node *cur = rb_first_cached(root), *next;
	struct victim_entry *cur_ve, *next_ve;

	while (cur) {
		next = rb_next(cur);
		if (!next)
			return true;

		cur_ve = rb_entry(cur, struct victim_entry, rb_node);
		next_ve = rb_entry(next, struct victim_entry, rb_node);

		if (cur_ve->mtime > next_ve->mtime) {
			f2fs_info(sbi, "broken victim_rbtree, "
				"cur_mtime(%llu) next_mtime(%llu)",
				cur_ve->mtime, next_ve->mtime);
			return false;
		}
		cur = next;
	}
#endif
	return true;
}

static struct victim_entry *__lookup_victim_entry(struct f2fs_sb_info *sbi,
					unsigned long long mtime)
{
	struct atgc_management *am = &sbi->am;
	struct rb_node *node = am->root.rb_root.rb_node;
	struct victim_entry *ve = NULL;

	while (node) {
		ve = rb_entry(node, struct victim_entry, rb_node);

		if (mtime < ve->mtime)
			node = node->rb_left;
		else
			node = node->rb_right;
	}
	return ve;
}

static struct victim_entry *__create_victim_entry(struct f2fs_sb_info *sbi,
		unsigned long long mtime, unsigned int segno)
{
	struct atgc_management *am = &sbi->am;
	struct victim_entry *ve;

	ve = f2fs_kmem_cache_alloc(victim_entry_slab, GFP_NOFS, true, NULL);

	ve->mtime = mtime;
	ve->segno = segno;

	list_add_tail(&ve->list, &am->victim_list);
	am->victim_count++;

	return ve;
}

static void __insert_victim_entry(struct f2fs_sb_info *sbi,
				unsigned long long mtime, unsigned int segno)
{
	struct atgc_management *am = &sbi->am;
	struct rb_root_cached *root = &am->root;
	struct rb_node **p = &root->rb_root.rb_node;
	struct rb_node *parent = NULL;
	struct victim_entry *ve;
	bool left_most = true;

	/* look up rb tree to find parent node */
	while (*p) {
		parent = *p;
		ve = rb_entry(parent, struct victim_entry, rb_node);

		if (mtime < ve->mtime) {
			p = &(*p)->rb_left;
		} else {
			p = &(*p)->rb_right;
			left_most = false;
		}
	}

	ve = __create_victim_entry(sbi, mtime, segno);

	rb_link_node(&ve->rb_node, parent, p);
	rb_insert_color_cached(&ve->rb_node, root, left_most);
}

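/*
 * Compute the average mtime of the section containing @segno, refresh the
 * global and dirty min/max mtimes, and insert the section into the ATGC
 * victim tree unless it is younger than the age threshold.
 */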
static void add_victim_entry(struct f2fs_sb_info *sbi,
				struct victim_sel_policy *p, unsigned int segno)
{
	struct sit_info *sit_i = SIT_I(sbi);
	unsigned int secno = GET_SEC_FROM_SEG(sbi, segno);
	unsigned int start = GET_SEG_FROM_SEC(sbi, secno);
	unsigned long long mtime = 0;
	unsigned int i;

	if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) {
		if (p->gc_mode == GC_AT &&
			get_valid_blocks(sbi, segno, true) == 0)
			return;
	}

	for (i = 0; i < SEGS_PER_SEC(sbi); i++)
		mtime += get_seg_entry(sbi, start + i)->mtime;
	mtime = div_u64(mtime, SEGS_PER_SEC(sbi));

	/* Handle the case where the user has changed the system time */
	if (mtime < sit_i->min_mtime)
		sit_i->min_mtime = mtime;
	if (mtime > sit_i->max_mtime)
		sit_i->max_mtime = mtime;
	if (mtime < sit_i->dirty_min_mtime)
		sit_i->dirty_min_mtime = mtime;
	if (mtime > sit_i->dirty_max_mtime)
		sit_i->dirty_max_mtime = mtime;

	/* don't choose young section as candidate */
	if (sit_i->dirty_max_mtime - mtime < p->age_threshold)
		return;

	__insert_victim_entry(sbi, mtime, segno);
}

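/*
 * Scan up to dirty_threshold candidates from the oldest end of the victim
 * tree and pick the entry with the lowest combined cost, where age counts
 * for age_weight percent and the invalid-block ratio for the remaining
 * (100 - age_weight) percent, both scaled by the accuracy class derived
 * from the mtime range.
 */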
static void atgc_lookup_victim(struct f2fs_sb_info *sbi,
						struct victim_sel_policy *p)
{
	struct sit_info *sit_i = SIT_I(sbi);
	struct atgc_management *am = &sbi->am;
	struct rb_root_cached *root = &am->root;
	struct rb_node *node;
	struct victim_entry *ve;
	unsigned long long total_time;
	unsigned long long age, u, accu;
	unsigned long long max_mtime = sit_i->dirty_max_mtime;
	unsigned long long min_mtime = sit_i->dirty_min_mtime;
	unsigned int sec_blocks = CAP_BLKS_PER_SEC(sbi);
	unsigned int vblocks;
	unsigned int dirty_threshold = max(am->max_candidate_count,
					am->candidate_ratio *
					am->victim_count / 100);
	unsigned int age_weight = am->age_weight;
	unsigned int cost;
	unsigned int iter = 0;

	if (max_mtime < min_mtime)
		return;

	max_mtime += 1;
	total_time = max_mtime - min_mtime;

	accu = div64_u64(ULLONG_MAX, total_time);
	accu = min_t(unsigned long long, div_u64(accu, 100),
					DEFAULT_ACCURACY_CLASS);

	node = rb_first_cached(root);
next:
	ve = rb_entry_safe(node, struct victim_entry, rb_node);
	if (!ve)
		return;

	if (ve->mtime >= max_mtime || ve->mtime < min_mtime)
		goto skip;

	/* age = 10000 * x% * 60 */
	age = div64_u64(accu * (max_mtime - ve->mtime), total_time) *
								age_weight;

	vblocks = get_valid_blocks(sbi, ve->segno, true);
	f2fs_bug_on(sbi, !vblocks || vblocks == sec_blocks);

	/* u = 10000 * x% * 40 */
	u = div64_u64(accu * (sec_blocks - vblocks), sec_blocks) *
							(100 - age_weight);

	f2fs_bug_on(sbi, age + u >= UINT_MAX);

	cost = UINT_MAX - (age + u);
	iter++;

	if (cost < p->min_cost ||
			(cost == p->min_cost && age > p->oldest_age)) {
		p->min_cost = cost;
		p->oldest_age = age;
		p->min_segno = ve->segno;
	}
skip:
	if (iter < dirty_threshold) {
		node = rb_next(node);
		goto next;
	}
}

/*
 * select candidates around source section in range of
 * [target - dirty_threshold, target + dirty_threshold]
 */
static void atssr_lookup_victim(struct f2fs_sb_info *sbi,
						struct victim_sel_policy *p)
{
	struct sit_info *sit_i = SIT_I(sbi);
	struct atgc_management *am = &sbi->am;
	struct victim_entry *ve;
	unsigned long long age;
	unsigned long long max_mtime = sit_i->dirty_max_mtime;
	unsigned long long min_mtime = sit_i->dirty_min_mtime;
	unsigned int vblocks;
	unsigned int dirty_threshold = max(am->max_candidate_count,
					am->candidate_ratio *
					am->victim_count / 100);
	unsigned int cost, iter;
	int stage = 0;

	if (max_mtime < min_mtime)
		return;
	max_mtime += 1;
next_stage:
	iter = 0;
	ve = __lookup_victim_entry(sbi, p->age);
next_node:
	if (!ve) {
		if (stage++ == 0)
			goto next_stage;
		return;
	}

	if (ve->mtime >= max_mtime || ve->mtime < min_mtime)
		goto skip_node;

	age = max_mtime - ve->mtime;

	vblocks = get_seg_entry(sbi, ve->segno)->ckpt_valid_blocks;
	f2fs_bug_on(sbi, !vblocks);

	/* rare case */
	if (vblocks == BLKS_PER_SEG(sbi))
		goto skip_node;

	iter++;

	age = max_mtime - abs(p->age - age);
	cost = UINT_MAX - vblocks;

	if (cost < p->min_cost ||
			(cost == p->min_cost && age > p->oldest_age)) {
		p->min_cost = cost;
		p->oldest_age = age;
		p->min_segno = ve->segno;
	}
skip_node:
	if (iter < dirty_threshold) {
		ve = rb_entry(stage == 0 ? rb_prev(&ve->rb_node) :
					rb_next(&ve->rb_node),
					struct victim_entry, rb_node);
		goto next_node;
	}

	if (stage++ == 0)
		goto next_stage;
}

static void lookup_victim_by_age(struct f2fs_sb_info *sbi,
						struct victim_sel_policy *p)
{
	f2fs_bug_on(sbi, !f2fs_check_victim_tree(sbi, &sbi->am.root));

	if (p->gc_mode == GC_AT)
		atgc_lookup_victim(sbi, p);
	else if (p->alloc_mode == AT_SSR)
		atssr_lookup_victim(sbi, p);
	else
		f2fs_bug_on(sbi, 1);
}

static void release_victim_entry(struct f2fs_sb_info *sbi)
{
	struct atgc_management *am = &sbi->am;
	struct victim_entry *ve, *tmp;

	list_for_each_entry_safe(ve, tmp, &am->victim_list, list) {
		list_del(&ve->list);
		kmem_cache_free(victim_entry_slab, ve);
		am->victim_count--;
	}

	am->root = RB_ROOT_CACHED;

	f2fs_bug_on(sbi, am->victim_count);
	f2fs_bug_on(sbi, !list_empty(&am->victim_list));
}

static bool f2fs_pin_section(struct f2fs_sb_info *sbi, unsigned int segno)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	unsigned int secno = GET_SEC_FROM_SEG(sbi, segno);

	if (!dirty_i->enable_pin_section)
		return false;
	if (!test_and_set_bit(secno, dirty_i->pinned_secmap))
		dirty_i->pinned_secmap_cnt++;
	return true;
}

static bool f2fs_pinned_section_exists(struct dirty_seglist_info *dirty_i)
{
	return dirty_i->pinned_secmap_cnt;
}

static bool f2fs_section_is_pinned(struct dirty_seglist_info *dirty_i,
						unsigned int secno)
{
	return dirty_i->enable_pin_section &&
		f2fs_pinned_section_exists(dirty_i) &&
		test_bit(secno, dirty_i->pinned_secmap);
}

static void f2fs_unpin_all_sections(struct f2fs_sb_info *sbi, bool enable)
{
	unsigned int bitmap_size = f2fs_bitmap_size(MAIN_SECS(sbi));

	if (f2fs_pinned_section_exists(DIRTY_I(sbi))) {
		memset(DIRTY_I(sbi)->pinned_secmap, 0, bitmap_size);
		DIRTY_I(sbi)->pinned_secmap_cnt = 0;
	}
	DIRTY_I(sbi)->enable_pin_section = enable;
}

static int f2fs_gc_pinned_control(struct inode *inode, int gc_type,
							unsigned int segno)
{
	if (!f2fs_is_pinned_file(inode))
		return 0;
	if (gc_type != FG_GC)
		return -EBUSY;
	if (!f2fs_pin_section(F2FS_I_SB(inode), segno))
		f2fs_pin_file_control(inode, true);
	return -EAGAIN;
}

/*
 * This function is called from two paths.
 * One is garbage collection and the other is SSR segment selection.
 * When it is called during GC, it just gets a victim segment
 * and it does not remove it from dirty seglist.
 * When it is called from SSR segment selection, it finds a segment
 * which has minimum valid blocks and removes it from dirty seglist.
 */
int f2fs_get_victim(struct f2fs_sb_info *sbi, unsigned int *result,
			int gc_type, int type, char alloc_mode,
			unsigned long long age)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	struct sit_info *sm = SIT_I(sbi);
	struct victim_sel_policy p;
	unsigned int secno, last_victim;
	unsigned int last_segment;
	unsigned int nsearched;
	bool is_atgc;
	int ret = 0;

	mutex_lock(&dirty_i->seglist_lock);
	last_segment = MAIN_SECS(sbi) * SEGS_PER_SEC(sbi);

	p.alloc_mode = alloc_mode;
	p.age = age;
	p.age_threshold = sbi->am.age_threshold;

retry:
	select_policy(sbi, gc_type, type, &p);
	p.min_segno = NULL_SEGNO;
	p.oldest_age = 0;
	p.min_cost = get_max_cost(sbi, &p);

	is_atgc = (p.gc_mode == GC_AT || p.alloc_mode == AT_SSR);
	nsearched = 0;

	if (is_atgc)
		SIT_I(sbi)->dirty_min_mtime = ULLONG_MAX;

	if (*result != NULL_SEGNO) {
		if (!get_valid_blocks(sbi, *result, false)) {
			ret = -ENODATA;
			goto out;
		}

		if (sec_usage_check(sbi, GET_SEC_FROM_SEG(sbi, *result)))
			ret = -EBUSY;
		else
			p.min_segno = *result;
		goto out;
	}

	ret = -ENODATA;
	if (p.max_search == 0)
		goto out;

	if (__is_large_section(sbi) && p.alloc_mode == LFS) {
		if (sbi->next_victim_seg[BG_GC] != NULL_SEGNO) {
			p.min_segno = sbi->next_victim_seg[BG_GC];
			*result = p.min_segno;
			sbi->next_victim_seg[BG_GC] = NULL_SEGNO;
			goto got_result;
		}
		if (gc_type == FG_GC &&
				sbi->next_victim_seg[FG_GC] != NULL_SEGNO) {
			p.min_segno = sbi->next_victim_seg[FG_GC];
			*result = p.min_segno;
			sbi->next_victim_seg[FG_GC] = NULL_SEGNO;
			goto got_result;
		}
	}

	last_victim = sm->last_victim[p.gc_mode];
	if (p.alloc_mode == LFS && gc_type == FG_GC) {
		p.min_segno = check_bg_victims(sbi);
		if (p.min_segno != NULL_SEGNO)
			goto got_it;
	}

	while (1) {
		unsigned long cost, *dirty_bitmap;
		unsigned int unit_no, segno;

		dirty_bitmap = p.dirty_bitmap;
		unit_no = find_next_bit(dirty_bitmap,
				last_segment / p.ofs_unit,
				p.offset / p.ofs_unit);
		segno = unit_no * p.ofs_unit;
		if (segno >= last_segment) {
			if (sm->last_victim[p.gc_mode]) {
				last_segment =
					sm->last_victim[p.gc_mode];
				sm->last_victim[p.gc_mode] = 0;
				p.offset = 0;
				continue;
			}
			break;
		}

		p.offset = segno + p.ofs_unit;
		nsearched++;

#ifdef CONFIG_F2FS_CHECK_FS
		/*
		 * Skip selecting a segno marked invalid (i.e. one that failed
		 * the block validity check during GC) to avoid an endless GC
		 * loop in such cases.
		 */
		if (test_bit(segno, sm->invalid_segmap))
			goto next;
#endif

		secno = GET_SEC_FROM_SEG(sbi, segno);

		if (sec_usage_check(sbi, secno))
			goto next;

		/* Don't touch checkpointed data */
		if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) {
			if (p.alloc_mode == LFS) {
				/*
				 * LFS is set to find source section during GC.
				 * The victim should have no checkpointed data.
				 */
				if (get_ckpt_valid_blocks(sbi, segno, true))
					goto next;
			} else {
				/*
				 * SSR | AT_SSR are set to find a target
				 * segment for writes, which may be filled by
				 * checkpointed and newly written blocks.
				 */
				if (!f2fs_segment_has_free_slot(sbi, segno))
					goto next;
			}
		}

		if (gc_type == BG_GC && test_bit(secno, dirty_i->victim_secmap))
			goto next;

		if (gc_type == FG_GC && f2fs_section_is_pinned(dirty_i, secno))
			goto next;

		if (is_atgc) {
			add_victim_entry(sbi, &p, segno);
			goto next;
		}

		cost = get_gc_cost(sbi, segno, &p);

		if (p.min_cost > cost) {
			p.min_segno = segno;
			p.min_cost = cost;
		}
next:
		if (nsearched >= p.max_search) {
			if (!sm->last_victim[p.gc_mode] && segno <= last_victim)
				sm->last_victim[p.gc_mode] =
					last_victim + p.ofs_unit;
			else
				sm->last_victim[p.gc_mode] = segno + p.ofs_unit;
			sm->last_victim[p.gc_mode] %=
				(MAIN_SECS(sbi) * SEGS_PER_SEC(sbi));
			break;
		}
	}

	/* get victim for GC_AT/AT_SSR */
	if (is_atgc) {
		lookup_victim_by_age(sbi, &p);
		release_victim_entry(sbi);
	}

	if (is_atgc && p.min_segno == NULL_SEGNO &&
			sm->elapsed_time < p.age_threshold) {
		p.age_threshold = 0;
		goto retry;
	}

	if (p.min_segno != NULL_SEGNO) {
got_it:
		*result = (p.min_segno / p.ofs_unit) * p.ofs_unit;
got_result:
		if (p.alloc_mode == LFS) {
			secno = GET_SEC_FROM_SEG(sbi, p.min_segno);
			if (gc_type == FG_GC)
				sbi->cur_victim_sec = secno;
			else
				set_bit(secno, dirty_i->victim_secmap);
		}
		ret = 0;
	}
out:
	if (p.min_segno != NULL_SEGNO)
		trace_f2fs_get_victim(sbi->sb, type, gc_type, &p,
				sbi->cur_victim_sec,
				prefree_segments(sbi), free_segments(sbi));
	mutex_unlock(&dirty_i->seglist_lock);

	return ret;
}

static struct inode *find_gc_inode(struct gc_inode_list *gc_list, nid_t ino)
{
	struct inode_entry *ie;

	ie = radix_tree_lookup(&gc_list->iroot, ino);
	if (ie)
		return ie->inode;
	return NULL;
}

static void add_gc_inode(struct gc_inode_list *gc_list, struct inode *inode)
{
	struct inode_entry *new_ie;

	if (inode == find_gc_inode(gc_list, inode->i_ino)) {
		iput(inode);
		return;
	}
	new_ie = f2fs_kmem_cache_alloc(f2fs_inode_entry_slab,
					GFP_NOFS, true, NULL);
	new_ie->inode = inode;

	f2fs_radix_tree_insert(&gc_list->iroot, inode->i_ino, new_ie);
	list_add_tail(&new_ie->list, &gc_list->ilist);
}

static void put_gc_inode(struct gc_inode_list *gc_list)
{
	struct inode_entry *ie, *next_ie;

	list_for_each_entry_safe(ie, next_ie, &gc_list->ilist, list) {
		radix_tree_delete(&gc_list->iroot, ie->inode->i_ino);
		iput(ie->inode);
		list_del(&ie->list);
		kmem_cache_free(f2fs_inode_entry_slab, ie);
	}
}

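/* Test, under sentry_lock, whether the block at @offset in @segno is valid. */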
static int check_valid_map(struct f2fs_sb_info *sbi,
				unsigned int segno, int offset)
{
	struct sit_info *sit_i = SIT_I(sbi);
	struct seg_entry *sentry;
	int ret;

	down_read(&sit_i->sentry_lock);
	sentry = get_seg_entry(sbi, segno);
	ret = f2fs_test_bit(offset, sentry->cur_valid_map);
	up_read(&sit_i->sentry_lock);
	return ret;
}

/*
 * This function compares the node address recorded in the summary with the
 * address in the NAT. If they match, the node is valid and is migrated with
 * cold status; otherwise the invalid node is ignored.
 */
static int gc_node_segment(struct f2fs_sb_info *sbi,
		struct f2fs_summary *sum, unsigned int segno, int gc_type)
{
	struct f2fs_summary *entry;
	block_t start_addr;
	int off;
	int phase = 0;
	bool fggc = (gc_type == FG_GC);
	int submitted = 0;
	unsigned int usable_blks_in_seg = f2fs_usable_blks_in_seg(sbi, segno);

	start_addr = START_BLOCK(sbi, segno);

next_step:
	entry = sum;

	if (fggc && phase == 2)
		atomic_inc(&sbi->wb_sync_req[NODE]);

	for (off = 0; off < usable_blks_in_seg; off++, entry++) {
		nid_t nid = le32_to_cpu(entry->nid);
		struct page *node_page;
		struct node_info ni;
		int err;

		/* stop BG_GC if there are not enough free sections. */
		if (gc_type == BG_GC && has_not_enough_free_secs(sbi, 0, 0))
			return submitted;

		if (check_valid_map(sbi, segno, off) == 0)
			continue;

		if (phase == 0) {
			f2fs_ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nid), 1,
							META_NAT, true);
			continue;
		}

		if (phase == 1) {
			f2fs_ra_node_page(sbi, nid);
			continue;
		}

		/* phase == 2 */
		node_page = f2fs_get_node_page(sbi, nid);
		if (IS_ERR(node_page))
			continue;

		/* block may become invalid during f2fs_get_node_page */
		if (check_valid_map(sbi, segno, off) == 0) {
			f2fs_put_page(node_page, 1);
			continue;
		}

		if (f2fs_get_node_info(sbi, nid, &ni, false)) {
			f2fs_put_page(node_page, 1);
			continue;
		}

		if (ni.blk_addr != start_addr + off) {
			f2fs_put_page(node_page, 1);
			continue;
		}

		err = f2fs_move_node_page(node_page, gc_type);
		if (!err && gc_type == FG_GC)
			submitted++;
		stat_inc_node_blk_count(sbi, 1, gc_type);
	}

	if (++phase < 3)
		goto next_step;

	if (fggc)
		atomic_dec(&sbi->wb_sync_req[NODE]);
	return submitted;
}

/*
 * Calculate the start block index of the data addressed by the given node
 * offset. Be careful: the caller must pass only node offsets that indicate
 * direct node blocks. Passing a node offset that points to another node block
 * type, such as an indirect or double indirect node block, is a caller bug.
 */
block_t f2fs_start_bidx_of_node(unsigned int node_ofs, struct inode *inode)
{
	unsigned int indirect_blks = 2 * NIDS_PER_BLOCK + 4;
	unsigned int bidx;

	if (node_ofs == 0)
		return 0;

	if (node_ofs <= 2) {
		bidx = node_ofs - 1;
	} else if (node_ofs <= indirect_blks) {
		int dec = (node_ofs - 4) / (NIDS_PER_BLOCK + 1);

		bidx = node_ofs - 2 - dec;
	} else {
		int dec = (node_ofs - indirect_blks - 3) / (NIDS_PER_BLOCK + 1);

		bidx = node_ofs - 5 - dec;
	}
	return bidx * ADDRS_PER_BLOCK(inode) + ADDRS_PER_INODE(inode);
}

static bool is_alive(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
		struct node_info *dni, block_t blkaddr, unsigned int *nofs)
{
	struct page *node_page;
	nid_t nid;
	unsigned int ofs_in_node, max_addrs, base;
	block_t source_blkaddr;

	nid = le32_to_cpu(sum->nid);
	ofs_in_node = le16_to_cpu(sum->ofs_in_node);

	node_page = f2fs_get_node_page(sbi, nid);
	if (IS_ERR(node_page))
		return false;

	if (f2fs_get_node_info(sbi, nid, dni, false)) {
		f2fs_put_page(node_page, 1);
		return false;
	}

	if (sum->version != dni->version) {
		f2fs_warn(sbi, "%s: valid data with mismatched node version.",
			  __func__);
		set_sbi_flag(sbi, SBI_NEED_FSCK);
	}

	if (f2fs_check_nid_range(sbi, dni->ino)) {
		f2fs_put_page(node_page, 1);
		return false;
	}

	if (IS_INODE(node_page)) {
		base = offset_in_addr(F2FS_INODE(node_page));
		max_addrs = DEF_ADDRS_PER_INODE;
	} else {
		base = 0;
		max_addrs = DEF_ADDRS_PER_BLOCK;
	}

	if (base + ofs_in_node >= max_addrs) {
		f2fs_err(sbi, "Inconsistent blkaddr offset: base:%u, ofs_in_node:%u, max:%u, ino:%u, nid:%u",
			base, ofs_in_node, max_addrs, dni->ino, dni->nid);
		f2fs_put_page(node_page, 1);
		return false;
	}

	*nofs = ofs_of_node(node_page);
	source_blkaddr = data_blkaddr(NULL, node_page, ofs_in_node);
	f2fs_put_page(node_page, 1);

	if (source_blkaddr != blkaddr) {
#ifdef CONFIG_F2FS_CHECK_FS
		unsigned int segno = GET_SEGNO(sbi, blkaddr);
		unsigned long offset = GET_BLKOFF_FROM_SEG0(sbi, blkaddr);

		if (unlikely(check_valid_map(sbi, segno, offset))) {
			if (!test_and_set_bit(segno, SIT_I(sbi)->invalid_segmap)) {
				f2fs_err(sbi, "mismatched blkaddr %u (source_blkaddr %u) in seg %u",
					 blkaddr, source_blkaddr, segno);
				set_sbi_flag(sbi, SBI_NEED_FSCK);
			}
		}
#endif
		return false;
	}
	return true;
}

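/*
 * Read ahead one data block into META_MAPPING for a post-read (e.g.
 * encrypted) inode: look up the block address via the extent cache or the
 * dnode, validate it, and submit a read of the raw block into the meta
 * inode's page cache.
 */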
static int ra_data_block(struct inode *inode, pgoff_t index)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct address_space *mapping = inode->i_mapping;
	struct dnode_of_data dn;
	struct page *page;
	struct f2fs_io_info fio = {
		.sbi = sbi,
		.ino = inode->i_ino,
		.type = DATA,
		.temp = COLD,
		.op = REQ_OP_READ,
		.op_flags = 0,
		.encrypted_page = NULL,
		.in_list = 0,
	};
	int err;

	page = f2fs_grab_cache_page(mapping, index, true);
	if (!page)
		return -ENOMEM;

	if (f2fs_lookup_read_extent_cache_block(inode, index,
						&dn.data_blkaddr)) {
		if (unlikely(!f2fs_is_valid_blkaddr(sbi, dn.data_blkaddr,
						DATA_GENERIC_ENHANCE_READ))) {
			err = -EFSCORRUPTED;
			goto put_page;
		}
		goto got_it;
	}

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = f2fs_get_dnode_of_data(&dn, index, LOOKUP_NODE);
	if (err)
		goto put_page;
	f2fs_put_dnode(&dn);

	if (!__is_valid_data_blkaddr(dn.data_blkaddr)) {
		err = -ENOENT;
		goto put_page;
	}
	if (unlikely(!f2fs_is_valid_blkaddr(sbi, dn.data_blkaddr,
						DATA_GENERIC_ENHANCE))) {
		err = -EFSCORRUPTED;
		goto put_page;
	}
got_it:
	/* read page */
	fio.page = page;
	fio.new_blkaddr = fio.old_blkaddr = dn.data_blkaddr;

	/*
	 * Don't cache encrypted data in the meta inode until the previous
	 * dirty data has been written back, to avoid a race between GC and
	 * flush.
	 */
	f2fs_wait_on_page_writeback(page, DATA, true, true);

	f2fs_wait_on_block_writeback(inode, dn.data_blkaddr);

	fio.encrypted_page = f2fs_pagecache_get_page(META_MAPPING(sbi),
					dn.data_blkaddr,
					FGP_LOCK | FGP_CREAT, GFP_NOFS);
	if (!fio.encrypted_page) {
		err = -ENOMEM;
		goto put_page;
	}

	err = f2fs_submit_page_bio(&fio);
	if (err)
		goto put_encrypted_page;
	f2fs_put_page(fio.encrypted_page, 0);
	f2fs_put_page(page, 1);

	f2fs_update_iostat(sbi, inode, FS_DATA_READ_IO, F2FS_BLKSIZE);
	f2fs_update_iostat(sbi, NULL, FS_GDATA_READ_IO, F2FS_BLKSIZE);

	return 0;
put_encrypted_page:
	f2fs_put_page(fio.encrypted_page, 1);
put_page:
	f2fs_put_page(page, 1);
	return err;
}

/*
 * Move data block via META_MAPPING while keeping locked data page.
 * This can be used to move blocks, aka LBAs, directly on disk.
 */
static int move_data_block(struct inode *inode, block_t bidx,
				int gc_type, unsigned int segno, int off)
{
	struct f2fs_io_info fio = {
		.sbi = F2FS_I_SB(inode),
		.ino = inode->i_ino,
		.type = DATA,
		.temp = COLD,
		.op = REQ_OP_READ,
		.op_flags = 0,
		.encrypted_page = NULL,
		.in_list = 0,
	};
	struct dnode_of_data dn;
	struct f2fs_summary sum;
	struct node_info ni;
	struct page *page, *mpage;
	block_t newaddr;
	int err = 0;
	bool lfs_mode = f2fs_lfs_mode(fio.sbi);
	int type = fio.sbi->am.atgc_enabled && (gc_type == BG_GC) &&
				(fio.sbi->gc_mode != GC_URGENT_HIGH) ?
				CURSEG_ALL_DATA_ATGC : CURSEG_COLD_DATA;

	/* do not read out */
	page = f2fs_grab_cache_page(inode->i_mapping, bidx, false);
	if (!page)
		return -ENOMEM;

	if (!check_valid_map(F2FS_I_SB(inode), segno, off)) {
		err = -ENOENT;
		goto out;
	}

	err = f2fs_gc_pinned_control(inode, gc_type, segno);
	if (err)
		goto out;

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = f2fs_get_dnode_of_data(&dn, bidx, LOOKUP_NODE);
	if (err)
		goto out;

	if (unlikely(dn.data_blkaddr == NULL_ADDR)) {
		ClearPageUptodate(page);
		err = -ENOENT;
		goto put_out;
	}

	/*
	 * Don't cache encrypted data in the meta inode until the previous
	 * dirty data has been written back, to avoid a race between GC and
	 * flush.
	 */
	f2fs_wait_on_page_writeback(page, DATA, true, true);

	f2fs_wait_on_block_writeback(inode, dn.data_blkaddr);

	err = f2fs_get_node_info(fio.sbi, dn.nid, &ni, false);
	if (err)
		goto put_out;

	/* read page */
	fio.page = page;
	fio.new_blkaddr = fio.old_blkaddr = dn.data_blkaddr;

	if (lfs_mode)
		f2fs_down_write(&fio.sbi->io_order_lock);

	mpage = f2fs_grab_cache_page(META_MAPPING(fio.sbi),
					fio.old_blkaddr, false);
	if (!mpage) {
		err = -ENOMEM;
		goto up_out;
	}

	fio.encrypted_page = mpage;

	/* read source block in mpage */
	if (!PageUptodate(mpage)) {
		err = f2fs_submit_page_bio(&fio);
		if (err) {
			f2fs_put_page(mpage, 1);
			goto up_out;
		}

		f2fs_update_iostat(fio.sbi, inode, FS_DATA_READ_IO,
							F2FS_BLKSIZE);
		f2fs_update_iostat(fio.sbi, NULL, FS_GDATA_READ_IO,
							F2FS_BLKSIZE);

		lock_page(mpage);
		if (unlikely(mpage->mapping != META_MAPPING(fio.sbi) ||
						!PageUptodate(mpage))) {
			err = -EIO;
			f2fs_put_page(mpage, 1);
			goto up_out;
		}
	}

	set_summary(&sum, dn.nid, dn.ofs_in_node, ni.version);

	/* allocate block address */
	err = f2fs_allocate_data_block(fio.sbi, NULL, fio.old_blkaddr, &newaddr,
				&sum, type, NULL);
	if (err) {
		f2fs_put_page(mpage, 1);
		/* filesystem should shut down, no need to recover the block */
		goto up_out;
	}

	fio.encrypted_page = f2fs_pagecache_get_page(META_MAPPING(fio.sbi),
				newaddr, FGP_LOCK | FGP_CREAT, GFP_NOFS);
	if (!fio.encrypted_page) {
		err = -ENOMEM;
		f2fs_put_page(mpage, 1);
		goto recover_block;
	}

	/* write target block */
	f2fs_wait_on_page_writeback(fio.encrypted_page, DATA, true, true);
	memcpy(page_address(fio.encrypted_page),
				page_address(mpage), PAGE_SIZE);
	f2fs_put_page(mpage, 1);

	f2fs_invalidate_internal_cache(fio.sbi, fio.old_blkaddr);

	set_page_dirty(fio.encrypted_page);
	if (clear_page_dirty_for_io(fio.encrypted_page))
		dec_page_count(fio.sbi, F2FS_DIRTY_META);

	set_page_writeback(fio.encrypted_page);

	fio.op = REQ_OP_WRITE;
	fio.op_flags = REQ_SYNC;
	fio.new_blkaddr = newaddr;
	f2fs_submit_page_write(&fio);

	f2fs_update_iostat(fio.sbi, NULL, FS_GC_DATA_IO, F2FS_BLKSIZE);

	f2fs_update_data_blkaddr(&dn, newaddr);
	set_inode_flag(inode, FI_APPEND_WRITE);

	f2fs_put_page(fio.encrypted_page, 1);
recover_block:
	if (err)
		f2fs_do_replace_block(fio.sbi, &sum, newaddr, fio.old_blkaddr,
							true, true, true);
up_out:
	if (lfs_mode)
		f2fs_up_write(&fio.sbi->io_order_lock);
put_out:
	f2fs_put_dnode(&dn);
out:
	f2fs_put_page(page, 1);
	return err;
}

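/*
 * Migrate one data block through the regular data path: for background GC,
 * just redirty the page and tag it with the GCing flag so that writeback
 * relocates it; for foreground GC, issue a synchronous cold write right away,
 * retrying on -ENOMEM.
 */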
static int move_data_page(struct inode *inode, block_t bidx, int gc_type,
							unsigned int segno, int off)
{
	struct page *page;
	int err = 0;

	page = f2fs_get_lock_data_page(inode, bidx, true);
	if (IS_ERR(page))
		return PTR_ERR(page);

	if (!check_valid_map(F2FS_I_SB(inode), segno, off)) {
		err = -ENOENT;
		goto out;
	}

	err = f2fs_gc_pinned_control(inode, gc_type, segno);
	if (err)
		goto out;

	if (gc_type == BG_GC) {
		if (PageWriteback(page)) {
			err = -EAGAIN;
			goto out;
		}
		set_page_dirty(page);
		set_page_private_gcing(page);
	} else {
		struct f2fs_io_info fio = {
			.sbi = F2FS_I_SB(inode),
			.ino = inode->i_ino,
			.type = DATA,
			.temp = COLD,
			.op = REQ_OP_WRITE,
			.op_flags = REQ_SYNC,
			.old_blkaddr = NULL_ADDR,
			.page = page,
			.encrypted_page = NULL,
			.need_lock = LOCK_REQ,
			.io_type = FS_GC_DATA_IO,
		};
		bool is_dirty = PageDirty(page);

retry:
		f2fs_wait_on_page_writeback(page, DATA, true, true);

		set_page_dirty(page);
		if (clear_page_dirty_for_io(page)) {
			inode_dec_dirty_pages(inode);
			f2fs_remove_dirty_inode(inode);
		}

		set_page_private_gcing(page);

		err = f2fs_do_write_data_page(&fio);
		if (err) {
			clear_page_private_gcing(page);
			if (err == -ENOMEM) {
				memalloc_retry_wait(GFP_NOFS);
				goto retry;
			}
			if (is_dirty)
				set_page_dirty(page);
		}
	}
out:
	f2fs_put_page(page, 1);
	return err;
}

/*
 * This function tries to get the parent node of a victim data block and
 * checks the block's validity. If the block is valid, it is copied with cold
 * status and the parent node is modified.
 * If the parent node is not valid or the data block address differs, the
 * victim data block is ignored.
 */
static int gc_data_segment(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
		struct gc_inode_list *gc_list, unsigned int segno, int gc_type,
		bool force_migrate)
{
	struct super_block *sb = sbi->sb;
	struct f2fs_summary *entry;
	block_t start_addr;
	int off;
	int phase = 0;
	int submitted = 0;
	unsigned int usable_blks_in_seg = f2fs_usable_blks_in_seg(sbi, segno);

	start_addr = START_BLOCK(sbi, segno);

next_step:
	entry = sum;

	for (off = 0; off < usable_blks_in_seg; off++, entry++) {
		struct page *data_page;
		struct inode *inode;
		struct node_info dni; /* dnode info for the data */
		unsigned int ofs_in_node, nofs;
		block_t start_bidx;
		nid_t nid = le32_to_cpu(entry->nid);

		/*
		 * Stop BG_GC if there are not enough free sections.
		 * Also stop GC if the segment becomes fully valid due to a
		 * race with SSR block allocation.
		 */
		if ((gc_type == BG_GC && has_not_enough_free_secs(sbi, 0, 0)) ||
			(!force_migrate && get_valid_blocks(sbi, segno, true) ==
							CAP_BLKS_PER_SEC(sbi)))
			return submitted;

		if (check_valid_map(sbi, segno, off) == 0)
			continue;

		if (phase == 0) {
			f2fs_ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nid), 1,
							META_NAT, true);
			continue;
		}

		if (phase == 1) {
			f2fs_ra_node_page(sbi, nid);
			continue;
		}

		/* Get an inode by ino with checking validity */
		if (!is_alive(sbi, entry, &dni, start_addr + off, &nofs))
			continue;

		if (phase == 2) {
			f2fs_ra_node_page(sbi, dni.ino);
			continue;
		}

		ofs_in_node = le16_to_cpu(entry->ofs_in_node);

		if (phase == 3) {
			int err;

			inode = f2fs_iget(sb, dni.ino);
			if (IS_ERR(inode) || is_bad_inode(inode) ||
					special_file(inode->i_mode))
				continue;

			err = f2fs_gc_pinned_control(inode, gc_type, segno);
			if (err == -EAGAIN) {
				iput(inode);
				return submitted;
			}

			if (!f2fs_down_write_trylock(
				&F2FS_I(inode)->i_gc_rwsem[WRITE])) {
				iput(inode);
				sbi->skipped_gc_rwsem++;
				continue;
			}

			start_bidx = f2fs_start_bidx_of_node(nofs, inode) +
								ofs_in_node;

			if (f2fs_post_read_required(inode)) {
				int err = ra_data_block(inode, start_bidx);

				f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
				if (err) {
					iput(inode);
					continue;
				}
				add_gc_inode(gc_list, inode);
				continue;
			}

			data_page = f2fs_get_read_data_page(inode, start_bidx,
							REQ_RAHEAD, true, NULL);
			f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
			if (IS_ERR(data_page)) {
				iput(inode);
				continue;
			}

			f2fs_put_page(data_page, 0);
			add_gc_inode(gc_list, inode);
			continue;
		}

		/* phase 4 */
		inode = find_gc_inode(gc_list, dni.ino);
		if (inode) {
			struct f2fs_inode_info *fi = F2FS_I(inode);
			bool locked = false;
			int err;

			if (S_ISREG(inode->i_mode)) {
				if (!f2fs_down_write_trylock(&fi->i_gc_rwsem[WRITE])) {
					sbi->skipped_gc_rwsem++;
					continue;
				}
				if (!f2fs_down_write_trylock(
						&fi->i_gc_rwsem[READ])) {
					sbi->skipped_gc_rwsem++;
					f2fs_up_write(&fi->i_gc_rwsem[WRITE]);
					continue;
				}
				locked = true;

				/* wait for all inflight aio data */
				inode_dio_wait(inode);
			}

			start_bidx = f2fs_start_bidx_of_node(nofs, inode)
								+ ofs_in_node;
			if (f2fs_post_read_required(inode))
				err = move_data_block(inode, start_bidx,
							gc_type, segno, off);
			else
				err = move_data_page(inode, start_bidx, gc_type,
								segno, off);

			if (!err && (gc_type == FG_GC ||
					f2fs_post_read_required(inode)))
				submitted++;

			if (locked) {
				f2fs_up_write(&fi->i_gc_rwsem[READ]);
				f2fs_up_write(&fi->i_gc_rwsem[WRITE]);
			}

			stat_inc_data_blk_count(sbi, 1, gc_type);
		}
	}

	if (++phase < 5)
		goto next_step;

	return submitted;
}

static int __get_victim(struct f2fs_sb_info *sbi, unsigned int *victim,
			int gc_type)
{
	struct sit_info *sit_i = SIT_I(sbi);
	int ret;

	down_write(&sit_i->sentry_lock);
	ret = f2fs_get_victim(sbi, victim, gc_type, NO_CHECK_TYPE, LFS, 0);
	up_write(&sit_i->sentry_lock);
	return ret;
}

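/*
 * Garbage collect one section: grab references on all of its summary pages,
 * then walk each segment and migrate the valid node or data blocks according
 * to the summary type. Returns the number of segments that ended up with no
 * valid blocks (counted for foreground GC only).
 */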
static int do_garbage_collect(struct f2fs_sb_info *sbi,
				unsigned int start_segno,
				struct gc_inode_list *gc_list, int gc_type,
				bool force_migrate)
{
	struct page *sum_page;
	struct f2fs_summary_block *sum;
	struct blk_plug plug;
	unsigned int segno = start_segno;
	unsigned int end_segno = start_segno + SEGS_PER_SEC(sbi);
	int seg_freed = 0, migrated = 0;
	unsigned char type = IS_DATASEG(get_seg_entry(sbi, segno)->type) ?
						SUM_TYPE_DATA : SUM_TYPE_NODE;
	unsigned char data_type = (type == SUM_TYPE_DATA) ? DATA : NODE;
	int submitted = 0;

	if (__is_large_section(sbi))
		end_segno = rounddown(end_segno, SEGS_PER_SEC(sbi));

	/*
	 * zone-capacity can be less than zone-size in zoned devices, resulting
	 * in fewer usable segments in the zone than expected; calculate the
	 * end segno of the zone that can be garbage collected.
	 */
	if (f2fs_sb_has_blkzoned(sbi))
		end_segno -= SEGS_PER_SEC(sbi) -
					f2fs_usable_segs_in_sec(sbi, segno);

	sanity_check_seg_type(sbi, get_seg_entry(sbi, segno)->type);

	/* readahead multiple SSA blocks that have contiguous addresses */
	if (__is_large_section(sbi))
		f2fs_ra_meta_pages(sbi, GET_SUM_BLOCK(sbi, segno),
					end_segno - segno, META_SSA, true);

	/* reference all summary pages */
	while (segno < end_segno) {
		sum_page = f2fs_get_sum_page(sbi, segno++);
		if (IS_ERR(sum_page)) {
			int err = PTR_ERR(sum_page);

			end_segno = segno - 1;
			for (segno = start_segno; segno < end_segno; segno++) {
				sum_page = find_get_page(META_MAPPING(sbi),
						GET_SUM_BLOCK(sbi, segno));
				f2fs_put_page(sum_page, 0);
				f2fs_put_page(sum_page, 0);
			}
			return err;
		}
		unlock_page(sum_page);
	}

	blk_start_plug(&plug);

	for (segno = start_segno; segno < end_segno; segno++) {

		/* find segment summary of victim */
		sum_page = find_get_page(META_MAPPING(sbi),
					GET_SUM_BLOCK(sbi, segno));
		f2fs_put_page(sum_page, 0);

		if (get_valid_blocks(sbi, segno, false) == 0)
			goto freed;
		if (gc_type == BG_GC && __is_large_section(sbi) &&
				migrated >= sbi->migration_granularity)
			goto skip;
		if (!PageUptodate(sum_page) || unlikely(f2fs_cp_error(sbi)))
			goto skip;

		sum = page_address(sum_page);
		if (type != GET_SUM_TYPE((&sum->footer))) {
			f2fs_err(sbi, "Inconsistent segment (%u) type [%d, %d] in SSA and SIT",
				 segno, type, GET_SUM_TYPE((&sum->footer)));
			set_sbi_flag(sbi, SBI_NEED_FSCK);
			f2fs_stop_checkpoint(sbi, false,
				STOP_CP_REASON_CORRUPTED_SUMMARY);
			goto skip;
		}

		/*
		 * this is to avoid deadlock:
		 * - lock_page(sum_page)         - f2fs_replace_block
		 *  - check_valid_map()            - down_write(sentry_lock)
		 *   - down_read(sentry_lock)     - change_curseg()
		 *                                  - lock_page(sum_page)
		 */
		if (type == SUM_TYPE_NODE)
			submitted += gc_node_segment(sbi, sum->entries, segno,
								gc_type);
		else
			submitted += gc_data_segment(sbi, sum->entries, gc_list,
							segno, gc_type,
							force_migrate);

		stat_inc_gc_seg_count(sbi, data_type, gc_type);
		sbi->gc_reclaimed_segs[sbi->gc_mode]++;
		migrated++;

freed:
		if (gc_type == FG_GC &&
				get_valid_blocks(sbi, segno, false) == 0)
			seg_freed++;

		if (__is_large_section(sbi))
			sbi->next_victim_seg[gc_type] =
				(segno + 1 < end_segno) ? segno + 1 : NULL_SEGNO;
skip:
		f2fs_put_page(sum_page, 0);
	}

	if (submitted)
		f2fs_submit_merged_write(sbi, data_type);

	blk_finish_plug(&plug);

	if (migrated)
		stat_inc_gc_sec_count(sbi, data_type, gc_type);

	return seg_freed;
}

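/*
 * Main GC entry point, called with gc_lock held; the lock is released before
 * returning. It picks victims and collects them until enough free sections
 * are secured, escalating from background to foreground GC and writing
 * checkpoints to reclaim prefree segments when space stays tight.
 */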
int f2fs_gc(struct f2fs_sb_info *sbi, struct f2fs_gc_control *gc_control)
{
	int gc_type = gc_control->init_gc_type;
	unsigned int segno = gc_control->victim_segno;
	int sec_freed = 0, seg_freed = 0, total_freed = 0, total_sec_freed = 0;
	int ret = 0;
	struct cp_control cpc;
	struct gc_inode_list gc_list = {
		.ilist = LIST_HEAD_INIT(gc_list.ilist),
		.iroot = RADIX_TREE_INIT(gc_list.iroot, GFP_NOFS),
	};
	unsigned int skipped_round = 0, round = 0;
	unsigned int upper_secs;

	trace_f2fs_gc_begin(sbi->sb, gc_type, gc_control->no_bg_gc,
				gc_control->nr_free_secs,
				get_pages(sbi, F2FS_DIRTY_NODES),
				get_pages(sbi, F2FS_DIRTY_DENTS),
				get_pages(sbi, F2FS_DIRTY_IMETA),
				free_sections(sbi),
				free_segments(sbi),
				reserved_segments(sbi),
				prefree_segments(sbi));

	cpc.reason = __get_cp_reason(sbi);
gc_more:
	sbi->skipped_gc_rwsem = 0;
	if (unlikely(!(sbi->sb->s_flags & SB_ACTIVE))) {
		ret = -EINVAL;
		goto stop;
	}
	if (unlikely(f2fs_cp_error(sbi))) {
		ret = -EIO;
		goto stop;
	}

	/* Let's run FG_GC, if we don't have enough space. */
	if (has_not_enough_free_secs(sbi, 0, 0)) {
		gc_type = FG_GC;

		/*
		 * For example, if there are many prefree segments below the
		 * given threshold, we can make them free by writing a
		 * checkpoint. Then we secure free segments, which no longer
		 * need FG_GC.
		 */
		if (prefree_segments(sbi)) {
			stat_inc_cp_call_count(sbi, TOTAL_CALL);
			ret = f2fs_write_checkpoint(sbi, &cpc);
			if (ret)
				goto stop;
			/* Reset due to checkpoint */
			sec_freed = 0;
		}
	}

	/* f2fs_balance_fs doesn't need to do BG_GC in critical path. */
	if (gc_type == BG_GC && gc_control->no_bg_gc) {
		ret = -EINVAL;
		goto stop;
	}
retry:
	ret = __get_victim(sbi, &segno, gc_type);
	if (ret) {
		/* allow searching for victims in sections that have pinned data */
		if (ret == -ENODATA && gc_type == FG_GC &&
				f2fs_pinned_section_exists(DIRTY_I(sbi))) {
			f2fs_unpin_all_sections(sbi, false);
			goto retry;
		}
		goto stop;
	}

	seg_freed = do_garbage_collect(sbi, segno, &gc_list, gc_type,
				gc_control->should_migrate_blocks);
	if (seg_freed < 0)
		goto stop;

	total_freed += seg_freed;

	if (seg_freed == f2fs_usable_segs_in_sec(sbi, segno)) {
		sec_freed++;
		total_sec_freed++;
	}

	if (gc_type == FG_GC) {
		sbi->cur_victim_sec = NULL_SEGNO;

		if (has_enough_free_secs(sbi, sec_freed, 0)) {
			if (!gc_control->no_bg_gc &&
			    total_sec_freed < gc_control->nr_free_secs)
				goto go_gc_more;
			goto stop;
		}
		if (sbi->skipped_gc_rwsem)
			skipped_round++;
		round++;
		if (skipped_round > MAX_SKIP_GC_COUNT &&
				skipped_round * 2 >= round) {
			stat_inc_cp_call_count(sbi, TOTAL_CALL);
			ret = f2fs_write_checkpoint(sbi, &cpc);
			goto stop;
		}
	} else if (has_enough_free_secs(sbi, 0, 0)) {
		goto stop;
	}

	__get_secs_required(sbi, NULL, &upper_secs, NULL);

	/*
	 * Write a checkpoint to reclaim prefree segments.
	 * We need three more sections for the writer's data/node/dentry.
	 */
	if (free_sections(sbi) <= upper_secs + NR_GC_CHECKPOINT_SECS &&
				prefree_segments(sbi)) {
		stat_inc_cp_call_count(sbi, TOTAL_CALL);
		ret = f2fs_write_checkpoint(sbi, &cpc);
		if (ret)
			goto stop;
		/* Reset due to checkpoint */
		sec_freed = 0;
	}
go_gc_more:
	segno = NULL_SEGNO;
	goto gc_more;

stop:
	SIT_I(sbi)->last_victim[ALLOC_NEXT] = 0;
	SIT_I(sbi)->last_victim[FLUSH_DEVICE] = gc_control->victim_segno;

	if (gc_type == FG_GC)
		f2fs_unpin_all_sections(sbi, true);

	trace_f2fs_gc_end(sbi->sb, ret, total_freed, total_sec_freed,
				get_pages(sbi, F2FS_DIRTY_NODES),
				get_pages(sbi, F2FS_DIRTY_DENTS),
				get_pages(sbi, F2FS_DIRTY_IMETA),
				free_sections(sbi),
				free_segments(sbi),
				reserved_segments(sbi),
				prefree_segments(sbi));

	f2fs_up_write(&sbi->gc_lock);

	put_gc_inode(&gc_list);

	if (gc_control->err_gc_skipped && !ret)
		ret = total_sec_freed ? 0 : -EAGAIN;
	return ret;
}

int __init f2fs_create_garbage_collection_cache(void)
{
	victim_entry_slab = f2fs_kmem_cache_create("f2fs_victim_entry",
					sizeof(struct victim_entry));
	return victim_entry_slab ? 0 : -ENOMEM;
}

void f2fs_destroy_garbage_collection_cache(void)
{
	kmem_cache_destroy(victim_entry_slab);
}

static void init_atgc_management(struct f2fs_sb_info *sbi)
{
	struct atgc_management *am = &sbi->am;

	if (test_opt(sbi, ATGC) &&
		SIT_I(sbi)->elapsed_time >= DEF_GC_THREAD_AGE_THRESHOLD)
		am->atgc_enabled = true;

	am->root = RB_ROOT_CACHED;
	INIT_LIST_HEAD(&am->victim_list);
	am->victim_count = 0;

	am->candidate_ratio = DEF_GC_THREAD_CANDIDATE_RATIO;
	am->max_candidate_count = DEF_GC_THREAD_MAX_CANDIDATE_COUNT;
	am->age_weight = DEF_GC_THREAD_AGE_WEIGHT;
	am->age_threshold = DEF_GC_THREAD_AGE_THRESHOLD;
}

void f2fs_build_gc_manager(struct f2fs_sb_info *sbi)
{
	sbi->gc_pin_file_threshold = DEF_GC_FAILED_PINNED_FILES;

	/* give warm/cold data area from slower device */
	if (f2fs_is_multi_device(sbi) && !__is_large_section(sbi))
		SIT_I(sbi)->last_victim[ALLOC_NEXT] =
				GET_SEGNO(sbi, FDEV(0).end_blk) + 1;

	init_atgc_management(sbi);
}

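/*
 * Foreground-collect every section in [start_seg, end_seg]. In dry-run mode,
 * stop after dry_run_sections sections have been emptied; otherwise fail with
 * -EAGAIN if a section still has valid blocks after GC.
 */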
int f2fs_gc_range(struct f2fs_sb_info *sbi,
		unsigned int start_seg, unsigned int end_seg,
		bool dry_run, unsigned int dry_run_sections)
{
	unsigned int segno;
	unsigned int gc_secs = dry_run_sections;

	if (unlikely(f2fs_cp_error(sbi)))
		return -EIO;

	for (segno = start_seg; segno <= end_seg; segno += SEGS_PER_SEC(sbi)) {
		struct gc_inode_list gc_list = {
			.ilist = LIST_HEAD_INIT(gc_list.ilist),
			.iroot = RADIX_TREE_INIT(gc_list.iroot, GFP_NOFS),
		};

		do_garbage_collect(sbi, segno, &gc_list, FG_GC,
						dry_run_sections == 0);
		put_gc_inode(&gc_list);

		if (!dry_run && get_valid_blocks(sbi, segno, true))
			return -EAGAIN;
		if (dry_run && dry_run_sections &&
		    !get_valid_blocks(sbi, segno, true) && --gc_secs == 0)
			break;

		if (fatal_signal_pending(current))
			return -ERESTARTSYS;
	}

	return 0;
}

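/*
 * Carve the last @secs sections out of the main area for shrinking:
 * temporarily hide them from the allocator, invalidate stale victim
 * hints, migrate the current segments and all valid blocks out of the
 * range, and (unless this is a dry run) write a checkpoint so the
 * range really becomes free.
 */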
static int free_segment_range(struct f2fs_sb_info *sbi,
				unsigned int secs, bool dry_run)
{
	unsigned int next_inuse, start, end;
	struct cp_control cpc = { CP_RESIZE, 0, 0, 0 };
	int gc_mode, gc_type;
	int err = 0;
	int type;

	/* Force block allocation for GC */
	MAIN_SECS(sbi) -= secs;
	start = MAIN_SECS(sbi) * SEGS_PER_SEC(sbi);
	end = MAIN_SEGS(sbi) - 1;

	mutex_lock(&DIRTY_I(sbi)->seglist_lock);
	for (gc_mode = 0; gc_mode < MAX_GC_POLICY; gc_mode++)
		if (SIT_I(sbi)->last_victim[gc_mode] >= start)
			SIT_I(sbi)->last_victim[gc_mode] = 0;

	for (gc_type = BG_GC; gc_type <= FG_GC; gc_type++)
		if (sbi->next_victim_seg[gc_type] >= start)
			sbi->next_victim_seg[gc_type] = NULL_SEGNO;
	mutex_unlock(&DIRTY_I(sbi)->seglist_lock);

	/* Move out cursegs from the target range */
	for (type = CURSEG_HOT_DATA; type < NR_CURSEG_PERSIST_TYPE; type++) {
		err = f2fs_allocate_segment_for_resize(sbi, type, start, end);
		if (err)
			goto out;
	}

	/* Do GC to move out valid blocks in the range */
	err = f2fs_gc_range(sbi, start, end, dry_run, 0);
	if (err || dry_run)
		goto out;

	stat_inc_cp_call_count(sbi, TOTAL_CALL);
	err = f2fs_write_checkpoint(sbi, &cpc);
	if (err)
		goto out;

	next_inuse = find_next_inuse(FREE_I(sbi), end + 1, start);
	if (next_inuse <= end) {
		f2fs_err(sbi, "segno %u should be free but is still in use!",
			 next_inuse);
		f2fs_bug_on(sbi, 1);
	}
out:
	MAIN_SECS(sbi) += secs;
	return err;
}

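/*
 * Apply a size change of @secs sections to the on-disk superblock
 * counters (section, segment, main-segment and block counts, plus the
 * last device's segment count on multi-device setups). @secs is
 * negative when shrinking; e.g., assuming the common geometry of one
 * segment per section and 512 blocks per segment, secs == -1 lowers
 * block_count by 512.
 */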
static void update_sb_metadata(struct f2fs_sb_info *sbi, int secs)
{
	struct f2fs_super_block *raw_sb = F2FS_RAW_SUPER(sbi);
	int section_count;
	int segment_count;
	int segment_count_main;
	long long block_count;
	int segs = secs * SEGS_PER_SEC(sbi);

	f2fs_down_write(&sbi->sb_lock);

	section_count = le32_to_cpu(raw_sb->section_count);
	segment_count = le32_to_cpu(raw_sb->segment_count);
	segment_count_main = le32_to_cpu(raw_sb->segment_count_main);
	block_count = le64_to_cpu(raw_sb->block_count);

	raw_sb->section_count = cpu_to_le32(section_count + secs);
	raw_sb->segment_count = cpu_to_le32(segment_count + segs);
	raw_sb->segment_count_main = cpu_to_le32(segment_count_main + segs);
	raw_sb->block_count = cpu_to_le64(block_count +
			(long long)SEGS_TO_BLKS(sbi, segs));
	if (f2fs_is_multi_device(sbi)) {
		int last_dev = sbi->s_ndevs - 1;
		int dev_segs =
			le32_to_cpu(raw_sb->devs[last_dev].total_segments);

		raw_sb->devs[last_dev].total_segments =
						cpu_to_le32(dev_segs + segs);
	}

	f2fs_up_write(&sbi->sb_lock);
}

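/*
 * In-memory counterpart of update_sb_metadata(): apply the same @secs
 * delta to the cached segment/section/block counters and to the
 * checkpoint's user_block_count, adjusting the last device's zone
 * count as well on zoned setups.
 */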
static void update_fs_metadata(struct f2fs_sb_info *sbi, int secs)
{
	int segs = secs * SEGS_PER_SEC(sbi);
	long long blks = SEGS_TO_BLKS(sbi, segs);
	long long user_block_count =
				le64_to_cpu(F2FS_CKPT(sbi)->user_block_count);

	SM_I(sbi)->segment_count = (int)SM_I(sbi)->segment_count + segs;
	MAIN_SEGS(sbi) = (int)MAIN_SEGS(sbi) + segs;
	MAIN_SECS(sbi) += secs;
	FREE_I(sbi)->free_sections = (int)FREE_I(sbi)->free_sections + secs;
	FREE_I(sbi)->free_segments = (int)FREE_I(sbi)->free_segments + segs;
	F2FS_CKPT(sbi)->user_block_count = cpu_to_le64(user_block_count + blks);

	if (f2fs_is_multi_device(sbi)) {
		int last_dev = sbi->s_ndevs - 1;

		FDEV(last_dev).total_segments =
				(int)FDEV(last_dev).total_segments + segs;
		FDEV(last_dev).end_blk =
				(long long)FDEV(last_dev).end_blk + blks;
#ifdef CONFIG_BLK_DEV_ZONED
		FDEV(last_dev).nr_blkz = FDEV(last_dev).nr_blkz +
					div_u64(blks, sbi->blocks_per_blkz);
#endif
	}
}

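/*
 * Shrink the filesystem to @block_count blocks (growing is rejected
 * with -EINVAL, and the new size must be section-aligned). The shrink
 * runs in two passes: a dry run of free_segment_range() under
 * f2fs_lock_op() to check feasibility, then, with the superblock
 * frozen and gc_lock/cp_global_sem held, the real pass that frees the
 * tail sections, commits the updated superblock and writes a
 * checkpoint. On failure the counters are rolled back and, if that is
 * no longer possible, SBI_NEED_FSCK is set.
 */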
int f2fs_resize_fs(struct file *filp, __u64 block_count)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(file_inode(filp));
	__u64 old_block_count, shrunk_blocks;
	struct cp_control cpc = { CP_RESIZE, 0, 0, 0 };
	unsigned int secs;
	int err = 0;
	__u32 rem;

	old_block_count = le64_to_cpu(F2FS_RAW_SUPER(sbi)->block_count);
	if (block_count > old_block_count)
		return -EINVAL;

	if (f2fs_is_multi_device(sbi)) {
		int last_dev = sbi->s_ndevs - 1;
		__u64 last_segs = FDEV(last_dev).total_segments;

		if (block_count + SEGS_TO_BLKS(sbi, last_segs) <=
								old_block_count)
			return -EINVAL;
	}

	/* the new fs size must be aligned to the section size */
	div_u64_rem(block_count, BLKS_PER_SEC(sbi), &rem);
	if (rem)
		return -EINVAL;

	if (block_count == old_block_count)
		return 0;

	if (is_sbi_flag_set(sbi, SBI_NEED_FSCK)) {
		f2fs_err(sbi, "Should run fsck to repair first.");
		return -EFSCORRUPTED;
	}

	if (test_opt(sbi, DISABLE_CHECKPOINT)) {
		f2fs_err(sbi, "Checkpoint should be enabled.");
		return -EINVAL;
	}

	err = mnt_want_write_file(filp);
	if (err)
		return err;

	shrunk_blocks = old_block_count - block_count;
	secs = div_u64(shrunk_blocks, BLKS_PER_SEC(sbi));

	/* stop other GC */
	if (!f2fs_down_write_trylock(&sbi->gc_lock)) {
		err = -EAGAIN;
		goto out_drop_write;
	}

	/* stop CP to protect MAIN_SEC in free_segment_range */
	f2fs_lock_op(sbi);

	spin_lock(&sbi->stat_lock);
	if (shrunk_blocks + valid_user_blocks(sbi) +
		sbi->current_reserved_blocks + sbi->unusable_block_count +
		F2FS_OPTION(sbi).root_reserved_blocks > sbi->user_block_count)
		err = -ENOSPC;
	spin_unlock(&sbi->stat_lock);

	if (err)
		goto out_unlock;

	err = free_segment_range(sbi, secs, true);

out_unlock:
	f2fs_unlock_op(sbi);
	f2fs_up_write(&sbi->gc_lock);
out_drop_write:
	mnt_drop_write_file(filp);
	if (err)
		return err;

	err = freeze_super(sbi->sb, FREEZE_HOLDER_USERSPACE);
	if (err)
		return err;

	if (f2fs_readonly(sbi->sb)) {
		err = thaw_super(sbi->sb, FREEZE_HOLDER_USERSPACE);
		if (err)
			return err;
		return -EROFS;
	}

	f2fs_down_write(&sbi->gc_lock);
	f2fs_down_write(&sbi->cp_global_sem);

	spin_lock(&sbi->stat_lock);
	if (shrunk_blocks + valid_user_blocks(sbi) +
		sbi->current_reserved_blocks + sbi->unusable_block_count +
		F2FS_OPTION(sbi).root_reserved_blocks > sbi->user_block_count)
		err = -ENOSPC;
	else
		sbi->user_block_count -= shrunk_blocks;
	spin_unlock(&sbi->stat_lock);
	if (err)
		goto out_err;

	set_sbi_flag(sbi, SBI_IS_RESIZEFS);
	err = free_segment_range(sbi, secs, false);
	if (err)
		goto recover_out;

	update_sb_metadata(sbi, -secs);

	err = f2fs_commit_super(sbi, false);
	if (err) {
		update_sb_metadata(sbi, secs);
		goto recover_out;
	}

	update_fs_metadata(sbi, -secs);
	clear_sbi_flag(sbi, SBI_IS_RESIZEFS);
	set_sbi_flag(sbi, SBI_IS_DIRTY);

	stat_inc_cp_call_count(sbi, TOTAL_CALL);
	err = f2fs_write_checkpoint(sbi, &cpc);
	if (err) {
		update_fs_metadata(sbi, secs);
		update_sb_metadata(sbi, secs);
		f2fs_commit_super(sbi, false);
	}
recover_out:
	clear_sbi_flag(sbi, SBI_IS_RESIZEFS);
	if (err) {
		set_sbi_flag(sbi, SBI_NEED_FSCK);
		f2fs_err(sbi, "resize_fs failed, should run fsck to repair!");

		spin_lock(&sbi->stat_lock);
		sbi->user_block_count += shrunk_blocks;
		spin_unlock(&sbi->stat_lock);
	}
out_err:
	f2fs_up_write(&sbi->cp_global_sem);
	f2fs_up_write(&sbi->gc_lock);
	thaw_super(sbi->sb, FREEZE_HOLDER_USERSPACE);
	return err;
}

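/*
 * A minimal sketch of the expected caller, loosely based on f2fs's
 * F2FS_IOC_RESIZE_FS ioctl handler (argument handling is illustrative):
 *
 *	static int f2fs_ioc_resize_fs(struct file *filp, unsigned long arg)
 *	{
 *		__u64 block_count;
 *
 *		if (!capable(CAP_SYS_ADMIN))
 *			return -EPERM;
 *		if (copy_from_user(&block_count, (void __user *)arg,
 *				   sizeof(block_count)))
 *			return -EFAULT;
 *
 *		return f2fs_resize_fs(filp, block_count);
 *	}
 */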