// SPDX-License-Identifier: GPL-2.0
/*
 * f2fs shrinker support
 *   the basic infra was copied from fs/ubifs/shrinker.c
 *
 * Copyright (c) 2015 Motorola Mobility
 * Copyright (c) 2015 Jaegeuk Kim <jaegeuk@kernel.org>
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>

#include "f2fs.h"
#include "node.h"

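/*
 * Every mounted f2fs instance links itself onto f2fs_list so that one
 * system-wide shrinker can walk all of them; f2fs_list_lock protects
 * the list, and shrinker_run_no stamps each scan pass so a rotated
 * entry is not visited twice in the same pass.
 *
 * The count/scan callbacks below are registered from super.c, roughly:
 *
 *	static struct shrinker f2fs_shrinker_info = {
 *		.scan_objects = f2fs_shrink_scan,
 *		.count_objects = f2fs_shrink_count,
 *		.seeks = DEFAULT_SEEKS,
 *	};
 */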
static LIST_HEAD(f2fs_list);
static DEFINE_SPINLOCK(f2fs_list_lock);
static unsigned int shrinker_run_no;

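/*
 * Clean NAT cache entries carry no dirty state, so everything on the
 * reclaimable list can be dropped and re-read from disk on demand.
 */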
static unsigned long __count_nat_entries(struct f2fs_sb_info *sbi)
{
	return NM_I(sbi)->nat_cnt[RECLAIMABLE_NAT];
}

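/*
 * Only free nids beyond MAX_FREE_NIDS are reported as reclaimable;
 * presumably the pool is kept at that watermark anyway, so shrinking
 * below it would only force an immediate rebuild.
 */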
static unsigned long __count_free_nids(struct f2fs_sb_info *sbi)
{
	long count = NM_I(sbi)->nid_cnt[FREE_NID] - MAX_FREE_NIDS;

	return count > 0 ? count : 0;
}

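/*
 * Reclaimable extent cache state: zombie trees (extent trees whose
 * inode has already been evicted) plus all cached extent nodes.
 */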
static unsigned long __count_extent_cache(struct f2fs_sb_info *sbi)
{
	return atomic_read(&sbi->total_zombie_tree) +
				atomic_read(&sbi->total_ext_node);
}

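/*
 * Called by the VM to size this shrinker's workload: walk every
 * mounted instance and sum its reclaimable extent cache, clean NAT
 * and surplus free nid entries.
 */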
unsigned long f2fs_shrink_count(struct shrinker *shrink,
				struct shrink_control *sc)
{
	struct f2fs_sb_info *sbi;
	struct list_head *p;
	unsigned long count = 0;

	spin_lock(&f2fs_list_lock);
	p = f2fs_list.next;
	while (p != &f2fs_list) {
		sbi = list_entry(p, struct f2fs_sb_info, s_list);

		/* stop f2fs_put_super */
		if (!mutex_trylock(&sbi->umount_mutex)) {
			p = p->next;
			continue;
		}
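		/*
		 * umount_mutex pins this sbi, so the list lock can be
		 * dropped while its caches are counted.
		 */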
		spin_unlock(&f2fs_list_lock);

		/* count extent cache entries */
		count += __count_extent_cache(sbi);

		/* count clean nat cache entries */
		count += __count_nat_entries(sbi);

		/* count free nids cache entries */
		count += __count_free_nids(sbi);

		spin_lock(&f2fs_list_lock);
		p = p->next;
		mutex_unlock(&sbi->umount_mutex);
	}
	spin_unlock(&f2fs_list_lock);
	return count;
}

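/*
 * Called by the VM under memory pressure: try to release up to
 * sc->nr_to_scan objects across all mounted instances, giving half of
 * the budget to the extent cache first, and return the number freed.
 */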
unsigned long f2fs_shrink_scan(struct shrinker *shrink,
				struct shrink_control *sc)
{
	unsigned long nr = sc->nr_to_scan;
	struct f2fs_sb_info *sbi;
	struct list_head *p;
	unsigned int run_no;
	unsigned long freed = 0;

	spin_lock(&f2fs_list_lock);
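	/*
	 * Bump the pass number, skipping 0 so that a freshly mounted sbi
	 * (whose shrinker_run_no is still 0) is never mistaken for one
	 * this pass has already visited.
	 */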
	do {
		run_no = ++shrinker_run_no;
	} while (run_no == 0);
	p = f2fs_list.next;
	while (p != &f2fs_list) {
		sbi = list_entry(p, struct f2fs_sb_info, s_list);

		if (sbi->shrinker_run_no == run_no)
			break;

		/* stop f2fs_put_super */
		if (!mutex_trylock(&sbi->umount_mutex)) {
			p = p->next;
			continue;
		}
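		/*
		 * As in f2fs_shrink_count(), holding umount_mutex keeps
		 * this sbi alive while the list lock is dropped.
		 */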
		spin_unlock(&f2fs_list_lock);

		sbi->shrinker_run_no = run_no;

		/* shrink extent cache entries */
		freed += f2fs_shrink_extent_tree(sbi, nr >> 1);

		/* shrink clean nat cache entries */
		if (freed < nr)
			freed += f2fs_try_to_free_nats(sbi, nr - freed);

		/* shrink free nids cache entries */
		if (freed < nr)
			freed += f2fs_try_to_free_nids(sbi, nr - freed);

		spin_lock(&f2fs_list_lock);
		p = p->next;
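		/* rotate this sbi to the tail so the next pass starts elsewhere */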
		list_move_tail(&sbi->s_list, &f2fs_list);
		mutex_unlock(&sbi->umount_mutex);
		if (freed >= nr)
			break;
	}
	spin_unlock(&f2fs_list_lock);
	return freed;
}

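/*
 * Make a newly mounted instance visible to the shrinker; called at
 * mount time.
 */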
void f2fs_join_shrinker(struct f2fs_sb_info *sbi)
{
	spin_lock(&f2fs_list_lock);
	list_add_tail(&sbi->s_list, &f2fs_list);
	spin_unlock(&f2fs_list_lock);
}

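/*
 * Detach an instance at umount.  The extent cache is drained first so
 * nothing lingers once the sbi leaves f2fs_list; the caller holds
 * umount_mutex, which keeps a concurrent shrinker pass away.
 */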
void f2fs_leave_shrinker(struct f2fs_sb_info *sbi)
{
	f2fs_shrink_extent_tree(sbi, __count_extent_cache(sbi));

	spin_lock(&f2fs_list_lock);
	list_del_init(&sbi->s_list);
	spin_unlock(&f2fs_list_lock);
}