// SPDX-License-Identifier: GPL-2.0
#include <linux/swap_cgroup.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>

#include <linux/swapops.h> /* depends on mm.h include */

static DEFINE_MUTEX(swap_cgroup_mutex);
struct swap_cgroup_ctrl {
	struct page **map;	/* array of backing pages holding the id slots */
	unsigned long length;	/* number of backing pages in @map */
	spinlock_t	lock;
};

static struct swap_cgroup_ctrl swap_cgroup_ctrl[MAX_SWAPFILES];

struct swap_cgroup {
	unsigned short		id;	/* mem_cgroup id; 0 means "no mem_cgroup" */
};
#define SC_PER_PAGE	(PAGE_SIZE/sizeof(struct swap_cgroup))
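
/*
 * Worked example (assuming a typical 4 KiB PAGE_SIZE and a 2-byte
 * unsigned short id): SC_PER_PAGE = 4096 / 2 = 2048, i.e. each backing
 * page records the mem_cgroup ids of 2048 swap entries.
 */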

/*
 * SwapCgroup implements "lookup" and "exchange" operations.
 * In typical usage, swap_cgroup is accessed via memcg's charge/uncharge
 * operations against the SwapCache. At swap_free(), it is accessed
 * directly from the swap code.
 *
 * This means:
 *  - there is no race in "exchange" when accessed via the SwapCache,
 *    because the SwapCache (and its swp_entry) is under lock.
 *  - when called via swap_free(), there is no remaining user of the
 *    entry, so there is no race either.
 * Hence no lock is needed around "exchange" itself.
 *
 * TODO: these buffers could be pushed out to HIGHMEM.
 */
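
/*
 * A minimal sketch of the typical call pattern, assuming a hypothetical
 * memcg caller (names and flow are illustrative, not the exact
 * memcontrol.c code):
 *
 *	swp_entry_t ent = ...;		// entry the folio was swapped to
 *
 *	// charge: remember which memcg owns this swap entry
 *	swap_cgroup_record(ent, mem_cgroup_id(memcg), 1);
 *	...
 *	// uncharge at swap_free(): read back and clear the owner
 *	unsigned short old = swap_cgroup_record(ent, 0, 1);
 *	// 'old' identifies the memcg whose swap charge to drop
 */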

/*
 * Allocate the backing pages for swap_cgroup.
 */
static int swap_cgroup_prepare(int type)
{
	struct page *page;
	struct swap_cgroup_ctrl *ctrl;
	unsigned long idx, max;

	ctrl = &swap_cgroup_ctrl[type];

	for (idx = 0; idx < ctrl->length; idx++) {
		page = alloc_page(GFP_KERNEL | __GFP_ZERO);
		if (!page)
			goto not_enough_page;
		ctrl->map[idx] = page;

		/* allocating many pages; give others a chance to run */
		if (!(idx % SWAP_CLUSTER_MAX))
			cond_resched();
	}
	return 0;
not_enough_page:
	/* roll back the pages allocated so far */
	max = idx;
	for (idx = 0; idx < max; idx++)
		__free_page(ctrl->map[idx]);

	return -ENOMEM;
}
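
/*
 * Rough footprint (assuming 4 KiB pages and 2-byte ids): the map costs
 * about 2 bytes per swap slot. For example, a 1 GiB swap device has
 * 262144 slots, so ctrl->length = DIV_ROUND_UP(262144, 2048) = 128
 * backing pages, i.e. 512 KiB, plus 128 pointers in the vcalloc'ed map.
 */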

static struct swap_cgroup *__lookup_swap_cgroup(struct swap_cgroup_ctrl *ctrl,
						pgoff_t offset)
{
	struct page *mappage;
	struct swap_cgroup *sc;

	mappage = ctrl->map[offset / SC_PER_PAGE];
	sc = page_address(mappage);
	return sc + offset % SC_PER_PAGE;
}
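
/*
 * Example of the two-level indexing above (assuming SC_PER_PAGE == 2048):
 * offset 5000 lives in backing page 5000 / 2048 = 2, at slot
 * 5000 % 2048 = 904 within that page.
 */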

static struct swap_cgroup *lookup_swap_cgroup(swp_entry_t ent,
					struct swap_cgroup_ctrl **ctrlp)
{
	pgoff_t offset = swp_offset(ent);
	struct swap_cgroup_ctrl *ctrl;

	ctrl = &swap_cgroup_ctrl[swp_type(ent)];
	if (ctrlp)
		*ctrlp = ctrl;
	return __lookup_swap_cgroup(ctrl, offset);
}

/**
 * swap_cgroup_cmpxchg - cmpxchg mem_cgroup's id for this swp_entry.
 * @ent: swap entry to be cmpxchg'd
 * @old: old id
 * @new: new id
 *
 * Returns old id at success, 0 at failure.
 * (There is no mem_cgroup using 0 as its id.)
 */
unsigned short swap_cgroup_cmpxchg(swp_entry_t ent,
					unsigned short old, unsigned short new)
{
	struct swap_cgroup_ctrl *ctrl;
	struct swap_cgroup *sc;
	unsigned long flags;
	unsigned short retval;

	sc = lookup_swap_cgroup(ent, &ctrl);

	spin_lock_irqsave(&ctrl->lock, flags);
	retval = sc->id;
	if (retval == old)
		sc->id = new;
	else
		retval = 0;
	spin_unlock_irqrestore(&ctrl->lock, flags);
	return retval;
}
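
/*
 * Hypothetical usage sketch (illustrative, not a real caller): moving
 * a swapped-out charge between memcgs succeeds only if the entry still
 * belongs to the expected old owner:
 *
 *	if (swap_cgroup_cmpxchg(ent, old_id, new_id) == old_id) {
 *		// ownership transferred; adjust per-memcg swap counters
 *	}
 */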

/**
 * swap_cgroup_record - record mem_cgroup for a set of swap entries
 * @ent: the first swap entry to be recorded into
 * @id: mem_cgroup id to be recorded
 * @nr_ents: number of swap entries to be recorded
 *
 * Returns the id previously recorded on the first entry (which may be 0,
 * since 0 means "no mem_cgroup"). All @nr_ents entries are expected to
 * carry the same old id.
 */
unsigned short swap_cgroup_record(swp_entry_t ent, unsigned short id,
				  unsigned int nr_ents)
{
	struct swap_cgroup_ctrl *ctrl;
	struct swap_cgroup *sc;
	unsigned short old;
	unsigned long flags;
	pgoff_t offset = swp_offset(ent);
	pgoff_t end = offset + nr_ents;

	sc = lookup_swap_cgroup(ent, &ctrl);

	spin_lock_irqsave(&ctrl->lock, flags);
	old = sc->id;
	for (;;) {
		VM_BUG_ON(sc->id != old);
		sc->id = id;
		offset++;
		if (offset == end)
			break;
		/* step to the next slot, crossing page boundaries as needed */
		if (offset % SC_PER_PAGE)
			sc++;
		else
			sc = __lookup_swap_cgroup(ctrl, offset);
	}
	spin_unlock_irqrestore(&ctrl->lock, flags);

	return old;
}
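
/*
 * Usage sketch for the multi-entry form (illustrative): when a large
 * folio is swapped out to a cluster of contiguous swap entries, all of
 * them can be recorded in one call, under a single lock acquisition:
 *
 *	swap_cgroup_record(ent, mem_cgroup_id(memcg), nr_pages);
 */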

/**
 * lookup_swap_cgroup_id - lookup mem_cgroup id tied to swap entry
 * @ent: swap entry to be looked up.
 *
 * Returns the mem_cgroup id recorded for @ent, or 0 if none.
 * (0 is an invalid mem_cgroup id.)
 */
unsigned short lookup_swap_cgroup_id(swp_entry_t ent)
{
	return lookup_swap_cgroup(ent, NULL)->id;
}

int swap_cgroup_swapon(int type, unsigned long max_pages)
{
	void *array;
	unsigned long length;
	struct swap_cgroup_ctrl *ctrl;

	if (mem_cgroup_disabled())
		return 0;

	/* one backing page covers SC_PER_PAGE swap slots */
	length = DIV_ROUND_UP(max_pages, SC_PER_PAGE);

	array = vcalloc(length, sizeof(void *));
	if (!array)
		goto nomem;

	ctrl = &swap_cgroup_ctrl[type];
	mutex_lock(&swap_cgroup_mutex);
	ctrl->length = length;
	ctrl->map = array;
	spin_lock_init(&ctrl->lock);
	if (swap_cgroup_prepare(type)) {
		/* memory shortage */
		ctrl->map = NULL;
		ctrl->length = 0;
		mutex_unlock(&swap_cgroup_mutex);
		vfree(array);
		goto nomem;
	}
	mutex_unlock(&swap_cgroup_mutex);

	return 0;
nomem:
	pr_info("couldn't allocate enough memory for swap_cgroup\n");
	pr_info("swap_cgroup can be disabled by swapaccount=0 boot option\n");
	return -ENOMEM;
}
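
/*
 * Sketch of the expected call site, simplified from the swapon path
 * (error label and variable names are illustrative):
 *
 *	error = swap_cgroup_swapon(p->type, maxpages);
 *	if (error)
 *		goto bad_swap;
 */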

void swap_cgroup_swapoff(int type)
{
	struct page **map;
	unsigned long i, length;
	struct swap_cgroup_ctrl *ctrl;

	if (mem_cgroup_disabled())
		return;

	mutex_lock(&swap_cgroup_mutex);
	ctrl = &swap_cgroup_ctrl[type];
	map = ctrl->map;
	length = ctrl->length;
	ctrl->map = NULL;
	ctrl->length = 0;
	mutex_unlock(&swap_cgroup_mutex);

	if (map) {
		for (i = 0; i < length; i++) {
			struct page *page = map[i];

			if (page)
				__free_page(page);
			if (!(i % SWAP_CLUSTER_MAX))
				cond_resched();
		}
		vfree(map);
	}
}