1/*
2 * zsmalloc memory allocator
3 *
4 * Copyright (C) 2011  Nitin Gupta
5 * Copyright (C) 2012, 2013 Minchan Kim
6 *
7 * This code is released using a dual license strategy: BSD/GPL
8 * You can choose the license that better fits your requirements.
9 *
10 * Released under the terms of 3-clause BSD License
11 * Released under the terms of GNU General Public License Version 2.0
12 */
13
14/*
15 * Following is how we use various fields and flags of underlying
16 * struct page(s) to form a zspage.
17 *
18 * Usage of struct page fields:
19 *	page->private: points to zspage
20 *	page->index: links together all component pages of a zspage
21 *		For the huge page, this is always 0, so we use this field
 *		to store the handle.
23 *	page->page_type: first object offset in a subpage of zspage
24 *
25 * Usage of struct page flags:
26 *	PG_private: identifies the first component page
27 *	PG_owner_priv_1: identifies the huge component page
28 *
29 */
30
31#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
32
33/*
34 * lock ordering:
35 *	page_lock
36 *	pool->lock
37 *	zspage->lock
38 */
39
40#include <linux/module.h>
41#include <linux/kernel.h>
42#include <linux/sched.h>
43#include <linux/bitops.h>
44#include <linux/errno.h>
45#include <linux/highmem.h>
46#include <linux/string.h>
47#include <linux/slab.h>
48#include <linux/pgtable.h>
49#include <asm/tlbflush.h>
50#include <linux/cpumask.h>
51#include <linux/cpu.h>
52#include <linux/vmalloc.h>
53#include <linux/preempt.h>
54#include <linux/spinlock.h>
55#include <linux/shrinker.h>
56#include <linux/types.h>
57#include <linux/debugfs.h>
58#include <linux/zsmalloc.h>
59#include <linux/zpool.h>
60#include <linux/migrate.h>
61#include <linux/wait.h>
62#include <linux/pagemap.h>
63#include <linux/fs.h>
64#include <linux/local_lock.h>
65
66#define ZSPAGE_MAGIC	0x58
67
68/*
 * This must be a power of 2 and greater than or equal to sizeof(link_free).
 * These two conditions ensure that any 'struct link_free' itself doesn't
 * span more than 1 page, which avoids the complex case of mapping 2 pages
 * simply to restore link_free pointer values.
73 */
74#define ZS_ALIGN		8
75
76#define ZS_HANDLE_SIZE (sizeof(unsigned long))
77
78/*
79 * Object location (<PFN>, <obj_idx>) is encoded as
80 * a single (unsigned long) handle value.
81 *
82 * Note that object index <obj_idx> starts from 0.
83 *
84 * This is made more complicated by various memory models and PAE.
85 */
86
87#ifndef MAX_POSSIBLE_PHYSMEM_BITS
88#ifdef MAX_PHYSMEM_BITS
89#define MAX_POSSIBLE_PHYSMEM_BITS MAX_PHYSMEM_BITS
90#else
91/*
 * If this definition of MAX_POSSIBLE_PHYSMEM_BITS is used, OBJ_INDEX_BITS
 * will just be PAGE_SHIFT.
94 */
95#define MAX_POSSIBLE_PHYSMEM_BITS BITS_PER_LONG
96#endif
97#endif
98
99#define _PFN_BITS		(MAX_POSSIBLE_PHYSMEM_BITS - PAGE_SHIFT)
100
101/*
 * The head of an allocated object carries OBJ_ALLOCATED_TAG
 * to identify whether the object is allocated or not.
 * It's okay to keep the status bit in the least significant bit because
 * the header stores the handle, which is a 4-byte-aligned address, so we
 * have room for at least two bits.
107 */
108#define OBJ_ALLOCATED_TAG 1
109
110#define OBJ_TAG_BITS	1
111#define OBJ_TAG_MASK	OBJ_ALLOCATED_TAG
112
113#define OBJ_INDEX_BITS	(BITS_PER_LONG - _PFN_BITS)
114#define OBJ_INDEX_MASK	((_AC(1, UL) << OBJ_INDEX_BITS) - 1)
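/*
 * Worked example (illustrative only, assuming 4K pages and, say,
 * MAX_POSSIBLE_PHYSMEM_BITS == 46 on a 64-bit system):
 *
 *	_PFN_BITS      = 46 - 12 = 34
 *	OBJ_INDEX_BITS = 64 - 34 = 30
 *
 * location_to_obj() then encodes a page at PFN 0x12345 holding object
 * index 7 as:
 *
 *	obj = (0x12345UL << 30) | 7;
 *
 * and obj_to_location() recovers the PFN with (obj >> OBJ_INDEX_BITS)
 * and the index with (obj & OBJ_INDEX_MASK).
 */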
115
116#define HUGE_BITS	1
117#define FULLNESS_BITS	4
118#define CLASS_BITS	8
119#define MAGIC_VAL_BITS	8
120
121#define MAX(a, b) ((a) >= (b) ? (a) : (b))
122
123#define ZS_MAX_PAGES_PER_ZSPAGE	(_AC(CONFIG_ZSMALLOC_CHAIN_SIZE, UL))
124
125/* ZS_MIN_ALLOC_SIZE must be multiple of ZS_ALIGN */
126#define ZS_MIN_ALLOC_SIZE \
127	MAX(32, (ZS_MAX_PAGES_PER_ZSPAGE << PAGE_SHIFT >> OBJ_INDEX_BITS))
128/* each chunk includes extra space to keep handle */
129#define ZS_MAX_ALLOC_SIZE	PAGE_SIZE
130
131/*
 * On systems with 4K page size, this gives 255 size classes! There is a
 * trade-off here:
 *  - A large number of size classes is potentially wasteful as free pages are
 *    spread across these classes
 *  - A small number of size classes causes large internal fragmentation
 *  - It is probably better to use specific size classes (empirically
 *    determined). NOTE: all those class sizes must be set as multiples of
 *    ZS_ALIGN to make sure link_free itself never has to span 2 pages.
 *
 *  ZS_MIN_ALLOC_SIZE and ZS_SIZE_CLASS_DELTA must be multiples of ZS_ALIGN
 *  (reason above)
143 */
144#define ZS_SIZE_CLASS_DELTA	(PAGE_SIZE >> CLASS_BITS)
145#define ZS_SIZE_CLASSES	(DIV_ROUND_UP(ZS_MAX_ALLOC_SIZE - ZS_MIN_ALLOC_SIZE, \
146				      ZS_SIZE_CLASS_DELTA) + 1)
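/*
 * Worked example (illustrative only): on a typical 64-bit configuration
 * with 4K pages and CLASS_BITS == 8, the shift term in ZS_MIN_ALLOC_SIZE
 * is negligible, so:
 *
 *	ZS_MIN_ALLOC_SIZE   = 32
 *	ZS_SIZE_CLASS_DELTA = 4096 >> 8 = 16
 *	ZS_SIZE_CLASSES     = DIV_ROUND_UP(4096 - 32, 16) + 1 = 255
 *
 * which is the "255 size classes" mentioned above, spaced 16 bytes apart.
 */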
147
148/*
 * Pages are distinguished by the ratio of used memory (that is, the ratio
 * of ->inuse objects to all objects that the page can store). For example,
 * INUSE_RATIO_10 means that the ratio of used objects is > 0% and <= 10%.
152 *
153 * The number of fullness groups is not random. It allows us to keep
154 * difference between the least busy page in the group (minimum permitted
155 * number of ->inuse objects) and the most busy page (maximum permitted
156 * number of ->inuse objects) at a reasonable value.
157 */
158enum fullness_group {
159	ZS_INUSE_RATIO_0,
160	ZS_INUSE_RATIO_10,
161	/* NOTE: 8 more fullness groups here */
162	ZS_INUSE_RATIO_99       = 10,
163	ZS_INUSE_RATIO_100,
164	NR_FULLNESS_GROUPS,
165};
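/*
 * For example, a zspage with 9 of its 20 objects in use (a 45% ratio) is
 * placed in ZS_INUSE_RATIO_50 by get_fullness_group(), which computes the
 * group as ratio / 10 + 1 for partially used zspages.
 */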
166
167enum class_stat_type {
168	/* NOTE: stats for 12 fullness groups here: from inuse 0 to 100 */
169	ZS_OBJS_ALLOCATED       = NR_FULLNESS_GROUPS,
170	ZS_OBJS_INUSE,
171	NR_CLASS_STAT_TYPES,
172};
173
174struct zs_size_stat {
175	unsigned long objs[NR_CLASS_STAT_TYPES];
176};
177
178#ifdef CONFIG_ZSMALLOC_STAT
179static struct dentry *zs_stat_root;
180#endif
181
182static size_t huge_class_size;
183
184struct size_class {
185	struct list_head fullness_list[NR_FULLNESS_GROUPS];
186	/*
187	 * Size of objects stored in this class. Must be multiple
188	 * of ZS_ALIGN.
189	 */
190	int size;
191	int objs_per_zspage;
192	/* Number of PAGE_SIZE sized pages to combine to form a 'zspage' */
193	int pages_per_zspage;
194
195	unsigned int index;
196	struct zs_size_stat stats;
197};
198
199/*
 * Placed within free objects to form a singly linked list.
 * For every zspage, zspage->freeobj gives the head of this list.
 *
 * This must be a power of 2 and less than or equal to ZS_ALIGN.
204 */
205struct link_free {
206	union {
207		/*
208		 * Free object index;
209		 * It's valid for non-allocated object
210		 */
211		unsigned long next;
212		/*
213		 * Handle of allocated object.
214		 */
215		unsigned long handle;
216	};
217};
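/*
 * How the union is used (see init_zspage(), obj_malloc() and obj_free()
 * below): while an object is free, its first word holds the index of the
 * next free object, shifted left by OBJ_TAG_BITS:
 *
 *	link->next = next_free_idx << OBJ_TAG_BITS;
 *
 * Once the object is allocated, the same word is overwritten with the
 * handle (tagged with OBJ_ALLOCATED_TAG), except for huge zspages, which
 * keep the handle in first_page->index instead.
 */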
218
219struct zs_pool {
220	const char *name;
221
222	struct size_class *size_class[ZS_SIZE_CLASSES];
223	struct kmem_cache *handle_cachep;
224	struct kmem_cache *zspage_cachep;
225
226	atomic_long_t pages_allocated;
227
228	struct zs_pool_stats stats;
229
230	/* Compact classes */
231	struct shrinker *shrinker;
232
233#ifdef CONFIG_ZSMALLOC_STAT
234	struct dentry *stat_dentry;
235#endif
236#ifdef CONFIG_COMPACTION
237	struct work_struct free_work;
238#endif
239	spinlock_t lock;
240	atomic_t compaction_in_progress;
241};
242
243struct zspage {
244	struct {
245		unsigned int huge:HUGE_BITS;
246		unsigned int fullness:FULLNESS_BITS;
247		unsigned int class:CLASS_BITS + 1;
248		unsigned int magic:MAGIC_VAL_BITS;
249	};
250	unsigned int inuse;
251	unsigned int freeobj;
252	struct page *first_page;
253	struct list_head list; /* fullness list */
254	struct zs_pool *pool;
255	rwlock_t lock;
256};
257
258struct mapping_area {
259	local_lock_t lock;
260	char *vm_buf; /* copy buffer for objects that span pages */
261	char *vm_addr; /* address of kmap_atomic()'ed pages */
262	enum zs_mapmode vm_mm; /* mapping mode */
263};
264
/* huge object: pages_per_zspage == 1 && objs_per_zspage == 1 */
266static void SetZsHugePage(struct zspage *zspage)
267{
268	zspage->huge = 1;
269}
270
271static bool ZsHugePage(struct zspage *zspage)
272{
273	return zspage->huge;
274}
275
276static void migrate_lock_init(struct zspage *zspage);
277static void migrate_read_lock(struct zspage *zspage);
278static void migrate_read_unlock(struct zspage *zspage);
279static void migrate_write_lock(struct zspage *zspage);
280static void migrate_write_unlock(struct zspage *zspage);
281
282#ifdef CONFIG_COMPACTION
283static void kick_deferred_free(struct zs_pool *pool);
284static void init_deferred_free(struct zs_pool *pool);
285static void SetZsPageMovable(struct zs_pool *pool, struct zspage *zspage);
286#else
287static void kick_deferred_free(struct zs_pool *pool) {}
288static void init_deferred_free(struct zs_pool *pool) {}
289static void SetZsPageMovable(struct zs_pool *pool, struct zspage *zspage) {}
290#endif
291
292static int create_cache(struct zs_pool *pool)
293{
294	pool->handle_cachep = kmem_cache_create("zs_handle", ZS_HANDLE_SIZE,
295					0, 0, NULL);
296	if (!pool->handle_cachep)
297		return 1;
298
299	pool->zspage_cachep = kmem_cache_create("zspage", sizeof(struct zspage),
300					0, 0, NULL);
301	if (!pool->zspage_cachep) {
302		kmem_cache_destroy(pool->handle_cachep);
303		pool->handle_cachep = NULL;
304		return 1;
305	}
306
307	return 0;
308}
309
310static void destroy_cache(struct zs_pool *pool)
311{
312	kmem_cache_destroy(pool->handle_cachep);
313	kmem_cache_destroy(pool->zspage_cachep);
314}
315
316static unsigned long cache_alloc_handle(struct zs_pool *pool, gfp_t gfp)
317{
318	return (unsigned long)kmem_cache_alloc(pool->handle_cachep,
319			gfp & ~(__GFP_HIGHMEM|__GFP_MOVABLE));
320}
321
322static void cache_free_handle(struct zs_pool *pool, unsigned long handle)
323{
324	kmem_cache_free(pool->handle_cachep, (void *)handle);
325}
326
327static struct zspage *cache_alloc_zspage(struct zs_pool *pool, gfp_t flags)
328{
329	return kmem_cache_zalloc(pool->zspage_cachep,
330			flags & ~(__GFP_HIGHMEM|__GFP_MOVABLE));
331}
332
333static void cache_free_zspage(struct zs_pool *pool, struct zspage *zspage)
334{
335	kmem_cache_free(pool->zspage_cachep, zspage);
336}
337
/* pool->lock (which owns the handle) synchronizes races */
339static void record_obj(unsigned long handle, unsigned long obj)
340{
341	*(unsigned long *)handle = obj;
342}
343
344/* zpool driver */
345
346#ifdef CONFIG_ZPOOL
347
348static void *zs_zpool_create(const char *name, gfp_t gfp)
349{
350	/*
351	 * Ignore global gfp flags: zs_malloc() may be invoked from
352	 * different contexts and its caller must provide a valid
353	 * gfp mask.
354	 */
355	return zs_create_pool(name);
356}
357
358static void zs_zpool_destroy(void *pool)
359{
360	zs_destroy_pool(pool);
361}
362
363static int zs_zpool_malloc(void *pool, size_t size, gfp_t gfp,
364			unsigned long *handle)
365{
366	*handle = zs_malloc(pool, size, gfp);
367
368	if (IS_ERR_VALUE(*handle))
369		return PTR_ERR((void *)*handle);
370	return 0;
371}
372static void zs_zpool_free(void *pool, unsigned long handle)
373{
374	zs_free(pool, handle);
375}
376
377static void *zs_zpool_map(void *pool, unsigned long handle,
378			enum zpool_mapmode mm)
379{
380	enum zs_mapmode zs_mm;
381
382	switch (mm) {
383	case ZPOOL_MM_RO:
384		zs_mm = ZS_MM_RO;
385		break;
386	case ZPOOL_MM_WO:
387		zs_mm = ZS_MM_WO;
388		break;
389	case ZPOOL_MM_RW:
390	default:
391		zs_mm = ZS_MM_RW;
392		break;
393	}
394
395	return zs_map_object(pool, handle, zs_mm);
396}
397static void zs_zpool_unmap(void *pool, unsigned long handle)
398{
399	zs_unmap_object(pool, handle);
400}
401
402static u64 zs_zpool_total_size(void *pool)
403{
404	return zs_get_total_pages(pool) << PAGE_SHIFT;
405}
406
407static struct zpool_driver zs_zpool_driver = {
408	.type =			  "zsmalloc",
409	.owner =		  THIS_MODULE,
410	.create =		  zs_zpool_create,
411	.destroy =		  zs_zpool_destroy,
412	.malloc_support_movable = true,
413	.malloc =		  zs_zpool_malloc,
414	.free =			  zs_zpool_free,
415	.map =			  zs_zpool_map,
416	.unmap =		  zs_zpool_unmap,
417	.total_size =		  zs_zpool_total_size,
418};
419
420MODULE_ALIAS("zpool-zsmalloc");
421#endif /* CONFIG_ZPOOL */
422
423/* per-cpu VM mapping areas for zspage accesses that cross page boundaries */
424static DEFINE_PER_CPU(struct mapping_area, zs_map_area) = {
425	.lock	= INIT_LOCAL_LOCK(lock),
426};
427
428static __maybe_unused int is_first_page(struct page *page)
429{
430	return PagePrivate(page);
431}
432
433/* Protected by pool->lock */
434static inline int get_zspage_inuse(struct zspage *zspage)
435{
436	return zspage->inuse;
437}
438
439
440static inline void mod_zspage_inuse(struct zspage *zspage, int val)
441{
442	zspage->inuse += val;
443}
444
445static inline struct page *get_first_page(struct zspage *zspage)
446{
447	struct page *first_page = zspage->first_page;
448
449	VM_BUG_ON_PAGE(!is_first_page(first_page), first_page);
450	return first_page;
451}
452
453static inline unsigned int get_first_obj_offset(struct page *page)
454{
455	return page->page_type;
456}
457
458static inline void set_first_obj_offset(struct page *page, unsigned int offset)
459{
460	page->page_type = offset;
461}
462
463static inline unsigned int get_freeobj(struct zspage *zspage)
464{
465	return zspage->freeobj;
466}
467
468static inline void set_freeobj(struct zspage *zspage, unsigned int obj)
469{
470	zspage->freeobj = obj;
471}
472
473static struct size_class *zspage_class(struct zs_pool *pool,
474				       struct zspage *zspage)
475{
476	return pool->size_class[zspage->class];
477}
478
479/*
 * zsmalloc divides the pool into various size classes where each
 * class maintains a list of zspages, and each zspage is divided
 * into equal sized chunks. Each allocation falls into one of these
 * classes depending on its size. This function returns the index of the
 * size class whose chunk size is big enough to hold the given size.
485 */
486static int get_size_class_index(int size)
487{
488	int idx = 0;
489
490	if (likely(size > ZS_MIN_ALLOC_SIZE))
491		idx = DIV_ROUND_UP(size - ZS_MIN_ALLOC_SIZE,
492				ZS_SIZE_CLASS_DELTA);
493
494	return min_t(int, ZS_SIZE_CLASSES - 1, idx);
495}
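/*
 * Worked example (illustrative only, assuming ZS_MIN_ALLOC_SIZE == 32 and
 * ZS_SIZE_CLASS_DELTA == 16, i.e. 4K pages): a 512-byte allocation grows
 * to 520 bytes once zs_malloc() adds ZS_HANDLE_SIZE (8 bytes on 64-bit),
 * so:
 *
 *	idx = DIV_ROUND_UP(520 - 32, 16) = 31
 *
 * which corresponds to a class size of 32 + 31 * 16 = 528 bytes, the
 * smallest class able to hold the object plus its handle (possibly merged
 * with a neighbouring class of identical layout, see zs_create_pool()).
 */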
496
497static inline void class_stat_inc(struct size_class *class,
498				int type, unsigned long cnt)
499{
500	class->stats.objs[type] += cnt;
501}
502
503static inline void class_stat_dec(struct size_class *class,
504				int type, unsigned long cnt)
505{
506	class->stats.objs[type] -= cnt;
507}
508
509static inline unsigned long zs_stat_get(struct size_class *class, int type)
510{
511	return class->stats.objs[type];
512}
513
514#ifdef CONFIG_ZSMALLOC_STAT
515
516static void __init zs_stat_init(void)
517{
518	if (!debugfs_initialized()) {
519		pr_warn("debugfs not available, stat dir not created\n");
520		return;
521	}
522
523	zs_stat_root = debugfs_create_dir("zsmalloc", NULL);
524}
525
526static void __exit zs_stat_exit(void)
527{
528	debugfs_remove_recursive(zs_stat_root);
529}
530
531static unsigned long zs_can_compact(struct size_class *class);
532
533static int zs_stats_size_show(struct seq_file *s, void *v)
534{
535	int i, fg;
536	struct zs_pool *pool = s->private;
537	struct size_class *class;
538	int objs_per_zspage;
539	unsigned long obj_allocated, obj_used, pages_used, freeable;
540	unsigned long total_objs = 0, total_used_objs = 0, total_pages = 0;
541	unsigned long total_freeable = 0;
542	unsigned long inuse_totals[NR_FULLNESS_GROUPS] = {0, };
543
544	seq_printf(s, " %5s %5s %9s %9s %9s %9s %9s %9s %9s %9s %9s %9s %9s %13s %10s %10s %16s %8s\n",
545			"class", "size", "10%", "20%", "30%", "40%",
546			"50%", "60%", "70%", "80%", "90%", "99%", "100%",
547			"obj_allocated", "obj_used", "pages_used",
548			"pages_per_zspage", "freeable");
549
550	for (i = 0; i < ZS_SIZE_CLASSES; i++) {
551
552		class = pool->size_class[i];
553
554		if (class->index != i)
555			continue;
556
557		spin_lock(&pool->lock);
558
559		seq_printf(s, " %5u %5u ", i, class->size);
560		for (fg = ZS_INUSE_RATIO_10; fg < NR_FULLNESS_GROUPS; fg++) {
561			inuse_totals[fg] += zs_stat_get(class, fg);
562			seq_printf(s, "%9lu ", zs_stat_get(class, fg));
563		}
564
565		obj_allocated = zs_stat_get(class, ZS_OBJS_ALLOCATED);
566		obj_used = zs_stat_get(class, ZS_OBJS_INUSE);
567		freeable = zs_can_compact(class);
568		spin_unlock(&pool->lock);
569
570		objs_per_zspage = class->objs_per_zspage;
571		pages_used = obj_allocated / objs_per_zspage *
572				class->pages_per_zspage;
573
574		seq_printf(s, "%13lu %10lu %10lu %16d %8lu\n",
575			   obj_allocated, obj_used, pages_used,
576			   class->pages_per_zspage, freeable);
577
578		total_objs += obj_allocated;
579		total_used_objs += obj_used;
580		total_pages += pages_used;
581		total_freeable += freeable;
582	}
583
584	seq_puts(s, "\n");
585	seq_printf(s, " %5s %5s ", "Total", "");
586
587	for (fg = ZS_INUSE_RATIO_10; fg < NR_FULLNESS_GROUPS; fg++)
588		seq_printf(s, "%9lu ", inuse_totals[fg]);
589
590	seq_printf(s, "%13lu %10lu %10lu %16s %8lu\n",
591		   total_objs, total_used_objs, total_pages, "",
592		   total_freeable);
593
594	return 0;
595}
596DEFINE_SHOW_ATTRIBUTE(zs_stats_size);
597
598static void zs_pool_stat_create(struct zs_pool *pool, const char *name)
599{
600	if (!zs_stat_root) {
601		pr_warn("no root stat dir, not creating <%s> stat dir\n", name);
602		return;
603	}
604
605	pool->stat_dentry = debugfs_create_dir(name, zs_stat_root);
606
607	debugfs_create_file("classes", S_IFREG | 0444, pool->stat_dentry, pool,
608			    &zs_stats_size_fops);
609}
610
611static void zs_pool_stat_destroy(struct zs_pool *pool)
612{
613	debugfs_remove_recursive(pool->stat_dentry);
614}
615
616#else /* CONFIG_ZSMALLOC_STAT */
617static void __init zs_stat_init(void)
618{
619}
620
621static void __exit zs_stat_exit(void)
622{
623}
624
625static inline void zs_pool_stat_create(struct zs_pool *pool, const char *name)
626{
627}
628
629static inline void zs_pool_stat_destroy(struct zs_pool *pool)
630{
631}
632#endif
633
634
635/*
636 * For each size class, zspages are divided into different groups
637 * depending on their usage ratio. This function returns fullness
638 * status of the given page.
639 */
640static int get_fullness_group(struct size_class *class, struct zspage *zspage)
641{
642	int inuse, objs_per_zspage, ratio;
643
644	inuse = get_zspage_inuse(zspage);
645	objs_per_zspage = class->objs_per_zspage;
646
647	if (inuse == 0)
648		return ZS_INUSE_RATIO_0;
649	if (inuse == objs_per_zspage)
650		return ZS_INUSE_RATIO_100;
651
652	ratio = 100 * inuse / objs_per_zspage;
653	/*
	 * Take integer division into consideration: a page with one inuse
	 * object out of 127 possible will end up having a 0 usage ratio,
	 * which is wrong as it belongs in the ZS_INUSE_RATIO_10 fullness group.
657	 */
658	return ratio / 10 + 1;
659}
660
661/*
662 * Each size class maintains various freelists and zspages are assigned
663 * to one of these freelists based on the number of live objects they
 * have. This function inserts the given zspage into the freelist
665 * identified by <class, fullness_group>.
666 */
667static void insert_zspage(struct size_class *class,
668				struct zspage *zspage,
669				int fullness)
670{
671	class_stat_inc(class, fullness, 1);
672	list_add(&zspage->list, &class->fullness_list[fullness]);
673	zspage->fullness = fullness;
674}
675
676/*
677 * This function removes the given zspage from the freelist identified
678 * by <class, fullness_group>.
679 */
680static void remove_zspage(struct size_class *class, struct zspage *zspage)
681{
682	int fullness = zspage->fullness;
683
684	VM_BUG_ON(list_empty(&class->fullness_list[fullness]));
685
686	list_del_init(&zspage->list);
687	class_stat_dec(class, fullness, 1);
688}
689
690/*
691 * Each size class maintains zspages in different fullness groups depending
692 * on the number of live objects they contain. When allocating or freeing
693 * objects, the fullness status of the page can change, for instance, from
694 * INUSE_RATIO_80 to INUSE_RATIO_70 when freeing an object. This function
695 * checks if such a status change has occurred for the given page and
696 * accordingly moves the page from the list of the old fullness group to that
697 * of the new fullness group.
698 */
699static int fix_fullness_group(struct size_class *class, struct zspage *zspage)
700{
701	int newfg;
702
703	newfg = get_fullness_group(class, zspage);
704	if (newfg == zspage->fullness)
705		goto out;
706
707	remove_zspage(class, zspage);
708	insert_zspage(class, zspage, newfg);
709out:
710	return newfg;
711}
712
713static struct zspage *get_zspage(struct page *page)
714{
715	struct zspage *zspage = (struct zspage *)page_private(page);
716
717	BUG_ON(zspage->magic != ZSPAGE_MAGIC);
718	return zspage;
719}
720
721static struct page *get_next_page(struct page *page)
722{
723	struct zspage *zspage = get_zspage(page);
724
725	if (unlikely(ZsHugePage(zspage)))
726		return NULL;
727
728	return (struct page *)page->index;
729}
730
731/**
732 * obj_to_location - get (<page>, <obj_idx>) from encoded object value
733 * @obj: the encoded object value
 * @page: page that the object resides in
735 * @obj_idx: object index
736 */
737static void obj_to_location(unsigned long obj, struct page **page,
738				unsigned int *obj_idx)
739{
740	*page = pfn_to_page(obj >> OBJ_INDEX_BITS);
741	*obj_idx = (obj & OBJ_INDEX_MASK);
742}
743
744static void obj_to_page(unsigned long obj, struct page **page)
745{
746	*page = pfn_to_page(obj >> OBJ_INDEX_BITS);
747}
748
749/**
750 * location_to_obj - get obj value encoded from (<page>, <obj_idx>)
 * @page: page that the object resides in
752 * @obj_idx: object index
753 */
754static unsigned long location_to_obj(struct page *page, unsigned int obj_idx)
755{
756	unsigned long obj;
757
758	obj = page_to_pfn(page) << OBJ_INDEX_BITS;
759	obj |= obj_idx & OBJ_INDEX_MASK;
760
761	return obj;
762}
763
764static unsigned long handle_to_obj(unsigned long handle)
765{
766	return *(unsigned long *)handle;
767}
768
769static inline bool obj_allocated(struct page *page, void *obj,
770				 unsigned long *phandle)
771{
772	unsigned long handle;
773	struct zspage *zspage = get_zspage(page);
774
775	if (unlikely(ZsHugePage(zspage))) {
776		VM_BUG_ON_PAGE(!is_first_page(page), page);
777		handle = page->index;
778	} else
779		handle = *(unsigned long *)obj;
780
781	if (!(handle & OBJ_ALLOCATED_TAG))
782		return false;
783
784	/* Clear all tags before returning the handle */
785	*phandle = handle & ~OBJ_TAG_MASK;
786	return true;
787}
788
789static void reset_page(struct page *page)
790{
791	__ClearPageMovable(page);
792	ClearPagePrivate(page);
793	set_page_private(page, 0);
794	page_mapcount_reset(page);
795	page->index = 0;
796}
797
798static int trylock_zspage(struct zspage *zspage)
799{
800	struct page *cursor, *fail;
801
802	for (cursor = get_first_page(zspage); cursor != NULL; cursor =
803					get_next_page(cursor)) {
804		if (!trylock_page(cursor)) {
805			fail = cursor;
806			goto unlock;
807		}
808	}
809
810	return 1;
811unlock:
812	for (cursor = get_first_page(zspage); cursor != fail; cursor =
813					get_next_page(cursor))
814		unlock_page(cursor);
815
816	return 0;
817}
818
819static void __free_zspage(struct zs_pool *pool, struct size_class *class,
820				struct zspage *zspage)
821{
822	struct page *page, *next;
823
824	assert_spin_locked(&pool->lock);
825
826	VM_BUG_ON(get_zspage_inuse(zspage));
827	VM_BUG_ON(zspage->fullness != ZS_INUSE_RATIO_0);
828
829	next = page = get_first_page(zspage);
830	do {
831		VM_BUG_ON_PAGE(!PageLocked(page), page);
832		next = get_next_page(page);
833		reset_page(page);
834		unlock_page(page);
835		dec_zone_page_state(page, NR_ZSPAGES);
836		put_page(page);
837		page = next;
838	} while (page != NULL);
839
840	cache_free_zspage(pool, zspage);
841
842	class_stat_dec(class, ZS_OBJS_ALLOCATED, class->objs_per_zspage);
843	atomic_long_sub(class->pages_per_zspage, &pool->pages_allocated);
844}
845
846static void free_zspage(struct zs_pool *pool, struct size_class *class,
847				struct zspage *zspage)
848{
849	VM_BUG_ON(get_zspage_inuse(zspage));
850	VM_BUG_ON(list_empty(&zspage->list));
851
852	/*
	 * Since zs_free cannot sleep, this function cannot call
	 * lock_page. The page locks taken by trylock_zspage will be released
	 * by __free_zspage.
856	 */
857	if (!trylock_zspage(zspage)) {
858		kick_deferred_free(pool);
859		return;
860	}
861
862	remove_zspage(class, zspage);
863	__free_zspage(pool, class, zspage);
864}
865
866/* Initialize a newly allocated zspage */
867static void init_zspage(struct size_class *class, struct zspage *zspage)
868{
869	unsigned int freeobj = 1;
870	unsigned long off = 0;
871	struct page *page = get_first_page(zspage);
872
873	while (page) {
874		struct page *next_page;
875		struct link_free *link;
876		void *vaddr;
877
878		set_first_obj_offset(page, off);
879
880		vaddr = kmap_atomic(page);
881		link = (struct link_free *)vaddr + off / sizeof(*link);
882
883		while ((off += class->size) < PAGE_SIZE) {
884			link->next = freeobj++ << OBJ_TAG_BITS;
885			link += class->size / sizeof(*link);
886		}
887
888		/*
889		 * We now come to the last (full or partial) object on this
890		 * page, which must point to the first object on the next
891		 * page (if present)
892		 */
893		next_page = get_next_page(page);
894		if (next_page) {
895			link->next = freeobj++ << OBJ_TAG_BITS;
896		} else {
897			/*
			 * Reset OBJ_TAG_BITS in the last link to tell
			 * whether it's an allocated object or not.
900			 */
901			link->next = -1UL << OBJ_TAG_BITS;
902		}
903		kunmap_atomic(vaddr);
904		page = next_page;
905		off %= PAGE_SIZE;
906	}
907
908	set_freeobj(zspage, 0);
909}
910
911static void create_page_chain(struct size_class *class, struct zspage *zspage,
912				struct page *pages[])
913{
914	int i;
915	struct page *page;
916	struct page *prev_page = NULL;
917	int nr_pages = class->pages_per_zspage;
918
919	/*
920	 * Allocate individual pages and link them together as:
921	 * 1. all pages are linked together using page->index
	 * 2. each sub-page points to the zspage using page->private
923	 *
924	 * we set PG_private to identify the first page (i.e. no other sub-page
925	 * has this flag set).
926	 */
927	for (i = 0; i < nr_pages; i++) {
928		page = pages[i];
929		set_page_private(page, (unsigned long)zspage);
930		page->index = 0;
931		if (i == 0) {
932			zspage->first_page = page;
933			SetPagePrivate(page);
934			if (unlikely(class->objs_per_zspage == 1 &&
935					class->pages_per_zspage == 1))
936				SetZsHugePage(zspage);
937		} else {
938			prev_page->index = (unsigned long)page;
939		}
940		prev_page = page;
941	}
942}
943
944/*
945 * Allocate a zspage for the given size class
946 */
947static struct zspage *alloc_zspage(struct zs_pool *pool,
948					struct size_class *class,
949					gfp_t gfp)
950{
951	int i;
952	struct page *pages[ZS_MAX_PAGES_PER_ZSPAGE];
953	struct zspage *zspage = cache_alloc_zspage(pool, gfp);
954
955	if (!zspage)
956		return NULL;
957
958	zspage->magic = ZSPAGE_MAGIC;
959	migrate_lock_init(zspage);
960
961	for (i = 0; i < class->pages_per_zspage; i++) {
962		struct page *page;
963
964		page = alloc_page(gfp);
965		if (!page) {
966			while (--i >= 0) {
967				dec_zone_page_state(pages[i], NR_ZSPAGES);
968				__free_page(pages[i]);
969			}
970			cache_free_zspage(pool, zspage);
971			return NULL;
972		}
973
974		inc_zone_page_state(page, NR_ZSPAGES);
975		pages[i] = page;
976	}
977
978	create_page_chain(class, zspage, pages);
979	init_zspage(class, zspage);
980	zspage->pool = pool;
981	zspage->class = class->index;
982
983	return zspage;
984}
985
986static struct zspage *find_get_zspage(struct size_class *class)
987{
988	int i;
989	struct zspage *zspage;
990
991	for (i = ZS_INUSE_RATIO_99; i >= ZS_INUSE_RATIO_0; i--) {
992		zspage = list_first_entry_or_null(&class->fullness_list[i],
993						  struct zspage, list);
994		if (zspage)
995			break;
996	}
997
998	return zspage;
999}
1000
1001static inline int __zs_cpu_up(struct mapping_area *area)
1002{
1003	/*
1004	 * Make sure we don't leak memory if a cpu UP notification
1005	 * and zs_init() race and both call zs_cpu_up() on the same cpu
1006	 */
1007	if (area->vm_buf)
1008		return 0;
1009	area->vm_buf = kmalloc(ZS_MAX_ALLOC_SIZE, GFP_KERNEL);
1010	if (!area->vm_buf)
1011		return -ENOMEM;
1012	return 0;
1013}
1014
1015static inline void __zs_cpu_down(struct mapping_area *area)
1016{
1017	kfree(area->vm_buf);
1018	area->vm_buf = NULL;
1019}
1020
1021static void *__zs_map_object(struct mapping_area *area,
1022			struct page *pages[2], int off, int size)
1023{
1024	int sizes[2];
1025	void *addr;
1026	char *buf = area->vm_buf;
1027
1028	/* disable page faults to match kmap_atomic() return conditions */
1029	pagefault_disable();
1030
1031	/* no read fastpath */
1032	if (area->vm_mm == ZS_MM_WO)
1033		goto out;
1034
1035	sizes[0] = PAGE_SIZE - off;
1036	sizes[1] = size - sizes[0];
1037
1038	/* copy object to per-cpu buffer */
1039	addr = kmap_atomic(pages[0]);
1040	memcpy(buf, addr + off, sizes[0]);
1041	kunmap_atomic(addr);
1042	addr = kmap_atomic(pages[1]);
1043	memcpy(buf + sizes[0], addr, sizes[1]);
1044	kunmap_atomic(addr);
1045out:
1046	return area->vm_buf;
1047}
1048
1049static void __zs_unmap_object(struct mapping_area *area,
1050			struct page *pages[2], int off, int size)
1051{
1052	int sizes[2];
1053	void *addr;
1054	char *buf;
1055
1056	/* no write fastpath */
1057	if (area->vm_mm == ZS_MM_RO)
1058		goto out;
1059
1060	buf = area->vm_buf;
1061	buf = buf + ZS_HANDLE_SIZE;
1062	size -= ZS_HANDLE_SIZE;
1063	off += ZS_HANDLE_SIZE;
1064
1065	sizes[0] = PAGE_SIZE - off;
1066	sizes[1] = size - sizes[0];
1067
1068	/* copy per-cpu buffer to object */
1069	addr = kmap_atomic(pages[0]);
1070	memcpy(addr + off, buf, sizes[0]);
1071	kunmap_atomic(addr);
1072	addr = kmap_atomic(pages[1]);
1073	memcpy(addr, buf + sizes[0], sizes[1]);
1074	kunmap_atomic(addr);
1075
1076out:
1077	/* enable page faults to match kunmap_atomic() return conditions */
1078	pagefault_enable();
1079}
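/*
 * Worked example (illustrative only): with 4K pages, an object of
 * class->size 720 that starts at offset 3800 in its page spans into the
 * next page, so __zs_map_object() copies sizes[0] = 4096 - 3800 = 296
 * bytes from the first page and sizes[1] = 720 - 296 = 424 bytes from the
 * second into the per-cpu vm_buf. For ZS_MM_WO mappings the read copy is
 * skipped, and __zs_unmap_object() writes the buffer back while skipping
 * the leading ZS_HANDLE_SIZE bytes that hold the handle.
 */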
1080
1081static int zs_cpu_prepare(unsigned int cpu)
1082{
1083	struct mapping_area *area;
1084
1085	area = &per_cpu(zs_map_area, cpu);
1086	return __zs_cpu_up(area);
1087}
1088
1089static int zs_cpu_dead(unsigned int cpu)
1090{
1091	struct mapping_area *area;
1092
1093	area = &per_cpu(zs_map_area, cpu);
1094	__zs_cpu_down(area);
1095	return 0;
1096}
1097
1098static bool can_merge(struct size_class *prev, int pages_per_zspage,
1099					int objs_per_zspage)
1100{
1101	if (prev->pages_per_zspage == pages_per_zspage &&
1102		prev->objs_per_zspage == objs_per_zspage)
1103		return true;
1104
1105	return false;
1106}
1107
1108static bool zspage_full(struct size_class *class, struct zspage *zspage)
1109{
1110	return get_zspage_inuse(zspage) == class->objs_per_zspage;
1111}
1112
1113static bool zspage_empty(struct zspage *zspage)
1114{
1115	return get_zspage_inuse(zspage) == 0;
1116}
1117
1118/**
1119 * zs_lookup_class_index() - Returns index of the zsmalloc &size_class
 * that holds objects of the provided size.
1121 * @pool: zsmalloc pool to use
1122 * @size: object size
1123 *
1124 * Context: Any context.
1125 *
 * Return: the index of the zsmalloc &size_class that holds objects of the
1127 * provided size.
1128 */
1129unsigned int zs_lookup_class_index(struct zs_pool *pool, unsigned int size)
1130{
1131	struct size_class *class;
1132
1133	class = pool->size_class[get_size_class_index(size)];
1134
1135	return class->index;
1136}
1137EXPORT_SYMBOL_GPL(zs_lookup_class_index);
1138
1139unsigned long zs_get_total_pages(struct zs_pool *pool)
1140{
1141	return atomic_long_read(&pool->pages_allocated);
1142}
1143EXPORT_SYMBOL_GPL(zs_get_total_pages);
1144
1145/**
1146 * zs_map_object - get address of allocated object from handle.
1147 * @pool: pool from which the object was allocated
1148 * @handle: handle returned from zs_malloc
1149 * @mm: mapping mode to use
1150 *
1151 * Before using an object allocated from zs_malloc, it must be mapped using
1152 * this function. When done with the object, it must be unmapped using
1153 * zs_unmap_object.
1154 *
1155 * Only one object can be mapped per cpu at a time. There is no protection
1156 * against nested mappings.
1157 *
1158 * This function returns with preemption and page faults disabled.
1159 */
1160void *zs_map_object(struct zs_pool *pool, unsigned long handle,
1161			enum zs_mapmode mm)
1162{
1163	struct zspage *zspage;
1164	struct page *page;
1165	unsigned long obj, off;
1166	unsigned int obj_idx;
1167
1168	struct size_class *class;
1169	struct mapping_area *area;
1170	struct page *pages[2];
1171	void *ret;
1172
1173	/*
1174	 * Because we use per-cpu mapping areas shared among the
1175	 * pools/users, we can't allow mapping in interrupt context
	 * because it can corrupt another user's mappings.
1177	 */
1178	BUG_ON(in_interrupt());
1179
	/* This guarantees that we can get the zspage from the handle safely */
1181	spin_lock(&pool->lock);
1182	obj = handle_to_obj(handle);
1183	obj_to_location(obj, &page, &obj_idx);
1184	zspage = get_zspage(page);
1185
1186	/*
	 * migration cannot move any zpages in this zspage. Here, pool->lock
	 * is too heavy since callers would take some time until they call
	 * the zs_unmap_object API, so delegate the locking from the pool to
	 * the zspage, which is a smaller granularity.
1191	 */
1192	migrate_read_lock(zspage);
1193	spin_unlock(&pool->lock);
1194
1195	class = zspage_class(pool, zspage);
1196	off = offset_in_page(class->size * obj_idx);
1197
1198	local_lock(&zs_map_area.lock);
1199	area = this_cpu_ptr(&zs_map_area);
1200	area->vm_mm = mm;
1201	if (off + class->size <= PAGE_SIZE) {
1202		/* this object is contained entirely within a page */
1203		area->vm_addr = kmap_atomic(page);
1204		ret = area->vm_addr + off;
1205		goto out;
1206	}
1207
1208	/* this object spans two pages */
1209	pages[0] = page;
1210	pages[1] = get_next_page(page);
1211	BUG_ON(!pages[1]);
1212
1213	ret = __zs_map_object(area, pages, off, class->size);
1214out:
1215	if (likely(!ZsHugePage(zspage)))
1216		ret += ZS_HANDLE_SIZE;
1217
1218	return ret;
1219}
1220EXPORT_SYMBOL_GPL(zs_map_object);
1221
1222void zs_unmap_object(struct zs_pool *pool, unsigned long handle)
1223{
1224	struct zspage *zspage;
1225	struct page *page;
1226	unsigned long obj, off;
1227	unsigned int obj_idx;
1228
1229	struct size_class *class;
1230	struct mapping_area *area;
1231
1232	obj = handle_to_obj(handle);
1233	obj_to_location(obj, &page, &obj_idx);
1234	zspage = get_zspage(page);
1235	class = zspage_class(pool, zspage);
1236	off = offset_in_page(class->size * obj_idx);
1237
1238	area = this_cpu_ptr(&zs_map_area);
1239	if (off + class->size <= PAGE_SIZE)
1240		kunmap_atomic(area->vm_addr);
1241	else {
1242		struct page *pages[2];
1243
1244		pages[0] = page;
1245		pages[1] = get_next_page(page);
1246		BUG_ON(!pages[1]);
1247
1248		__zs_unmap_object(area, pages, off, class->size);
1249	}
1250	local_unlock(&zs_map_area.lock);
1251
1252	migrate_read_unlock(zspage);
1253}
1254EXPORT_SYMBOL_GPL(zs_unmap_object);
1255
1256/**
1257 * zs_huge_class_size() - Returns the size (in bytes) of the first huge
1258 *                        zsmalloc &size_class.
1259 * @pool: zsmalloc pool to use
1260 *
1261 * The function returns the size of the first huge class - any object of equal
1262 * or bigger size will be stored in zspage consisting of a single physical
1263 * page.
1264 *
1265 * Context: Any context.
1266 *
1267 * Return: the size (in bytes) of the first huge zsmalloc &size_class.
1268 */
1269size_t zs_huge_class_size(struct zs_pool *pool)
1270{
1271	return huge_class_size;
1272}
1273EXPORT_SYMBOL_GPL(zs_huge_class_size);
1274
1275static unsigned long obj_malloc(struct zs_pool *pool,
1276				struct zspage *zspage, unsigned long handle)
1277{
1278	int i, nr_page, offset;
1279	unsigned long obj;
1280	struct link_free *link;
1281	struct size_class *class;
1282
1283	struct page *m_page;
1284	unsigned long m_offset;
1285	void *vaddr;
1286
1287	class = pool->size_class[zspage->class];
1288	handle |= OBJ_ALLOCATED_TAG;
1289	obj = get_freeobj(zspage);
1290
1291	offset = obj * class->size;
1292	nr_page = offset >> PAGE_SHIFT;
1293	m_offset = offset_in_page(offset);
1294	m_page = get_first_page(zspage);
1295
1296	for (i = 0; i < nr_page; i++)
1297		m_page = get_next_page(m_page);
1298
1299	vaddr = kmap_atomic(m_page);
1300	link = (struct link_free *)vaddr + m_offset / sizeof(*link);
1301	set_freeobj(zspage, link->next >> OBJ_TAG_BITS);
1302	if (likely(!ZsHugePage(zspage)))
1303		/* record handle in the header of allocated chunk */
1304		link->handle = handle;
1305	else
1306		/* record handle to page->index */
1307		zspage->first_page->index = handle;
1308
1309	kunmap_atomic(vaddr);
1310	mod_zspage_inuse(zspage, 1);
1311
1312	obj = location_to_obj(m_page, obj);
1313
1314	return obj;
1315}
1316
1317
1318/**
1319 * zs_malloc - Allocate block of given size from pool.
1320 * @pool: pool to allocate from
1321 * @size: size of block to allocate
1322 * @gfp: gfp flags when allocating object
1323 *
 * On success, a handle to the allocated object is returned,
 * otherwise an ERR_PTR() value cast to unsigned long.
1326 * Allocation requests with size > ZS_MAX_ALLOC_SIZE will fail.
1327 */
1328unsigned long zs_malloc(struct zs_pool *pool, size_t size, gfp_t gfp)
1329{
1330	unsigned long handle, obj;
1331	struct size_class *class;
1332	int newfg;
1333	struct zspage *zspage;
1334
1335	if (unlikely(!size))
1336		return (unsigned long)ERR_PTR(-EINVAL);
1337
1338	if (unlikely(size > ZS_MAX_ALLOC_SIZE))
1339		return (unsigned long)ERR_PTR(-ENOSPC);
1340
1341	handle = cache_alloc_handle(pool, gfp);
1342	if (!handle)
1343		return (unsigned long)ERR_PTR(-ENOMEM);
1344
1345	/* extra space in chunk to keep the handle */
1346	size += ZS_HANDLE_SIZE;
1347	class = pool->size_class[get_size_class_index(size)];
1348
1349	/* pool->lock effectively protects the zpage migration */
1350	spin_lock(&pool->lock);
1351	zspage = find_get_zspage(class);
1352	if (likely(zspage)) {
1353		obj = obj_malloc(pool, zspage, handle);
1354		/* Now move the zspage to another fullness group, if required */
1355		fix_fullness_group(class, zspage);
1356		record_obj(handle, obj);
1357		class_stat_inc(class, ZS_OBJS_INUSE, 1);
1358
1359		goto out;
1360	}
1361
1362	spin_unlock(&pool->lock);
1363
1364	zspage = alloc_zspage(pool, class, gfp);
1365	if (!zspage) {
1366		cache_free_handle(pool, handle);
1367		return (unsigned long)ERR_PTR(-ENOMEM);
1368	}
1369
1370	spin_lock(&pool->lock);
1371	obj = obj_malloc(pool, zspage, handle);
1372	newfg = get_fullness_group(class, zspage);
1373	insert_zspage(class, zspage, newfg);
1374	record_obj(handle, obj);
1375	atomic_long_add(class->pages_per_zspage, &pool->pages_allocated);
1376	class_stat_inc(class, ZS_OBJS_ALLOCATED, class->objs_per_zspage);
1377	class_stat_inc(class, ZS_OBJS_INUSE, 1);
1378
	/* We have completely set up the zspage, so mark it movable */
1380	SetZsPageMovable(pool, zspage);
1381out:
1382	spin_unlock(&pool->lock);
1383
1384	return handle;
1385}
1386EXPORT_SYMBOL_GPL(zs_malloc);
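/*
 * Illustrative usage sketch (not part of zsmalloc itself): a minimal
 * zram/zswap-style caller, assuming "src" and "len" are provided by the
 * caller, len <= ZS_MAX_ALLOC_SIZE, and with error handling trimmed:
 *
 *	struct zs_pool *pool = zs_create_pool("example");
 *	unsigned long handle = zs_malloc(pool, len, GFP_KERNEL);
 *	void *dst;
 *
 *	if (!IS_ERR_VALUE(handle)) {
 *		dst = zs_map_object(pool, handle, ZS_MM_WO);
 *		memcpy(dst, src, len);
 *		zs_unmap_object(pool, handle);
 *		...
 *		zs_free(pool, handle);
 *	}
 *	zs_destroy_pool(pool);
 */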
1387
1388static void obj_free(int class_size, unsigned long obj)
1389{
1390	struct link_free *link;
1391	struct zspage *zspage;
1392	struct page *f_page;
1393	unsigned long f_offset;
1394	unsigned int f_objidx;
1395	void *vaddr;
1396
1397	obj_to_location(obj, &f_page, &f_objidx);
1398	f_offset = offset_in_page(class_size * f_objidx);
1399	zspage = get_zspage(f_page);
1400
1401	vaddr = kmap_atomic(f_page);
1402	link = (struct link_free *)(vaddr + f_offset);
1403
1404	/* Insert this object in containing zspage's freelist */
1405	if (likely(!ZsHugePage(zspage)))
1406		link->next = get_freeobj(zspage) << OBJ_TAG_BITS;
1407	else
1408		f_page->index = 0;
1409	set_freeobj(zspage, f_objidx);
1410
1411	kunmap_atomic(vaddr);
1412	mod_zspage_inuse(zspage, -1);
1413}
1414
1415void zs_free(struct zs_pool *pool, unsigned long handle)
1416{
1417	struct zspage *zspage;
1418	struct page *f_page;
1419	unsigned long obj;
1420	struct size_class *class;
1421	int fullness;
1422
1423	if (IS_ERR_OR_NULL((void *)handle))
1424		return;
1425
1426	/*
1427	 * The pool->lock protects the race with zpage's migration
1428	 * so it's safe to get the page from handle.
1429	 */
1430	spin_lock(&pool->lock);
1431	obj = handle_to_obj(handle);
1432	obj_to_page(obj, &f_page);
1433	zspage = get_zspage(f_page);
1434	class = zspage_class(pool, zspage);
1435
1436	class_stat_dec(class, ZS_OBJS_INUSE, 1);
1437	obj_free(class->size, obj);
1438
1439	fullness = fix_fullness_group(class, zspage);
1440	if (fullness == ZS_INUSE_RATIO_0)
1441		free_zspage(pool, class, zspage);
1442
1443	spin_unlock(&pool->lock);
1444	cache_free_handle(pool, handle);
1445}
1446EXPORT_SYMBOL_GPL(zs_free);
1447
1448static void zs_object_copy(struct size_class *class, unsigned long dst,
1449				unsigned long src)
1450{
1451	struct page *s_page, *d_page;
1452	unsigned int s_objidx, d_objidx;
1453	unsigned long s_off, d_off;
1454	void *s_addr, *d_addr;
1455	int s_size, d_size, size;
1456	int written = 0;
1457
1458	s_size = d_size = class->size;
1459
1460	obj_to_location(src, &s_page, &s_objidx);
1461	obj_to_location(dst, &d_page, &d_objidx);
1462
1463	s_off = offset_in_page(class->size * s_objidx);
1464	d_off = offset_in_page(class->size * d_objidx);
1465
1466	if (s_off + class->size > PAGE_SIZE)
1467		s_size = PAGE_SIZE - s_off;
1468
1469	if (d_off + class->size > PAGE_SIZE)
1470		d_size = PAGE_SIZE - d_off;
1471
1472	s_addr = kmap_atomic(s_page);
1473	d_addr = kmap_atomic(d_page);
1474
1475	while (1) {
1476		size = min(s_size, d_size);
1477		memcpy(d_addr + d_off, s_addr + s_off, size);
1478		written += size;
1479
1480		if (written == class->size)
1481			break;
1482
1483		s_off += size;
1484		s_size -= size;
1485		d_off += size;
1486		d_size -= size;
1487
1488		/*
1489		 * Calling kunmap_atomic(d_addr) is necessary. kunmap_atomic()
		 * calls must occur in reverse order of calls to kmap_atomic().
1491		 * So, to call kunmap_atomic(s_addr) we should first call
1492		 * kunmap_atomic(d_addr). For more details see
1493		 * Documentation/mm/highmem.rst.
1494		 */
1495		if (s_off >= PAGE_SIZE) {
1496			kunmap_atomic(d_addr);
1497			kunmap_atomic(s_addr);
1498			s_page = get_next_page(s_page);
1499			s_addr = kmap_atomic(s_page);
1500			d_addr = kmap_atomic(d_page);
1501			s_size = class->size - written;
1502			s_off = 0;
1503		}
1504
1505		if (d_off >= PAGE_SIZE) {
1506			kunmap_atomic(d_addr);
1507			d_page = get_next_page(d_page);
1508			d_addr = kmap_atomic(d_page);
1509			d_size = class->size - written;
1510			d_off = 0;
1511		}
1512	}
1513
1514	kunmap_atomic(d_addr);
1515	kunmap_atomic(s_addr);
1516}
1517
1518/*
 * Find an allocated object in the zspage, starting from the given
 * object index, and return its handle.
1521 */
1522static unsigned long find_alloced_obj(struct size_class *class,
1523				      struct page *page, int *obj_idx)
1524{
1525	unsigned int offset;
1526	int index = *obj_idx;
1527	unsigned long handle = 0;
1528	void *addr = kmap_atomic(page);
1529
1530	offset = get_first_obj_offset(page);
1531	offset += class->size * index;
1532
1533	while (offset < PAGE_SIZE) {
1534		if (obj_allocated(page, addr + offset, &handle))
1535			break;
1536
1537		offset += class->size;
1538		index++;
1539	}
1540
1541	kunmap_atomic(addr);
1542
1543	*obj_idx = index;
1544
1545	return handle;
1546}
1547
1548static void migrate_zspage(struct zs_pool *pool, struct zspage *src_zspage,
1549			   struct zspage *dst_zspage)
1550{
1551	unsigned long used_obj, free_obj;
1552	unsigned long handle;
1553	int obj_idx = 0;
1554	struct page *s_page = get_first_page(src_zspage);
1555	struct size_class *class = pool->size_class[src_zspage->class];
1556
1557	while (1) {
1558		handle = find_alloced_obj(class, s_page, &obj_idx);
1559		if (!handle) {
1560			s_page = get_next_page(s_page);
1561			if (!s_page)
1562				break;
1563			obj_idx = 0;
1564			continue;
1565		}
1566
1567		used_obj = handle_to_obj(handle);
1568		free_obj = obj_malloc(pool, dst_zspage, handle);
1569		zs_object_copy(class, free_obj, used_obj);
1570		obj_idx++;
1571		record_obj(handle, free_obj);
1572		obj_free(class->size, used_obj);
1573
1574		/* Stop if there is no more space */
1575		if (zspage_full(class, dst_zspage))
1576			break;
1577
1578		/* Stop if there are no more objects to migrate */
1579		if (zspage_empty(src_zspage))
1580			break;
1581	}
1582}
1583
1584static struct zspage *isolate_src_zspage(struct size_class *class)
1585{
1586	struct zspage *zspage;
1587	int fg;
1588
1589	for (fg = ZS_INUSE_RATIO_10; fg <= ZS_INUSE_RATIO_99; fg++) {
1590		zspage = list_first_entry_or_null(&class->fullness_list[fg],
1591						  struct zspage, list);
1592		if (zspage) {
1593			remove_zspage(class, zspage);
1594			return zspage;
1595		}
1596	}
1597
1598	return zspage;
1599}
1600
1601static struct zspage *isolate_dst_zspage(struct size_class *class)
1602{
1603	struct zspage *zspage;
1604	int fg;
1605
1606	for (fg = ZS_INUSE_RATIO_99; fg >= ZS_INUSE_RATIO_10; fg--) {
1607		zspage = list_first_entry_or_null(&class->fullness_list[fg],
1608						  struct zspage, list);
1609		if (zspage) {
1610			remove_zspage(class, zspage);
1611			return zspage;
1612		}
1613	}
1614
1615	return zspage;
1616}
1617
1618/*
 * putback_zspage - add @zspage into the right class's fullness list
 * @class: destination class
 * @zspage: target page
 *
 * Return: @zspage's fullness status
1624 */
1625static int putback_zspage(struct size_class *class, struct zspage *zspage)
1626{
1627	int fullness;
1628
1629	fullness = get_fullness_group(class, zspage);
1630	insert_zspage(class, zspage, fullness);
1631
1632	return fullness;
1633}
1634
1635#ifdef CONFIG_COMPACTION
1636/*
 * To prevent zspage destruction during migration, zspage freeing should
 * hold the locks of all pages in the zspage.
1639 */
1640static void lock_zspage(struct zspage *zspage)
1641{
1642	struct page *curr_page, *page;
1643
1644	/*
1645	 * Pages we haven't locked yet can be migrated off the list while we're
1646	 * trying to lock them, so we need to be careful and only attempt to
1647	 * lock each page under migrate_read_lock(). Otherwise, the page we lock
1648	 * may no longer belong to the zspage. This means that we may wait for
1649	 * the wrong page to unlock, so we must take a reference to the page
1650	 * prior to waiting for it to unlock outside migrate_read_lock().
1651	 */
1652	while (1) {
1653		migrate_read_lock(zspage);
1654		page = get_first_page(zspage);
1655		if (trylock_page(page))
1656			break;
1657		get_page(page);
1658		migrate_read_unlock(zspage);
1659		wait_on_page_locked(page);
1660		put_page(page);
1661	}
1662
1663	curr_page = page;
1664	while ((page = get_next_page(curr_page))) {
1665		if (trylock_page(page)) {
1666			curr_page = page;
1667		} else {
1668			get_page(page);
1669			migrate_read_unlock(zspage);
1670			wait_on_page_locked(page);
1671			put_page(page);
1672			migrate_read_lock(zspage);
1673		}
1674	}
1675	migrate_read_unlock(zspage);
1676}
1677#endif /* CONFIG_COMPACTION */
1678
1679static void migrate_lock_init(struct zspage *zspage)
1680{
1681	rwlock_init(&zspage->lock);
1682}
1683
1684static void migrate_read_lock(struct zspage *zspage) __acquires(&zspage->lock)
1685{
1686	read_lock(&zspage->lock);
1687}
1688
1689static void migrate_read_unlock(struct zspage *zspage) __releases(&zspage->lock)
1690{
1691	read_unlock(&zspage->lock);
1692}
1693
1694static void migrate_write_lock(struct zspage *zspage)
1695{
1696	write_lock(&zspage->lock);
1697}
1698
1699static void migrate_write_unlock(struct zspage *zspage)
1700{
1701	write_unlock(&zspage->lock);
1702}
1703
1704#ifdef CONFIG_COMPACTION
1705
1706static const struct movable_operations zsmalloc_mops;
1707
1708static void replace_sub_page(struct size_class *class, struct zspage *zspage,
1709				struct page *newpage, struct page *oldpage)
1710{
1711	struct page *page;
1712	struct page *pages[ZS_MAX_PAGES_PER_ZSPAGE] = {NULL, };
1713	int idx = 0;
1714
1715	page = get_first_page(zspage);
1716	do {
1717		if (page == oldpage)
1718			pages[idx] = newpage;
1719		else
1720			pages[idx] = page;
1721		idx++;
1722	} while ((page = get_next_page(page)) != NULL);
1723
1724	create_page_chain(class, zspage, pages);
1725	set_first_obj_offset(newpage, get_first_obj_offset(oldpage));
1726	if (unlikely(ZsHugePage(zspage)))
1727		newpage->index = oldpage->index;
1728	__SetPageMovable(newpage, &zsmalloc_mops);
1729}
1730
1731static bool zs_page_isolate(struct page *page, isolate_mode_t mode)
1732{
1733	/*
	 * The page is locked, so the zspage cannot be destroyed. For details,
	 * look at lock_zspage in free_zspage.
1736	 */
1737	VM_BUG_ON_PAGE(PageIsolated(page), page);
1738
1739	return true;
1740}
1741
1742static int zs_page_migrate(struct page *newpage, struct page *page,
1743		enum migrate_mode mode)
1744{
1745	struct zs_pool *pool;
1746	struct size_class *class;
1747	struct zspage *zspage;
1748	struct page *dummy;
1749	void *s_addr, *d_addr, *addr;
1750	unsigned int offset;
1751	unsigned long handle;
1752	unsigned long old_obj, new_obj;
1753	unsigned int obj_idx;
1754
1755	/*
1756	 * We cannot support the _NO_COPY case here, because copy needs to
1757	 * happen under the zs lock, which does not work with
1758	 * MIGRATE_SYNC_NO_COPY workflow.
1759	 */
1760	if (mode == MIGRATE_SYNC_NO_COPY)
1761		return -EINVAL;
1762
1763	VM_BUG_ON_PAGE(!PageIsolated(page), page);
1764
1765	/* The page is locked, so this pointer must remain valid */
1766	zspage = get_zspage(page);
1767	pool = zspage->pool;
1768
1769	/*
1770	 * The pool's lock protects the race between zpage migration
1771	 * and zs_free.
1772	 */
1773	spin_lock(&pool->lock);
1774	class = zspage_class(pool, zspage);
1775
1776	/* the migrate_write_lock protects zpage access via zs_map_object */
1777	migrate_write_lock(zspage);
1778
1779	offset = get_first_obj_offset(page);
1780	s_addr = kmap_atomic(page);
1781
1782	/*
	 * Here, no user can access any object in the zspage, so we can move them.
1784	 */
1785	d_addr = kmap_atomic(newpage);
1786	copy_page(d_addr, s_addr);
1787	kunmap_atomic(d_addr);
1788
1789	for (addr = s_addr + offset; addr < s_addr + PAGE_SIZE;
1790					addr += class->size) {
1791		if (obj_allocated(page, addr, &handle)) {
1792
1793			old_obj = handle_to_obj(handle);
1794			obj_to_location(old_obj, &dummy, &obj_idx);
1795			new_obj = (unsigned long)location_to_obj(newpage,
1796								obj_idx);
1797			record_obj(handle, new_obj);
1798		}
1799	}
1800	kunmap_atomic(s_addr);
1801
1802	replace_sub_page(class, zspage, newpage, page);
1803	/*
	 * Since the data copy is complete and the new zspage structure is set up,
1805	 * it's okay to release the pool's lock.
1806	 */
1807	spin_unlock(&pool->lock);
1808	migrate_write_unlock(zspage);
1809
1810	get_page(newpage);
1811	if (page_zone(newpage) != page_zone(page)) {
1812		dec_zone_page_state(page, NR_ZSPAGES);
1813		inc_zone_page_state(newpage, NR_ZSPAGES);
1814	}
1815
1816	reset_page(page);
1817	put_page(page);
1818
1819	return MIGRATEPAGE_SUCCESS;
1820}
1821
1822static void zs_page_putback(struct page *page)
1823{
1824	VM_BUG_ON_PAGE(!PageIsolated(page), page);
1825}
1826
1827static const struct movable_operations zsmalloc_mops = {
1828	.isolate_page = zs_page_isolate,
1829	.migrate_page = zs_page_migrate,
1830	.putback_page = zs_page_putback,
1831};
1832
1833/*
 * Caller should hold the page lock of all pages in the zspage.
 * In here, we cannot use zspage meta data.
1836 */
1837static void async_free_zspage(struct work_struct *work)
1838{
1839	int i;
1840	struct size_class *class;
1841	struct zspage *zspage, *tmp;
1842	LIST_HEAD(free_pages);
1843	struct zs_pool *pool = container_of(work, struct zs_pool,
1844					free_work);
1845
1846	for (i = 0; i < ZS_SIZE_CLASSES; i++) {
1847		class = pool->size_class[i];
1848		if (class->index != i)
1849			continue;
1850
1851		spin_lock(&pool->lock);
1852		list_splice_init(&class->fullness_list[ZS_INUSE_RATIO_0],
1853				 &free_pages);
1854		spin_unlock(&pool->lock);
1855	}
1856
1857	list_for_each_entry_safe(zspage, tmp, &free_pages, list) {
1858		list_del(&zspage->list);
1859		lock_zspage(zspage);
1860
1861		spin_lock(&pool->lock);
1862		class = zspage_class(pool, zspage);
1863		__free_zspage(pool, class, zspage);
1864		spin_unlock(&pool->lock);
1865	}
}
1867
1868static void kick_deferred_free(struct zs_pool *pool)
1869{
1870	schedule_work(&pool->free_work);
1871}
1872
1873static void zs_flush_migration(struct zs_pool *pool)
1874{
1875	flush_work(&pool->free_work);
1876}
1877
1878static void init_deferred_free(struct zs_pool *pool)
1879{
1880	INIT_WORK(&pool->free_work, async_free_zspage);
1881}
1882
1883static void SetZsPageMovable(struct zs_pool *pool, struct zspage *zspage)
1884{
1885	struct page *page = get_first_page(zspage);
1886
1887	do {
1888		WARN_ON(!trylock_page(page));
1889		__SetPageMovable(page, &zsmalloc_mops);
1890		unlock_page(page);
1891	} while ((page = get_next_page(page)) != NULL);
1892}
1893#else
1894static inline void zs_flush_migration(struct zs_pool *pool) { }
1895#endif
1896
/*
 * Based on the number of unused allocated objects, calculate
 * and return the number of pages that we can free.
 */
1902static unsigned long zs_can_compact(struct size_class *class)
1903{
1904	unsigned long obj_wasted;
1905	unsigned long obj_allocated = zs_stat_get(class, ZS_OBJS_ALLOCATED);
1906	unsigned long obj_used = zs_stat_get(class, ZS_OBJS_INUSE);
1907
1908	if (obj_allocated <= obj_used)
1909		return 0;
1910
1911	obj_wasted = obj_allocated - obj_used;
1912	obj_wasted /= class->objs_per_zspage;
1913
1914	return obj_wasted * class->pages_per_zspage;
1915}
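/*
 * Worked example (illustrative only): in a class with objs_per_zspage == 32
 * and pages_per_zspage == 2, if 320 objects are allocated but only 100 are
 * still in use, then obj_wasted = 220 / 32 = 6, so compaction could free up
 * to 6 * 2 = 12 pages.
 */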
1916
1917static unsigned long __zs_compact(struct zs_pool *pool,
1918				  struct size_class *class)
1919{
1920	struct zspage *src_zspage = NULL;
1921	struct zspage *dst_zspage = NULL;
1922	unsigned long pages_freed = 0;
1923
1924	/*
1925	 * protect the race between zpage migration and zs_free
1926	 * as well as zpage allocation/free
1927	 */
1928	spin_lock(&pool->lock);
1929	while (zs_can_compact(class)) {
1930		int fg;
1931
1932		if (!dst_zspage) {
1933			dst_zspage = isolate_dst_zspage(class);
1934			if (!dst_zspage)
1935				break;
1936		}
1937
1938		src_zspage = isolate_src_zspage(class);
1939		if (!src_zspage)
1940			break;
1941
1942		migrate_write_lock(src_zspage);
1943		migrate_zspage(pool, src_zspage, dst_zspage);
1944		migrate_write_unlock(src_zspage);
1945
1946		fg = putback_zspage(class, src_zspage);
1947		if (fg == ZS_INUSE_RATIO_0) {
1948			free_zspage(pool, class, src_zspage);
1949			pages_freed += class->pages_per_zspage;
1950		}
1951		src_zspage = NULL;
1952
1953		if (get_fullness_group(class, dst_zspage) == ZS_INUSE_RATIO_100
1954		    || spin_is_contended(&pool->lock)) {
1955			putback_zspage(class, dst_zspage);
1956			dst_zspage = NULL;
1957
1958			spin_unlock(&pool->lock);
1959			cond_resched();
1960			spin_lock(&pool->lock);
1961		}
1962	}
1963
1964	if (src_zspage)
1965		putback_zspage(class, src_zspage);
1966
1967	if (dst_zspage)
1968		putback_zspage(class, dst_zspage);
1969
1970	spin_unlock(&pool->lock);
1971
1972	return pages_freed;
1973}
1974
1975unsigned long zs_compact(struct zs_pool *pool)
1976{
1977	int i;
1978	struct size_class *class;
1979	unsigned long pages_freed = 0;
1980
1981	/*
1982	 * Pool compaction is performed under pool->lock so it is basically
1983	 * single-threaded. Having more than one thread in __zs_compact()
1984	 * will increase pool->lock contention, which will impact other
1985	 * zsmalloc operations that need pool->lock.
1986	 */
1987	if (atomic_xchg(&pool->compaction_in_progress, 1))
1988		return 0;
1989
1990	for (i = ZS_SIZE_CLASSES - 1; i >= 0; i--) {
1991		class = pool->size_class[i];
1992		if (class->index != i)
1993			continue;
1994		pages_freed += __zs_compact(pool, class);
1995	}
1996	atomic_long_add(pages_freed, &pool->stats.pages_compacted);
1997	atomic_set(&pool->compaction_in_progress, 0);
1998
1999	return pages_freed;
2000}
2001EXPORT_SYMBOL_GPL(zs_compact);
2002
2003void zs_pool_stats(struct zs_pool *pool, struct zs_pool_stats *stats)
2004{
2005	memcpy(stats, &pool->stats, sizeof(struct zs_pool_stats));
2006}
2007EXPORT_SYMBOL_GPL(zs_pool_stats);
2008
2009static unsigned long zs_shrinker_scan(struct shrinker *shrinker,
2010		struct shrink_control *sc)
2011{
2012	unsigned long pages_freed;
2013	struct zs_pool *pool = shrinker->private_data;
2014
2015	/*
2016	 * Compact classes and calculate compaction delta.
2017	 * Can run concurrently with a manually triggered
2018	 * (by user) compaction.
2019	 */
2020	pages_freed = zs_compact(pool);
2021
2022	return pages_freed ? pages_freed : SHRINK_STOP;
2023}
2024
2025static unsigned long zs_shrinker_count(struct shrinker *shrinker,
2026		struct shrink_control *sc)
2027{
2028	int i;
2029	struct size_class *class;
2030	unsigned long pages_to_free = 0;
2031	struct zs_pool *pool = shrinker->private_data;
2032
2033	for (i = ZS_SIZE_CLASSES - 1; i >= 0; i--) {
2034		class = pool->size_class[i];
2035		if (class->index != i)
2036			continue;
2037
2038		pages_to_free += zs_can_compact(class);
2039	}
2040
2041	return pages_to_free;
2042}
2043
2044static void zs_unregister_shrinker(struct zs_pool *pool)
2045{
2046	shrinker_free(pool->shrinker);
2047}
2048
2049static int zs_register_shrinker(struct zs_pool *pool)
2050{
2051	pool->shrinker = shrinker_alloc(0, "mm-zspool:%s", pool->name);
2052	if (!pool->shrinker)
2053		return -ENOMEM;
2054
2055	pool->shrinker->scan_objects = zs_shrinker_scan;
2056	pool->shrinker->count_objects = zs_shrinker_count;
2057	pool->shrinker->batch = 0;
2058	pool->shrinker->private_data = pool;
2059
2060	shrinker_register(pool->shrinker);
2061
2062	return 0;
2063}
2064
2065static int calculate_zspage_chain_size(int class_size)
2066{
2067	int i, min_waste = INT_MAX;
2068	int chain_size = 1;
2069
2070	if (is_power_of_2(class_size))
2071		return chain_size;
2072
2073	for (i = 1; i <= ZS_MAX_PAGES_PER_ZSPAGE; i++) {
2074		int waste;
2075
2076		waste = (i * PAGE_SIZE) % class_size;
2077		if (waste < min_waste) {
2078			min_waste = waste;
2079			chain_size = i;
2080		}
2081	}
2082
2083	return chain_size;
2084}
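/*
 * Worked example (illustrative only, assuming a chain-size limit of 8
 * pages): for class_size == 208 with 4K pages, the waste
 * (i * PAGE_SIZE) % class_size is 144, 80, 16, 160, ... for i = 1, 2, 3, 4,
 * so calculate_zspage_chain_size() picks a chain of 3 pages, wasting only
 * 16 bytes per zspage.
 */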
2085
2086/**
2087 * zs_create_pool - Creates an allocation pool to work from.
2088 * @name: pool name to be created
2089 *
 * This function must be called before anything else when using
 * the zsmalloc allocator.
2092 *
2093 * On success, a pointer to the newly created pool is returned,
2094 * otherwise NULL.
2095 */
2096struct zs_pool *zs_create_pool(const char *name)
2097{
2098	int i;
2099	struct zs_pool *pool;
2100	struct size_class *prev_class = NULL;
2101
2102	pool = kzalloc(sizeof(*pool), GFP_KERNEL);
2103	if (!pool)
2104		return NULL;
2105
2106	init_deferred_free(pool);
2107	spin_lock_init(&pool->lock);
2108	atomic_set(&pool->compaction_in_progress, 0);
2109
2110	pool->name = kstrdup(name, GFP_KERNEL);
2111	if (!pool->name)
2112		goto err;
2113
2114	if (create_cache(pool))
2115		goto err;
2116
2117	/*
	 * Iterate in reverse order because the size of the size_class that we
	 * want to use for merging should be larger than or equal to the current size.
2120	 */
2121	for (i = ZS_SIZE_CLASSES - 1; i >= 0; i--) {
2122		int size;
2123		int pages_per_zspage;
2124		int objs_per_zspage;
2125		struct size_class *class;
2126		int fullness;
2127
2128		size = ZS_MIN_ALLOC_SIZE + i * ZS_SIZE_CLASS_DELTA;
2129		if (size > ZS_MAX_ALLOC_SIZE)
2130			size = ZS_MAX_ALLOC_SIZE;
2131		pages_per_zspage = calculate_zspage_chain_size(size);
2132		objs_per_zspage = pages_per_zspage * PAGE_SIZE / size;
2133
2134		/*
2135		 * We iterate from biggest down to smallest classes,
2136		 * so huge_class_size holds the size of the first huge
2137		 * class. Any object bigger than or equal to that will
		 * end up in the huge class.
2139		 */
2140		if (pages_per_zspage != 1 && objs_per_zspage != 1 &&
2141				!huge_class_size) {
2142			huge_class_size = size;
2143			/*
2144			 * The object uses ZS_HANDLE_SIZE bytes to store the
2145			 * handle. We need to subtract it, because zs_malloc()
2146			 * unconditionally adds handle size before it performs
			 * size class search - so an object may be smaller than
			 * the huge class size, yet it still can end up in the huge
2149			 * class because it grows by ZS_HANDLE_SIZE extra bytes
2150			 * right before class lookup.
2151			 */
2152			huge_class_size -= (ZS_HANDLE_SIZE - 1);
2153		}
2154
2155		/*
		 * size_class is used for normal zsmalloc operation such
		 * as alloc/free for that size. Although it is natural that we
		 * have one size_class for each size, there is a chance that we
		 * can get better memory utilization if we use one size_class for
		 * many different sizes whose size_classes have the same
		 * characteristics. So, we make size_class point to the
		 * previous size_class if possible.
2163		 */
2164		if (prev_class) {
2165			if (can_merge(prev_class, pages_per_zspage, objs_per_zspage)) {
2166				pool->size_class[i] = prev_class;
2167				continue;
2168			}
2169		}
2170
2171		class = kzalloc(sizeof(struct size_class), GFP_KERNEL);
2172		if (!class)
2173			goto err;
2174
2175		class->size = size;
2176		class->index = i;
2177		class->pages_per_zspage = pages_per_zspage;
2178		class->objs_per_zspage = objs_per_zspage;
2179		pool->size_class[i] = class;
2180
2181		fullness = ZS_INUSE_RATIO_0;
2182		while (fullness < NR_FULLNESS_GROUPS) {
2183			INIT_LIST_HEAD(&class->fullness_list[fullness]);
2184			fullness++;
2185		}
2186
2187		prev_class = class;
2188	}
2189
2190	/* debug only, don't abort if it fails */
2191	zs_pool_stat_create(pool, name);
2192
2193	/*
	 * Not critical since the shrinker is only used to trigger internal
	 * defragmentation of the pool, which is a pretty optional thing.  If
	 * registration fails we can still use the pool normally and the user
	 * can trigger compaction manually. Thus, ignore the return code.
2198	 */
2199	zs_register_shrinker(pool);
2200
2201	return pool;
2202
2203err:
2204	zs_destroy_pool(pool);
2205	return NULL;
2206}
2207EXPORT_SYMBOL_GPL(zs_create_pool);
2208
2209void zs_destroy_pool(struct zs_pool *pool)
2210{
2211	int i;
2212
2213	zs_unregister_shrinker(pool);
2214	zs_flush_migration(pool);
2215	zs_pool_stat_destroy(pool);
2216
2217	for (i = 0; i < ZS_SIZE_CLASSES; i++) {
2218		int fg;
2219		struct size_class *class = pool->size_class[i];
2220
2221		if (!class)
2222			continue;
2223
2224		if (class->index != i)
2225			continue;
2226
2227		for (fg = ZS_INUSE_RATIO_0; fg < NR_FULLNESS_GROUPS; fg++) {
2228			if (list_empty(&class->fullness_list[fg]))
2229				continue;
2230
2231			pr_err("Class-%d fullness group %d is not empty\n",
2232			       class->size, fg);
2233		}
2234		kfree(class);
2235	}
2236
2237	destroy_cache(pool);
2238	kfree(pool->name);
2239	kfree(pool);
2240}
2241EXPORT_SYMBOL_GPL(zs_destroy_pool);
2242
2243static int __init zs_init(void)
2244{
2245	int ret;
2246
2247	ret = cpuhp_setup_state(CPUHP_MM_ZS_PREPARE, "mm/zsmalloc:prepare",
2248				zs_cpu_prepare, zs_cpu_dead);
2249	if (ret)
2250		goto out;
2251
2252#ifdef CONFIG_ZPOOL
2253	zpool_register_driver(&zs_zpool_driver);
2254#endif
2255
2256	zs_stat_init();
2257
2258	return 0;
2259
2260out:
2261	return ret;
2262}
2263
2264static void __exit zs_exit(void)
2265{
2266#ifdef CONFIG_ZPOOL
2267	zpool_unregister_driver(&zs_zpool_driver);
2268#endif
2269	cpuhp_remove_state(CPUHP_MM_ZS_PREPARE);
2270
2271	zs_stat_exit();
2272}
2273
2274module_init(zs_init);
2275module_exit(zs_exit);
2276
2277MODULE_LICENSE("Dual BSD/GPL");
2278MODULE_AUTHOR("Nitin Gupta <ngupta@vflare.org>");
2279