// SPDX-License-Identifier: GPL-2.0
/*
 * Slab allocator functions that are independent of the allocator strategy
 *
 * (C) 2012 Christoph Lameter <cl@linux.com>
 */
#include <linux/slab.h>

#include <linux/mm.h>
#include <linux/poison.h>
#include <linux/interrupt.h>
#include <linux/memory.h>
#include <linux/cache.h>
#include <linux/compiler.h>
#include <linux/kfence.h>
#include <linux/module.h>
#include <linux/cpu.h>
#include <linux/uaccess.h>
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include <linux/debugfs.h>
#include <linux/kasan.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/page.h>
#include <linux/memcontrol.h>
#include <linux/stackdepot.h>

#include "internal.h"
#include "slab.h"

#define CREATE_TRACE_POINTS
#include <trace/events/kmem.h>

enum slab_state slab_state;
LIST_HEAD(slab_caches);
DEFINE_MUTEX(slab_mutex);
struct kmem_cache *kmem_cache;

static LIST_HEAD(slab_caches_to_rcu_destroy);
static void slab_caches_to_rcu_destroy_workfn(struct work_struct *work);
static DECLARE_WORK(slab_caches_to_rcu_destroy_work,
		    slab_caches_to_rcu_destroy_workfn);

/*
 * Set of flags that will prevent slab merging
 */
#define SLAB_NEVER_MERGE (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \
		SLAB_TRACE | SLAB_TYPESAFE_BY_RCU | SLAB_NOLEAKTRACE | \
		SLAB_FAILSLAB | kasan_never_merge())

#define SLAB_MERGE_SAME (SLAB_RECLAIM_ACCOUNT | SLAB_CACHE_DMA | \
			 SLAB_CACHE_DMA32 | SLAB_ACCOUNT)

/*
 * Merge control. If this is set then no merging of slab caches will occur.
 */
static bool slab_nomerge = !IS_ENABLED(CONFIG_SLAB_MERGE_DEFAULT);

static int __init setup_slab_nomerge(char *str)
{
	slab_nomerge = true;
	return 1;
}

static int __init setup_slab_merge(char *str)
{
	slab_nomerge = false;
	return 1;
}

#ifdef CONFIG_SLUB
__setup_param("slub_nomerge", slub_nomerge, setup_slab_nomerge, 0);
__setup_param("slub_merge", slub_merge, setup_slab_merge, 0);
#endif

__setup("slab_nomerge", setup_slab_nomerge);
__setup("slab_merge", setup_slab_merge);

/*
 * Determine the size of a slab object
 */
unsigned int kmem_cache_size(struct kmem_cache *s)
{
	return s->object_size;
}
EXPORT_SYMBOL(kmem_cache_size);

#ifdef CONFIG_DEBUG_VM
static int kmem_cache_sanity_check(const char *name, unsigned int size)
{
	if (!name || in_interrupt() || size > KMALLOC_MAX_SIZE) {
		pr_err("kmem_cache_create(%s) integrity check failed\n", name);
		return -EINVAL;
	}

	WARN_ON(strchr(name, ' '));	/* It confuses parsers */
	return 0;
}
#else
static inline int kmem_cache_sanity_check(const char *name, unsigned int size)
{
	return 0;
}
#endif

/*
 * Figure out what the alignment of the objects will be given a set of
 * flags, a user specified alignment and the size of the objects.
 */
static unsigned int calculate_alignment(slab_flags_t flags,
		unsigned int align, unsigned int size)
{
	/*
	 * If the user wants hardware cache aligned objects, then follow that
	 * suggestion if the object is sufficiently large.
	 *
	 * The hardware cache alignment cannot override the specified
	 * alignment though. If that is greater, then use it.
	 */
	if (flags & SLAB_HWCACHE_ALIGN) {
		unsigned int ralign;

		ralign = cache_line_size();
		while (size <= ralign / 2)
			ralign /= 2;
		align = max(align, ralign);
	}

	align = max(align, arch_slab_minalign());

	return ALIGN(align, sizeof(void *));
}
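
/*
 * Illustrative example (assuming a 64-byte cache line): a 24-byte object
 * created with SLAB_HWCACHE_ALIGN halves ralign from 64 to 32 (24 <= 32
 * holds, 24 <= 16 does not), so the suggested alignment is 32 bytes. The
 * result is then raised to at least the caller-supplied alignment and
 * arch_slab_minalign(), and rounded up to a multiple of sizeof(void *).
 */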

/*
 * Find a mergeable slab cache
 */
int slab_unmergeable(struct kmem_cache *s)
{
	if (slab_nomerge || (s->flags & SLAB_NEVER_MERGE))
		return 1;

	if (s->ctor)
		return 1;

	if (s->usersize)
		return 1;

	/*
	 * We may have set a slab to be unmergeable during bootstrap.
	 */
	if (s->refcount < 0)
		return 1;

	return 0;
}

struct kmem_cache *find_mergeable(unsigned int size, unsigned int align,
		slab_flags_t flags, const char *name, void (*ctor)(void *))
{
	struct kmem_cache *s;

	if (slab_nomerge)
		return NULL;

	if (ctor)
		return NULL;

	size = ALIGN(size, sizeof(void *));
	align = calculate_alignment(flags, align, size);
	size = ALIGN(size, align);
	flags = kmem_cache_flags(size, flags, name);

	if (flags & SLAB_NEVER_MERGE)
		return NULL;

	list_for_each_entry_reverse(s, &slab_caches, list) {
		if (slab_unmergeable(s))
			continue;

		if (size > s->size)
			continue;

		if ((flags & SLAB_MERGE_SAME) != (s->flags & SLAB_MERGE_SAME))
			continue;
		/*
		 * Check if alignment is compatible.
		 * Courtesy of Adrian Drzewiecki
		 */
		if ((s->size & ~(align - 1)) != s->size)
			continue;

		if (s->size - size >= sizeof(void *))
			continue;

		if (IS_ENABLED(CONFIG_SLAB) && align &&
			(align > s->align || s->align % align))
			continue;

		return s;
	}
	return NULL;
}
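
/*
 * Illustrative example (hypothetical sizes, 64-bit pointers): a request for
 * 60-byte objects with default flags is first rounded up to 64 bytes, so an
 * existing unmergeable-free 64-byte cache with matching SLAB_MERGE_SAME
 * flags, a compatible alignment, no constructor and no usercopy region can
 * be reused (the size difference, 0, is below sizeof(void *)). The same
 * request with a ctor never merges and gets its own cache.
 */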

static struct kmem_cache *create_cache(const char *name,
		unsigned int object_size, unsigned int align,
		slab_flags_t flags, unsigned int useroffset,
		unsigned int usersize, void (*ctor)(void *),
		struct kmem_cache *root_cache)
{
	struct kmem_cache *s;
	int err;

	if (WARN_ON(useroffset + usersize > object_size))
		useroffset = usersize = 0;

	err = -ENOMEM;
	s = kmem_cache_zalloc(kmem_cache, GFP_KERNEL);
	if (!s)
		goto out;

	s->name = name;
	s->size = s->object_size = object_size;
	s->align = align;
	s->ctor = ctor;
	s->useroffset = useroffset;
	s->usersize = usersize;

	err = __kmem_cache_create(s, flags);
	if (err)
		goto out_free_cache;

	s->refcount = 1;
	list_add(&s->list, &slab_caches);
out:
	if (err)
		return ERR_PTR(err);
	return s;

out_free_cache:
	kmem_cache_free(kmem_cache, s);
	goto out;
}
/**
 * kmem_cache_create_usercopy - Create a cache with a region suitable
 * for copying to userspace
 * @name: A string which is used in /proc/slabinfo to identify this cache.
 * @size: The size of objects to be created in this cache.
 * @align: The required alignment for the objects.
 * @flags: SLAB flags
 * @useroffset: Usercopy region offset
 * @usersize: Usercopy region size
 * @ctor: A constructor for the objects.
 *
 * Cannot be called within an interrupt, but can be interrupted.
 * The @ctor is run when new pages are allocated by the cache.
 *
 * The flags are
 *
 * %SLAB_POISON - Poison the slab with a known test pattern (a5a5a5a5)
 * to catch references to uninitialised memory.
 *
 * %SLAB_RED_ZONE - Insert `Red` zones around the allocated memory to check
 * for buffer overruns.
 *
 * %SLAB_HWCACHE_ALIGN - Align the objects in this cache to a hardware
 * cacheline.  This can be beneficial if you're counting cycles as closely
 * as davem.
 *
 * Return: a pointer to the cache on success, NULL on failure.
 */
struct kmem_cache *
kmem_cache_create_usercopy(const char *name,
		  unsigned int size, unsigned int align,
		  slab_flags_t flags,
		  unsigned int useroffset, unsigned int usersize,
		  void (*ctor)(void *))
{
	struct kmem_cache *s = NULL;
	const char *cache_name;
	int err;

#ifdef CONFIG_SLUB_DEBUG
	/*
	 * If no slub_debug was enabled globally, the static key is not yet
	 * enabled by setup_slub_debug(). Enable it if the cache is being
	 * created with any of the debugging flags passed explicitly.
	 * It's also possible that this is the first cache created with
	 * SLAB_STORE_USER and we should init stack_depot for it.
	 */
	if (flags & SLAB_DEBUG_FLAGS)
		static_branch_enable(&slub_debug_enabled);
	if (flags & SLAB_STORE_USER)
		stack_depot_init();
#endif

	mutex_lock(&slab_mutex);

	err = kmem_cache_sanity_check(name, size);
	if (err)
		goto out_unlock;

	/* Refuse requests with allocator specific flags */
	if (flags & ~SLAB_FLAGS_PERMITTED) {
		err = -EINVAL;
		goto out_unlock;
	}

	/*
	 * Some allocators will constrain the set of valid flags to a subset
	 * of all flags. We expect them to define CACHE_CREATE_MASK in this
	 * case, and we'll just provide them with a sanitized version of the
	 * passed flags.
	 */
	flags &= CACHE_CREATE_MASK;

	/* Fail closed on bad usersize or useroffset values. */
	if (WARN_ON(!usersize && useroffset) ||
	    WARN_ON(size < usersize || size - usersize < useroffset))
		usersize = useroffset = 0;

	if (!usersize)
		s = __kmem_cache_alias(name, size, align, flags, ctor);
	if (s)
		goto out_unlock;

	cache_name = kstrdup_const(name, GFP_KERNEL);
	if (!cache_name) {
		err = -ENOMEM;
		goto out_unlock;
	}

	s = create_cache(cache_name, size,
			 calculate_alignment(flags, align, size),
			 flags, useroffset, usersize, ctor, NULL);
	if (IS_ERR(s)) {
		err = PTR_ERR(s);
		kfree_const(cache_name);
	}

out_unlock:
	mutex_unlock(&slab_mutex);

	if (err) {
		if (flags & SLAB_PANIC)
			panic("%s: Failed to create slab '%s'. Error %d\n",
				__func__, name, err);
		else {
			pr_warn("%s(%s) failed with error %d\n",
				__func__, name, err);
			dump_stack();
		}
		return NULL;
	}
	return s;
}
EXPORT_SYMBOL(kmem_cache_create_usercopy);

/**
 * kmem_cache_create - Create a cache.
 * @name: A string which is used in /proc/slabinfo to identify this cache.
 * @size: The size of objects to be created in this cache.
 * @align: The required alignment for the objects.
 * @flags: SLAB flags
 * @ctor: A constructor for the objects.
 *
 * Cannot be called within an interrupt, but can be interrupted.
 * The @ctor is run when new pages are allocated by the cache.
 *
 * The flags are
 *
 * %SLAB_POISON - Poison the slab with a known test pattern (a5a5a5a5)
 * to catch references to uninitialised memory.
 *
 * %SLAB_RED_ZONE - Insert `Red` zones around the allocated memory to check
 * for buffer overruns.
 *
 * %SLAB_HWCACHE_ALIGN - Align the objects in this cache to a hardware
 * cacheline.  This can be beneficial if you're counting cycles as closely
 * as davem.
 *
 * Return: a pointer to the cache on success, NULL on failure.
 */
struct kmem_cache *
kmem_cache_create(const char *name, unsigned int size, unsigned int align,
		slab_flags_t flags, void (*ctor)(void *))
{
	return kmem_cache_create_usercopy(name, size, align, flags, 0, 0,
					  ctor);
}
EXPORT_SYMBOL(kmem_cache_create);
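
/*
 * Illustrative usage (the struct and cache names below are hypothetical and
 * not part of this file): a typical caller creates the cache once at init
 * time and then allocates and frees objects from it.
 *
 *	struct foo {
 *		int a;
 *		struct list_head link;
 *	};
 *	static struct kmem_cache *foo_cachep;
 *
 *	foo_cachep = kmem_cache_create("foo", sizeof(struct foo), 0,
 *				       SLAB_HWCACHE_ALIGN, NULL);
 *	obj = kmem_cache_alloc(foo_cachep, GFP_KERNEL);
 *	...
 *	kmem_cache_free(foo_cachep, obj);
 *	kmem_cache_destroy(foo_cachep);
 */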

static void slab_caches_to_rcu_destroy_workfn(struct work_struct *work)
{
	LIST_HEAD(to_destroy);
	struct kmem_cache *s, *s2;

	/*
	 * On destruction, SLAB_TYPESAFE_BY_RCU kmem_caches are put on the
	 * @slab_caches_to_rcu_destroy list.  The slab pages are freed
	 * through RCU and the associated kmem_caches are dereferenced
	 * while freeing the pages, so the kmem_caches should be freed only
	 * after the pending RCU operations are finished.  As rcu_barrier()
	 * is a pretty slow operation, we batch all pending destructions
	 * asynchronously.
	 */
	mutex_lock(&slab_mutex);
	list_splice_init(&slab_caches_to_rcu_destroy, &to_destroy);
	mutex_unlock(&slab_mutex);

	if (list_empty(&to_destroy))
		return;

	rcu_barrier();

	list_for_each_entry_safe(s, s2, &to_destroy, list) {
		debugfs_slab_release(s);
		kfence_shutdown_cache(s);
#ifdef SLAB_SUPPORTS_SYSFS
		sysfs_slab_release(s);
#else
		slab_kmem_cache_release(s);
#endif
	}
}

static int shutdown_cache(struct kmem_cache *s)
{
	/* free asan quarantined objects */
	kasan_cache_shutdown(s);

	if (__kmem_cache_shutdown(s) != 0)
		return -EBUSY;

	list_del(&s->list);

	if (s->flags & SLAB_TYPESAFE_BY_RCU) {
#ifdef SLAB_SUPPORTS_SYSFS
		sysfs_slab_unlink(s);
#endif
		list_add_tail(&s->list, &slab_caches_to_rcu_destroy);
		schedule_work(&slab_caches_to_rcu_destroy_work);
	} else {
		kfence_shutdown_cache(s);
		debugfs_slab_release(s);
#ifdef SLAB_SUPPORTS_SYSFS
		sysfs_slab_unlink(s);
		sysfs_slab_release(s);
#else
		slab_kmem_cache_release(s);
#endif
	}

	return 0;
}

void slab_kmem_cache_release(struct kmem_cache *s)
{
	__kmem_cache_release(s);
	kfree_const(s->name);
	kmem_cache_free(kmem_cache, s);
}

void kmem_cache_destroy(struct kmem_cache *s)
{
	if (unlikely(!s) || !kasan_check_byte(s))
		return;

	cpus_read_lock();
	mutex_lock(&slab_mutex);

	s->refcount--;
	if (s->refcount)
		goto out_unlock;

	WARN(shutdown_cache(s),
	     "%s %s: Slab cache still has objects when called from %pS",
	     __func__, s->name, (void *)_RET_IP_);
out_unlock:
	mutex_unlock(&slab_mutex);
	cpus_read_unlock();
}
EXPORT_SYMBOL(kmem_cache_destroy);

/**
 * kmem_cache_shrink - Shrink a cache.
 * @cachep: The cache to shrink.
 *
 * Releases as many slabs as possible for a cache.
 * To help debugging, a zero exit status indicates all slabs were released.
 *
 * Return: %0 if all slabs were released, non-zero otherwise
 */
int kmem_cache_shrink(struct kmem_cache *cachep)
{
	int ret;

	kasan_cache_shrink(cachep);
	ret = __kmem_cache_shrink(cachep);

	return ret;
}
EXPORT_SYMBOL(kmem_cache_shrink);

bool slab_is_available(void)
{
	return slab_state >= UP;
}

#ifdef CONFIG_PRINTK
/**
 * kmem_valid_obj - does the pointer reference a valid slab object?
 * @object: pointer to query.
 *
 * Return: %true if the pointer is to a not-yet-freed object from
 * kmalloc() or kmem_cache_alloc(), either %true or %false if the pointer
 * is to an already-freed object, and %false otherwise.
 */
bool kmem_valid_obj(void *object)
{
	struct folio *folio;

	/* Some arches consider ZERO_SIZE_PTR to be a valid address. */
	if (object < (void *)PAGE_SIZE || !virt_addr_valid(object))
		return false;
	folio = virt_to_folio(object);
	return folio_test_slab(folio);
}
EXPORT_SYMBOL_GPL(kmem_valid_obj);

static void kmem_obj_info(struct kmem_obj_info *kpp, void *object, struct slab *slab)
{
	if (__kfence_obj_info(kpp, object, slab))
		return;
	__kmem_obj_info(kpp, object, slab);
}

/**
 * kmem_dump_obj - Print available slab provenance information
 * @object: slab object for which to find provenance information.
 *
 * This function uses pr_cont(), so the caller is expected to have
 * printed out whatever preamble is appropriate.  The provenance information
 * depends on the type of object and on how much debugging is enabled.
 * For a slab-cache object, the fact that it is a slab object is printed,
 * and, if available, the slab name, return address, and stack trace from
 * the allocation and last free path of that object.
 *
 * This function will splat if passed a pointer to a non-slab object.
 * If you are not sure what type of object you have, you should instead
 * use mem_dump_obj().
 */
void kmem_dump_obj(void *object)
{
	char *cp = IS_ENABLED(CONFIG_MMU) ? "" : "/vmalloc";
	int i;
	struct slab *slab;
	unsigned long ptroffset;
	struct kmem_obj_info kp = { };

	if (WARN_ON_ONCE(!virt_addr_valid(object)))
		return;
	slab = virt_to_slab(object);
	if (WARN_ON_ONCE(!slab)) {
		pr_cont(" non-slab memory.\n");
		return;
	}
	kmem_obj_info(&kp, object, slab);
	if (kp.kp_slab_cache)
		pr_cont(" slab%s %s", cp, kp.kp_slab_cache->name);
	else
		pr_cont(" slab%s", cp);
	if (is_kfence_address(object))
		pr_cont(" (kfence)");
	if (kp.kp_objp)
		pr_cont(" start %px", kp.kp_objp);
	if (kp.kp_data_offset)
		pr_cont(" data offset %lu", kp.kp_data_offset);
	if (kp.kp_objp) {
		ptroffset = ((char *)object - (char *)kp.kp_objp) - kp.kp_data_offset;
		pr_cont(" pointer offset %lu", ptroffset);
	}
	if (kp.kp_slab_cache && kp.kp_slab_cache->usersize)
		pr_cont(" size %u", kp.kp_slab_cache->usersize);
	if (kp.kp_ret)
		pr_cont(" allocated at %pS\n", kp.kp_ret);
	else
		pr_cont("\n");
	for (i = 0; i < ARRAY_SIZE(kp.kp_stack); i++) {
		if (!kp.kp_stack[i])
			break;
		pr_info("    %pS\n", kp.kp_stack[i]);
	}

	if (kp.kp_free_stack[0])
		pr_cont(" Free path:\n");

	for (i = 0; i < ARRAY_SIZE(kp.kp_free_stack); i++) {
		if (!kp.kp_free_stack[i])
			break;
		pr_info("    %pS\n", kp.kp_free_stack[i]);
	}
}
EXPORT_SYMBOL_GPL(kmem_dump_obj);
#endif

#ifndef CONFIG_SLOB
/* Create a cache during boot when no slab services are available yet */
void __init create_boot_cache(struct kmem_cache *s, const char *name,
		unsigned int size, slab_flags_t flags,
		unsigned int useroffset, unsigned int usersize)
{
	int err;
	unsigned int align = ARCH_KMALLOC_MINALIGN;

	s->name = name;
	s->size = s->object_size = size;

	/*
	 * For power of two sizes, guarantee natural alignment for kmalloc
	 * caches, regardless of SL*B debugging options.
	 */
	if (is_power_of_2(size))
		align = max(align, size);
	s->align = calculate_alignment(flags, align, size);

	s->useroffset = useroffset;
	s->usersize = usersize;

	err = __kmem_cache_create(s, flags);

	if (err)
		panic("Creation of kmalloc slab %s size=%u failed. Reason %d\n",
					name, size, err);

	s->refcount = -1;	/* Exempt from merging for now */
}

struct kmem_cache *__init create_kmalloc_cache(const char *name,
		unsigned int size, slab_flags_t flags,
		unsigned int useroffset, unsigned int usersize)
{
	struct kmem_cache *s = kmem_cache_zalloc(kmem_cache, GFP_NOWAIT);

	if (!s)
		panic("Out of memory when creating slab %s\n", name);

	create_boot_cache(s, name, size, flags, useroffset, usersize);
	kasan_cache_create_kmalloc(s);
	list_add(&s->list, &slab_caches);
	s->refcount = 1;
	return s;
}

struct kmem_cache *
kmalloc_caches[NR_KMALLOC_TYPES][KMALLOC_SHIFT_HIGH + 1] __ro_after_init =
{ /* initialization for https://bugs.llvm.org/show_bug.cgi?id=42570 */ };
EXPORT_SYMBOL(kmalloc_caches);

/*
 * Conversion table for small slab sizes / 8 to the index in the
 * kmalloc array. This is necessary for slabs < 192 since we have
 * non-power-of-two cache sizes there. The size of larger slabs can be
 * determined using fls.
 */
static u8 size_index[24] __ro_after_init = {
	3,	/* 8 */
	4,	/* 16 */
	5,	/* 24 */
	5,	/* 32 */
	6,	/* 40 */
	6,	/* 48 */
	6,	/* 56 */
	6,	/* 64 */
	1,	/* 72 */
	1,	/* 80 */
	1,	/* 88 */
	1,	/* 96 */
	7,	/* 104 */
	7,	/* 112 */
	7,	/* 120 */
	7,	/* 128 */
	2,	/* 136 */
	2,	/* 144 */
	2,	/* 152 */
	2,	/* 160 */
	2,	/* 168 */
	2,	/* 176 */
	2,	/* 184 */
	2	/* 192 */
};

static inline unsigned int size_index_elem(unsigned int bytes)
{
	return (bytes - 1) / 8;
}

/*
 * Find the kmem_cache structure that serves a given size of
 * allocation
 */
struct kmem_cache *kmalloc_slab(size_t size, gfp_t flags)
{
	unsigned int index;

	if (size <= 192) {
		if (!size)
			return ZERO_SIZE_PTR;

		index = size_index[size_index_elem(size)];
	} else {
		if (WARN_ON_ONCE(size > KMALLOC_MAX_CACHE_SIZE))
			return NULL;
		index = fls(size - 1);
	}

	return kmalloc_caches[kmalloc_type(flags)][index];
}
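
/*
 * Illustrative example (assuming the default size_index table above, i.e.
 * KMALLOC_MIN_SIZE == 8): kmalloc_slab(100, GFP_KERNEL) takes the small-size
 * path, size_index_elem(100) == 12 and size_index[12] == 7, selecting the
 * kmalloc-128 cache. A 1000-byte request takes the large path instead:
 * fls(999) == 10, selecting the kmalloc-1024 cache.
 */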

#ifdef CONFIG_ZONE_DMA
#define KMALLOC_DMA_NAME(sz)	.name[KMALLOC_DMA] = "dma-kmalloc-" #sz,
#else
#define KMALLOC_DMA_NAME(sz)
#endif

#ifdef CONFIG_MEMCG_KMEM
#define KMALLOC_CGROUP_NAME(sz)	.name[KMALLOC_CGROUP] = "kmalloc-cg-" #sz,
#else
#define KMALLOC_CGROUP_NAME(sz)
#endif

#define INIT_KMALLOC_INFO(__size, __short_size)			\
{								\
	.name[KMALLOC_NORMAL]  = "kmalloc-" #__short_size,	\
	.name[KMALLOC_RECLAIM] = "kmalloc-rcl-" #__short_size,	\
	KMALLOC_CGROUP_NAME(__short_size)			\
	KMALLOC_DMA_NAME(__short_size)				\
	.size = __size,						\
}

/*
 * kmalloc_info[] is to make slub_debug=,kmalloc-xx option work at boot time.
 * kmalloc_index() supports up to 2^25=32MB, so the final entry of the table is
 * kmalloc-32M.
 */
const struct kmalloc_info_struct kmalloc_info[] __initconst = {
	INIT_KMALLOC_INFO(0, 0),
	INIT_KMALLOC_INFO(96, 96),
	INIT_KMALLOC_INFO(192, 192),
	INIT_KMALLOC_INFO(8, 8),
	INIT_KMALLOC_INFO(16, 16),
	INIT_KMALLOC_INFO(32, 32),
	INIT_KMALLOC_INFO(64, 64),
	INIT_KMALLOC_INFO(128, 128),
	INIT_KMALLOC_INFO(256, 256),
	INIT_KMALLOC_INFO(512, 512),
	INIT_KMALLOC_INFO(1024, 1k),
	INIT_KMALLOC_INFO(2048, 2k),
	INIT_KMALLOC_INFO(4096, 4k),
	INIT_KMALLOC_INFO(8192, 8k),
	INIT_KMALLOC_INFO(16384, 16k),
	INIT_KMALLOC_INFO(32768, 32k),
	INIT_KMALLOC_INFO(65536, 64k),
	INIT_KMALLOC_INFO(131072, 128k),
	INIT_KMALLOC_INFO(262144, 256k),
	INIT_KMALLOC_INFO(524288, 512k),
	INIT_KMALLOC_INFO(1048576, 1M),
	INIT_KMALLOC_INFO(2097152, 2M),
	INIT_KMALLOC_INFO(4194304, 4M),
	INIT_KMALLOC_INFO(8388608, 8M),
	INIT_KMALLOC_INFO(16777216, 16M),
	INIT_KMALLOC_INFO(33554432, 32M)
};

/*
 * Patch up the size_index table if we have strange large alignment
 * requirements for the kmalloc array. This is only the case for
 * MIPS it seems. The standard arches will not generate any code here.
 *
 * Largest permitted alignment is 256 bytes due to the way we
 * handle the index determination for the smaller caches.
 *
 * Make sure that nothing crazy happens if someone starts tinkering
 * around with ARCH_KMALLOC_MINALIGN
 */
void __init setup_kmalloc_cache_index_table(void)
{
	unsigned int i;

	BUILD_BUG_ON(KMALLOC_MIN_SIZE > 256 ||
		!is_power_of_2(KMALLOC_MIN_SIZE));

	for (i = 8; i < KMALLOC_MIN_SIZE; i += 8) {
		unsigned int elem = size_index_elem(i);

		if (elem >= ARRAY_SIZE(size_index))
			break;
		size_index[elem] = KMALLOC_SHIFT_LOW;
	}

	if (KMALLOC_MIN_SIZE >= 64) {
		/*
		 * The 96 byte sized cache is not used if the alignment
		 * is 64 bytes.
		 */
		for (i = 64 + 8; i <= 96; i += 8)
			size_index[size_index_elem(i)] = 7;
	}

	if (KMALLOC_MIN_SIZE >= 128) {
		/*
		 * The 192 byte sized cache is not used if the alignment
		 * is 128 bytes. Redirect kmalloc to use the 256 byte cache
		 * instead.
		 */
		for (i = 128 + 8; i <= 192; i += 8)
			size_index[size_index_elem(i)] = 8;
	}
}

static void __init
new_kmalloc_cache(int idx, enum kmalloc_cache_type type, slab_flags_t flags)
{
	if (type == KMALLOC_RECLAIM) {
		flags |= SLAB_RECLAIM_ACCOUNT;
	} else if (IS_ENABLED(CONFIG_MEMCG_KMEM) && (type == KMALLOC_CGROUP)) {
		if (mem_cgroup_kmem_disabled()) {
			kmalloc_caches[type][idx] = kmalloc_caches[KMALLOC_NORMAL][idx];
			return;
		}
		flags |= SLAB_ACCOUNT;
	} else if (IS_ENABLED(CONFIG_ZONE_DMA) && (type == KMALLOC_DMA)) {
		flags |= SLAB_CACHE_DMA;
	}

	kmalloc_caches[type][idx] = create_kmalloc_cache(
					kmalloc_info[idx].name[type],
					kmalloc_info[idx].size, flags, 0,
					kmalloc_info[idx].size);

	/*
	 * If CONFIG_MEMCG_KMEM is enabled, disable cache merging for
	 * KMALLOC_NORMAL caches.
	 */
	if (IS_ENABLED(CONFIG_MEMCG_KMEM) && (type == KMALLOC_NORMAL))
		kmalloc_caches[type][idx]->refcount = -1;
}

/*
 * Create the kmalloc array. Some of the regular kmalloc arrays
 * may already have been created because they were needed to
 * enable allocations for slab creation.
 */
void __init create_kmalloc_caches(slab_flags_t flags)
{
	int i;
	enum kmalloc_cache_type type;

	/*
	 * Including KMALLOC_CGROUP if CONFIG_MEMCG_KMEM is defined.
	 */
	for (type = KMALLOC_NORMAL; type < NR_KMALLOC_TYPES; type++) {
		for (i = KMALLOC_SHIFT_LOW; i <= KMALLOC_SHIFT_HIGH; i++) {
			if (!kmalloc_caches[type][i])
				new_kmalloc_cache(i, type, flags);

			/*
			 * Caches that are not power-of-two sized have to be
			 * created immediately after the earlier power-of-two
			 * caches.
			 */
			if (KMALLOC_MIN_SIZE <= 32 && i == 6 &&
					!kmalloc_caches[type][1])
				new_kmalloc_cache(1, type, flags);
			if (KMALLOC_MIN_SIZE <= 64 && i == 7 &&
					!kmalloc_caches[type][2])
				new_kmalloc_cache(2, type, flags);
		}
	}

	/* Kmalloc array is now usable */
	slab_state = UP;
}
#endif /* !CONFIG_SLOB */

gfp_t kmalloc_fix_flags(gfp_t flags)
{
	gfp_t invalid_mask = flags & GFP_SLAB_BUG_MASK;

	flags &= ~GFP_SLAB_BUG_MASK;
	pr_warn("Unexpected gfp: %#x (%pGg). Fixing up to gfp: %#x (%pGg). Fix your code!\n",
			invalid_mask, &invalid_mask, flags, &flags);
	dump_stack();

	return flags;
}

/*
 * To avoid unnecessary overhead, we pass through large allocation requests
 * directly to the page allocator. We use __GFP_COMP, because we will need to
 * know the allocation order to free the pages properly in kfree.
 */
void *kmalloc_order(size_t size, gfp_t flags, unsigned int order)
{
	void *ret = NULL;
	struct page *page;

	if (unlikely(flags & GFP_SLAB_BUG_MASK))
		flags = kmalloc_fix_flags(flags);

	flags |= __GFP_COMP;
	page = alloc_pages(flags, order);
	if (likely(page)) {
		ret = page_address(page);
		mod_lruvec_page_state(page, NR_SLAB_UNRECLAIMABLE_B,
				      PAGE_SIZE << order);
	}
	ret = kasan_kmalloc_large(ret, size, flags);
	/* As ret might get tagged, call kmemleak hook after KASAN. */
	kmemleak_alloc(ret, size, 1, flags);
	return ret;
}
EXPORT_SYMBOL(kmalloc_order);
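
/*
 * Illustrative example (assuming 4 KiB pages): a 70000-byte kmalloc() is too
 * large for any kmalloc cache, so the caller typically passes
 * order = get_order(70000) = 5 and kmalloc_order() hands back the address of
 * a 32-page (131072-byte) compound allocation; kfree() later reads the order
 * back from the compound page to free it.
 */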

#ifdef CONFIG_TRACING
void *kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order)
{
	void *ret = kmalloc_order(size, flags, order);
	trace_kmalloc(_RET_IP_, ret, NULL, size, PAGE_SIZE << order, flags);
	return ret;
}
EXPORT_SYMBOL(kmalloc_order_trace);
#endif

#ifdef CONFIG_SLAB_FREELIST_RANDOM
/* Randomize a generic freelist */
static void freelist_randomize(struct rnd_state *state, unsigned int *list,
			       unsigned int count)
{
	unsigned int rand;
	unsigned int i;

	for (i = 0; i < count; i++)
		list[i] = i;

	/* Fisher-Yates shuffle */
	for (i = count - 1; i > 0; i--) {
		rand = prandom_u32_state(state);
		rand %= (i + 1);
		swap(list[i], list[rand]);
	}
}

/* Create a random sequence per cache */
int cache_random_seq_create(struct kmem_cache *cachep, unsigned int count,
				    gfp_t gfp)
{
	struct rnd_state state;

	if (count < 2 || cachep->random_seq)
		return 0;

	cachep->random_seq = kcalloc(count, sizeof(unsigned int), gfp);
	if (!cachep->random_seq)
		return -ENOMEM;

	/* Get best entropy at this stage of boot */
	prandom_seed_state(&state, get_random_long());

	freelist_randomize(&state, cachep->random_seq, count);
	return 0;
}

/* Destroy the per-cache random freelist sequence */
void cache_random_seq_destroy(struct kmem_cache *cachep)
{
	kfree(cachep->random_seq);
	cachep->random_seq = NULL;
}
#endif /* CONFIG_SLAB_FREELIST_RANDOM */

#if defined(CONFIG_SLAB) || defined(CONFIG_SLUB_DEBUG)
#ifdef CONFIG_SLAB
#define SLABINFO_RIGHTS (0600)
#else
#define SLABINFO_RIGHTS (0400)
#endif

static void print_slabinfo_header(struct seq_file *m)
{
	/*
	 * Output format version, so at least we can change it
	 * without _too_ many complaints.
	 */
#ifdef CONFIG_DEBUG_SLAB
	seq_puts(m, "slabinfo - version: 2.1 (statistics)\n");
#else
	seq_puts(m, "slabinfo - version: 2.1\n");
#endif
	seq_puts(m, "# name            <active_objs> <num_objs> <objsize> <objperslab> <pagesperslab>");
	seq_puts(m, " : tunables <limit> <batchcount> <sharedfactor>");
	seq_puts(m, " : slabdata <active_slabs> <num_slabs> <sharedavail>");
#ifdef CONFIG_DEBUG_SLAB
	seq_puts(m, " : globalstat <listallocs> <maxobjs> <grown> <reaped> <error> <maxfreeable> <nodeallocs> <remotefrees> <alienoverflow>");
	seq_puts(m, " : cpustat <allochit> <allocmiss> <freehit> <freemiss>");
#endif
	seq_putc(m, '\n');
}

static void *slab_start(struct seq_file *m, loff_t *pos)
{
	mutex_lock(&slab_mutex);
	return seq_list_start(&slab_caches, *pos);
}

static void *slab_next(struct seq_file *m, void *p, loff_t *pos)
{
	return seq_list_next(p, &slab_caches, pos);
}

static void slab_stop(struct seq_file *m, void *p)
{
	mutex_unlock(&slab_mutex);
}

static void cache_show(struct kmem_cache *s, struct seq_file *m)
{
	struct slabinfo sinfo;

	memset(&sinfo, 0, sizeof(sinfo));
	get_slabinfo(s, &sinfo);

	seq_printf(m, "%-17s %6lu %6lu %6u %4u %4d",
		   s->name, sinfo.active_objs, sinfo.num_objs, s->size,
		   sinfo.objects_per_slab, (1 << sinfo.cache_order));

	seq_printf(m, " : tunables %4u %4u %4u",
		   sinfo.limit, sinfo.batchcount, sinfo.shared);
	seq_printf(m, " : slabdata %6lu %6lu %6lu",
		   sinfo.active_slabs, sinfo.num_slabs, sinfo.shared_avail);
	slabinfo_show_stats(m, s);
	seq_putc(m, '\n');
}

static int slab_show(struct seq_file *m, void *p)
{
	struct kmem_cache *s = list_entry(p, struct kmem_cache, list);

	if (p == slab_caches.next)
		print_slabinfo_header(m);
	cache_show(s, m);
	return 0;
}

void dump_unreclaimable_slab(void)
{
	struct kmem_cache *s;
	struct slabinfo sinfo;

	/*
	 * Acquiring slab_mutex here is risky since we don't want to sleep
	 * in the OOM path. But without holding the mutex, traversing the
	 * list can race with cache destruction and crash.
	 * Use mutex_trylock to protect the list traversal and dump nothing
	 * if the mutex cannot be acquired.
	 */
	if (!mutex_trylock(&slab_mutex)) {
		pr_warn("excessive unreclaimable slab but cannot dump stats\n");
		return;
	}

	pr_info("Unreclaimable slab info:\n");
	pr_info("Name                      Used          Total\n");

	list_for_each_entry(s, &slab_caches, list) {
		if (s->flags & SLAB_RECLAIM_ACCOUNT)
			continue;

		get_slabinfo(s, &sinfo);

		if (sinfo.num_objs > 0)
			pr_info("%-17s %10luKB %10luKB\n", s->name,
				(sinfo.active_objs * s->size) / 1024,
				(sinfo.num_objs * s->size) / 1024);
	}
	mutex_unlock(&slab_mutex);
}

/*
 * slabinfo_op - iterator that generates /proc/slabinfo
 *
 * Output layout:
 * cache-name
 * num-active-objs
 * total-objs
 * object size
 * num-active-slabs
 * total-slabs
 * num-pages-per-slab
 * + further values on SMP and with statistics enabled
 */
static const struct seq_operations slabinfo_op = {
	.start = slab_start,
	.next = slab_next,
	.stop = slab_stop,
	.show = slab_show,
};

static int slabinfo_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &slabinfo_op);
}

static const struct proc_ops slabinfo_proc_ops = {
	.proc_flags	= PROC_ENTRY_PERMANENT,
	.proc_open	= slabinfo_open,
	.proc_read	= seq_read,
	.proc_write	= slabinfo_write,
	.proc_lseek	= seq_lseek,
	.proc_release	= seq_release,
};

static int __init slab_proc_init(void)
{
	proc_create("slabinfo", SLABINFO_RIGHTS, NULL, &slabinfo_proc_ops);
	return 0;
}
module_init(slab_proc_init);

#endif /* CONFIG_SLAB || CONFIG_SLUB_DEBUG */

static __always_inline void *__do_krealloc(const void *p, size_t new_size,
					   gfp_t flags)
{
	void *ret;
	size_t ks;

	/* Don't use instrumented ksize to allow precise KASAN poisoning. */
	if (likely(!ZERO_OR_NULL_PTR(p))) {
		if (!kasan_check_byte(p))
			return NULL;
		ks = kfence_ksize(p) ?: __ksize(p);
	} else
		ks = 0;

	/* If the object still fits, repoison it precisely. */
	if (ks >= new_size) {
		p = kasan_krealloc((void *)p, new_size, flags);
		return (void *)p;
	}

	ret = kmalloc_track_caller(new_size, flags);
	if (ret && p) {
		/* Disable KASAN checks as the object's redzone is accessed. */
		kasan_disable_current();
		memcpy(ret, kasan_reset_tag(p), ks);
		kasan_enable_current();
	}

	return ret;
}

/**
 * krealloc - reallocate memory. The contents will remain unchanged.
 * @p: object to reallocate memory for.
 * @new_size: how many bytes of memory are required.
 * @flags: the type of memory to allocate.
 *
 * The contents of the object pointed to are preserved up to the
 * lesser of the new and old sizes (__GFP_ZERO flag is effectively ignored).
 * If @p is %NULL, krealloc() behaves exactly like kmalloc().  If @new_size
 * is 0 and @p is not a %NULL pointer, the object pointed to is freed.
 *
 * Return: pointer to the allocated memory or %NULL in case of error
 */
void *krealloc(const void *p, size_t new_size, gfp_t flags)
{
	void *ret;

	if (unlikely(!new_size)) {
		kfree(p);
		return ZERO_SIZE_PTR;
	}

	ret = __do_krealloc(p, new_size, flags);
	if (ret && kasan_reset_tag(p) != kasan_reset_tag(ret))
		kfree(p);

	return ret;
}
EXPORT_SYMBOL(krealloc);
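
/*
 * Illustrative usage (the buffer below is hypothetical): growing an existing
 * allocation while keeping its contents. Note that the old buffer is not
 * freed when krealloc() fails, so the caller must still release it.
 *
 *	buf = kmalloc(64, GFP_KERNEL);
 *	...
 *	new = krealloc(buf, 128, GFP_KERNEL);
 *	if (!new)
 *		kfree(buf);
 *	else
 *		buf = new;
 */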

/**
 * kfree_sensitive - Clear sensitive information in memory before freeing
 * @p: object to free memory of
 *
 * The memory of the object @p points to is zeroed before it is freed.
 * If @p is %NULL, kfree_sensitive() does nothing.
 *
 * Note: this function zeroes the whole allocated buffer which can be a good
 * deal bigger than the requested buffer size passed to kmalloc(). So be
 * careful when using this function in performance sensitive code.
 */
void kfree_sensitive(const void *p)
{
	size_t ks;
	void *mem = (void *)p;

	ks = ksize(mem);
	if (ks)
		memzero_explicit(mem, ks);
	kfree(mem);
}
EXPORT_SYMBOL(kfree_sensitive);

/**
 * ksize - get the actual amount of memory allocated for a given object
 * @objp: Pointer to the object
 *
 * kmalloc may internally round up allocations and return more memory
 * than requested. ksize() can be used to determine the actual amount of
 * memory allocated. The caller may use this additional memory, even though
 * a smaller amount of memory was initially specified with the kmalloc call.
 * The caller must guarantee that objp points to a valid object previously
 * allocated with either kmalloc() or kmem_cache_alloc(). The object
 * must not be freed during the duration of the call.
 *
 * Return: size of the actual memory used by @objp in bytes
 */
size_t ksize(const void *objp)
{
	size_t size;

	/*
	 * We need to first check that the pointer to the object is valid, and
	 * only then unpoison the memory. The report printed from ksize() is
	 * more useful than one printed later, when the behaviour could be
	 * undefined due to a potential use-after-free or double-free.
	 *
	 * We use kasan_check_byte(), which is supported for the hardware
	 * tag-based KASAN mode, unlike kasan_check_read/write().
	 *
	 * If the pointed to memory is invalid, we return 0 to avoid users of
	 * ksize() writing to and potentially corrupting the memory region.
	 *
	 * We want to perform the check before __ksize(), to avoid potentially
	 * crashing in __ksize() due to accessing invalid metadata.
	 */
	if (unlikely(ZERO_OR_NULL_PTR(objp)) || !kasan_check_byte(objp))
		return 0;

	size = kfence_ksize(objp) ?: __ksize(objp);
	/*
	 * We assume that ksize callers could use whole allocated area,
	 * so we need to unpoison this area.
	 */
	kasan_unpoison_range(objp, size);
	return size;
}
EXPORT_SYMBOL(ksize);
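
/*
 * Illustrative example: on a typical configuration, kmalloc(100, GFP_KERNEL)
 * is served from the kmalloc-128 cache, so ksize() on the returned pointer
 * reports 128 and the caller may legitimately use all 128 bytes.
 */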

/* Tracepoints definitions. */
EXPORT_TRACEPOINT_SYMBOL(kmalloc);
EXPORT_TRACEPOINT_SYMBOL(kmem_cache_alloc);
EXPORT_TRACEPOINT_SYMBOL(kmalloc_node);
EXPORT_TRACEPOINT_SYMBOL(kmem_cache_alloc_node);
EXPORT_TRACEPOINT_SYMBOL(kfree);
EXPORT_TRACEPOINT_SYMBOL(kmem_cache_free);

int should_failslab(struct kmem_cache *s, gfp_t gfpflags)
{
	if (__should_failslab(s, gfpflags))
		return -ENOMEM;
	return 0;
}
ALLOW_ERROR_INJECTION(should_failslab, ERRNO);