// SPDX-License-Identifier: GPL-2.0
/*
 * Generic infrastructure for lifetime debugging of objects.
 *
 * Copyright (C) 2008, Thomas Gleixner <tglx@linutronix.de>
 */

#define pr_fmt(fmt) "ODEBUG: " fmt

#include <linux/debugobjects.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/slab.h>
#include <linux/hash.h>
#include <linux/kmemleak.h>
#include <linux/cpu.h>

#define ODEBUG_HASH_BITS	14
#define ODEBUG_HASH_SIZE	(1 << ODEBUG_HASH_BITS)

#define ODEBUG_POOL_SIZE	1024
#define ODEBUG_POOL_MIN_LEVEL	256
#define ODEBUG_POOL_PERCPU_SIZE	64
#define ODEBUG_BATCH_SIZE	16

#define ODEBUG_CHUNK_SHIFT	PAGE_SHIFT
#define ODEBUG_CHUNK_SIZE	(1 << ODEBUG_CHUNK_SHIFT)
#define ODEBUG_CHUNK_MASK	(~(ODEBUG_CHUNK_SIZE - 1))

/*
 * We limit the freeing of debug objects via workqueue to a maximum
 * frequency of 10Hz and a batch of about 1024 objects per freeing
 * operation, i.e. at most ~10k debug objects are freed per second.
 */
#define ODEBUG_FREE_WORK_MAX	1024
#define ODEBUG_FREE_WORK_DELAY	DIV_ROUND_UP(HZ, 10)

struct debug_bucket {
	struct hlist_head	list;
	raw_spinlock_t		lock;
};

/*
 * Debug object percpu free list
 * Access is protected by disabling irq
 */
struct debug_percpu_free {
	struct hlist_head	free_objs;
	int			obj_free;
};

static DEFINE_PER_CPU(struct debug_percpu_free, percpu_obj_pool);

static struct debug_bucket	obj_hash[ODEBUG_HASH_SIZE];

static struct debug_obj		obj_static_pool[ODEBUG_POOL_SIZE] __initdata;

static DEFINE_RAW_SPINLOCK(pool_lock);

static HLIST_HEAD(obj_pool);
static HLIST_HEAD(obj_to_free);

/*
 * Because of the presence of percpu free pools, obj_pool_free will
 * under-count those in the percpu free pools. Similarly, obj_pool_used
 * will over-count those in the percpu free pools. Adjustments will be
 * made at debug_stats_show(). Both obj_pool_min_free and obj_pool_max_used
 * can be off.
 */
static int			obj_pool_min_free = ODEBUG_POOL_SIZE;
static int			obj_pool_free = ODEBUG_POOL_SIZE;
static int			obj_pool_used;
static int			obj_pool_max_used;
static bool			obj_freeing;
/* The number of objs on the global free list */
static int			obj_nr_tofree;
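
/*
 * Example of the drift (illustrative): an object sitting in a percpu
 * free list is still counted in obj_pool_used and not counted in
 * obj_pool_free; debug_stats_show() compensates by reporting
 * pool_used - obj_percpu_free and pool_free + obj_percpu_free.
 */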

static int			debug_objects_maxchain __read_mostly;
static int __maybe_unused	debug_objects_maxchecked __read_mostly;
static int			debug_objects_fixups __read_mostly;
static int			debug_objects_warnings __read_mostly;
static int			debug_objects_enabled __read_mostly
				= CONFIG_DEBUG_OBJECTS_ENABLE_DEFAULT;
static int			debug_objects_pool_size __read_mostly
				= ODEBUG_POOL_SIZE;
static int			debug_objects_pool_min_level __read_mostly
				= ODEBUG_POOL_MIN_LEVEL;
static const struct debug_obj_descr *descr_test  __read_mostly;
static struct kmem_cache	*obj_cache __ro_after_init;

/*
 * Track the number of kmem_cache_alloc()/kmem_cache_free() calls done.
 */
static int			debug_objects_allocated;
static int			debug_objects_freed;

static void free_obj_work(struct work_struct *work);
static DECLARE_DELAYED_WORK(debug_obj_work, free_obj_work);

static int __init enable_object_debug(char *str)
{
	debug_objects_enabled = 1;
	return 0;
}

static int __init disable_object_debug(char *str)
{
	debug_objects_enabled = 0;
	return 0;
}

early_param("debug_objects", enable_object_debug);
early_param("no_debug_objects", disable_object_debug);

static const char *obj_states[ODEBUG_STATE_MAX] = {
	[ODEBUG_STATE_NONE]		= "none",
	[ODEBUG_STATE_INIT]		= "initialized",
	[ODEBUG_STATE_INACTIVE]		= "inactive",
	[ODEBUG_STATE_ACTIVE]		= "active",
	[ODEBUG_STATE_DESTROYED]	= "destroyed",
	[ODEBUG_STATE_NOTAVAILABLE]	= "not available",
};

static void fill_pool(void)
{
	gfp_t gfp = __GFP_HIGH | __GFP_NOWARN;
	struct debug_obj *obj;
	unsigned long flags;

	if (likely(READ_ONCE(obj_pool_free) >= debug_objects_pool_min_level))
		return;

	/*
	 * Reuse objs from the global free list; they will be reinitialized
	 * when allocating.
	 *
	 * Both obj_nr_tofree and obj_pool_free are checked locklessly; the
	 * READ_ONCE()s pair with the WRITE_ONCE()s in pool_lock critical
	 * sections.
	 */
	while (READ_ONCE(obj_nr_tofree) && (READ_ONCE(obj_pool_free) < obj_pool_min_free)) {
		raw_spin_lock_irqsave(&pool_lock, flags);
		/*
		 * Recheck with the lock held as the worker thread might have
		 * won the race and freed the global free list already.
		 */
		while (obj_nr_tofree && (obj_pool_free < obj_pool_min_free)) {
			obj = hlist_entry(obj_to_free.first, typeof(*obj), node);
			hlist_del(&obj->node);
			WRITE_ONCE(obj_nr_tofree, obj_nr_tofree - 1);
			hlist_add_head(&obj->node, &obj_pool);
			WRITE_ONCE(obj_pool_free, obj_pool_free + 1);
		}
		raw_spin_unlock_irqrestore(&pool_lock, flags);
	}

	if (unlikely(!obj_cache))
		return;

	while (READ_ONCE(obj_pool_free) < debug_objects_pool_min_level) {
		struct debug_obj *new[ODEBUG_BATCH_SIZE];
		int cnt;

		for (cnt = 0; cnt < ODEBUG_BATCH_SIZE; cnt++) {
			new[cnt] = kmem_cache_zalloc(obj_cache, gfp);
			if (!new[cnt])
				break;
		}
		if (!cnt)
			return;

		raw_spin_lock_irqsave(&pool_lock, flags);
		while (cnt) {
			hlist_add_head(&new[--cnt]->node, &obj_pool);
			debug_objects_allocated++;
			WRITE_ONCE(obj_pool_free, obj_pool_free + 1);
		}
		raw_spin_unlock_irqrestore(&pool_lock, flags);
	}
}

/*
 * Look up an object in the hash bucket.
 */
static struct debug_obj *lookup_object(void *addr, struct debug_bucket *b)
{
	struct debug_obj *obj;
	int cnt = 0;

	hlist_for_each_entry(obj, &b->list, node) {
		cnt++;
		if (obj->object == addr)
			return obj;
	}
	if (cnt > debug_objects_maxchain)
		debug_objects_maxchain = cnt;

	return NULL;
}

/*
 * Allocate a new object from the hlist
 */
static struct debug_obj *__alloc_object(struct hlist_head *list)
{
	struct debug_obj *obj = NULL;

	if (list->first) {
		obj = hlist_entry(list->first, typeof(*obj), node);
		hlist_del(&obj->node);
	}

	return obj;
}

static struct debug_obj *
alloc_object(void *addr, struct debug_bucket *b, const struct debug_obj_descr *descr)
{
	struct debug_percpu_free *percpu_pool = this_cpu_ptr(&percpu_obj_pool);
	struct debug_obj *obj;

	if (likely(obj_cache)) {
		obj = __alloc_object(&percpu_pool->free_objs);
		if (obj) {
			percpu_pool->obj_free--;
			goto init_obj;
		}
	}

	raw_spin_lock(&pool_lock);
	obj = __alloc_object(&obj_pool);
	if (obj) {
		obj_pool_used++;
		WRITE_ONCE(obj_pool_free, obj_pool_free - 1);

		/*
		 * Looking ahead, allocate one batch of debug objects and
		 * put them into the percpu free pool.
		 */
		if (likely(obj_cache)) {
			int i;

			for (i = 0; i < ODEBUG_BATCH_SIZE; i++) {
				struct debug_obj *obj2;

				obj2 = __alloc_object(&obj_pool);
				if (!obj2)
					break;
				hlist_add_head(&obj2->node,
					       &percpu_pool->free_objs);
				percpu_pool->obj_free++;
				obj_pool_used++;
				WRITE_ONCE(obj_pool_free, obj_pool_free - 1);
			}
		}

		if (obj_pool_used > obj_pool_max_used)
			obj_pool_max_used = obj_pool_used;

		if (obj_pool_free < obj_pool_min_free)
			obj_pool_min_free = obj_pool_free;
	}
	raw_spin_unlock(&pool_lock);

init_obj:
	if (obj) {
		obj->object = addr;
		obj->descr  = descr;
		obj->state  = ODEBUG_STATE_NONE;
		obj->astate = 0;
		hlist_add_head(&obj->node, &b->list);
	}
	return obj;
}

/*
 * workqueue function to free objects.
 *
 * To reduce contention on the global pool_lock, the actual freeing of
 * debug objects will be delayed if the pool_lock is busy.
 */
static void free_obj_work(struct work_struct *work)
{
	struct hlist_node *tmp;
	struct debug_obj *obj;
	unsigned long flags;
	HLIST_HEAD(tofree);

	WRITE_ONCE(obj_freeing, false);
	if (!raw_spin_trylock_irqsave(&pool_lock, flags))
		return;

	if (obj_pool_free >= debug_objects_pool_size)
		goto free_objs;

	/*
	 * The objs on the pool list might be allocated before the work is
	 * run, so recheck whether the pool list is full. If not, refill the
	 * pool list from the global free list. As it is likely that a
	 * workload may be gearing up to use more and more objects, don't
	 * free any of them until the next round.
	 */
	while (obj_nr_tofree && obj_pool_free < debug_objects_pool_size) {
		obj = hlist_entry(obj_to_free.first, typeof(*obj), node);
		hlist_del(&obj->node);
		hlist_add_head(&obj->node, &obj_pool);
		WRITE_ONCE(obj_pool_free, obj_pool_free + 1);
		WRITE_ONCE(obj_nr_tofree, obj_nr_tofree - 1);
	}
	raw_spin_unlock_irqrestore(&pool_lock, flags);
	return;

free_objs:
	/*
	 * Pool list is already full and there are still objs on the free
	 * list. Move remaining free objs to a temporary list to free the
	 * memory outside the pool_lock held region.
	 */
	if (obj_nr_tofree) {
		hlist_move_list(&obj_to_free, &tofree);
		debug_objects_freed += obj_nr_tofree;
		WRITE_ONCE(obj_nr_tofree, 0);
	}
	raw_spin_unlock_irqrestore(&pool_lock, flags);

	hlist_for_each_entry_safe(obj, tmp, &tofree, node) {
		hlist_del(&obj->node);
		kmem_cache_free(obj_cache, obj);
	}
}

static void __free_object(struct debug_obj *obj)
{
	struct debug_obj *objs[ODEBUG_BATCH_SIZE];
	struct debug_percpu_free *percpu_pool;
	int lookahead_count = 0;
	unsigned long flags;
	bool work;

	local_irq_save(flags);
	if (!obj_cache)
		goto free_to_obj_pool;

	/*
	 * Try to free it into the percpu pool first.
	 */
	percpu_pool = this_cpu_ptr(&percpu_obj_pool);
	if (percpu_pool->obj_free < ODEBUG_POOL_PERCPU_SIZE) {
		hlist_add_head(&obj->node, &percpu_pool->free_objs);
		percpu_pool->obj_free++;
		local_irq_restore(flags);
		return;
	}

	/*
	 * As the percpu pool is full, look ahead and pull out a batch
	 * of objects from the percpu pool and free them as well.
	 */
	for (; lookahead_count < ODEBUG_BATCH_SIZE; lookahead_count++) {
		objs[lookahead_count] = __alloc_object(&percpu_pool->free_objs);
		if (!objs[lookahead_count])
			break;
		percpu_pool->obj_free--;
	}

free_to_obj_pool:
	raw_spin_lock(&pool_lock);
	work = (obj_pool_free > debug_objects_pool_size) && obj_cache &&
	       (obj_nr_tofree < ODEBUG_FREE_WORK_MAX);
	obj_pool_used--;

	if (work) {
		WRITE_ONCE(obj_nr_tofree, obj_nr_tofree + 1);
		hlist_add_head(&obj->node, &obj_to_free);
		if (lookahead_count) {
			WRITE_ONCE(obj_nr_tofree, obj_nr_tofree + lookahead_count);
			obj_pool_used -= lookahead_count;
			while (lookahead_count) {
				hlist_add_head(&objs[--lookahead_count]->node,
					       &obj_to_free);
			}
		}

		if ((obj_pool_free > debug_objects_pool_size) &&
		    (obj_nr_tofree < ODEBUG_FREE_WORK_MAX)) {
			int i;

			/*
			 * Free one more batch of objects from obj_pool.
			 */
			for (i = 0; i < ODEBUG_BATCH_SIZE; i++) {
				obj = __alloc_object(&obj_pool);
				hlist_add_head(&obj->node, &obj_to_free);
				WRITE_ONCE(obj_pool_free, obj_pool_free - 1);
				WRITE_ONCE(obj_nr_tofree, obj_nr_tofree + 1);
			}
		}
	} else {
		WRITE_ONCE(obj_pool_free, obj_pool_free + 1);
		hlist_add_head(&obj->node, &obj_pool);
		if (lookahead_count) {
			WRITE_ONCE(obj_pool_free, obj_pool_free + lookahead_count);
			obj_pool_used -= lookahead_count;
			while (lookahead_count) {
				hlist_add_head(&objs[--lookahead_count]->node,
					       &obj_pool);
			}
		}
	}
	raw_spin_unlock(&pool_lock);
	local_irq_restore(flags);
}

/*
 * Put the object back into the pool and schedule work to free objects
 * if necessary.
 */
static void free_object(struct debug_obj *obj)
{
	__free_object(obj);
	if (!READ_ONCE(obj_freeing) && READ_ONCE(obj_nr_tofree)) {
		WRITE_ONCE(obj_freeing, true);
		schedule_delayed_work(&debug_obj_work, ODEBUG_FREE_WORK_DELAY);
	}
}

#ifdef CONFIG_HOTPLUG_CPU
static int object_cpu_offline(unsigned int cpu)
{
	struct debug_percpu_free *percpu_pool;
	struct hlist_node *tmp;
	struct debug_obj *obj;
	unsigned long flags;

	/* Remote access is safe as the CPU is dead already */
	percpu_pool = per_cpu_ptr(&percpu_obj_pool, cpu);
	hlist_for_each_entry_safe(obj, tmp, &percpu_pool->free_objs, node) {
		hlist_del(&obj->node);
		kmem_cache_free(obj_cache, obj);
	}

	raw_spin_lock_irqsave(&pool_lock, flags);
	obj_pool_used -= percpu_pool->obj_free;
	debug_objects_freed += percpu_pool->obj_free;
	raw_spin_unlock_irqrestore(&pool_lock, flags);

	percpu_pool->obj_free = 0;

	return 0;
}
#endif

/*
 * We ran out of memory. That means we probably have tons of objects
 * allocated.
 */
static void debug_objects_oom(void)
{
	struct debug_bucket *db = obj_hash;
	struct hlist_node *tmp;
	HLIST_HEAD(freelist);
	struct debug_obj *obj;
	unsigned long flags;
	int i;

	pr_warn("Out of memory. ODEBUG disabled\n");

	for (i = 0; i < ODEBUG_HASH_SIZE; i++, db++) {
		raw_spin_lock_irqsave(&db->lock, flags);
		hlist_move_list(&db->list, &freelist);
		raw_spin_unlock_irqrestore(&db->lock, flags);

		/* Now free them */
		hlist_for_each_entry_safe(obj, tmp, &freelist, node) {
			hlist_del(&obj->node);
			free_object(obj);
		}
	}
}

/*
 * We use the pfn of the address for the hash. That way we can check
 * for freed objects simply by checking the affected bucket.
 */
static struct debug_bucket *get_bucket(unsigned long addr)
{
	unsigned long hash;

	hash = hash_long((addr >> ODEBUG_CHUNK_SHIFT), ODEBUG_HASH_BITS);
	return &obj_hash[hash];
}
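
/*
 * Worked example (illustrative, assuming PAGE_SHIFT == 12): all
 * addresses within one 4k chunk share the same addr >> ODEBUG_CHUNK_SHIFT
 * value, so hash_long() maps them to the same one of the 2^14 buckets.
 * That is what lets __debug_check_no_obj_freed() scan a freed memory
 * range chunk by chunk, touching only the affected bucket chains.
 */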

static void debug_print_object(struct debug_obj *obj, char *msg)
{
	const struct debug_obj_descr *descr = obj->descr;
	static int limit;

	/*
	 * Don't report if lookup_object_or_alloc() by the current thread
	 * failed because lookup_object_or_alloc()/debug_objects_oom() by a
	 * concurrent thread turned off debug_objects_enabled and cleared
	 * the hash buckets.
	 */
	if (!debug_objects_enabled)
		return;

	if (limit < 5 && descr != descr_test) {
		void *hint = descr->debug_hint ?
			descr->debug_hint(obj->object) : NULL;
		limit++;
		WARN(1, KERN_ERR "ODEBUG: %s %s (active state %u) "
				 "object: %p object type: %s hint: %pS\n",
			msg, obj_states[obj->state], obj->astate,
			obj->object, descr->name, hint);
	}
	debug_objects_warnings++;
}

/*
 * Try to repair the damage, so we have a better chance to get useful
 * debug output.
 */
static bool
debug_object_fixup(bool (*fixup)(void *addr, enum debug_obj_state state),
		   void * addr, enum debug_obj_state state)
{
	if (fixup && fixup(addr, state)) {
		debug_objects_fixups++;
		return true;
	}
	return false;
}

static void debug_object_is_on_stack(void *addr, int onstack)
{
	int is_on_stack;
	static int limit;

	if (limit > 4)
		return;

	is_on_stack = object_is_on_stack(addr);
	if (is_on_stack == onstack)
		return;

	limit++;
	if (is_on_stack)
		pr_warn("object %p is on stack %p, but NOT annotated.\n", addr,
			 task_stack_page(current));
	else
		pr_warn("object %p is NOT on stack %p, but annotated.\n", addr,
			 task_stack_page(current));

	WARN_ON(1);
}

static struct debug_obj *lookup_object_or_alloc(void *addr, struct debug_bucket *b,
						const struct debug_obj_descr *descr,
						bool onstack, bool alloc_ifstatic)
{
	struct debug_obj *obj = lookup_object(addr, b);
	enum debug_obj_state state = ODEBUG_STATE_NONE;

	if (likely(obj))
		return obj;

	/*
	 * debug_object_init() unconditionally allocates untracked
	 * objects. It does not matter whether it is a static object or
	 * not.
	 *
	 * debug_object_assert_init() and debug_object_activate() allow
	 * allocation only if the descriptor callback confirms that the
	 * object is static and considered initialized. For non-static
	 * objects the allocation needs to be done from the fixup callback.
	 */
	if (unlikely(alloc_ifstatic)) {
		if (!descr->is_static_object || !descr->is_static_object(addr))
			return ERR_PTR(-ENOENT);
		/* Statically allocated objects are considered initialized */
		state = ODEBUG_STATE_INIT;
	}

	obj = alloc_object(addr, b, descr);
	if (likely(obj)) {
		obj->state = state;
		debug_object_is_on_stack(addr, onstack);
		return obj;
	}

	/* Out of memory. Do the cleanup outside of the locked region */
	debug_objects_enabled = 0;
	return NULL;
}

static void debug_objects_fill_pool(void)
{
	/*
	 * On RT enabled kernels the pool refill must happen in preemptible
	 * context -- for !RT kernels we rely on the fact that spinlock_t and
	 * raw_spinlock_t are basically the same type and this lock-type
	 * inversion works just fine.
	 */
	if (!IS_ENABLED(CONFIG_PREEMPT_RT) || preemptible()) {
		/*
		 * Annotate away the spinlock_t inside raw_spinlock_t warning
		 * by temporarily raising the wait-type to WAIT_SLEEP, matching
		 * the preemptible() condition above.
		 */
		static DEFINE_WAIT_OVERRIDE_MAP(fill_pool_map, LD_WAIT_SLEEP);
		lock_map_acquire_try(&fill_pool_map);
		fill_pool();
		lock_map_release(&fill_pool_map);
	}
}

static void
__debug_object_init(void *addr, const struct debug_obj_descr *descr, int onstack)
{
	struct debug_obj *obj, o;
	struct debug_bucket *db;
	unsigned long flags;

	debug_objects_fill_pool();

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object_or_alloc(addr, db, descr, onstack, false);
	if (unlikely(!obj)) {
		raw_spin_unlock_irqrestore(&db->lock, flags);
		debug_objects_oom();
		return;
	}

	switch (obj->state) {
	case ODEBUG_STATE_NONE:
	case ODEBUG_STATE_INIT:
	case ODEBUG_STATE_INACTIVE:
		obj->state = ODEBUG_STATE_INIT;
		raw_spin_unlock_irqrestore(&db->lock, flags);
		return;
	default:
		break;
	}

	o = *obj;
	raw_spin_unlock_irqrestore(&db->lock, flags);
	debug_print_object(&o, "init");

	if (o.state == ODEBUG_STATE_ACTIVE)
		debug_object_fixup(descr->fixup_init, addr, o.state);
}

/**
 * debug_object_init - debug checks when an object is initialized
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_init(void *addr, const struct debug_obj_descr *descr)
{
	if (!debug_objects_enabled)
		return;

	__debug_object_init(addr, descr, 0);
}
EXPORT_SYMBOL_GPL(debug_object_init);
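
/*
 * Usage sketch (hypothetical "foo" subsystem, not part of this file):
 * a user provides a debug_obj_descr and hooks debug_object_init()
 * into its object setup path:
 *
 *	static const struct debug_obj_descr foo_debug_descr = {
 *		.name = "foo",
 *	};
 *
 *	void foo_init(struct foo *f)
 *	{
 *		debug_object_init(f, &foo_debug_descr);
 *		...
 *	}
 */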

/**
 * debug_object_init_on_stack - debug checks when an object on stack is
 *				initialized
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_init_on_stack(void *addr, const struct debug_obj_descr *descr)
{
	if (!debug_objects_enabled)
		return;

	__debug_object_init(addr, descr, 1);
}
EXPORT_SYMBOL_GPL(debug_object_init_on_stack);
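
/*
 * Sketch for on-stack objects (hypothetical caller): the tracking
 * entry must be removed again, e.g. via debug_object_free(), before
 * the stack frame vanishes:
 *
 *	void foo_on_stack_user(void)
 *	{
 *		struct foo f;
 *
 *		debug_object_init_on_stack(&f, &foo_debug_descr);
 *		...
 *		debug_object_free(&f, &foo_debug_descr);
 *	}
 */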

/**
 * debug_object_activate - debug checks when an object is activated
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 * Returns 0 for success, -EINVAL for check failed.
 */
int debug_object_activate(void *addr, const struct debug_obj_descr *descr)
{
	struct debug_obj o = { .object = addr, .state = ODEBUG_STATE_NOTAVAILABLE, .descr = descr };
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	if (!debug_objects_enabled)
		return 0;

	debug_objects_fill_pool();

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object_or_alloc(addr, db, descr, false, true);
	if (unlikely(!obj)) {
		raw_spin_unlock_irqrestore(&db->lock, flags);
		debug_objects_oom();
		return 0;
	} else if (likely(!IS_ERR(obj))) {
		switch (obj->state) {
		case ODEBUG_STATE_ACTIVE:
		case ODEBUG_STATE_DESTROYED:
			o = *obj;
			break;
		case ODEBUG_STATE_INIT:
		case ODEBUG_STATE_INACTIVE:
			obj->state = ODEBUG_STATE_ACTIVE;
			fallthrough;
		default:
			raw_spin_unlock_irqrestore(&db->lock, flags);
			return 0;
		}
	}

	raw_spin_unlock_irqrestore(&db->lock, flags);
	debug_print_object(&o, "activate");

	switch (o.state) {
	case ODEBUG_STATE_ACTIVE:
	case ODEBUG_STATE_NOTAVAILABLE:
		if (debug_object_fixup(descr->fixup_activate, addr, o.state))
			return 0;
		fallthrough;
	default:
		return -EINVAL;
	}
}
EXPORT_SYMBOL_GPL(debug_object_activate);
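
/*
 * Sketch (hypothetical caller): callers that must not operate on an
 * object which failed the checks can use the return value:
 *
 *	if (debug_object_activate(f, &foo_debug_descr))
 *		return;		// -EINVAL, object not activated
 */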

/**
 * debug_object_deactivate - debug checks when an object is deactivated
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_deactivate(void *addr, const struct debug_obj_descr *descr)
{
	struct debug_obj o = { .object = addr, .state = ODEBUG_STATE_NOTAVAILABLE, .descr = descr };
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (obj) {
		switch (obj->state) {
		case ODEBUG_STATE_DESTROYED:
			break;
		case ODEBUG_STATE_INIT:
		case ODEBUG_STATE_INACTIVE:
		case ODEBUG_STATE_ACTIVE:
			if (obj->astate)
				break;
			obj->state = ODEBUG_STATE_INACTIVE;
			fallthrough;
		default:
			raw_spin_unlock_irqrestore(&db->lock, flags);
			return;
		}
		o = *obj;
	}

	raw_spin_unlock_irqrestore(&db->lock, flags);
	debug_print_object(&o, "deactivate");
}
EXPORT_SYMBOL_GPL(debug_object_deactivate);

/**
 * debug_object_destroy - debug checks when an object is destroyed
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_destroy(void *addr, const struct debug_obj_descr *descr)
{
	struct debug_obj *obj, o;
	struct debug_bucket *db;
	unsigned long flags;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (!obj) {
		raw_spin_unlock_irqrestore(&db->lock, flags);
		return;
	}

	switch (obj->state) {
	case ODEBUG_STATE_ACTIVE:
	case ODEBUG_STATE_DESTROYED:
		break;
	case ODEBUG_STATE_NONE:
	case ODEBUG_STATE_INIT:
	case ODEBUG_STATE_INACTIVE:
		obj->state = ODEBUG_STATE_DESTROYED;
		fallthrough;
	default:
		raw_spin_unlock_irqrestore(&db->lock, flags);
		return;
	}

	o = *obj;
	raw_spin_unlock_irqrestore(&db->lock, flags);
	debug_print_object(&o, "destroy");

	if (o.state == ODEBUG_STATE_ACTIVE)
		debug_object_fixup(descr->fixup_destroy, addr, o.state);
}
EXPORT_SYMBOL_GPL(debug_object_destroy);

/**
 * debug_object_free - debug checks when an object is freed
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_free(void *addr, const struct debug_obj_descr *descr)
{
	struct debug_obj *obj, o;
	struct debug_bucket *db;
	unsigned long flags;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (!obj) {
		raw_spin_unlock_irqrestore(&db->lock, flags);
		return;
	}

	switch (obj->state) {
	case ODEBUG_STATE_ACTIVE:
		break;
	default:
		hlist_del(&obj->node);
		raw_spin_unlock_irqrestore(&db->lock, flags);
		free_object(obj);
		return;
	}

	o = *obj;
	raw_spin_unlock_irqrestore(&db->lock, flags);
	debug_print_object(&o, "free");

	debug_object_fixup(descr->fixup_free, addr, o.state);
}
EXPORT_SYMBOL_GPL(debug_object_free);

/**
 * debug_object_assert_init - debug checks when object should be init-ed
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_assert_init(void *addr, const struct debug_obj_descr *descr)
{
	struct debug_obj o = { .object = addr, .state = ODEBUG_STATE_NOTAVAILABLE, .descr = descr };
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	if (!debug_objects_enabled)
		return;

	debug_objects_fill_pool();

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);
	obj = lookup_object_or_alloc(addr, db, descr, false, true);
	raw_spin_unlock_irqrestore(&db->lock, flags);
	if (likely(!IS_ERR_OR_NULL(obj)))
		return;

	/* If NULL the allocation has hit OOM */
	if (!obj) {
		debug_objects_oom();
		return;
	}

	/* Object is neither tracked nor static. It's not initialized. */
	debug_print_object(&o, "assert_init");
	debug_object_fixup(descr->fixup_assert_init, addr, ODEBUG_STATE_NOTAVAILABLE);
}
EXPORT_SYMBOL_GPL(debug_object_assert_init);
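
/*
 * Sketch (hypothetical caller): paths which may be handed an object
 * that was never explicitly initialized, e.g. a statically allocated
 * one, can assert trackability before acting on it:
 *
 *	void foo_cancel(struct foo *f)
 *	{
 *		debug_object_assert_init(f, &foo_debug_descr);
 *		...
 *	}
 */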

/**
 * debug_object_active_state - debug checks object usage state machine
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 * @expect:	expected state
 * @next:	state to move to if expected state is found
 */
void
debug_object_active_state(void *addr, const struct debug_obj_descr *descr,
			  unsigned int expect, unsigned int next)
{
	struct debug_obj o = { .object = addr, .state = ODEBUG_STATE_NOTAVAILABLE, .descr = descr };
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (obj) {
		switch (obj->state) {
		case ODEBUG_STATE_ACTIVE:
			if (obj->astate != expect)
				break;
			obj->astate = next;
			raw_spin_unlock_irqrestore(&db->lock, flags);
			return;
		default:
			break;
		}
		o = *obj;
	}

	raw_spin_unlock_irqrestore(&db->lock, flags);
	debug_print_object(&o, "active_state");
}
EXPORT_SYMBOL_GPL(debug_object_active_state);
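
/*
 * Sketch (hypothetical states): users can drive a private sub-state
 * machine on top of ODEBUG_STATE_ACTIVE via obj->astate, e.g. to
 * catch double enqueues:
 *
 *	debug_object_active_state(f, &foo_debug_descr,
 *				  FOO_ASTATE_READY, FOO_ASTATE_QUEUED);
 */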

#ifdef CONFIG_DEBUG_OBJECTS_FREE
static void __debug_check_no_obj_freed(const void *address, unsigned long size)
{
	unsigned long flags, oaddr, saddr, eaddr, paddr, chunks;
	int cnt, objs_checked = 0;
	struct debug_obj *obj, o;
	struct debug_bucket *db;
	struct hlist_node *tmp;

	saddr = (unsigned long) address;
	eaddr = saddr + size;
	paddr = saddr & ODEBUG_CHUNK_MASK;
	chunks = ((eaddr - paddr) + (ODEBUG_CHUNK_SIZE - 1));
	chunks >>= ODEBUG_CHUNK_SHIFT;

	for (;chunks > 0; chunks--, paddr += ODEBUG_CHUNK_SIZE) {
		db = get_bucket(paddr);

repeat:
		cnt = 0;
		raw_spin_lock_irqsave(&db->lock, flags);
		hlist_for_each_entry_safe(obj, tmp, &db->list, node) {
			cnt++;
			oaddr = (unsigned long) obj->object;
			if (oaddr < saddr || oaddr >= eaddr)
				continue;

			switch (obj->state) {
			case ODEBUG_STATE_ACTIVE:
				o = *obj;
				raw_spin_unlock_irqrestore(&db->lock, flags);
				debug_print_object(&o, "free");
				debug_object_fixup(o.descr->fixup_free, (void *)oaddr, o.state);
				goto repeat;
			default:
				hlist_del(&obj->node);
				__free_object(obj);
				break;
			}
		}
		raw_spin_unlock_irqrestore(&db->lock, flags);

		if (cnt > debug_objects_maxchain)
			debug_objects_maxchain = cnt;

		objs_checked += cnt;
	}

	if (objs_checked > debug_objects_maxchecked)
		debug_objects_maxchecked = objs_checked;

	/* Schedule work to actually kmem_cache_free() objects */
	if (!READ_ONCE(obj_freeing) && READ_ONCE(obj_nr_tofree)) {
		WRITE_ONCE(obj_freeing, true);
		schedule_delayed_work(&debug_obj_work, ODEBUG_FREE_WORK_DELAY);
	}
}

void debug_check_no_obj_freed(const void *address, unsigned long size)
{
	if (debug_objects_enabled)
		__debug_check_no_obj_freed(address, size);
}
#endif
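
/*
 * The memory allocators are expected to call this hook on their free
 * paths when CONFIG_DEBUG_OBJECTS_FREE is enabled, so freeing memory
 * that still contains an active tracked object is caught and handed
 * to the descriptor's fixup_free() callback above.
 */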

#ifdef CONFIG_DEBUG_FS

static int debug_stats_show(struct seq_file *m, void *v)
{
	int cpu, obj_percpu_free = 0;

	for_each_possible_cpu(cpu)
		obj_percpu_free += per_cpu(percpu_obj_pool.obj_free, cpu);

	seq_printf(m, "max_chain     :%d\n", debug_objects_maxchain);
	seq_printf(m, "max_checked   :%d\n", debug_objects_maxchecked);
	seq_printf(m, "warnings      :%d\n", debug_objects_warnings);
	seq_printf(m, "fixups        :%d\n", debug_objects_fixups);
	seq_printf(m, "pool_free     :%d\n", READ_ONCE(obj_pool_free) + obj_percpu_free);
	seq_printf(m, "pool_pcp_free :%d\n", obj_percpu_free);
	seq_printf(m, "pool_min_free :%d\n", obj_pool_min_free);
	seq_printf(m, "pool_used     :%d\n", obj_pool_used - obj_percpu_free);
	seq_printf(m, "pool_max_used :%d\n", obj_pool_max_used);
	seq_printf(m, "on_free_list  :%d\n", READ_ONCE(obj_nr_tofree));
	seq_printf(m, "objs_allocated:%d\n", debug_objects_allocated);
	seq_printf(m, "objs_freed    :%d\n", debug_objects_freed);
	return 0;
}
DEFINE_SHOW_ATTRIBUTE(debug_stats);

static int __init debug_objects_init_debugfs(void)
{
	struct dentry *dbgdir;

	if (!debug_objects_enabled)
		return 0;

	dbgdir = debugfs_create_dir("debug_objects", NULL);

	debugfs_create_file("stats", 0444, dbgdir, NULL, &debug_stats_fops);

	return 0;
}
__initcall(debug_objects_init_debugfs);
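
/*
 * With debugfs mounted in the usual place, reading the stats file
 * produces output like (values hypothetical):
 *
 *	# cat /sys/kernel/debug/debug_objects/stats
 *	max_chain     :2
 *	max_checked   :64
 *	warnings      :0
 *	fixups        :0
 *	pool_free     :1024
 *	...
 */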

#else
static inline void debug_objects_init_debugfs(void) { }
#endif

#ifdef CONFIG_DEBUG_OBJECTS_SELFTEST

/* Random data structure for the self test */
struct self_test {
	unsigned long	dummy1[6];
	int		static_init;
	unsigned long	dummy2[3];
};

static __initconst const struct debug_obj_descr descr_type_test;

static bool __init is_static_object(void *addr)
{
	struct self_test *obj = addr;

	return obj->static_init;
}

/*
 * fixup_init is called when:
 * - an active object is initialized
 */
static bool __init fixup_init(void *addr, enum debug_obj_state state)
{
	struct self_test *obj = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		debug_object_deactivate(obj, &descr_type_test);
		debug_object_init(obj, &descr_type_test);
		return true;
	default:
		return false;
	}
}

/*
 * fixup_activate is called when:
 * - an active object is activated
 * - an unknown non-static object is activated
 */
static bool __init fixup_activate(void *addr, enum debug_obj_state state)
{
	struct self_test *obj = addr;

	switch (state) {
	case ODEBUG_STATE_NOTAVAILABLE:
		return true;
	case ODEBUG_STATE_ACTIVE:
		debug_object_deactivate(obj, &descr_type_test);
		debug_object_activate(obj, &descr_type_test);
		return true;

	default:
		return false;
	}
}

/*
 * fixup_destroy is called when:
 * - an active object is destroyed
 */
static bool __init fixup_destroy(void *addr, enum debug_obj_state state)
{
	struct self_test *obj = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		debug_object_deactivate(obj, &descr_type_test);
		debug_object_destroy(obj, &descr_type_test);
		return true;
	default:
		return false;
	}
}

/*
 * fixup_free is called when:
 * - an active object is freed
 */
static bool __init fixup_free(void *addr, enum debug_obj_state state)
{
	struct self_test *obj = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		debug_object_deactivate(obj, &descr_type_test);
		debug_object_free(obj, &descr_type_test);
		return true;
	default:
		return false;
	}
}

static int __init
check_results(void *addr, enum debug_obj_state state, int fixups, int warnings)
{
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;
	int res = -EINVAL;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (!obj && state != ODEBUG_STATE_NONE) {
		WARN(1, KERN_ERR "ODEBUG: selftest object not found\n");
		goto out;
	}
	if (obj && obj->state != state) {
		WARN(1, KERN_ERR "ODEBUG: selftest wrong state: %d != %d\n",
		       obj->state, state);
		goto out;
	}
	if (fixups != debug_objects_fixups) {
		WARN(1, KERN_ERR "ODEBUG: selftest fixups failed %d != %d\n",
		       fixups, debug_objects_fixups);
		goto out;
	}
	if (warnings != debug_objects_warnings) {
		WARN(1, KERN_ERR "ODEBUG: selftest warnings failed %d != %d\n",
		       warnings, debug_objects_warnings);
		goto out;
	}
	res = 0;
out:
	raw_spin_unlock_irqrestore(&db->lock, flags);
	if (res)
		debug_objects_enabled = 0;
	return res;
}

static __initconst const struct debug_obj_descr descr_type_test = {
	.name			= "selftest",
	.is_static_object	= is_static_object,
	.fixup_init		= fixup_init,
	.fixup_activate		= fixup_activate,
	.fixup_destroy		= fixup_destroy,
	.fixup_free		= fixup_free,
};

static __initdata struct self_test obj = { .static_init = 0 };

static void __init debug_objects_selftest(void)
{
	int fixups, oldfixups, warnings, oldwarnings;
	unsigned long flags;

	local_irq_save(flags);

	fixups = oldfixups = debug_objects_fixups;
	warnings = oldwarnings = debug_objects_warnings;
	descr_test = &descr_type_test;

	debug_object_init(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_INIT, fixups, warnings))
		goto out;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_ACTIVE, fixups, warnings))
		goto out;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_ACTIVE, ++fixups, ++warnings))
		goto out;
	debug_object_deactivate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_INACTIVE, fixups, warnings))
		goto out;
	debug_object_destroy(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, warnings))
		goto out;
	debug_object_init(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings))
		goto out;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings))
		goto out;
	debug_object_deactivate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings))
		goto out;
	debug_object_free(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_NONE, fixups, warnings))
		goto out;

	obj.static_init = 1;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_ACTIVE, fixups, warnings))
		goto out;
	debug_object_init(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_INIT, ++fixups, ++warnings))
		goto out;
	debug_object_free(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_NONE, fixups, warnings))
		goto out;

#ifdef CONFIG_DEBUG_OBJECTS_FREE
	debug_object_init(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_INIT, fixups, warnings))
		goto out;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_ACTIVE, fixups, warnings))
		goto out;
	__debug_check_no_obj_freed(&obj, sizeof(obj));
	if (check_results(&obj, ODEBUG_STATE_NONE, ++fixups, ++warnings))
		goto out;
#endif
	pr_info("selftest passed\n");

out:
	debug_objects_fixups = oldfixups;
	debug_objects_warnings = oldwarnings;
	descr_test = NULL;

	local_irq_restore(flags);
}
#else
static inline void debug_objects_selftest(void) { }
#endif

/*
 * Called during early boot to initialize the hash buckets and link
 * the static object pool objects into the pool list. After this call
 * the object tracker is fully operational.
 */
void __init debug_objects_early_init(void)
{
	int i;

	for (i = 0; i < ODEBUG_HASH_SIZE; i++)
		raw_spin_lock_init(&obj_hash[i].lock);

	for (i = 0; i < ODEBUG_POOL_SIZE; i++)
		hlist_add_head(&obj_static_pool[i].node, &obj_pool);
}

/*
 * Convert the statically allocated objects to dynamic ones:
 */
static int __init debug_objects_replace_static_objects(void)
{
	struct debug_bucket *db = obj_hash;
	struct hlist_node *tmp;
	struct debug_obj *obj, *new;
	HLIST_HEAD(objects);
	int i, cnt = 0;

	for (i = 0; i < ODEBUG_POOL_SIZE; i++) {
		obj = kmem_cache_zalloc(obj_cache, GFP_KERNEL);
		if (!obj)
			goto free;
		hlist_add_head(&obj->node, &objects);
	}

	debug_objects_allocated += i;

	/*
	 * debug_objects_mem_init() is now called early, while only one CPU
	 * is up and interrupts have been disabled, so it is safe to replace
	 * the active object references.
	 */

	/* Remove the statically allocated objects from the pool */
	hlist_for_each_entry_safe(obj, tmp, &obj_pool, node)
		hlist_del(&obj->node);
	/* Move the allocated objects to the pool */
	hlist_move_list(&objects, &obj_pool);

	/* Replace the active object references */
	for (i = 0; i < ODEBUG_HASH_SIZE; i++, db++) {
		hlist_move_list(&db->list, &objects);

		hlist_for_each_entry(obj, &objects, node) {
			new = hlist_entry(obj_pool.first, typeof(*obj), node);
			hlist_del(&new->node);
			/* copy object data */
			*new = *obj;
			hlist_add_head(&new->node, &db->list);
			cnt++;
		}
	}

	pr_debug("%d of %d active objects replaced\n",
		 cnt, obj_pool_used);
	return 0;
free:
	hlist_for_each_entry_safe(obj, tmp, &objects, node) {
		hlist_del(&obj->node);
		kmem_cache_free(obj_cache, obj);
	}
	return -ENOMEM;
}

/*
 * Called after the kmem_caches are functional to set up a dedicated
 * cache pool, which has the SLAB_DEBUG_OBJECTS flag set. This flag
 * prevents the debug code from being called on kmem_cache_free() for
 * the debug tracker objects, avoiding recursive calls.
 */
void __init debug_objects_mem_init(void)
{
	int cpu, extras;

	if (!debug_objects_enabled)
		return;

	/*
	 * Initialize the percpu object pools.
	 *
	 * Initialization is not strictly necessary, but is done for
	 * completeness.
	 */
	for_each_possible_cpu(cpu)
		INIT_HLIST_HEAD(&per_cpu(percpu_obj_pool.free_objs, cpu));

	obj_cache = kmem_cache_create("debug_objects_cache",
				      sizeof (struct debug_obj), 0,
				      SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE,
				      NULL);

	if (!obj_cache || debug_objects_replace_static_objects()) {
		debug_objects_enabled = 0;
		kmem_cache_destroy(obj_cache);
		pr_warn("out of memory.\n");
		return;
	} else
		debug_objects_selftest();

#ifdef CONFIG_HOTPLUG_CPU
	cpuhp_setup_state_nocalls(CPUHP_DEBUG_OBJ_DEAD, "object:offline", NULL,
					object_cpu_offline);
#endif

	/*
	 * Increase the thresholds for allocating and freeing objects
	 * according to the number of possible CPUs available in the system.
	 */
	extras = num_possible_cpus() * ODEBUG_BATCH_SIZE;
	debug_objects_pool_size += extras;
	debug_objects_pool_min_level += extras;
}
