// SPDX-License-Identifier: GPL-2.0-only
/*
 * idr-test.c: Test the IDR API
 * Copyright (c) 2016 Matthew Wilcox <willy@infradead.org>
 */
#include <linux/bitmap.h>
#include <linux/idr.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/errno.h>

#include "test.h"

#define DUMMY_PTR	((void *)0x10)

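/*
 * Callback for idr_for_each(): check that the stored item still matches the
 * ID it was allocated under, then free it.
 */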
int item_idr_free(int id, void *p, void *data)
{
	struct item *item = p;
	assert(item->index == id);
	free(p);

	return 0;
}

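/*
 * Remove an item by ID and free it, verifying that the IDR hands back the
 * item that was stored under that ID.
 */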
void item_idr_remove(struct idr *idr, int id)
{
	struct item *item = idr_find(idr, id);
	assert(item->index == id);
	idr_remove(idr, id);
	free(item);
}

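/*
 * Check idr_alloc_cyclic(): the cursor advances with each allocation and,
 * on reaching the top of the range (0x4000 here), wraps back to the start
 * of the range rather than to zero.
 */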
void idr_alloc_test(void)
{
	unsigned long i;
	DEFINE_IDR(idr);

	assert(idr_alloc_cyclic(&idr, DUMMY_PTR, 0, 0x4000, GFP_KERNEL) == 0);
	assert(idr_alloc_cyclic(&idr, DUMMY_PTR, 0x3ffd, 0x4000, GFP_KERNEL) == 0x3ffd);
	idr_remove(&idr, 0x3ffd);
	idr_remove(&idr, 0);

	for (i = 0x3ffe; i < 0x4003; i++) {
		int id;
		struct item *item;

		if (i < 0x4000)
			item = item_create(i, 0);
		else
			item = item_create(i - 0x3fff, 0);

		id = idr_alloc_cyclic(&idr, item, 1, 0x4000, GFP_KERNEL);
		assert(id == item->index);
	}

	idr_for_each(&idr, item_idr_free, &idr);
	idr_destroy(&idr);
}

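/*
 * A minimal idr_replace() check: store an unusual pointer value, replace it
 * with another, and make sure nothing falls over when the IDR is destroyed.
 */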
void idr_replace_test(void)
{
	DEFINE_IDR(idr);

	idr_alloc(&idr, (void *)-1, 10, 11, GFP_KERNEL);
	idr_replace(&idr, &idr, 10);

	idr_destroy(&idr);
}

/*
 * Unlike the radix tree, you can put a NULL pointer -- with care -- into
 * the IDR.  Some interfaces, like idr_find() do not distinguish between
 * "present, value is NULL" and "not present", but that's exactly what some
 * users want.
 */
void idr_null_test(void)
{
	int i;
	DEFINE_IDR(idr);

	assert(idr_is_empty(&idr));

	assert(idr_alloc(&idr, NULL, 0, 0, GFP_KERNEL) == 0);
	assert(!idr_is_empty(&idr));
	idr_remove(&idr, 0);
	assert(idr_is_empty(&idr));

	assert(idr_alloc(&idr, NULL, 0, 0, GFP_KERNEL) == 0);
	assert(!idr_is_empty(&idr));
	idr_destroy(&idr);
	assert(idr_is_empty(&idr));

	for (i = 0; i < 10; i++) {
		assert(idr_alloc(&idr, NULL, 0, 0, GFP_KERNEL) == i);
	}

	assert(idr_replace(&idr, DUMMY_PTR, 3) == NULL);
	assert(idr_replace(&idr, DUMMY_PTR, 4) == NULL);
	assert(idr_replace(&idr, NULL, 4) == DUMMY_PTR);
	assert(idr_replace(&idr, DUMMY_PTR, 11) == ERR_PTR(-ENOENT));
	idr_remove(&idr, 5);
	assert(idr_alloc(&idr, NULL, 0, 0, GFP_KERNEL) == 5);
	idr_remove(&idr, 5);

	for (i = 0; i < 9; i++) {
		idr_remove(&idr, i);
		assert(!idr_is_empty(&idr));
	}
	idr_remove(&idr, 8);
	assert(!idr_is_empty(&idr));
	idr_remove(&idr, 9);
	assert(idr_is_empty(&idr));

	assert(idr_alloc(&idr, NULL, 0, 0, GFP_KERNEL) == 0);
	assert(idr_replace(&idr, DUMMY_PTR, 3) == ERR_PTR(-ENOENT));
	assert(idr_replace(&idr, DUMMY_PTR, 0) == NULL);
	assert(idr_replace(&idr, NULL, 0) == DUMMY_PTR);

	idr_destroy(&idr);
	assert(idr_is_empty(&idr));

	for (i = 1; i < 10; i++) {
		assert(idr_alloc(&idr, NULL, 1, 0, GFP_KERNEL) == i);
	}

	idr_destroy(&idr);
	assert(idr_is_empty(&idr));
}

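/*
 * With idr_preload() held, GFP_NOWAIT allocations should still succeed
 * because they can draw on the preallocated per-cpu nodes.
 */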
void idr_nowait_test(void)
{
	unsigned int i;
	DEFINE_IDR(idr);

	idr_preload(GFP_KERNEL);

	for (i = 0; i < 3; i++) {
		struct item *item = item_create(i, 0);
		assert(idr_alloc(&idr, item, i, i + 1, GFP_NOWAIT) == i);
	}

	idr_preload_end();

	idr_for_each(&idr, item_idr_free, &idr);
	idr_destroy(&idr);
}

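/*
 * Allocate a sparse set of IDs and check that idr_get_next() visits them in
 * ascending order, regardless of the base the IDR was initialised with.
 */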
void idr_get_next_test(int base)
{
	unsigned long i;
	int nextid;
	DEFINE_IDR(idr);
	idr_init_base(&idr, base);

	int indices[] = {4, 7, 9, 15, 65, 128, 1000, 99999, 0};

	for (i = 0; indices[i]; i++) {
		struct item *item = item_create(indices[i], 0);
		assert(idr_alloc(&idr, item, indices[i], indices[i+1],
				 GFP_KERNEL) == indices[i]);
	}

	for (i = 0, nextid = 0; indices[i]; i++) {
		idr_get_next(&idr, &nextid);
		assert(nextid == indices[i]);
		nextid++;
	}

	idr_for_each(&idr, item_idr_free, &idr);
	idr_destroy(&idr);
}

int idr_u32_cb(int id, void *ptr, void *data)
{
	BUG_ON(id < 0);
	BUG_ON(ptr != DUMMY_PTR);
	return 0;
}

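/*
 * Exercise idr_alloc_u32() with a specific handle: the ID must land exactly
 * on @handle, allocating the same handle again must fail with -ENOSPC, and
 * the legacy int-based iterators must not return IDs above INT_MAX (they
 * warn instead, hence the "Ignore these warnings" markers).
 */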
void idr_u32_test1(struct idr *idr, u32 handle)
{
	static bool warned = false;
	u32 id = handle;
	int sid = 0;
	void *ptr;

	BUG_ON(idr_alloc_u32(idr, DUMMY_PTR, &id, id, GFP_KERNEL));
	BUG_ON(id != handle);
	BUG_ON(idr_alloc_u32(idr, DUMMY_PTR, &id, id, GFP_KERNEL) != -ENOSPC);
	BUG_ON(id != handle);
	if (!warned && id > INT_MAX)
		printk("vvv Ignore these warnings\n");
	ptr = idr_get_next(idr, &sid);
	if (id > INT_MAX) {
		BUG_ON(ptr != NULL);
		BUG_ON(sid != 0);
	} else {
		BUG_ON(ptr != DUMMY_PTR);
		BUG_ON(sid != id);
	}
	idr_for_each(idr, idr_u32_cb, NULL);
	if (!warned && id > INT_MAX) {
		printk("^^^ Warnings over\n");
		warned = true;
	}
	BUG_ON(idr_remove(idr, id) != DUMMY_PTR);
	BUG_ON(!idr_is_empty(idr));
}

void idr_u32_test(int base)
{
	DEFINE_IDR(idr);
	idr_init_base(&idr, base);
	idr_u32_test1(&idr, 10);
	idr_u32_test1(&idr, 0x7fffffff);
	idr_u32_test1(&idr, 0x80000000);
	idr_u32_test1(&idr, 0x80000001);
	idr_u32_test1(&idr, 0xffe00000);
	idr_u32_test1(&idr, 0xffffffff);
}

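/*
 * Store pointers at every byte offset within a string, so their low bits take
 * all values, and make sure the IDR never mistakes them for internal entries
 * while iterating, replacing and removing.
 */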
static void idr_align_test(struct idr *idr)
{
	char name[] = "Motorola 68000";
	int i, id;
	void *entry;

	for (i = 0; i < 9; i++) {
		BUG_ON(idr_alloc(idr, &name[i], 0, 0, GFP_KERNEL) != i);
		idr_for_each_entry(idr, entry, id);
	}
	idr_destroy(idr);

	for (i = 1; i < 10; i++) {
		BUG_ON(idr_alloc(idr, &name[i], 0, 0, GFP_KERNEL) != i - 1);
		idr_for_each_entry(idr, entry, id);
	}
	idr_destroy(idr);

	for (i = 2; i < 11; i++) {
		BUG_ON(idr_alloc(idr, &name[i], 0, 0, GFP_KERNEL) != i - 2);
		idr_for_each_entry(idr, entry, id);
	}
	idr_destroy(idr);

	for (i = 3; i < 12; i++) {
		BUG_ON(idr_alloc(idr, &name[i], 0, 0, GFP_KERNEL) != i - 3);
		idr_for_each_entry(idr, entry, id);
	}
	idr_destroy(idr);

	for (i = 0; i < 8; i++) {
		BUG_ON(idr_alloc(idr, &name[i], 0, 0, GFP_KERNEL) != 0);
		BUG_ON(idr_alloc(idr, &name[i + 1], 0, 0, GFP_KERNEL) != 1);
		idr_for_each_entry(idr, entry, id);
		idr_remove(idr, 1);
		idr_for_each_entry(idr, entry, id);
		idr_remove(idr, 0);
		BUG_ON(!idr_is_empty(idr));
	}

	for (i = 0; i < 8; i++) {
		BUG_ON(idr_alloc(idr, NULL, 0, 0, GFP_KERNEL) != 0);
		idr_for_each_entry(idr, entry, id);
		idr_replace(idr, &name[i], 0);
		idr_for_each_entry(idr, entry, id);
		BUG_ON(idr_find(idr, 0) != &name[i]);
		idr_remove(idr, 0);
	}

	for (i = 0; i < 8; i++) {
		BUG_ON(idr_alloc(idr, &name[i], 0, 0, GFP_KERNEL) != 0);
		BUG_ON(idr_alloc(idr, NULL, 0, 0, GFP_KERNEL) != 1);
		idr_remove(idr, 1);
		idr_for_each_entry(idr, entry, id);
		idr_replace(idr, &name[i + 1], 0);
		idr_for_each_entry(idr, entry, id);
		idr_remove(idr, 0);
	}
}

DEFINE_IDR(find_idr);

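/*
 * Worker thread for idr_find_test_1(): repeatedly allocate and remove a
 * fixed ID for ten seconds so the main thread can race lookups against it.
 */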
static void *idr_throbber(void *arg)
{
	time_t start = time(NULL);
	int id = *(int *)arg;

	rcu_register_thread();
	do {
		idr_alloc(&find_idr, xa_mk_value(id), id, id + 1, GFP_KERNEL);
		idr_remove(&find_idr, id);
	} while (time(NULL) < start + 10);
	rcu_unregister_thread();

	return NULL;
}

/*
 * There are always either 1 or 2 objects in the IDR.  If we find nothing,
 * or we find something at an ID we didn't expect, that's a bug.
 */
void idr_find_test_1(int anchor_id, int throbber_id)
{
	pthread_t throbber;
	time_t start = time(NULL);

	BUG_ON(idr_alloc(&find_idr, xa_mk_value(anchor_id), anchor_id,
				anchor_id + 1, GFP_KERNEL) != anchor_id);

	pthread_create(&throbber, NULL, idr_throbber, &throbber_id);

	rcu_read_lock();
	do {
		int id = 0;
		void *entry = idr_get_next(&find_idr, &id);
		rcu_read_unlock();
		if ((id != anchor_id && id != throbber_id) ||
		    entry != xa_mk_value(id)) {
			printf("%s(%d, %d): %p at %d\n", __func__, anchor_id,
				throbber_id, entry, id);
			abort();
		}
		rcu_read_lock();
	} while (time(NULL) < start + 11);
	rcu_read_unlock();

	pthread_join(throbber, NULL);

	idr_remove(&find_idr, anchor_id);
	BUG_ON(!idr_is_empty(&find_idr));
}

void idr_find_test(void)
{
	idr_find_test_1(100000, 0);
	idr_find_test_1(0, 100000);
}

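/*
 * Top-level IDR test: bulk allocation and removal, behaviour around the
 * RADIX_TREE_MAP_SIZE and INT_MAX boundaries, cyclic allocation wrap-around,
 * and then the individual tests above.
 */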
void idr_checks(void)
{
	unsigned long i;
	DEFINE_IDR(idr);

	for (i = 0; i < 10000; i++) {
		struct item *item = item_create(i, 0);
		assert(idr_alloc(&idr, item, 0, 20000, GFP_KERNEL) == i);
	}

	assert(idr_alloc(&idr, DUMMY_PTR, 5, 30, GFP_KERNEL) < 0);

	for (i = 0; i < 5000; i++)
		item_idr_remove(&idr, i);

	idr_remove(&idr, 3);

	idr_for_each(&idr, item_idr_free, &idr);
	idr_destroy(&idr);

	assert(idr_is_empty(&idr));

	idr_remove(&idr, 3);
	idr_remove(&idr, 0);

	assert(idr_alloc(&idr, DUMMY_PTR, 0, 0, GFP_KERNEL) == 0);
	idr_remove(&idr, 1);
	for (i = 1; i < RADIX_TREE_MAP_SIZE; i++)
		assert(idr_alloc(&idr, DUMMY_PTR, 0, 0, GFP_KERNEL) == i);
	idr_remove(&idr, 1 << 30);
	idr_destroy(&idr);

	for (i = INT_MAX - 3UL; i < INT_MAX + 1UL; i++) {
		struct item *item = item_create(i, 0);
		assert(idr_alloc(&idr, item, i, i + 10, GFP_KERNEL) == i);
	}
	assert(idr_alloc(&idr, DUMMY_PTR, i - 2, i, GFP_KERNEL) == -ENOSPC);
	assert(idr_alloc(&idr, DUMMY_PTR, i - 2, i + 10, GFP_KERNEL) == -ENOSPC);

	idr_for_each(&idr, item_idr_free, &idr);
	idr_destroy(&idr);
	idr_destroy(&idr);

	assert(idr_is_empty(&idr));

	idr_set_cursor(&idr, INT_MAX - 3UL);
	for (i = INT_MAX - 3UL; i < INT_MAX + 3UL; i++) {
		struct item *item;
		unsigned int id;
		if (i <= INT_MAX)
			item = item_create(i, 0);
		else
			item = item_create(i - INT_MAX - 1, 0);

		id = idr_alloc_cyclic(&idr, item, 0, 0, GFP_KERNEL);
		assert(id == item->index);
	}

	idr_for_each(&idr, item_idr_free, &idr);
	idr_destroy(&idr);
	assert(idr_is_empty(&idr));

	for (i = 1; i < 10000; i++) {
		struct item *item = item_create(i, 0);
		assert(idr_alloc(&idr, item, 1, 20000, GFP_KERNEL) == i);
	}

	idr_for_each(&idr, item_idr_free, &idr);
	idr_destroy(&idr);

	idr_replace_test();
	idr_alloc_test();
	idr_null_test();
	idr_nowait_test();
	idr_get_next_test(0);
	idr_get_next_test(1);
	idr_get_next_test(4);
	idr_u32_test(4);
	idr_u32_test(1);
	idr_u32_test(0);
	idr_align_test(&idr);
	idr_find_test();
}

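/*
 * Stub out the module macros and pull in the in-kernel IDA test suite so it
 * can be built and run as part of this userspace harness.
 */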
#define module_init(x)
#define module_exit(x)
#define MODULE_AUTHOR(x)
#define MODULE_LICENSE(x)
#define dump_stack()    assert(0)
void ida_dump(struct ida *);

#include "../../../lib/test_ida.c"

/*
 * Check that we get the correct error when we run out of memory doing
 * allocations.  In userspace, GFP_NOWAIT will always fail an allocation.
 * The first test is for not having a bitmap available, and the second test
 * is for not being able to allocate a level of the radix tree.
 */
void ida_check_nomem(void)
{
	DEFINE_IDA(ida);
	int id;

	id = ida_alloc_min(&ida, 256, GFP_NOWAIT);
	IDA_BUG_ON(&ida, id != -ENOMEM);
	id = ida_alloc_min(&ida, 1UL << 30, GFP_NOWAIT);
	IDA_BUG_ON(&ida, id != -ENOMEM);
	IDA_BUG_ON(&ida, !ida_is_empty(&ida));
}

/*
 * Check handling of conversions between exceptional entries and full bitmaps.
 */
void ida_check_conv_user(void)
{
	DEFINE_IDA(ida);
	unsigned long i;

	for (i = 0; i < 1000000; i++) {
		int id = ida_alloc(&ida, GFP_NOWAIT);
		if (id == -ENOMEM) {
			IDA_BUG_ON(&ida, ((i % IDA_BITMAP_BITS) !=
					  BITS_PER_XA_VALUE) &&
					 ((i % IDA_BITMAP_BITS) != 0));
			id = ida_alloc(&ida, GFP_KERNEL);
		} else {
			IDA_BUG_ON(&ida, (i % IDA_BITMAP_BITS) ==
					BITS_PER_XA_VALUE);
		}
		IDA_BUG_ON(&ida, id != i);
	}
	ida_destroy(&ida);
}

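/*
 * Allocate and free random IDs in the range 0-2047 for ten seconds, using a
 * local bitmap as the reference model for which IDs should currently be live.
 */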
void ida_check_random(void)
{
	DEFINE_IDA(ida);
	DECLARE_BITMAP(bitmap, 2048);
	unsigned int i;
	time_t s = time(NULL);

 repeat:
	memset(bitmap, 0, sizeof(bitmap));
	for (i = 0; i < 100000; i++) {
		int bit = rand() & 2047;

		if (test_bit(bit, bitmap)) {
			__clear_bit(bit, bitmap);
			ida_free(&ida, bit);
		} else {
			__set_bit(bit, bitmap);
			IDA_BUG_ON(&ida, ida_alloc_min(&ida, bit, GFP_KERNEL)
					!= bit);
		}
	}
	ida_destroy(&ida);
	if (time(NULL) < s + 10)
		goto repeat;
}

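/*
 * Check the ida_simple_get()/ida_simple_remove() wrappers: sequential IDs,
 * failure when the requested range is exhausted, and removal back to empty.
 */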
void ida_simple_get_remove_test(void)
{
	DEFINE_IDA(ida);
	unsigned long i;

	for (i = 0; i < 10000; i++) {
		assert(ida_simple_get(&ida, 0, 20000, GFP_KERNEL) == i);
	}
	assert(ida_simple_get(&ida, 5, 30, GFP_KERNEL) < 0);

	for (i = 0; i < 10000; i++) {
		ida_simple_remove(&ida, i);
	}
	assert(ida_is_empty(&ida));

	ida_destroy(&ida);
}

void user_ida_checks(void)
{
	radix_tree_cpu_dead(1);

	ida_check_nomem();
	ida_check_conv_user();
	ida_check_random();
	ida_simple_get_remove_test();

	radix_tree_cpu_dead(1);
}

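/* Thread entry point: run the random allocation test under RCU registration. */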
static void *ida_random_fn(void *arg)
{
	rcu_register_thread();
	ida_check_random();
	rcu_unregister_thread();
	return NULL;
}

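/*
 * Thread entry point: hammer a single ID (128) with allocate/free cycles for
 * two seconds so the caller can check nothing is leaked when many threads
 * contend on the same IDA.
 */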
static void *ida_leak_fn(void *arg)
{
	struct ida *ida = arg;
	time_t s = time(NULL);
	int i, ret;

	rcu_register_thread();

	do {
		for (i = 0; i < 1000; i++) {
			ret = ida_alloc_range(ida, 128, 128, GFP_KERNEL);
			if (ret >= 0)
				ida_free(ida, 128);
		}
	} while (time(NULL) < s + 2);

	rcu_unregister_thread();
	return NULL;
}

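/*
 * Run the random test in 20 parallel threads, then 20 threads all racing on
 * the same IDA; the IDA must be empty once every thread has finished.
 */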
void ida_thread_tests(void)
{
	DEFINE_IDA(ida);
	pthread_t threads[20];
	int i;

	for (i = 0; i < ARRAY_SIZE(threads); i++)
		if (pthread_create(&threads[i], NULL, ida_random_fn, NULL)) {
			perror("creating ida thread");
			exit(1);
		}

	while (i--)
		pthread_join(threads[i], NULL);

	for (i = 0; i < ARRAY_SIZE(threads); i++)
		if (pthread_create(&threads[i], NULL, ida_leak_fn, &ida)) {
			perror("creating ida thread");
			exit(1);
		}

	while (i--)
		pthread_join(threads[i], NULL);
	assert(ida_is_empty(&ida));
}

void ida_tests(void)
{
	user_ida_checks();
	ida_checks();
	ida_exit();
	ida_thread_tests();
}

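/*
 * Marked __weak so that a test harness which links this file can supply its
 * own main().  Run standalone, report any allocations still outstanding.
 */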
int __weak main(void)
{
	rcu_register_thread();
	radix_tree_init();
	idr_checks();
	ida_tests();
	radix_tree_cpu_dead(1);
	rcu_barrier();
	if (nr_allocated)
		printf("nr_allocated = %d\n", nr_allocated);
	rcu_unregister_thread();
	return 0;
}