// SPDX-License-Identifier: GPL-2.0+
/*
 * test_xarray.c: Test the XArray API
 * Copyright (c) 2017-2018 Microsoft Corporation
 * Copyright (c) 2019-2020 Oracle
 * Author: Matthew Wilcox <willy@infradead.org>
 */

#include <linux/xarray.h>
#include <linux/module.h>

static unsigned int tests_run;
static unsigned int tests_passed;

static const unsigned int order_limit =
		IS_ENABLED(CONFIG_XARRAY_MULTI) ? BITS_PER_LONG : 1;

#ifndef XA_DEBUG
# ifdef __KERNEL__
void xa_dump(const struct xarray *xa) { }
# endif
#undef XA_BUG_ON
#define XA_BUG_ON(xa, x) do {					\
	tests_run++;						\
	if (x) {						\
		printk("BUG at %s:%d\n", __func__, __LINE__);	\
		xa_dump(xa);					\
		dump_stack();					\
	} else {						\
		tests_passed++;					\
	}							\
} while (0)
#endif

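/*
 * Helpers used throughout the test suite.  xa_mk_index() turns an index
 * into the value entry stored at that index, so, for example,
 * xa_store_index(xa, 5, GFP_KERNEL) stores xa_mk_value(5) at index 5,
 * and xa_erase_index() checks that the same value comes back out.
 */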
static void *xa_mk_index(unsigned long index)
{
	return xa_mk_value(index & LONG_MAX);
}

static void *xa_store_index(struct xarray *xa, unsigned long index, gfp_t gfp)
{
	return xa_store(xa, index, xa_mk_index(index), gfp);
}

static void xa_insert_index(struct xarray *xa, unsigned long index)
{
	XA_BUG_ON(xa, xa_insert(xa, index, xa_mk_index(index),
				GFP_KERNEL) != 0);
}

static void xa_alloc_index(struct xarray *xa, unsigned long index, gfp_t gfp)
{
	u32 id;

	XA_BUG_ON(xa, xa_alloc(xa, &id, xa_mk_index(index), xa_limit_32b,
				gfp) != 0);
	XA_BUG_ON(xa, id != index);
}

static void xa_erase_index(struct xarray *xa, unsigned long index)
{
	XA_BUG_ON(xa, xa_erase(xa, index) != xa_mk_index(index));
	XA_BUG_ON(xa, xa_load(xa, index) != NULL);
}

/*
 * If anyone needs this, please move it to xarray.c.  We have no current
 * users outside the test suite because all current multislot users want
 * to use the advanced API.
 */
static void *xa_store_order(struct xarray *xa, unsigned long index,
		unsigned order, void *entry, gfp_t gfp)
{
	XA_STATE_ORDER(xas, xa, index, order);
	void *curr;

	do {
		xas_lock(&xas);
		curr = xas_store(&xas, entry);
		xas_unlock(&xas);
	} while (xas_nomem(&xas, gfp));

	return curr;
}

static noinline void check_xa_err(struct xarray *xa)
{
	XA_BUG_ON(xa, xa_err(xa_store_index(xa, 0, GFP_NOWAIT)) != 0);
	XA_BUG_ON(xa, xa_err(xa_erase(xa, 0)) != 0);
#ifndef __KERNEL__
	/* The kernel does not fail GFP_NOWAIT allocations */
	XA_BUG_ON(xa, xa_err(xa_store_index(xa, 1, GFP_NOWAIT)) != -ENOMEM);
	XA_BUG_ON(xa, xa_err(xa_store_index(xa, 1, GFP_NOWAIT)) != -ENOMEM);
#endif
	XA_BUG_ON(xa, xa_err(xa_store_index(xa, 1, GFP_KERNEL)) != 0);
	XA_BUG_ON(xa, xa_err(xa_store(xa, 1, xa_mk_value(0), GFP_KERNEL)) != 0);
	XA_BUG_ON(xa, xa_err(xa_erase(xa, 1)) != 0);
// kills the test-suite :-(
//	XA_BUG_ON(xa, xa_err(xa_store(xa, 0, xa_mk_internal(0), 0)) != -EINVAL);
}

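/*
 * Erasing an entry while an iterator holds a reference to its node leaves
 * a retry entry behind; check that xas_retry(), xas_reload() and the
 * iteration primitives all cope with (and skip over) such entries.
 */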
static noinline void check_xas_retry(struct xarray *xa)
{
	XA_STATE(xas, xa, 0);
	void *entry;

	xa_store_index(xa, 0, GFP_KERNEL);
	xa_store_index(xa, 1, GFP_KERNEL);

	rcu_read_lock();
	XA_BUG_ON(xa, xas_find(&xas, ULONG_MAX) != xa_mk_value(0));
	xa_erase_index(xa, 1);
	XA_BUG_ON(xa, !xa_is_retry(xas_reload(&xas)));
	XA_BUG_ON(xa, xas_retry(&xas, NULL));
	XA_BUG_ON(xa, xas_retry(&xas, xa_mk_value(0)));
	xas_reset(&xas);
	XA_BUG_ON(xa, xas.xa_node != XAS_RESTART);
	XA_BUG_ON(xa, xas_next_entry(&xas, ULONG_MAX) != xa_mk_value(0));
	XA_BUG_ON(xa, xas.xa_node != NULL);
	rcu_read_unlock();

	XA_BUG_ON(xa, xa_store_index(xa, 1, GFP_KERNEL) != NULL);

	rcu_read_lock();
	XA_BUG_ON(xa, !xa_is_internal(xas_reload(&xas)));
	xas.xa_node = XAS_RESTART;
	XA_BUG_ON(xa, xas_next_entry(&xas, ULONG_MAX) != xa_mk_value(0));
	rcu_read_unlock();

	/* Make sure we can iterate through retry entries */
	xas_lock(&xas);
	xas_set(&xas, 0);
	xas_store(&xas, XA_RETRY_ENTRY);
	xas_set(&xas, 1);
	xas_store(&xas, XA_RETRY_ENTRY);

	xas_set(&xas, 0);
	xas_for_each(&xas, entry, ULONG_MAX) {
		xas_store(&xas, xa_mk_index(xas.xa_index));
	}
	xas_unlock(&xas);

	xa_erase_index(xa, 0);
	xa_erase_index(xa, 1);
}

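/*
 * Store value entries at indices 0..1023 one at a time, checking after
 * each store that xa_load() sees exactly the entries stored so far, then
 * erase them in the same order and check the array drains to empty.
 */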
static noinline void check_xa_load(struct xarray *xa)
{
	unsigned long i, j;

	for (i = 0; i < 1024; i++) {
		for (j = 0; j < 1024; j++) {
			void *entry = xa_load(xa, j);
			if (j < i)
				XA_BUG_ON(xa, xa_to_value(entry) != j);
			else
				XA_BUG_ON(xa, entry);
		}
		XA_BUG_ON(xa, xa_store_index(xa, i, GFP_KERNEL) != NULL);
	}

	for (i = 0; i < 1024; i++) {
		for (j = 0; j < 1024; j++) {
			void *entry = xa_load(xa, j);
			if (j >= i)
				XA_BUG_ON(xa, xa_to_value(entry) != j);
			else
				XA_BUG_ON(xa, entry);
		}
		xa_erase_index(xa, i);
	}
	XA_BUG_ON(xa, !xa_empty(xa));
}

static noinline void check_xa_mark_1(struct xarray *xa, unsigned long index)
{
	unsigned int order;
	unsigned int max_order = IS_ENABLED(CONFIG_XARRAY_MULTI) ? 8 : 1;

	/* NULL elements have no marks set */
	XA_BUG_ON(xa, xa_get_mark(xa, index, XA_MARK_0));
	xa_set_mark(xa, index, XA_MARK_0);
	XA_BUG_ON(xa, xa_get_mark(xa, index, XA_MARK_0));

	/* Storing a pointer will not make a mark appear */
	XA_BUG_ON(xa, xa_store_index(xa, index, GFP_KERNEL) != NULL);
	XA_BUG_ON(xa, xa_get_mark(xa, index, XA_MARK_0));
	xa_set_mark(xa, index, XA_MARK_0);
	XA_BUG_ON(xa, !xa_get_mark(xa, index, XA_MARK_0));

	/* Setting one mark will not set another mark */
	XA_BUG_ON(xa, xa_get_mark(xa, index + 1, XA_MARK_0));
	XA_BUG_ON(xa, xa_get_mark(xa, index, XA_MARK_1));

	/* Storing NULL clears marks, and they can't be set again */
	xa_erase_index(xa, index);
	XA_BUG_ON(xa, !xa_empty(xa));
	XA_BUG_ON(xa, xa_get_mark(xa, index, XA_MARK_0));
	xa_set_mark(xa, index, XA_MARK_0);
	XA_BUG_ON(xa, xa_get_mark(xa, index, XA_MARK_0));

	/*
	 * Storing a multi-index entry over entries with marks gives the
	 * entire entry the union of the marks
	 */
	BUG_ON((index % 4) != 0);
	for (order = 2; order < max_order; order++) {
		unsigned long base = round_down(index, 1UL << order);
		unsigned long next = base + (1UL << order);
		unsigned long i;

		XA_BUG_ON(xa, xa_store_index(xa, index + 1, GFP_KERNEL));
		xa_set_mark(xa, index + 1, XA_MARK_0);
		XA_BUG_ON(xa, xa_store_index(xa, index + 2, GFP_KERNEL));
		xa_set_mark(xa, index + 2, XA_MARK_2);
		XA_BUG_ON(xa, xa_store_index(xa, next, GFP_KERNEL));
		xa_store_order(xa, index, order, xa_mk_index(index),
				GFP_KERNEL);
		for (i = base; i < next; i++) {
			XA_STATE(xas, xa, i);
			unsigned int seen = 0;
			void *entry;

			XA_BUG_ON(xa, !xa_get_mark(xa, i, XA_MARK_0));
			XA_BUG_ON(xa, xa_get_mark(xa, i, XA_MARK_1));
			XA_BUG_ON(xa, !xa_get_mark(xa, i, XA_MARK_2));

			/* We should see two elements in the array */
			rcu_read_lock();
			xas_for_each(&xas, entry, ULONG_MAX)
				seen++;
			rcu_read_unlock();
			XA_BUG_ON(xa, seen != 2);

			/* One of which is marked */
			xas_set(&xas, 0);
			seen = 0;
			rcu_read_lock();
			xas_for_each_marked(&xas, entry, ULONG_MAX, XA_MARK_0)
				seen++;
			rcu_read_unlock();
			XA_BUG_ON(xa, seen != 1);
		}
		XA_BUG_ON(xa, xa_get_mark(xa, next, XA_MARK_0));
		XA_BUG_ON(xa, xa_get_mark(xa, next, XA_MARK_1));
		XA_BUG_ON(xa, xa_get_mark(xa, next, XA_MARK_2));
		xa_erase_index(xa, index);
		xa_erase_index(xa, next);
		XA_BUG_ON(xa, !xa_empty(xa));
	}
	XA_BUG_ON(xa, !xa_empty(xa));
}

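/*
 * Exercise xas_init_marks() and check that iterating with
 * xas_for_each_marked() finds all 1000 marked entries stored at indices
 * 3500-4499, a range that spans more than one node.
 */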
static noinline void check_xa_mark_2(struct xarray *xa)
{
	XA_STATE(xas, xa, 0);
	unsigned long index;
	unsigned int count = 0;
	void *entry;

	xa_store_index(xa, 0, GFP_KERNEL);
	xa_set_mark(xa, 0, XA_MARK_0);
	xas_lock(&xas);
	xas_load(&xas);
	xas_init_marks(&xas);
	xas_unlock(&xas);
	XA_BUG_ON(xa, !xa_get_mark(xa, 0, XA_MARK_0) == 0);

	for (index = 3500; index < 4500; index++) {
		xa_store_index(xa, index, GFP_KERNEL);
		xa_set_mark(xa, index, XA_MARK_0);
	}

	xas_reset(&xas);
	rcu_read_lock();
	xas_for_each_marked(&xas, entry, ULONG_MAX, XA_MARK_0)
		count++;
	rcu_read_unlock();
	XA_BUG_ON(xa, count != 1000);

	xas_lock(&xas);
	xas_for_each(&xas, entry, ULONG_MAX) {
		xas_init_marks(&xas);
		XA_BUG_ON(xa, !xa_get_mark(xa, xas.xa_index, XA_MARK_0));
		XA_BUG_ON(xa, !xas_get_mark(&xas, XA_MARK_0));
	}
	xas_unlock(&xas);

	xa_destroy(xa);
}

static noinline void check_xa_mark_3(struct xarray *xa)
{
#ifdef CONFIG_XARRAY_MULTI
	XA_STATE(xas, xa, 0x41);
	void *entry;
	int count = 0;

	xa_store_order(xa, 0x40, 2, xa_mk_index(0x40), GFP_KERNEL);
	xa_set_mark(xa, 0x41, XA_MARK_0);

	rcu_read_lock();
	xas_for_each_marked(&xas, entry, ULONG_MAX, XA_MARK_0) {
		count++;
		XA_BUG_ON(xa, entry != xa_mk_index(0x40));
	}
	XA_BUG_ON(xa, count != 1);
	rcu_read_unlock();
	xa_destroy(xa);
#endif
}

static noinline void check_xa_mark(struct xarray *xa)
{
	unsigned long index;

	for (index = 0; index < 16384; index += 4)
		check_xa_mark_1(xa, index);

	check_xa_mark_2(xa);
	check_xa_mark_3(xa);
}

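/*
 * Erasing the entry that forced the tree to grow should shrink it back
 * down; check both the two-entry case and, for multi-index arrays, that
 * storing at ULONG_MAX above an order-N entry and erasing it again
 * leaves the original head node in place.
 */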
static noinline void check_xa_shrink(struct xarray *xa)
{
	XA_STATE(xas, xa, 1);
	struct xa_node *node;
	unsigned int order;
	unsigned int max_order = IS_ENABLED(CONFIG_XARRAY_MULTI) ? 15 : 1;

	XA_BUG_ON(xa, !xa_empty(xa));
	XA_BUG_ON(xa, xa_store_index(xa, 0, GFP_KERNEL) != NULL);
	XA_BUG_ON(xa, xa_store_index(xa, 1, GFP_KERNEL) != NULL);

	/*
	 * Check that erasing the entry at 1 shrinks the tree and properly
	 * marks the node as being deleted.
	 */
	xas_lock(&xas);
	XA_BUG_ON(xa, xas_load(&xas) != xa_mk_value(1));
	node = xas.xa_node;
	XA_BUG_ON(xa, xa_entry_locked(xa, node, 0) != xa_mk_value(0));
	XA_BUG_ON(xa, xas_store(&xas, NULL) != xa_mk_value(1));
	XA_BUG_ON(xa, xa_load(xa, 1) != NULL);
	XA_BUG_ON(xa, xas.xa_node != XAS_BOUNDS);
	XA_BUG_ON(xa, xa_entry_locked(xa, node, 0) != XA_RETRY_ENTRY);
	XA_BUG_ON(xa, xas_load(&xas) != NULL);
	xas_unlock(&xas);
	XA_BUG_ON(xa, xa_load(xa, 0) != xa_mk_value(0));
	xa_erase_index(xa, 0);
	XA_BUG_ON(xa, !xa_empty(xa));

	for (order = 0; order < max_order; order++) {
		unsigned long max = (1UL << order) - 1;
		xa_store_order(xa, 0, order, xa_mk_value(0), GFP_KERNEL);
		XA_BUG_ON(xa, xa_load(xa, max) != xa_mk_value(0));
		XA_BUG_ON(xa, xa_load(xa, max + 1) != NULL);
		rcu_read_lock();
		node = xa_head(xa);
		rcu_read_unlock();
		XA_BUG_ON(xa, xa_store_index(xa, ULONG_MAX, GFP_KERNEL) !=
				NULL);
		rcu_read_lock();
		XA_BUG_ON(xa, xa_head(xa) == node);
		rcu_read_unlock();
		XA_BUG_ON(xa, xa_load(xa, max + 1) != NULL);
		xa_erase_index(xa, ULONG_MAX);
		XA_BUG_ON(xa, xa->xa_head != node);
		xa_erase_index(xa, 0);
	}
}

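/*
 * xa_insert() must not disturb neighbouring indices: after each insert,
 * the slots either side of the new entry (including around each power of
 * two and at ~0UL) must still read back as NULL.
 */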
static noinline void check_insert(struct xarray *xa)
{
	unsigned long i;

	for (i = 0; i < 1024; i++) {
		xa_insert_index(xa, i);
		XA_BUG_ON(xa, xa_load(xa, i - 1) != NULL);
		XA_BUG_ON(xa, xa_load(xa, i + 1) != NULL);
		xa_erase_index(xa, i);
	}

	for (i = 10; i < BITS_PER_LONG; i++) {
		xa_insert_index(xa, 1UL << i);
		XA_BUG_ON(xa, xa_load(xa, (1UL << i) - 1) != NULL);
		XA_BUG_ON(xa, xa_load(xa, (1UL << i) + 1) != NULL);
		xa_erase_index(xa, 1UL << i);

		xa_insert_index(xa, (1UL << i) - 1);
		XA_BUG_ON(xa, xa_load(xa, (1UL << i) - 2) != NULL);
		XA_BUG_ON(xa, xa_load(xa, 1UL << i) != NULL);
		xa_erase_index(xa, (1UL << i) - 1);
	}

	xa_insert_index(xa, ~0UL);
	XA_BUG_ON(xa, xa_load(xa, 0UL) != NULL);
	XA_BUG_ON(xa, xa_load(xa, ~1UL) != NULL);
	xa_erase_index(xa, ~0UL);

	XA_BUG_ON(xa, !xa_empty(xa));
}

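/*
 * xa_cmpxchg() only replaces the entry when the old value matches, and
 * returns the previous entry either way; xa_insert() fails with -EBUSY
 * on an occupied index.
 */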
static noinline void check_cmpxchg(struct xarray *xa)
{
	void *FIVE = xa_mk_value(5);
	void *SIX = xa_mk_value(6);
	void *LOTS = xa_mk_value(12345678);

	XA_BUG_ON(xa, !xa_empty(xa));
	XA_BUG_ON(xa, xa_store_index(xa, 12345678, GFP_KERNEL) != NULL);
	XA_BUG_ON(xa, xa_insert(xa, 12345678, xa, GFP_KERNEL) != -EBUSY);
	XA_BUG_ON(xa, xa_cmpxchg(xa, 12345678, SIX, FIVE, GFP_KERNEL) != LOTS);
	XA_BUG_ON(xa, xa_cmpxchg(xa, 12345678, LOTS, FIVE, GFP_KERNEL) != LOTS);
	XA_BUG_ON(xa, xa_cmpxchg(xa, 12345678, FIVE, LOTS, GFP_KERNEL) != FIVE);
	XA_BUG_ON(xa, xa_cmpxchg(xa, 5, FIVE, NULL, GFP_KERNEL) != NULL);
	XA_BUG_ON(xa, xa_cmpxchg(xa, 5, NULL, FIVE, GFP_KERNEL) != NULL);
	XA_BUG_ON(xa, xa_insert(xa, 5, FIVE, GFP_KERNEL) != -EBUSY);
	XA_BUG_ON(xa, xa_cmpxchg(xa, 5, FIVE, NULL, GFP_KERNEL) != FIVE);
	XA_BUG_ON(xa, xa_insert(xa, 5, FIVE, GFP_KERNEL) == -EBUSY);
	xa_erase_index(xa, 12345678);
	xa_erase_index(xa, 5);
	XA_BUG_ON(xa, !xa_empty(xa));
}

static noinline void check_cmpxchg_order(struct xarray *xa)
{
#ifdef CONFIG_XARRAY_MULTI
	void *FIVE = xa_mk_value(5);
	unsigned int i, order = 3;

	XA_BUG_ON(xa, xa_store_order(xa, 0, order, FIVE, GFP_KERNEL));

	/* Check that the entry at index xa_to_value(FIVE) has its order saved */
	XA_BUG_ON(xa, xa_get_order(xa, xa_to_value(FIVE)) != order);

	/* Check all the tied indexes have the same entry and order */
	for (i = 0; i < (1 << order); i++) {
		XA_BUG_ON(xa, xa_load(xa, i) != FIVE);
		XA_BUG_ON(xa, xa_get_order(xa, i) != order);
	}

	/* Ensure that nothing is stored at index '1 << order' */
	XA_BUG_ON(xa, xa_load(xa, 1 << order) != NULL);

	/*
	 * Additionally, store another order-sized entry at '1 << order' so
	 * its node information and order are kept for the later checks.
	 */
	XA_BUG_ON(xa, xa_store_order(xa, 1 << order, order, FIVE, GFP_KERNEL));
	for (i = (1 << order); i < (1 << order) + (1 << order) - 1; i++) {
		XA_BUG_ON(xa, xa_load(xa, i) != FIVE);
		XA_BUG_ON(xa, xa_get_order(xa, i) != order);
	}

	/* Conditionally replace the FIVE entry at index '0' with NULL */
	XA_BUG_ON(xa, xa_cmpxchg(xa, 0, FIVE, NULL, GFP_KERNEL) != FIVE);

	/* Verify the order is lost at the old FIVE entry's indexes */
	XA_BUG_ON(xa, xa_get_order(xa, xa_to_value(FIVE)) != 0);

	/* Verify the order and entries are lost in all the tied indexes */
	for (i = 0; i < (1 << order); i++) {
		XA_BUG_ON(xa, xa_load(xa, i) != NULL);
		XA_BUG_ON(xa, xa_get_order(xa, i) != 0);
	}

	/* Verify node and order are kept at '1 << order' */
	for (i = (1 << order); i < (1 << order) + (1 << order) - 1; i++) {
		XA_BUG_ON(xa, xa_load(xa, i) != FIVE);
		XA_BUG_ON(xa, xa_get_order(xa, i) != order);
	}

	xa_store_order(xa, 0, BITS_PER_LONG - 1, NULL, GFP_KERNEL);
	XA_BUG_ON(xa, !xa_empty(xa));
#endif
}

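/*
 * A reserved slot keeps the array non-empty but loads as NULL.  Check how
 * xa_cmpxchg(), xa_insert() and iteration see it, and that xa_release()
 * returns the slot to the free state.
 */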
static noinline void check_reserve(struct xarray *xa)
{
	void *entry;
	unsigned long index;
	int count;

	/* An array with a reserved entry is not empty */
	XA_BUG_ON(xa, !xa_empty(xa));
	XA_BUG_ON(xa, xa_reserve(xa, 12345678, GFP_KERNEL) != 0);
	XA_BUG_ON(xa, xa_empty(xa));
	XA_BUG_ON(xa, xa_load(xa, 12345678));
	xa_release(xa, 12345678);
	XA_BUG_ON(xa, !xa_empty(xa));

	/* Releasing a used entry does nothing */
	XA_BUG_ON(xa, xa_reserve(xa, 12345678, GFP_KERNEL) != 0);
	XA_BUG_ON(xa, xa_store_index(xa, 12345678, GFP_NOWAIT) != NULL);
	xa_release(xa, 12345678);
	xa_erase_index(xa, 12345678);
	XA_BUG_ON(xa, !xa_empty(xa));

	/* cmpxchg sees a reserved entry as ZERO */
	XA_BUG_ON(xa, xa_reserve(xa, 12345678, GFP_KERNEL) != 0);
	XA_BUG_ON(xa, xa_cmpxchg(xa, 12345678, XA_ZERO_ENTRY,
				xa_mk_value(12345678), GFP_NOWAIT) != NULL);
	xa_release(xa, 12345678);
	xa_erase_index(xa, 12345678);
	XA_BUG_ON(xa, !xa_empty(xa));

	/* xa_insert treats it as busy */
	XA_BUG_ON(xa, xa_reserve(xa, 12345678, GFP_KERNEL) != 0);
	XA_BUG_ON(xa, xa_insert(xa, 12345678, xa_mk_value(12345678), 0) !=
			-EBUSY);
	XA_BUG_ON(xa, xa_empty(xa));
	XA_BUG_ON(xa, xa_erase(xa, 12345678) != NULL);
	XA_BUG_ON(xa, !xa_empty(xa));

	/* Can iterate through a reserved entry */
	xa_store_index(xa, 5, GFP_KERNEL);
	XA_BUG_ON(xa, xa_reserve(xa, 6, GFP_KERNEL) != 0);
	xa_store_index(xa, 7, GFP_KERNEL);

	count = 0;
	xa_for_each(xa, index, entry) {
		XA_BUG_ON(xa, index != 5 && index != 7);
		count++;
	}
	XA_BUG_ON(xa, count != 2);

	/* If we free a reserved entry, we should be able to allocate it */
	if (xa->xa_flags & XA_FLAGS_ALLOC) {
		u32 id;

		XA_BUG_ON(xa, xa_alloc(xa, &id, xa_mk_value(8),
					XA_LIMIT(5, 10), GFP_KERNEL) != 0);
		XA_BUG_ON(xa, id != 8);

		xa_release(xa, 6);
		XA_BUG_ON(xa, xa_alloc(xa, &id, xa_mk_value(6),
					XA_LIMIT(5, 10), GFP_KERNEL) != 0);
		XA_BUG_ON(xa, id != 6);
	}

	xa_destroy(xa);
}

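/*
 * Repeatedly fill a range using the advanced API (handling xas_nomem()
 * retries), then erase each entry from inside an xas_for_each() loop and
 * check the array ends up empty.
 */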
static noinline void check_xas_erase(struct xarray *xa)
{
	XA_STATE(xas, xa, 0);
	void *entry;
	unsigned long i, j;

	for (i = 0; i < 200; i++) {
		for (j = i; j < 2 * i + 17; j++) {
			xas_set(&xas, j);
			do {
				xas_lock(&xas);
				xas_store(&xas, xa_mk_index(j));
				xas_unlock(&xas);
			} while (xas_nomem(&xas, GFP_KERNEL));
		}

		xas_set(&xas, ULONG_MAX);
		do {
			xas_lock(&xas);
			xas_store(&xas, xa_mk_value(0));
			xas_unlock(&xas);
		} while (xas_nomem(&xas, GFP_KERNEL));

		xas_lock(&xas);
		xas_store(&xas, NULL);

		xas_set(&xas, 0);
		j = i;
		xas_for_each(&xas, entry, ULONG_MAX) {
			XA_BUG_ON(xa, entry != xa_mk_index(j));
			xas_store(&xas, NULL);
			j++;
		}
		xas_unlock(&xas);
		XA_BUG_ON(xa, !xa_empty(xa));
	}
}

#ifdef CONFIG_XARRAY_MULTI
static noinline void check_multi_store_1(struct xarray *xa, unsigned long index,
		unsigned int order)
{
	XA_STATE(xas, xa, index);
	unsigned long min = index & ~((1UL << order) - 1);
	unsigned long max = min + (1UL << order);

	xa_store_order(xa, index, order, xa_mk_index(index), GFP_KERNEL);
	XA_BUG_ON(xa, xa_load(xa, min) != xa_mk_index(index));
	XA_BUG_ON(xa, xa_load(xa, max - 1) != xa_mk_index(index));
	XA_BUG_ON(xa, xa_load(xa, max) != NULL);
	XA_BUG_ON(xa, xa_load(xa, min - 1) != NULL);

	xas_lock(&xas);
	XA_BUG_ON(xa, xas_store(&xas, xa_mk_index(min)) != xa_mk_index(index));
	xas_unlock(&xas);
	XA_BUG_ON(xa, xa_load(xa, min) != xa_mk_index(min));
	XA_BUG_ON(xa, xa_load(xa, max - 1) != xa_mk_index(min));
	XA_BUG_ON(xa, xa_load(xa, max) != NULL);
	XA_BUG_ON(xa, xa_load(xa, min - 1) != NULL);

	xa_erase_index(xa, min);
	XA_BUG_ON(xa, !xa_empty(xa));
}

static noinline void check_multi_store_2(struct xarray *xa, unsigned long index,
		unsigned int order)
{
	XA_STATE(xas, xa, index);
	xa_store_order(xa, index, order, xa_mk_value(0), GFP_KERNEL);

	xas_lock(&xas);
	XA_BUG_ON(xa, xas_store(&xas, xa_mk_value(1)) != xa_mk_value(0));
	XA_BUG_ON(xa, xas.xa_index != index);
	XA_BUG_ON(xa, xas_store(&xas, NULL) != xa_mk_value(1));
	xas_unlock(&xas);
	XA_BUG_ON(xa, !xa_empty(xa));
}

static noinline void check_multi_store_3(struct xarray *xa, unsigned long index,
		unsigned int order)
{
	XA_STATE(xas, xa, 0);
	void *entry;
	int n = 0;

	xa_store_order(xa, index, order, xa_mk_index(index), GFP_KERNEL);

	xas_lock(&xas);
	xas_for_each(&xas, entry, ULONG_MAX) {
		XA_BUG_ON(xa, entry != xa_mk_index(index));
		n++;
	}
	XA_BUG_ON(xa, n != 1);
	xas_set(&xas, index + 1);
	xas_for_each(&xas, entry, ULONG_MAX) {
		XA_BUG_ON(xa, entry != xa_mk_index(index));
		n++;
	}
	XA_BUG_ON(xa, n != 2);
	xas_unlock(&xas);

	xa_destroy(xa);
}
#endif

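/*
 * A multi-index entry occupies every index in its range: loads from any
 * covered index return the same entry, adjacent stores do not disturb it,
 * and storing a new entry (or NULL) over the range replaces all of it at
 * once.  Also check the node's count/nr_values accounting.
 */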
static noinline void check_multi_store(struct xarray *xa)
{
#ifdef CONFIG_XARRAY_MULTI
	unsigned long i, j, k;
	unsigned int max_order = (sizeof(long) == 4) ? 30 : 60;

	/* Loading from any position returns the same value */
	xa_store_order(xa, 0, 1, xa_mk_value(0), GFP_KERNEL);
	XA_BUG_ON(xa, xa_load(xa, 0) != xa_mk_value(0));
	XA_BUG_ON(xa, xa_load(xa, 1) != xa_mk_value(0));
	XA_BUG_ON(xa, xa_load(xa, 2) != NULL);
	rcu_read_lock();
	XA_BUG_ON(xa, xa_to_node(xa_head(xa))->count != 2);
	XA_BUG_ON(xa, xa_to_node(xa_head(xa))->nr_values != 2);
	rcu_read_unlock();

	/* Storing adjacent to the value does not alter the value */
	xa_store(xa, 3, xa, GFP_KERNEL);
	XA_BUG_ON(xa, xa_load(xa, 0) != xa_mk_value(0));
	XA_BUG_ON(xa, xa_load(xa, 1) != xa_mk_value(0));
	XA_BUG_ON(xa, xa_load(xa, 2) != NULL);
	rcu_read_lock();
	XA_BUG_ON(xa, xa_to_node(xa_head(xa))->count != 3);
	XA_BUG_ON(xa, xa_to_node(xa_head(xa))->nr_values != 2);
	rcu_read_unlock();

	/* Overwriting multiple indexes works */
	xa_store_order(xa, 0, 2, xa_mk_value(1), GFP_KERNEL);
	XA_BUG_ON(xa, xa_load(xa, 0) != xa_mk_value(1));
	XA_BUG_ON(xa, xa_load(xa, 1) != xa_mk_value(1));
	XA_BUG_ON(xa, xa_load(xa, 2) != xa_mk_value(1));
	XA_BUG_ON(xa, xa_load(xa, 3) != xa_mk_value(1));
	XA_BUG_ON(xa, xa_load(xa, 4) != NULL);
	rcu_read_lock();
	XA_BUG_ON(xa, xa_to_node(xa_head(xa))->count != 4);
	XA_BUG_ON(xa, xa_to_node(xa_head(xa))->nr_values != 4);
	rcu_read_unlock();

	/* We can erase multiple values with a single store */
	xa_store_order(xa, 0, BITS_PER_LONG - 1, NULL, GFP_KERNEL);
	XA_BUG_ON(xa, !xa_empty(xa));

	/* Even when the first slot is empty but the others aren't */
	xa_store_index(xa, 1, GFP_KERNEL);
	xa_store_index(xa, 2, GFP_KERNEL);
	xa_store_order(xa, 0, 2, NULL, GFP_KERNEL);
	XA_BUG_ON(xa, !xa_empty(xa));

	for (i = 0; i < max_order; i++) {
		for (j = 0; j < max_order; j++) {
			xa_store_order(xa, 0, i, xa_mk_index(i), GFP_KERNEL);
			xa_store_order(xa, 0, j, xa_mk_index(j), GFP_KERNEL);

			for (k = 0; k < max_order; k++) {
				void *entry = xa_load(xa, (1UL << k) - 1);
				if ((i < k) && (j < k))
					XA_BUG_ON(xa, entry != NULL);
				else
					XA_BUG_ON(xa, entry != xa_mk_index(j));
			}

			xa_erase(xa, 0);
			XA_BUG_ON(xa, !xa_empty(xa));
		}
	}

	for (i = 0; i < 20; i++) {
		check_multi_store_1(xa, 200, i);
		check_multi_store_1(xa, 0, i);
		check_multi_store_1(xa, (1UL << i) + 1, i);
	}
	check_multi_store_2(xa, 4095, 9);

	for (i = 1; i < 20; i++) {
		check_multi_store_3(xa, 0, i);
		check_multi_store_3(xa, 1UL << i, i);
	}
#endif
}

#ifdef CONFIG_XARRAY_MULTI
/* mimics page cache __filemap_add_folio() */
static noinline void check_xa_multi_store_adv_add(struct xarray *xa,
						  unsigned long index,
						  unsigned int order,
						  void *p)
{
	XA_STATE(xas, xa, index);
	unsigned int nrpages = 1UL << order;

	/* users are responsible for aligning the index to the order when adding */
	XA_BUG_ON(xa, index & (nrpages - 1));

	xas_set_order(&xas, index, order);

	do {
		xas_lock_irq(&xas);

		xas_store(&xas, p);
		XA_BUG_ON(xa, xas_error(&xas));
		XA_BUG_ON(xa, xa_load(xa, index) != p);

		xas_unlock_irq(&xas);
	} while (xas_nomem(&xas, GFP_KERNEL));

	XA_BUG_ON(xa, xas_error(&xas));
}

/* mimics page_cache_delete() */
static noinline void check_xa_multi_store_adv_del_entry(struct xarray *xa,
							unsigned long index,
							unsigned int order)
{
	XA_STATE(xas, xa, index);

	xas_set_order(&xas, index, order);
	xas_store(&xas, NULL);
	xas_init_marks(&xas);
}

static noinline void check_xa_multi_store_adv_delete(struct xarray *xa,
						     unsigned long index,
						     unsigned int order)
{
	xa_lock_irq(xa);
	check_xa_multi_store_adv_del_entry(xa, index, order);
	xa_unlock_irq(xa);
}

/* mimics page cache filemap_get_entry() */
static noinline void *test_get_entry(struct xarray *xa, unsigned long index)
{
	XA_STATE(xas, xa, index);
	void *p;
	static unsigned int loops = 0;

	rcu_read_lock();
repeat:
	xas_reset(&xas);
	p = xas_load(&xas);
	if (xas_retry(&xas, p))
		goto repeat;
	rcu_read_unlock();

	/*
	 * This is not part of the page cache; this selftest is deliberately
	 * aggressive and tests the XArray API directly rather than trusting
	 * it.  For order 20 (4 GiB block size) we can loop over more than a
	 * million entries, which can cause a soft lockup.  Proper page cache
	 * APIs iterate at the appropriate order and so skip an entry's
	 * shared slots; here we simply reschedule periodically.
	 */
	if (++loops % XA_CHECK_SCHED == 0)
		schedule();

	return p;
}

static unsigned long some_val = 0xdeadbeef;
static unsigned long some_val_2 = 0xdeaddead;

/* mimics the page cache usage */
static noinline void check_xa_multi_store_adv(struct xarray *xa,
					      unsigned long pos,
					      unsigned int order)
{
	unsigned int nrpages = 1UL << order;
	unsigned long index, base, next_index, next_next_index;
	unsigned int i;

	index = pos >> PAGE_SHIFT;
	base = round_down(index, nrpages);
	next_index = round_down(base + nrpages, nrpages);
	next_next_index = round_down(next_index + nrpages, nrpages);

	check_xa_multi_store_adv_add(xa, base, order, &some_val);

	for (i = 0; i < nrpages; i++)
		XA_BUG_ON(xa, test_get_entry(xa, base + i) != &some_val);

	XA_BUG_ON(xa, test_get_entry(xa, next_index) != NULL);

	/* Use order 0 for the next item */
	check_xa_multi_store_adv_add(xa, next_index, 0, &some_val_2);
	XA_BUG_ON(xa, test_get_entry(xa, next_index) != &some_val_2);

	/* Remove the next item */
	check_xa_multi_store_adv_delete(xa, next_index, 0);

	/* Now use order for a new pointer */
	check_xa_multi_store_adv_add(xa, next_index, order, &some_val_2);

	for (i = 0; i < nrpages; i++)
		XA_BUG_ON(xa, test_get_entry(xa, next_index + i) != &some_val_2);

	check_xa_multi_store_adv_delete(xa, next_index, order);
	check_xa_multi_store_adv_delete(xa, base, order);
	XA_BUG_ON(xa, !xa_empty(xa));

	/* starting fresh again */

	/* let's test some holes now */

	/* hole at base and next_next */
	check_xa_multi_store_adv_add(xa, next_index, order, &some_val_2);

	for (i = 0; i < nrpages; i++)
		XA_BUG_ON(xa, test_get_entry(xa, base + i) != NULL);

	for (i = 0; i < nrpages; i++)
		XA_BUG_ON(xa, test_get_entry(xa, next_index + i) != &some_val_2);

	for (i = 0; i < nrpages; i++)
		XA_BUG_ON(xa, test_get_entry(xa, next_next_index + i) != NULL);

	check_xa_multi_store_adv_delete(xa, next_index, order);
	XA_BUG_ON(xa, !xa_empty(xa));

	/* hole at base and next */

	check_xa_multi_store_adv_add(xa, next_next_index, order, &some_val_2);

	for (i = 0; i < nrpages; i++)
		XA_BUG_ON(xa, test_get_entry(xa, base + i) != NULL);

	for (i = 0; i < nrpages; i++)
		XA_BUG_ON(xa, test_get_entry(xa, next_index + i) != NULL);

	for (i = 0; i < nrpages; i++)
		XA_BUG_ON(xa, test_get_entry(xa, next_next_index + i) != &some_val_2);

	check_xa_multi_store_adv_delete(xa, next_next_index, order);
	XA_BUG_ON(xa, !xa_empty(xa));
}
#endif

static noinline void check_multi_store_advanced(struct xarray *xa)
{
#ifdef CONFIG_XARRAY_MULTI
	unsigned int max_order = IS_ENABLED(CONFIG_XARRAY_MULTI) ? 20 : 1;
	unsigned long end = ULONG_MAX/2;
	unsigned long pos, i;

	/*
	 * About 117 million tests below.
	 */
	for (pos = 7; pos < end; pos = (pos * pos) + 564) {
		for (i = 0; i < max_order; i++) {
			check_xa_multi_store_adv(xa, pos, i);
			check_xa_multi_store_adv(xa, pos + 157, i);
		}
	}
#endif
}

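/*
 * For an allocating XArray, xa_alloc() hands out the lowest free ID at or
 * above %base.  Check that erasing, xa_destroy() and storing directly all
 * interact with ID allocation as expected, and that allocation fails with
 * -EBUSY once the requested limit is exhausted.
 */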
static noinline void check_xa_alloc_1(struct xarray *xa, unsigned int base)
{
	int i;
	u32 id;

	XA_BUG_ON(xa, !xa_empty(xa));
	/* An empty array should assign %base to the first alloc */
	xa_alloc_index(xa, base, GFP_KERNEL);

	/* Erasing it should make the array empty again */
	xa_erase_index(xa, base);
	XA_BUG_ON(xa, !xa_empty(xa));

	/* And it should assign %base again */
	xa_alloc_index(xa, base, GFP_KERNEL);

	/* Allocating and then erasing a lot should not lose base */
	for (i = base + 1; i < 2 * XA_CHUNK_SIZE; i++)
		xa_alloc_index(xa, i, GFP_KERNEL);
	for (i = base; i < 2 * XA_CHUNK_SIZE; i++)
		xa_erase_index(xa, i);
	xa_alloc_index(xa, base, GFP_KERNEL);

	/* Destroying the array should do the same as erasing */
	xa_destroy(xa);

	/* And it should assign %base again */
	xa_alloc_index(xa, base, GFP_KERNEL);

	/* The next assigned ID should be base+1 */
	xa_alloc_index(xa, base + 1, GFP_KERNEL);
	xa_erase_index(xa, base + 1);

	/* Storing a value should mark it used */
	xa_store_index(xa, base + 1, GFP_KERNEL);
	xa_alloc_index(xa, base + 2, GFP_KERNEL);

	/* If we then erase base, it should be free */
	xa_erase_index(xa, base);
	xa_alloc_index(xa, base, GFP_KERNEL);

	xa_erase_index(xa, base + 1);
	xa_erase_index(xa, base + 2);

	for (i = 1; i < 5000; i++) {
		xa_alloc_index(xa, base + i, GFP_KERNEL);
	}

	xa_destroy(xa);

	/* Check that we fail properly at the limit of allocation */
	XA_BUG_ON(xa, xa_alloc(xa, &id, xa_mk_index(UINT_MAX - 1),
				XA_LIMIT(UINT_MAX - 1, UINT_MAX),
				GFP_KERNEL) != 0);
	XA_BUG_ON(xa, id != 0xfffffffeU);
	XA_BUG_ON(xa, xa_alloc(xa, &id, xa_mk_index(UINT_MAX),
				XA_LIMIT(UINT_MAX - 1, UINT_MAX),
				GFP_KERNEL) != 0);
	XA_BUG_ON(xa, id != 0xffffffffU);
	id = 3;
	XA_BUG_ON(xa, xa_alloc(xa, &id, xa_mk_index(0),
				XA_LIMIT(UINT_MAX - 1, UINT_MAX),
				GFP_KERNEL) != -EBUSY);
	XA_BUG_ON(xa, id != 3);
	xa_destroy(xa);

	XA_BUG_ON(xa, xa_alloc(xa, &id, xa_mk_index(10), XA_LIMIT(10, 5),
				GFP_KERNEL) != -EBUSY);
	XA_BUG_ON(xa, xa_store_index(xa, 3, GFP_KERNEL) != 0);
	XA_BUG_ON(xa, xa_alloc(xa, &id, xa_mk_index(10), XA_LIMIT(10, 5),
				GFP_KERNEL) != -EBUSY);
	xa_erase_index(xa, 3);
	XA_BUG_ON(xa, !xa_empty(xa));
}

static noinline void check_xa_alloc_2(struct xarray *xa, unsigned int base)
{
	unsigned int i, id;
	unsigned long index;
	void *entry;

	/* Allocate and free a NULL and check xa_empty() behaves */
	XA_BUG_ON(xa, !xa_empty(xa));
	XA_BUG_ON(xa, xa_alloc(xa, &id, NULL, xa_limit_32b, GFP_KERNEL) != 0);
	XA_BUG_ON(xa, id != base);
	XA_BUG_ON(xa, xa_empty(xa));
	XA_BUG_ON(xa, xa_erase(xa, id) != NULL);
	XA_BUG_ON(xa, !xa_empty(xa));

	/* Ditto, but check destroy instead of erase */
	XA_BUG_ON(xa, !xa_empty(xa));
	XA_BUG_ON(xa, xa_alloc(xa, &id, NULL, xa_limit_32b, GFP_KERNEL) != 0);
	XA_BUG_ON(xa, id != base);
	XA_BUG_ON(xa, xa_empty(xa));
	xa_destroy(xa);
	XA_BUG_ON(xa, !xa_empty(xa));

	for (i = base; i < base + 10; i++) {
		XA_BUG_ON(xa, xa_alloc(xa, &id, NULL, xa_limit_32b,
					GFP_KERNEL) != 0);
		XA_BUG_ON(xa, id != i);
	}

	XA_BUG_ON(xa, xa_store(xa, 3, xa_mk_index(3), GFP_KERNEL) != NULL);
	XA_BUG_ON(xa, xa_store(xa, 4, xa_mk_index(4), GFP_KERNEL) != NULL);
	XA_BUG_ON(xa, xa_store(xa, 4, NULL, GFP_KERNEL) != xa_mk_index(4));
	XA_BUG_ON(xa, xa_erase(xa, 5) != NULL);
	XA_BUG_ON(xa, xa_alloc(xa, &id, NULL, xa_limit_32b, GFP_KERNEL) != 0);
	XA_BUG_ON(xa, id != 5);

	xa_for_each(xa, index, entry) {
		xa_erase_index(xa, index);
	}

	for (i = base; i < base + 9; i++) {
		XA_BUG_ON(xa, xa_erase(xa, i) != NULL);
		XA_BUG_ON(xa, xa_empty(xa));
	}
	XA_BUG_ON(xa, xa_erase(xa, 8) != NULL);
	XA_BUG_ON(xa, xa_empty(xa));
	XA_BUG_ON(xa, xa_erase(xa, base + 9) != NULL);
	XA_BUG_ON(xa, !xa_empty(xa));

	xa_destroy(xa);
}

static noinline void check_xa_alloc_3(struct xarray *xa, unsigned int base)
{
	struct xa_limit limit = XA_LIMIT(1, 0x3fff);
	u32 next = 0;
	unsigned int i, id;
	unsigned long index;
	void *entry;

	XA_BUG_ON(xa, xa_alloc_cyclic(xa, &id, xa_mk_index(1), limit,
				&next, GFP_KERNEL) != 0);
	XA_BUG_ON(xa, id != 1);

	next = 0x3ffd;
	XA_BUG_ON(xa, xa_alloc_cyclic(xa, &id, xa_mk_index(0x3ffd), limit,
				&next, GFP_KERNEL) != 0);
	XA_BUG_ON(xa, id != 0x3ffd);
	xa_erase_index(xa, 0x3ffd);
	xa_erase_index(xa, 1);
	XA_BUG_ON(xa, !xa_empty(xa));

	for (i = 0x3ffe; i < 0x4003; i++) {
		if (i < 0x4000)
			entry = xa_mk_index(i);
		else
			entry = xa_mk_index(i - 0x3fff);
		XA_BUG_ON(xa, xa_alloc_cyclic(xa, &id, entry, limit,
					&next, GFP_KERNEL) != (id == 1));
		XA_BUG_ON(xa, xa_mk_index(id) != entry);
	}

	/* Check wrap-around is handled correctly */
	if (base != 0)
		xa_erase_index(xa, base);
	xa_erase_index(xa, base + 1);
	next = UINT_MAX;
	XA_BUG_ON(xa, xa_alloc_cyclic(xa, &id, xa_mk_index(UINT_MAX),
				xa_limit_32b, &next, GFP_KERNEL) != 0);
	XA_BUG_ON(xa, id != UINT_MAX);
	XA_BUG_ON(xa, xa_alloc_cyclic(xa, &id, xa_mk_index(base),
				xa_limit_32b, &next, GFP_KERNEL) != 1);
	XA_BUG_ON(xa, id != base);
	XA_BUG_ON(xa, xa_alloc_cyclic(xa, &id, xa_mk_index(base + 1),
				xa_limit_32b, &next, GFP_KERNEL) != 0);
	XA_BUG_ON(xa, id != base + 1);

	xa_for_each(xa, index, entry)
		xa_erase_index(xa, index);

	XA_BUG_ON(xa, !xa_empty(xa));
}

static DEFINE_XARRAY_ALLOC(xa0);
static DEFINE_XARRAY_ALLOC1(xa1);

static noinline void check_xa_alloc(void)
{
	check_xa_alloc_1(&xa0, 0);
	check_xa_alloc_1(&xa1, 1);
	check_xa_alloc_2(&xa0, 0);
	check_xa_alloc_2(&xa1, 1);
	check_xa_alloc_3(&xa0, 0);
	check_xa_alloc_3(&xa1, 1);
}

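/*
 * Store a multi-index entry over @present pre-existing entries, using
 * xas_for_each_conflict() to count the conflicts first; the count must
 * match and the whole range must afterwards read back as the new entry.
 */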
static noinline void __check_store_iter(struct xarray *xa, unsigned long start,
			unsigned int order, unsigned int present)
{
	XA_STATE_ORDER(xas, xa, start, order);
	void *entry;
	unsigned int count = 0;

retry:
	xas_lock(&xas);
	xas_for_each_conflict(&xas, entry) {
		XA_BUG_ON(xa, !xa_is_value(entry));
		XA_BUG_ON(xa, entry < xa_mk_index(start));
		XA_BUG_ON(xa, entry > xa_mk_index(start + (1UL << order) - 1));
		count++;
	}
	xas_store(&xas, xa_mk_index(start));
	xas_unlock(&xas);
	if (xas_nomem(&xas, GFP_KERNEL)) {
		count = 0;
		goto retry;
	}
	XA_BUG_ON(xa, xas_error(&xas));
	XA_BUG_ON(xa, count != present);
	XA_BUG_ON(xa, xa_load(xa, start) != xa_mk_index(start));
	XA_BUG_ON(xa, xa_load(xa, start + (1UL << order) - 1) !=
			xa_mk_index(start));
	xa_erase_index(xa, start);
}

static noinline void check_store_iter(struct xarray *xa)
{
	unsigned int i, j;
	unsigned int max_order = IS_ENABLED(CONFIG_XARRAY_MULTI) ? 20 : 1;

	for (i = 0; i < max_order; i++) {
		unsigned int min = 1 << i;
		unsigned int max = (2 << i) - 1;
		__check_store_iter(xa, 0, i, 0);
		XA_BUG_ON(xa, !xa_empty(xa));
		__check_store_iter(xa, min, i, 0);
		XA_BUG_ON(xa, !xa_empty(xa));

		xa_store_index(xa, min, GFP_KERNEL);
		__check_store_iter(xa, min, i, 1);
		XA_BUG_ON(xa, !xa_empty(xa));
		xa_store_index(xa, max, GFP_KERNEL);
		__check_store_iter(xa, min, i, 1);
		XA_BUG_ON(xa, !xa_empty(xa));

		for (j = 0; j < min; j++)
			xa_store_index(xa, j, GFP_KERNEL);
		__check_store_iter(xa, 0, i, min);
		XA_BUG_ON(xa, !xa_empty(xa));
		for (j = 0; j < min; j++)
			xa_store_index(xa, min + j, GFP_KERNEL);
		__check_store_iter(xa, min, i, min);
		XA_BUG_ON(xa, !xa_empty(xa));
	}
#ifdef CONFIG_XARRAY_MULTI
	xa_store_index(xa, 63, GFP_KERNEL);
	xa_store_index(xa, 65, GFP_KERNEL);
	__check_store_iter(xa, 64, 2, 1);
	xa_erase_index(xa, 63);
#endif
	XA_BUG_ON(xa, !xa_empty(xa));
}

static noinline void check_multi_find_1(struct xarray *xa, unsigned order)
{
#ifdef CONFIG_XARRAY_MULTI
	unsigned long multi = 3 << order;
	unsigned long next = 4 << order;
	unsigned long index;

	xa_store_order(xa, multi, order, xa_mk_value(multi), GFP_KERNEL);
	XA_BUG_ON(xa, xa_store_index(xa, next, GFP_KERNEL) != NULL);
	XA_BUG_ON(xa, xa_store_index(xa, next + 1, GFP_KERNEL) != NULL);

	index = 0;
	XA_BUG_ON(xa, xa_find(xa, &index, ULONG_MAX, XA_PRESENT) !=
			xa_mk_value(multi));
	XA_BUG_ON(xa, index != multi);
	index = multi + 1;
	XA_BUG_ON(xa, xa_find(xa, &index, ULONG_MAX, XA_PRESENT) !=
			xa_mk_value(multi));
	XA_BUG_ON(xa, (index < multi) || (index >= next));
	XA_BUG_ON(xa, xa_find_after(xa, &index, ULONG_MAX, XA_PRESENT) !=
			xa_mk_value(next));
	XA_BUG_ON(xa, index != next);
	XA_BUG_ON(xa, xa_find_after(xa, &index, next, XA_PRESENT) != NULL);
	XA_BUG_ON(xa, index != next);

	xa_erase_index(xa, multi);
	xa_erase_index(xa, next);
	xa_erase_index(xa, next + 1);
	XA_BUG_ON(xa, !xa_empty(xa));
#endif
}

static noinline void check_multi_find_2(struct xarray *xa)
{
	unsigned int max_order = IS_ENABLED(CONFIG_XARRAY_MULTI) ? 10 : 1;
	unsigned int i, j;
	void *entry;

	for (i = 0; i < max_order; i++) {
		unsigned long index = 1UL << i;
		for (j = 0; j < index; j++) {
			XA_STATE(xas, xa, j + index);
			xa_store_index(xa, index - 1, GFP_KERNEL);
			xa_store_order(xa, index, i, xa_mk_index(index),
					GFP_KERNEL);
			rcu_read_lock();
			xas_for_each(&xas, entry, ULONG_MAX) {
				xa_erase_index(xa, index);
			}
			rcu_read_unlock();
			xa_erase_index(xa, index - 1);
			XA_BUG_ON(xa, !xa_empty(xa));
		}
	}
}

static noinline void check_multi_find_3(struct xarray *xa)
{
	unsigned int order;

	for (order = 5; order < order_limit; order++) {
		unsigned long index = 1UL << (order - 5);

		XA_BUG_ON(xa, !xa_empty(xa));
		xa_store_order(xa, 0, order - 4, xa_mk_index(0), GFP_KERNEL);
		XA_BUG_ON(xa, xa_find_after(xa, &index, ULONG_MAX, XA_PRESENT));
		xa_erase_index(xa, 0);
	}
}

static noinline void check_find_1(struct xarray *xa)
{
	unsigned long i, j, k;

	XA_BUG_ON(xa, !xa_empty(xa));

	/*
	 * Check xa_find with all pairs between 0 and 99 inclusive,
	 * starting at every index between 0 and 99
	 */
	for (i = 0; i < 100; i++) {
		XA_BUG_ON(xa, xa_store_index(xa, i, GFP_KERNEL) != NULL);
		xa_set_mark(xa, i, XA_MARK_0);
		for (j = 0; j < i; j++) {
			XA_BUG_ON(xa, xa_store_index(xa, j, GFP_KERNEL) !=
					NULL);
			xa_set_mark(xa, j, XA_MARK_0);
			for (k = 0; k < 100; k++) {
				unsigned long index = k;
				void *entry = xa_find(xa, &index, ULONG_MAX,
								XA_PRESENT);
				if (k <= j)
					XA_BUG_ON(xa, index != j);
				else if (k <= i)
					XA_BUG_ON(xa, index != i);
				else
					XA_BUG_ON(xa, entry != NULL);

				index = k;
				entry = xa_find(xa, &index, ULONG_MAX,
								XA_MARK_0);
				if (k <= j)
					XA_BUG_ON(xa, index != j);
				else if (k <= i)
					XA_BUG_ON(xa, index != i);
				else
					XA_BUG_ON(xa, entry != NULL);
			}
			xa_erase_index(xa, j);
			XA_BUG_ON(xa, xa_get_mark(xa, j, XA_MARK_0));
			XA_BUG_ON(xa, !xa_get_mark(xa, i, XA_MARK_0));
		}
		xa_erase_index(xa, i);
		XA_BUG_ON(xa, xa_get_mark(xa, i, XA_MARK_0));
	}
	XA_BUG_ON(xa, !xa_empty(xa));
}

static noinline void check_find_2(struct xarray *xa)
{
	void *entry;
	unsigned long i, j, index;

	xa_for_each(xa, index, entry) {
		XA_BUG_ON(xa, true);
	}

	for (i = 0; i < 1024; i++) {
		xa_store_index(xa, index, GFP_KERNEL);
		j = 0;
		xa_for_each(xa, index, entry) {
			XA_BUG_ON(xa, xa_mk_index(index) != entry);
			XA_BUG_ON(xa, index != j++);
		}
	}

	xa_destroy(xa);
}

static noinline void check_find_3(struct xarray *xa)
{
	XA_STATE(xas, xa, 0);
	unsigned long i, j, k;
	void *entry;

	for (i = 0; i < 100; i++) {
		for (j = 0; j < 100; j++) {
			rcu_read_lock();
			for (k = 0; k < 100; k++) {
				xas_set(&xas, j);
				xas_for_each_marked(&xas, entry, k, XA_MARK_0)
					;
				if (j > k)
					XA_BUG_ON(xa,
						xas.xa_node != XAS_RESTART);
			}
			rcu_read_unlock();
		}
		xa_store_index(xa, i, GFP_KERNEL);
		xa_set_mark(xa, i, XA_MARK_0);
	}
	xa_destroy(xa);
}

static noinline void check_find_4(struct xarray *xa)
{
	unsigned long index = 0;
	void *entry;

	xa_store_index(xa, ULONG_MAX, GFP_KERNEL);

	entry = xa_find_after(xa, &index, ULONG_MAX, XA_PRESENT);
	XA_BUG_ON(xa, entry != xa_mk_index(ULONG_MAX));

	entry = xa_find_after(xa, &index, ULONG_MAX, XA_PRESENT);
	XA_BUG_ON(xa, entry);

	xa_erase_index(xa, ULONG_MAX);
}

static noinline void check_find(struct xarray *xa)
{
	unsigned i;

	check_find_1(xa);
	check_find_2(xa);
	check_find_3(xa);
	check_find_4(xa);

	for (i = 2; i < 10; i++)
		check_multi_find_1(xa, i);
	check_multi_find_2(xa);
	check_multi_find_3(xa);
}

/* See find_swap_entry() in mm/shmem.c */
static noinline unsigned long xa_find_entry(struct xarray *xa, void *item)
{
	XA_STATE(xas, xa, 0);
	unsigned int checked = 0;
	void *entry;

	rcu_read_lock();
	xas_for_each(&xas, entry, ULONG_MAX) {
		if (xas_retry(&xas, entry))
			continue;
		if (entry == item)
			break;
		checked++;
		if ((checked % 4) != 0)
			continue;
		xas_pause(&xas);
	}
	rcu_read_unlock();

	return entry ? xas.xa_index : -1;
}

static noinline void check_find_entry(struct xarray *xa)
{
#ifdef CONFIG_XARRAY_MULTI
	unsigned int order;
	unsigned long offset, index;

	for (order = 0; order < 20; order++) {
		for (offset = 0; offset < (1UL << (order + 3));
		     offset += (1UL << order)) {
			for (index = 0; index < (1UL << (order + 5));
			     index += (1UL << order)) {
				xa_store_order(xa, index, order,
						xa_mk_index(index), GFP_KERNEL);
				XA_BUG_ON(xa, xa_load(xa, index) !=
						xa_mk_index(index));
				XA_BUG_ON(xa, xa_find_entry(xa,
						xa_mk_index(index)) != index);
			}
			XA_BUG_ON(xa, xa_find_entry(xa, xa) != -1);
			xa_destroy(xa);
		}
	}
#endif

	XA_BUG_ON(xa, xa_find_entry(xa, xa) != -1);
	xa_store_index(xa, ULONG_MAX, GFP_KERNEL);
	XA_BUG_ON(xa, xa_find_entry(xa, xa) != -1);
	XA_BUG_ON(xa, xa_find_entry(xa, xa_mk_index(ULONG_MAX)) != -1);
	xa_erase_index(xa, ULONG_MAX);
	XA_BUG_ON(xa, !xa_empty(xa));
}

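/*
 * Check that xas_pause() can be called after every entry in an iteration
 * over mixed-order entries without skipping or repeating any of them.
 */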
static noinline void check_pause(struct xarray *xa)
{
	XA_STATE(xas, xa, 0);
	void *entry;
	unsigned int order;
	unsigned long index = 1;
	unsigned int count = 0;

	for (order = 0; order < order_limit; order++) {
		XA_BUG_ON(xa, xa_store_order(xa, index, order,
					xa_mk_index(index), GFP_KERNEL));
		index += 1UL << order;
	}

	rcu_read_lock();
	xas_for_each(&xas, entry, ULONG_MAX) {
		XA_BUG_ON(xa, entry != xa_mk_index(1UL << count));
		count++;
	}
	rcu_read_unlock();
	XA_BUG_ON(xa, count != order_limit);

	count = 0;
	xas_set(&xas, 0);
	rcu_read_lock();
	xas_for_each(&xas, entry, ULONG_MAX) {
		XA_BUG_ON(xa, entry != xa_mk_index(1UL << count));
		count++;
		xas_pause(&xas);
	}
	rcu_read_unlock();
	XA_BUG_ON(xa, count != order_limit);

	xa_destroy(xa);
}

static noinline void check_move_tiny(struct xarray *xa)
{
	XA_STATE(xas, xa, 0);

	XA_BUG_ON(xa, !xa_empty(xa));
	rcu_read_lock();
	XA_BUG_ON(xa, xas_next(&xas) != NULL);
	XA_BUG_ON(xa, xas_next(&xas) != NULL);
	rcu_read_unlock();
	xa_store_index(xa, 0, GFP_KERNEL);
	rcu_read_lock();
	xas_set(&xas, 0);
	XA_BUG_ON(xa, xas_next(&xas) != xa_mk_index(0));
	XA_BUG_ON(xa, xas_next(&xas) != NULL);
	xas_set(&xas, 0);
	XA_BUG_ON(xa, xas_prev(&xas) != xa_mk_index(0));
	XA_BUG_ON(xa, xas_prev(&xas) != NULL);
	rcu_read_unlock();
	xa_erase_index(xa, 0);
	XA_BUG_ON(xa, !xa_empty(xa));
}

static noinline void check_move_max(struct xarray *xa)
{
	XA_STATE(xas, xa, 0);

	xa_store_index(xa, ULONG_MAX, GFP_KERNEL);
	rcu_read_lock();
	XA_BUG_ON(xa, xas_find(&xas, ULONG_MAX) != xa_mk_index(ULONG_MAX));
	XA_BUG_ON(xa, xas_find(&xas, ULONG_MAX) != NULL);
	rcu_read_unlock();

	xas_set(&xas, 0);
	rcu_read_lock();
	XA_BUG_ON(xa, xas_find(&xas, ULONG_MAX) != xa_mk_index(ULONG_MAX));
	xas_pause(&xas);
	XA_BUG_ON(xa, xas_find(&xas, ULONG_MAX) != NULL);
	rcu_read_unlock();

	xa_erase_index(xa, ULONG_MAX);
	XA_BUG_ON(xa, !xa_empty(xa));
}

static noinline void check_move_small(struct xarray *xa, unsigned long idx)
{
	XA_STATE(xas, xa, 0);
	unsigned long i;

	xa_store_index(xa, 0, GFP_KERNEL);
	xa_store_index(xa, idx, GFP_KERNEL);

	rcu_read_lock();
	for (i = 0; i < idx * 4; i++) {
		void *entry = xas_next(&xas);
		if (i <= idx)
			XA_BUG_ON(xa, xas.xa_node == XAS_RESTART);
		XA_BUG_ON(xa, xas.xa_index != i);
		if (i == 0 || i == idx)
			XA_BUG_ON(xa, entry != xa_mk_index(i));
		else
			XA_BUG_ON(xa, entry != NULL);
	}
	xas_next(&xas);
	XA_BUG_ON(xa, xas.xa_index != i);

	do {
		void *entry = xas_prev(&xas);
		i--;
		if (i <= idx)
			XA_BUG_ON(xa, xas.xa_node == XAS_RESTART);
		XA_BUG_ON(xa, xas.xa_index != i);
		if (i == 0 || i == idx)
			XA_BUG_ON(xa, entry != xa_mk_index(i));
		else
			XA_BUG_ON(xa, entry != NULL);
	} while (i > 0);

	xas_set(&xas, ULONG_MAX);
	XA_BUG_ON(xa, xas_next(&xas) != NULL);
	XA_BUG_ON(xa, xas.xa_index != ULONG_MAX);
	XA_BUG_ON(xa, xas_next(&xas) != xa_mk_value(0));
	XA_BUG_ON(xa, xas.xa_index != 0);
	XA_BUG_ON(xa, xas_prev(&xas) != NULL);
	XA_BUG_ON(xa, xas.xa_index != ULONG_MAX);
	rcu_read_unlock();

	xa_erase_index(xa, 0);
	xa_erase_index(xa, idx);
	XA_BUG_ON(xa, !xa_empty(xa));
}

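/*
 * Walk a 64K-entry array backwards with xas_prev() and forwards with
 * xas_next(), both fully populated and with a hole punched in the middle,
 * checking the index and entry agree at every step (including the
 * wrap-around past index 0 to ULONG_MAX).
 */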
static noinline void check_move(struct xarray *xa)
{
	XA_STATE(xas, xa, (1 << 16) - 1);
	unsigned long i;

	for (i = 0; i < (1 << 16); i++)
		XA_BUG_ON(xa, xa_store_index(xa, i, GFP_KERNEL) != NULL);

	rcu_read_lock();
	do {
		void *entry = xas_prev(&xas);
		i--;
		XA_BUG_ON(xa, entry != xa_mk_index(i));
		XA_BUG_ON(xa, i != xas.xa_index);
	} while (i != 0);

	XA_BUG_ON(xa, xas_prev(&xas) != NULL);
	XA_BUG_ON(xa, xas.xa_index != ULONG_MAX);

	do {
		void *entry = xas_next(&xas);
		XA_BUG_ON(xa, entry != xa_mk_index(i));
		XA_BUG_ON(xa, i != xas.xa_index);
		i++;
	} while (i < (1 << 16));
	rcu_read_unlock();

	for (i = (1 << 8); i < (1 << 15); i++)
		xa_erase_index(xa, i);

	i = xas.xa_index;

	rcu_read_lock();
	do {
		void *entry = xas_prev(&xas);
		i--;
		if ((i < (1 << 8)) || (i >= (1 << 15)))
			XA_BUG_ON(xa, entry != xa_mk_index(i));
		else
			XA_BUG_ON(xa, entry != NULL);
		XA_BUG_ON(xa, i != xas.xa_index);
	} while (i != 0);

	XA_BUG_ON(xa, xas_prev(&xas) != NULL);
	XA_BUG_ON(xa, xas.xa_index != ULONG_MAX);

	do {
		void *entry = xas_next(&xas);
		if ((i < (1 << 8)) || (i >= (1 << 15)))
			XA_BUG_ON(xa, entry != xa_mk_index(i));
		else
			XA_BUG_ON(xa, entry != NULL);
		XA_BUG_ON(xa, i != xas.xa_index);
		i++;
	} while (i < (1 << 16));
	rcu_read_unlock();

	xa_destroy(xa);

	check_move_tiny(xa);
	check_move_max(xa);

	for (i = 0; i < 16; i++)
		check_move_small(xa, 1UL << i);

	for (i = 2; i < 16; i++)
		check_move_small(xa, (1UL << i) - 1);
}

static noinline void xa_store_many_order(struct xarray *xa,
		unsigned long index, unsigned order)
{
	XA_STATE_ORDER(xas, xa, index, order);
	unsigned int i = 0;

	do {
		xas_lock(&xas);
		XA_BUG_ON(xa, xas_find_conflict(&xas));
		xas_create_range(&xas);
		if (xas_error(&xas))
			goto unlock;
		for (i = 0; i < (1U << order); i++) {
			XA_BUG_ON(xa, xas_store(&xas, xa_mk_index(index + i)));
			xas_next(&xas);
		}
unlock:
		xas_unlock(&xas);
	} while (xas_nomem(&xas, GFP_KERNEL));

	XA_BUG_ON(xa, xas_error(&xas));
}

static noinline void check_create_range_1(struct xarray *xa,
		unsigned long index, unsigned order)
{
	unsigned long i;

	xa_store_many_order(xa, index, order);
	for (i = index; i < index + (1UL << order); i++)
		xa_erase_index(xa, i);
	XA_BUG_ON(xa, !xa_empty(xa));
}

static noinline void check_create_range_2(struct xarray *xa, unsigned order)
{
	unsigned long i;
	unsigned long nr = 1UL << order;

	for (i = 0; i < nr * nr; i += nr)
		xa_store_many_order(xa, i, order);
	for (i = 0; i < nr * nr; i++)
		xa_erase_index(xa, i);
	XA_BUG_ON(xa, !xa_empty(xa));
}

static noinline void check_create_range_3(void)
{
	XA_STATE(xas, NULL, 0);
	xas_set_err(&xas, -EEXIST);
	xas_create_range(&xas);
	XA_BUG_ON(NULL, xas_error(&xas) != -EEXIST);
}

static noinline void check_create_range_4(struct xarray *xa,
		unsigned long index, unsigned order)
{
	XA_STATE_ORDER(xas, xa, index, order);
	unsigned long base = xas.xa_index;
	unsigned long i = 0;

	xa_store_index(xa, index, GFP_KERNEL);
	do {
		xas_lock(&xas);
		xas_create_range(&xas);
		if (xas_error(&xas))
			goto unlock;
		for (i = 0; i < (1UL << order); i++) {
			void *old = xas_store(&xas, xa_mk_index(base + i));
			if (xas.xa_index == index)
				XA_BUG_ON(xa, old != xa_mk_index(base + i));
			else
				XA_BUG_ON(xa, old != NULL);
			xas_next(&xas);
		}
unlock:
		xas_unlock(&xas);
	} while (xas_nomem(&xas, GFP_KERNEL));

	XA_BUG_ON(xa, xas_error(&xas));

	for (i = base; i < base + (1UL << order); i++)
		xa_erase_index(xa, i);
	XA_BUG_ON(xa, !xa_empty(xa));
}

static noinline void check_create_range_5(struct xarray *xa,
		unsigned long index, unsigned int order)
{
	XA_STATE_ORDER(xas, xa, index, order);
	unsigned int i;

	xa_store_order(xa, index, order, xa_mk_index(index), GFP_KERNEL);

	for (i = 0; i < order + 10; i++) {
		do {
			xas_lock(&xas);
			xas_create_range(&xas);
			xas_unlock(&xas);
		} while (xas_nomem(&xas, GFP_KERNEL));
	}

	xa_destroy(xa);
}

static noinline void check_create_range(struct xarray *xa)
{
	unsigned int order;
	unsigned int max_order = IS_ENABLED(CONFIG_XARRAY_MULTI) ? 12 : 1;

	for (order = 0; order < max_order; order++) {
		check_create_range_1(xa, 0, order);
		check_create_range_1(xa, 1U << order, order);
		check_create_range_1(xa, 2U << order, order);
		check_create_range_1(xa, 3U << order, order);
		check_create_range_1(xa, 1U << 24, order);
		if (order < 10)
			check_create_range_2(xa, order);

		check_create_range_4(xa, 0, order);
		check_create_range_4(xa, 1U << order, order);
		check_create_range_4(xa, 2U << order, order);
		check_create_range_4(xa, 3U << order, order);
		check_create_range_4(xa, 1U << 24, order);

		check_create_range_4(xa, 1, order);
		check_create_range_4(xa, (1U << order) + 1, order);
		check_create_range_4(xa, (2U << order) + 1, order);
		check_create_range_4(xa, (2U << order) - 1, order);
		check_create_range_4(xa, (3U << order) + 1, order);
		check_create_range_4(xa, (3U << order) - 1, order);
		check_create_range_4(xa, (1U << 24) + 1, order);

		check_create_range_5(xa, 0, order);
		check_create_range_5(xa, (1U << order), order);
	}

	check_create_range_3();
}

static noinline void __check_store_range(struct xarray *xa, unsigned long first,
		unsigned long last)
{
#ifdef CONFIG_XARRAY_MULTI
	xa_store_range(xa, first, last, xa_mk_index(first), GFP_KERNEL);

	XA_BUG_ON(xa, xa_load(xa, first) != xa_mk_index(first));
	XA_BUG_ON(xa, xa_load(xa, last) != xa_mk_index(first));
	XA_BUG_ON(xa, xa_load(xa, first - 1) != NULL);
	XA_BUG_ON(xa, xa_load(xa, last + 1) != NULL);

	xa_store_range(xa, first, last, NULL, GFP_KERNEL);
#endif

	XA_BUG_ON(xa, !xa_empty(xa));
}

static noinline void check_store_range(struct xarray *xa)
{
	unsigned long i, j;

	for (i = 0; i < 128; i++) {
		for (j = i; j < 128; j++) {
			__check_store_range(xa, i, j);
			__check_store_range(xa, 128 + i, 128 + j);
			__check_store_range(xa, 4095 + i, 4095 + j);
			__check_store_range(xa, 4096 + i, 4096 + j);
			__check_store_range(xa, 123456 + i, 123456 + j);
			__check_store_range(xa, (1 << 24) + i, (1 << 24) + j);
		}
	}
}

#ifdef CONFIG_XARRAY_MULTI
static void check_split_1(struct xarray *xa, unsigned long index,
				unsigned int order, unsigned int new_order)
{
	XA_STATE_ORDER(xas, xa, index, new_order);
	unsigned int i;

	xa_store_order(xa, index, order, xa, GFP_KERNEL);

	xas_split_alloc(&xas, xa, order, GFP_KERNEL);
	xas_lock(&xas);
	xas_split(&xas, xa, order);
	for (i = 0; i < (1 << order); i += (1 << new_order))
		__xa_store(xa, index + i, xa_mk_index(index + i), 0);
	xas_unlock(&xas);

	for (i = 0; i < (1 << order); i++) {
		unsigned int val = index + (i & ~((1 << new_order) - 1));
		XA_BUG_ON(xa, xa_load(xa, index + i) != xa_mk_index(val));
	}

	xa_set_mark(xa, index, XA_MARK_0);
	XA_BUG_ON(xa, !xa_get_mark(xa, index, XA_MARK_0));

	xa_destroy(xa);
}

static noinline void check_split(struct xarray *xa)
{
	unsigned int order, new_order;

	XA_BUG_ON(xa, !xa_empty(xa));

	for (order = 1; order < 2 * XA_CHUNK_SHIFT; order++) {
		for (new_order = 0; new_order < order; new_order++) {
			check_split_1(xa, 0, order, new_order);
			check_split_1(xa, 1UL << order, order, new_order);
			check_split_1(xa, 3UL << order, order, new_order);
		}
	}
}
#else
static void check_split(struct xarray *xa) { }
#endif

static void check_align_1(struct xarray *xa, char *name)
{
	int i;
	unsigned int id;
	unsigned long index;
	void *entry;

	for (i = 0; i < 8; i++) {
		XA_BUG_ON(xa, xa_alloc(xa, &id, name + i, xa_limit_32b,
					GFP_KERNEL) != 0);
		XA_BUG_ON(xa, id != i);
	}
	xa_for_each(xa, index, entry)
		XA_BUG_ON(xa, xa_is_err(entry));
	xa_destroy(xa);
}

/*
 * We should always be able to store without allocating memory after
 * reserving a slot.
 */
static void check_align_2(struct xarray *xa, char *name)
{
	int i;

	XA_BUG_ON(xa, !xa_empty(xa));

	for (i = 0; i < 8; i++) {
		XA_BUG_ON(xa, xa_store(xa, 0, name + i, GFP_KERNEL) != NULL);
		xa_erase(xa, 0);
	}

	for (i = 0; i < 8; i++) {
		XA_BUG_ON(xa, xa_reserve(xa, 0, GFP_KERNEL) != 0);
		XA_BUG_ON(xa, xa_store(xa, 0, name + i, 0) != NULL);
		xa_erase(xa, 0);
	}

	XA_BUG_ON(xa, !xa_empty(xa));
}

static noinline void check_align(struct xarray *xa)
{
	char name[] = "Motorola 68000";

	check_align_1(xa, name);
	check_align_1(xa, name + 1);
	check_align_1(xa, name + 2);
	check_align_1(xa, name + 3);
	check_align_2(xa, name);
}

static LIST_HEAD(shadow_nodes);

static void test_update_node(struct xa_node *node)
{
	if (node->count && node->count == node->nr_values) {
		if (list_empty(&node->private_list))
			list_add(&shadow_nodes, &node->private_list);
	} else {
		if (!list_empty(&node->private_list))
			list_del_init(&node->private_list);
	}
}

static noinline void shadow_remove(struct xarray *xa)
{
	struct xa_node *node;

	xa_lock(xa);
	while ((node = list_first_entry_or_null(&shadow_nodes,
					struct xa_node, private_list))) {
		XA_BUG_ON(xa, node->array != xa);
		list_del_init(&node->private_list);
		xa_delete_node(node, test_update_node);
	}
	xa_unlock(xa);
}

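/*
 * Mimic the way the page cache tracks shadow entries: test_update_node()
 * puts a node on shadow_nodes while it contains only value entries, and
 * check_workingset() verifies the node moves on and off that list as
 * pointers and values are stored, then reclaims it via shadow_remove().
 */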
static noinline void check_workingset(struct xarray *xa, unsigned long index)
{
	XA_STATE(xas, xa, index);
	xas_set_update(&xas, test_update_node);

	do {
		xas_lock(&xas);
		xas_store(&xas, xa_mk_value(0));
		xas_next(&xas);
		xas_store(&xas, xa_mk_value(1));
		xas_unlock(&xas);
	} while (xas_nomem(&xas, GFP_KERNEL));

	XA_BUG_ON(xa, list_empty(&shadow_nodes));

	xas_lock(&xas);
	xas_next(&xas);
	xas_store(&xas, &xas);
	XA_BUG_ON(xa, !list_empty(&shadow_nodes));

	xas_store(&xas, xa_mk_value(2));
	xas_unlock(&xas);
	XA_BUG_ON(xa, list_empty(&shadow_nodes));

	shadow_remove(xa);
	XA_BUG_ON(xa, !list_empty(&shadow_nodes));
	XA_BUG_ON(xa, !xa_empty(xa));
}

/*
 * Check that the pointer / value / sibling entries are accounted the
 * way we expect them to be.
 */
static noinline void check_account(struct xarray *xa)
{
#ifdef CONFIG_XARRAY_MULTI
	unsigned int order;

	for (order = 1; order < 12; order++) {
		XA_STATE(xas, xa, 1 << order);

		xa_store_order(xa, 0, order, xa, GFP_KERNEL);
		rcu_read_lock();
		xas_load(&xas);
		XA_BUG_ON(xa, xas.xa_node->count == 0);
		XA_BUG_ON(xa, xas.xa_node->count > (1 << order));
		XA_BUG_ON(xa, xas.xa_node->nr_values != 0);
		rcu_read_unlock();

		xa_store_order(xa, 1 << order, order, xa_mk_index(1UL << order),
				GFP_KERNEL);
		XA_BUG_ON(xa, xas.xa_node->count != xas.xa_node->nr_values * 2);

		xa_erase(xa, 1 << order);
		XA_BUG_ON(xa, xas.xa_node->nr_values != 0);

		xa_erase(xa, 0);
		XA_BUG_ON(xa, !xa_empty(xa));
	}
#endif
}

static noinline void check_get_order(struct xarray *xa)
{
	unsigned int max_order = IS_ENABLED(CONFIG_XARRAY_MULTI) ? 20 : 1;
	unsigned int order;
	unsigned long i, j;

	for (i = 0; i < 3; i++)
		XA_BUG_ON(xa, xa_get_order(xa, i) != 0);

	for (order = 0; order < max_order; order++) {
		for (i = 0; i < 10; i++) {
			xa_store_order(xa, i << order, order,
					xa_mk_index(i << order), GFP_KERNEL);
			for (j = i << order; j < (i + 1) << order; j++)
				XA_BUG_ON(xa, xa_get_order(xa, j) != order);
			xa_erase(xa, i << order);
		}
	}
}

static noinline void check_destroy(struct xarray *xa)
{
	unsigned long index;

	XA_BUG_ON(xa, !xa_empty(xa));

	/* Destroying an empty array is a no-op */
	xa_destroy(xa);
	XA_BUG_ON(xa, !xa_empty(xa));

	/* Destroying an array with a single entry */
	for (index = 0; index < 1000; index++) {
		xa_store_index(xa, index, GFP_KERNEL);
		XA_BUG_ON(xa, xa_empty(xa));
		xa_destroy(xa);
		XA_BUG_ON(xa, !xa_empty(xa));
	}

	/* Destroying an array with a single entry at ULONG_MAX */
	xa_store(xa, ULONG_MAX, xa, GFP_KERNEL);
	XA_BUG_ON(xa, xa_empty(xa));
	xa_destroy(xa);
	XA_BUG_ON(xa, !xa_empty(xa));

#ifdef CONFIG_XARRAY_MULTI
	/* Destroying an array with a multi-index entry */
	xa_store_order(xa, 1 << 11, 11, xa, GFP_KERNEL);
	XA_BUG_ON(xa, xa_empty(xa));
	xa_destroy(xa);
	XA_BUG_ON(xa, !xa_empty(xa));
#endif
}

static DEFINE_XARRAY(array);

static int xarray_checks(void)
{
	check_xa_err(&array);
	check_xas_retry(&array);
	check_xa_load(&array);
	check_xa_mark(&array);
	check_xa_shrink(&array);
	check_xas_erase(&array);
	check_insert(&array);
	check_cmpxchg(&array);
	check_cmpxchg_order(&array);
	check_reserve(&array);
	check_reserve(&xa0);
	check_multi_store(&array);
	check_multi_store_advanced(&array);
	check_get_order(&array);
	check_xa_alloc();
	check_find(&array);
	check_find_entry(&array);
	check_pause(&array);
	check_account(&array);
	check_destroy(&array);
	check_move(&array);
	check_create_range(&array);
	check_store_range(&array);
	check_store_iter(&array);
	check_align(&xa0);
	check_split(&array);

	check_workingset(&array, 0);
	check_workingset(&array, 64);
	check_workingset(&array, 4096);

	printk("XArray: %u of %u tests passed\n", tests_passed, tests_run);
	return (tests_run == tests_passed) ? 0 : -EINVAL;
}

static void xarray_exit(void)
{
}

module_init(xarray_checks);
module_exit(xarray_exit);
MODULE_AUTHOR("Matthew Wilcox <willy@infradead.org>");
MODULE_LICENSE("GPL");