1// SPDX-License-Identifier: GPL-2.0
2//
3// regmap KUnit tests
4//
5// Copyright 2023 Arm Ltd
6
7#include <kunit/test.h>
8#include "internal.h"
9
10#define BLOCK_TEST_SIZE 12
11
/*
 * Fill @new with @size random bytes, guaranteeing that every byte
 * differs from the corresponding byte in @orig.  Used to generate
 * register values that are certain to look like a change.
 */
static void get_changed_bytes(void *orig, void *new, size_t size)
{
	char *o = orig;
	char *n = new;
	size_t i;	/* was int: avoid signed/unsigned compare with size */

	get_random_bytes(new, size);

	/*
	 * This could be nicer and more efficient but we shouldn't
	 * super care.
	 */
	for (i = 0; i < size; i++)
		while (n[i] == o[i])
			get_random_bytes(&n[i], 1);
}
28
/*
 * Base config shared by most tests: a small linear map with one
 * register per stride and native unsigned int sized values.  Tests
 * copy this and then adjust fields for their case.
 */
static const struct regmap_config test_regmap_config = {
	.max_register = BLOCK_TEST_SIZE,
	.reg_stride = 1,
	.val_bits = sizeof(unsigned int) * 8,
};
34
/* Test parameter: which cache implementation to exercise. */
struct regcache_types {
	enum regcache_type type;	/* cache backend under test */
	const char *name;		/* human readable name for test output */
};
39
40static void case_to_desc(const struct regcache_types *t, char *desc)
41{
42	strcpy(desc, t->name);
43}
44
/* All supported cache types, including no cache at all */
static const struct regcache_types regcache_types_list[] = {
	{ REGCACHE_NONE, "none" },
	{ REGCACHE_FLAT, "flat" },
	{ REGCACHE_RBTREE, "rbtree" },
	{ REGCACHE_MAPLE, "maple" },
};

KUNIT_ARRAY_PARAM(regcache_types, regcache_types_list, case_to_desc);

/* Only cache types that actually cache data (no REGCACHE_NONE) */
static const struct regcache_types real_cache_types_list[] = {
	{ REGCACHE_FLAT, "flat" },
	{ REGCACHE_RBTREE, "rbtree" },
	{ REGCACHE_MAPLE, "maple" },
};

KUNIT_ARRAY_PARAM(real_cache_types, real_cache_types_list, case_to_desc);

/* Cache types that can hold a sparse set of registers */
static const struct regcache_types sparse_cache_types_list[] = {
	{ REGCACHE_RBTREE, "rbtree" },
	{ REGCACHE_MAPLE, "maple" },
};

KUNIT_ARRAY_PARAM(sparse_cache_types, sparse_cache_types_list, case_to_desc);
68
69static struct regmap *gen_regmap(struct regmap_config *config,
70				 struct regmap_ram_data **data)
71{
72	unsigned int *buf;
73	struct regmap *ret;
74	size_t size = (config->max_register + 1) * sizeof(unsigned int);
75	int i;
76	struct reg_default *defaults;
77
78	config->disable_locking = config->cache_type == REGCACHE_RBTREE ||
79					config->cache_type == REGCACHE_MAPLE;
80
81	buf = kmalloc(size, GFP_KERNEL);
82	if (!buf)
83		return ERR_PTR(-ENOMEM);
84
85	get_random_bytes(buf, size);
86
87	*data = kzalloc(sizeof(**data), GFP_KERNEL);
88	if (!(*data))
89		return ERR_PTR(-ENOMEM);
90	(*data)->vals = buf;
91
92	if (config->num_reg_defaults) {
93		defaults = kcalloc(config->num_reg_defaults,
94				   sizeof(struct reg_default),
95				   GFP_KERNEL);
96		if (!defaults)
97			return ERR_PTR(-ENOMEM);
98		config->reg_defaults = defaults;
99
100		for (i = 0; i < config->num_reg_defaults; i++) {
101			defaults[i].reg = i * config->reg_stride;
102			defaults[i].def = buf[i * config->reg_stride];
103		}
104	}
105
106	ret = regmap_init_ram(config, *data);
107	if (IS_ERR(ret)) {
108		kfree(buf);
109		kfree(*data);
110	}
111
112	return ret;
113}
114
/* Register access predicate: every register is allowed except 5. */
static bool reg_5_false(struct device *context, unsigned int reg)
{
	if (reg == 5)
		return false;

	return true;
}
119
120static void basic_read_write(struct kunit *test)
121{
122	struct regcache_types *t = (struct regcache_types *)test->param_value;
123	struct regmap *map;
124	struct regmap_config config;
125	struct regmap_ram_data *data;
126	unsigned int val, rval;
127
128	config = test_regmap_config;
129	config.cache_type = t->type;
130
131	map = gen_regmap(&config, &data);
132	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
133	if (IS_ERR(map))
134		return;
135
136	get_random_bytes(&val, sizeof(val));
137
138	/* If we write a value to a register we can read it back */
139	KUNIT_EXPECT_EQ(test, 0, regmap_write(map, 0, val));
140	KUNIT_EXPECT_EQ(test, 0, regmap_read(map, 0, &rval));
141	KUNIT_EXPECT_EQ(test, val, rval);
142
143	/* If using a cache the cache satisfied the read */
144	KUNIT_EXPECT_EQ(test, t->type == REGCACHE_NONE, data->read[0]);
145
146	regmap_exit(map);
147}
148
/*
 * Check that data written with regmap_bulk_write() can be read back
 * with single register reads, and that a configured cache (rather
 * than the device) satisfies those reads.
 */
static void bulk_write(struct kunit *test)
{
	struct regcache_types *t = (struct regcache_types *)test->param_value;
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int val[BLOCK_TEST_SIZE], rval[BLOCK_TEST_SIZE];
	int i;

	config = test_regmap_config;
	config.cache_type = t->type;

	map = gen_regmap(&config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	get_random_bytes(&val, sizeof(val));

	/*
	 * Data written via the bulk API can be read back with single
	 * reads.
	 */
	KUNIT_EXPECT_EQ(test, 0, regmap_bulk_write(map, 0, val,
						   BLOCK_TEST_SIZE));
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_EXPECT_EQ(test, 0, regmap_read(map, i, &rval[i]));

	KUNIT_EXPECT_MEMEQ(test, val, rval, sizeof(val));

	/* If using a cache the cache satisfied the read */
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_EXPECT_EQ(test, t->type == REGCACHE_NONE, data->read[i]);

	regmap_exit(map);
}
185
/*
 * Check that data written with single register writes can be read
 * back via regmap_bulk_read(), and that a configured cache (rather
 * than the device) satisfies the bulk read.
 */
static void bulk_read(struct kunit *test)
{
	struct regcache_types *t = (struct regcache_types *)test->param_value;
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int val[BLOCK_TEST_SIZE], rval[BLOCK_TEST_SIZE];
	int i;

	config = test_regmap_config;
	config.cache_type = t->type;

	map = gen_regmap(&config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	get_random_bytes(&val, sizeof(val));

	/* Data written as single writes can be read via the bulk API */
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_EXPECT_EQ(test, 0, regmap_write(map, i, val[i]));
	KUNIT_EXPECT_EQ(test, 0, regmap_bulk_read(map, 0, rval,
						  BLOCK_TEST_SIZE));
	KUNIT_EXPECT_MEMEQ(test, val, rval, sizeof(val));

	/* If using a cache the cache satisfied the read */
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_EXPECT_EQ(test, t->type == REGCACHE_NONE, data->read[i]);

	regmap_exit(map);
}
218
/*
 * Check that writes to a register reported as not writeable (5, via
 * reg_5_false) are refused, that all other registers remain
 * writeable, and that the simulated device saw exactly the writes
 * that succeeded.
 */
static void write_readonly(struct kunit *test)
{
	struct regcache_types *t = (struct regcache_types *)test->param_value;
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int val;
	int i;

	config = test_regmap_config;
	config.cache_type = t->type;
	config.num_reg_defaults = BLOCK_TEST_SIZE;
	config.writeable_reg = reg_5_false;

	map = gen_regmap(&config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	get_random_bytes(&val, sizeof(val));

	/* Clear the write tracking so only our writes are counted */
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		data->written[i] = false;

	/* Change the value of all registers, readonly should fail */
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_EXPECT_EQ(test, i != 5, regmap_write(map, i, val) == 0);

	/* Did that match what we see on the device? */
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_EXPECT_EQ(test, i != 5, data->written[i]);

	regmap_exit(map);
}
253
/*
 * Check reads of a register reported as not readable (5, via
 * reg_5_false).  The read is expected to fail, except with the flat
 * cache where it is expected to succeed - presumably satisfied from
 * the cache; confirm against the regcache_flat implementation.
 * Either way register 5 must never be read from the device.
 */
static void read_writeonly(struct kunit *test)
{
	struct regcache_types *t = (struct regcache_types *)test->param_value;
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int val;
	int i;

	config = test_regmap_config;
	config.cache_type = t->type;
	config.readable_reg = reg_5_false;

	map = gen_regmap(&config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	/* Clear the read tracking so only our reads are counted */
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		data->read[i] = false;

	/*
	 * Try to read all the registers, the writeonly one should
	 * fail if we aren't using the flat cache.
	 */
	for (i = 0; i < BLOCK_TEST_SIZE; i++) {
		if (t->type != REGCACHE_FLAT) {
			KUNIT_EXPECT_EQ(test, i != 5,
					regmap_read(map, i, &val) == 0);
		} else {
			KUNIT_EXPECT_EQ(test, 0, regmap_read(map, i, &val));
		}
	}

	/* Did we trigger a hardware access? */
	KUNIT_EXPECT_FALSE(test, data->read[5]);

	regmap_exit(map);
}
293
294static void reg_defaults(struct kunit *test)
295{
296	struct regcache_types *t = (struct regcache_types *)test->param_value;
297	struct regmap *map;
298	struct regmap_config config;
299	struct regmap_ram_data *data;
300	unsigned int rval[BLOCK_TEST_SIZE];
301	int i;
302
303	config = test_regmap_config;
304	config.cache_type = t->type;
305	config.num_reg_defaults = BLOCK_TEST_SIZE;
306
307	map = gen_regmap(&config, &data);
308	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
309	if (IS_ERR(map))
310		return;
311
312	/* Read back the expected default data */
313	KUNIT_EXPECT_EQ(test, 0, regmap_bulk_read(map, 0, rval,
314						  BLOCK_TEST_SIZE));
315	KUNIT_EXPECT_MEMEQ(test, data->vals, rval, sizeof(rval));
316
317	/* The data should have been read from cache if there was one */
318	for (i = 0; i < BLOCK_TEST_SIZE; i++)
319		KUNIT_EXPECT_EQ(test, t->type == REGCACHE_NONE, data->read[i]);
320}
321
322static void reg_defaults_read_dev(struct kunit *test)
323{
324	struct regcache_types *t = (struct regcache_types *)test->param_value;
325	struct regmap *map;
326	struct regmap_config config;
327	struct regmap_ram_data *data;
328	unsigned int rval[BLOCK_TEST_SIZE];
329	int i;
330
331	config = test_regmap_config;
332	config.cache_type = t->type;
333	config.num_reg_defaults_raw = BLOCK_TEST_SIZE;
334
335	map = gen_regmap(&config, &data);
336	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
337	if (IS_ERR(map))
338		return;
339
340	/* We should have read the cache defaults back from the map */
341	for (i = 0; i < BLOCK_TEST_SIZE; i++) {
342		KUNIT_EXPECT_EQ(test, t->type != REGCACHE_NONE, data->read[i]);
343		data->read[i] = false;
344	}
345
346	/* Read back the expected default data */
347	KUNIT_EXPECT_EQ(test, 0, regmap_bulk_read(map, 0, rval,
348						  BLOCK_TEST_SIZE));
349	KUNIT_EXPECT_MEMEQ(test, data->vals, rval, sizeof(rval));
350
351	/* The data should have been read from cache if there was one */
352	for (i = 0; i < BLOCK_TEST_SIZE; i++)
353		KUNIT_EXPECT_EQ(test, t->type == REGCACHE_NONE, data->read[i]);
354}
355
/*
 * Check that regmap_register_patch() writes exactly the patched
 * registers (2 and 5) to the device, incrementing their values, and
 * leaves every other register untouched.
 */
static void register_patch(struct kunit *test)
{
	struct regcache_types *t = (struct regcache_types *)test->param_value;
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	struct reg_sequence patch[2];
	unsigned int rval[BLOCK_TEST_SIZE];
	int i;

	/* We need defaults so readback works */
	config = test_regmap_config;
	config.cache_type = t->type;
	config.num_reg_defaults = BLOCK_TEST_SIZE;

	map = gen_regmap(&config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	/* Stash the original values */
	KUNIT_EXPECT_EQ(test, 0, regmap_bulk_read(map, 0, rval,
						  BLOCK_TEST_SIZE));

	/* Patch a couple of values */
	patch[0].reg = 2;
	patch[0].def = rval[2] + 1;
	patch[0].delay_us = 0;
	patch[1].reg = 5;
	patch[1].def = rval[5] + 1;
	patch[1].delay_us = 0;
	KUNIT_EXPECT_EQ(test, 0, regmap_register_patch(map, patch,
						       ARRAY_SIZE(patch)));

	/* Only the patched registers are written */
	for (i = 0; i < BLOCK_TEST_SIZE; i++) {
		switch (i) {
		case 2:
		case 5:
			KUNIT_EXPECT_TRUE(test, data->written[i]);
			KUNIT_EXPECT_EQ(test, data->vals[i], rval[i] + 1);
			break;
		default:
			KUNIT_EXPECT_FALSE(test, data->written[i]);
			KUNIT_EXPECT_EQ(test, data->vals[i], rval[i]);
			break;
		}
	}

	regmap_exit(map);
}
407
/*
 * Check that with reg_stride = 2 only even registers are accessible:
 * reads and writes of odd registers must fail without touching the
 * device, while even registers read/write normally (reads coming from
 * the cache when one is configured).
 */
static void stride(struct kunit *test)
{
	struct regcache_types *t = (struct regcache_types *)test->param_value;
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int rval;
	int i;

	config = test_regmap_config;
	config.cache_type = t->type;
	config.reg_stride = 2;
	config.num_reg_defaults = BLOCK_TEST_SIZE / 2;

	map = gen_regmap(&config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	/* Only even registers can be accessed, try both read and write */
	for (i = 0; i < BLOCK_TEST_SIZE; i++) {
		data->read[i] = false;
		data->written[i] = false;

		if (i % 2) {
			KUNIT_EXPECT_NE(test, 0, regmap_read(map, i, &rval));
			KUNIT_EXPECT_NE(test, 0, regmap_write(map, i, rval));
			KUNIT_EXPECT_FALSE(test, data->read[i]);
			KUNIT_EXPECT_FALSE(test, data->written[i]);
		} else {
			KUNIT_EXPECT_EQ(test, 0, regmap_read(map, i, &rval));
			KUNIT_EXPECT_EQ(test, data->vals[i], rval);
			KUNIT_EXPECT_EQ(test, t->type == REGCACHE_NONE,
					data->read[i]);

			KUNIT_EXPECT_EQ(test, 0, regmap_write(map, i, rval));
			KUNIT_EXPECT_TRUE(test, data->written[i]);
		}
	}

	regmap_exit(map);
}
450
/*
 * An indirectly accessed register range: registers range_min to
 * range_max are reached through a window of window_len registers at
 * window_start, with the page selected via selector_reg.
 */
static struct regmap_range_cfg test_range = {
	.selector_reg = 1,
	.selector_mask = 0xff,

	.window_start = 4,
	.window_len = 10,

	.range_min = 20,
	.range_max = 40,
};
461
462static bool test_range_window_volatile(struct device *dev, unsigned int reg)
463{
464	if (reg >= test_range.window_start &&
465	    reg <= test_range.window_start + test_range.window_len)
466		return true;
467
468	return false;
469}
470
471static bool test_range_all_volatile(struct device *dev, unsigned int reg)
472{
473	if (test_range_window_volatile(dev, reg))
474		return true;
475
476	if (reg >= test_range.range_min && reg <= test_range.range_max)
477		return true;
478
479	return false;
480}
481
/*
 * Check paged/indirect access through test_range: accesses within the
 * virtual range must update the selector register and go through the
 * window registers, with no physical access to the virtual register
 * addresses themselves.
 */
static void basic_ranges(struct kunit *test)
{
	struct regcache_types *t = (struct regcache_types *)test->param_value;
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int val;
	int i;

	config = test_regmap_config;
	config.cache_type = t->type;
	config.volatile_reg = test_range_all_volatile;
	config.ranges = &test_range;
	config.num_ranges = 1;
	config.max_register = test_range.range_max;

	map = gen_regmap(&config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	/* Clear access tracking for the virtual range */
	for (i = test_range.range_min; i < test_range.range_max; i++) {
		data->read[i] = false;
		data->written[i] = false;
	}

	/* Reset the page to a non-zero value to trigger a change */
	KUNIT_EXPECT_EQ(test, 0, regmap_write(map, test_range.selector_reg,
					      test_range.range_max));

	/* Check we set the page and use the window for writes */
	data->written[test_range.selector_reg] = false;
	data->written[test_range.window_start] = false;
	KUNIT_EXPECT_EQ(test, 0, regmap_write(map, test_range.range_min, 0));
	KUNIT_EXPECT_TRUE(test, data->written[test_range.selector_reg]);
	KUNIT_EXPECT_TRUE(test, data->written[test_range.window_start]);

	/* A register one page further still goes via the window start */
	data->written[test_range.selector_reg] = false;
	data->written[test_range.window_start] = false;
	KUNIT_EXPECT_EQ(test, 0, regmap_write(map,
					      test_range.range_min +
					      test_range.window_len,
					      0));
	KUNIT_EXPECT_TRUE(test, data->written[test_range.selector_reg]);
	KUNIT_EXPECT_TRUE(test, data->written[test_range.window_start]);

	/* Same for reads */
	data->written[test_range.selector_reg] = false;
	data->read[test_range.window_start] = false;
	KUNIT_EXPECT_EQ(test, 0, regmap_read(map, test_range.range_min, &val));
	KUNIT_EXPECT_TRUE(test, data->written[test_range.selector_reg]);
	KUNIT_EXPECT_TRUE(test, data->read[test_range.window_start]);

	data->written[test_range.selector_reg] = false;
	data->read[test_range.window_start] = false;
	KUNIT_EXPECT_EQ(test, 0, regmap_read(map,
					     test_range.range_min +
					     test_range.window_len,
					     &val));
	KUNIT_EXPECT_TRUE(test, data->written[test_range.selector_reg]);
	KUNIT_EXPECT_TRUE(test, data->read[test_range.window_start]);

	/* No physical access triggered in the virtual range */
	for (i = test_range.range_min; i < test_range.range_max; i++) {
		KUNIT_EXPECT_FALSE(test, data->read[i]);
		KUNIT_EXPECT_FALSE(test, data->written[i]);
	}

	regmap_exit(map);
}
552
553/* Try to stress dynamic creation of cache data structures */
554static void stress_insert(struct kunit *test)
555{
556	struct regcache_types *t = (struct regcache_types *)test->param_value;
557	struct regmap *map;
558	struct regmap_config config;
559	struct regmap_ram_data *data;
560	unsigned int rval, *vals;
561	size_t buf_sz;
562	int i;
563
564	config = test_regmap_config;
565	config.cache_type = t->type;
566	config.max_register = 300;
567
568	map = gen_regmap(&config, &data);
569	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
570	if (IS_ERR(map))
571		return;
572
573	vals = kunit_kcalloc(test, sizeof(unsigned long), config.max_register,
574			     GFP_KERNEL);
575	KUNIT_ASSERT_FALSE(test, vals == NULL);
576	buf_sz = sizeof(unsigned long) * config.max_register;
577
578	get_random_bytes(vals, buf_sz);
579
580	/* Write data into the map/cache in ever decreasing strides */
581	for (i = 0; i < config.max_register; i += 100)
582		KUNIT_EXPECT_EQ(test, 0, regmap_write(map, i, vals[i]));
583	for (i = 0; i < config.max_register; i += 50)
584		KUNIT_EXPECT_EQ(test, 0, regmap_write(map, i, vals[i]));
585	for (i = 0; i < config.max_register; i += 25)
586		KUNIT_EXPECT_EQ(test, 0, regmap_write(map, i, vals[i]));
587	for (i = 0; i < config.max_register; i += 10)
588		KUNIT_EXPECT_EQ(test, 0, regmap_write(map, i, vals[i]));
589	for (i = 0; i < config.max_register; i += 5)
590		KUNIT_EXPECT_EQ(test, 0, regmap_write(map, i, vals[i]));
591	for (i = 0; i < config.max_register; i += 3)
592		KUNIT_EXPECT_EQ(test, 0, regmap_write(map, i, vals[i]));
593	for (i = 0; i < config.max_register; i += 2)
594		KUNIT_EXPECT_EQ(test, 0, regmap_write(map, i, vals[i]));
595	for (i = 0; i < config.max_register; i++)
596		KUNIT_EXPECT_EQ(test, 0, regmap_write(map, i, vals[i]));
597
598	/* Do reads from the cache (if there is one) match? */
599	for (i = 0; i < config.max_register; i ++) {
600		KUNIT_EXPECT_EQ(test, 0, regmap_read(map, i, &rval));
601		KUNIT_EXPECT_EQ(test, rval, vals[i]);
602		KUNIT_EXPECT_EQ(test, t->type == REGCACHE_NONE, data->read[i]);
603	}
604
605	regmap_exit(map);
606}
607
608static void cache_bypass(struct kunit *test)
609{
610	struct regcache_types *t = (struct regcache_types *)test->param_value;
611	struct regmap *map;
612	struct regmap_config config;
613	struct regmap_ram_data *data;
614	unsigned int val, rval;
615
616	config = test_regmap_config;
617	config.cache_type = t->type;
618
619	map = gen_regmap(&config, &data);
620	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
621	if (IS_ERR(map))
622		return;
623
624	get_random_bytes(&val, sizeof(val));
625
626	/* Ensure the cache has a value in it */
627	KUNIT_EXPECT_EQ(test, 0, regmap_write(map, 0, val));
628
629	/* Bypass then write a different value */
630	regcache_cache_bypass(map, true);
631	KUNIT_EXPECT_EQ(test, 0, regmap_write(map, 0, val + 1));
632
633	/* Read the bypassed value */
634	KUNIT_EXPECT_EQ(test, 0, regmap_read(map, 0, &rval));
635	KUNIT_EXPECT_EQ(test, val + 1, rval);
636	KUNIT_EXPECT_EQ(test, data->vals[0], rval);
637
638	/* Disable bypass, the cache should still return the original value */
639	regcache_cache_bypass(map, false);
640	KUNIT_EXPECT_EQ(test, 0, regmap_read(map, 0, &rval));
641	KUNIT_EXPECT_EQ(test, val, rval);
642
643	regmap_exit(map);
644}
645
/*
 * Check that regcache_sync() rewrites cached values to the device
 * after the device contents have been trashed and the cache marked
 * dirty.
 */
static void cache_sync(struct kunit *test)
{
	struct regcache_types *t = (struct regcache_types *)test->param_value;
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int val[BLOCK_TEST_SIZE];
	int i;

	config = test_regmap_config;
	config.cache_type = t->type;

	map = gen_regmap(&config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	get_random_bytes(&val, sizeof(val));

	/* Put some data into the cache */
	KUNIT_EXPECT_EQ(test, 0, regmap_bulk_write(map, 0, val,
						   BLOCK_TEST_SIZE));
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		data->written[i] = false;

	/* Trash the data on the device itself then resync */
	regcache_mark_dirty(map);
	memset(data->vals, 0, sizeof(val));
	KUNIT_EXPECT_EQ(test, 0, regcache_sync(map));

	/* Did we just write the correct data out? */
	KUNIT_EXPECT_MEMEQ(test, data->vals, val, sizeof(val));
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_EXPECT_EQ(test, true, data->written[i]);

	regmap_exit(map);
}
683
/*
 * Check that with register defaults configured a sync after marking
 * the cache dirty only writes out the one register that was changed
 * away from its default.
 */
static void cache_sync_defaults(struct kunit *test)
{
	struct regcache_types *t = (struct regcache_types *)test->param_value;
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int val;
	int i;

	config = test_regmap_config;
	config.cache_type = t->type;
	config.num_reg_defaults = BLOCK_TEST_SIZE;

	map = gen_regmap(&config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	get_random_bytes(&val, sizeof(val));

	/* Change the value of one register */
	KUNIT_EXPECT_EQ(test, 0, regmap_write(map, 2, val));

	/* Resync */
	regcache_mark_dirty(map);
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		data->written[i] = false;
	KUNIT_EXPECT_EQ(test, 0, regcache_sync(map));

	/* Did we just sync the one register we touched? */
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_EXPECT_EQ(test, i == 2, data->written[i]);

	regmap_exit(map);
}
719
/*
 * Check that a register which is not writeable (5, via reg_5_false)
 * is never written by regcache_sync() while all other cached
 * registers are written out.
 */
static void cache_sync_readonly(struct kunit *test)
{
	struct regcache_types *t = (struct regcache_types *)test->param_value;
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int val;
	int i;

	config = test_regmap_config;
	config.cache_type = t->type;
	config.writeable_reg = reg_5_false;

	map = gen_regmap(&config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	/* Read all registers to fill the cache */
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_EXPECT_EQ(test, 0, regmap_read(map, i, &val));

	/* Change the value of all registers, readonly should fail */
	get_random_bytes(&val, sizeof(val));
	regcache_cache_only(map, true);
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_EXPECT_EQ(test, i != 5, regmap_write(map, i, val) == 0);
	regcache_cache_only(map, false);

	/* Resync */
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		data->written[i] = false;
	KUNIT_EXPECT_EQ(test, 0, regcache_sync(map));

	/* Did that match what we see on the device? */
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_EXPECT_EQ(test, i != 5, data->written[i]);

	regmap_exit(map);
}
760
/*
 * Check the interaction of regmap_register_patch() with cache sync:
 * on sync the patched values (registers 2 and 5) are reapplied to the
 * device, but readback through the cache still returns the original
 * default values.
 */
static void cache_sync_patch(struct kunit *test)
{
	struct regcache_types *t = (struct regcache_types *)test->param_value;
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	struct reg_sequence patch[2];
	unsigned int rval[BLOCK_TEST_SIZE], val;
	int i;

	/* We need defaults so readback works */
	config = test_regmap_config;
	config.cache_type = t->type;
	config.num_reg_defaults = BLOCK_TEST_SIZE;

	map = gen_regmap(&config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	/* Stash the original values */
	KUNIT_EXPECT_EQ(test, 0, regmap_bulk_read(map, 0, rval,
						  BLOCK_TEST_SIZE));

	/* Patch a couple of values */
	patch[0].reg = 2;
	patch[0].def = rval[2] + 1;
	patch[0].delay_us = 0;
	patch[1].reg = 5;
	patch[1].def = rval[5] + 1;
	patch[1].delay_us = 0;
	KUNIT_EXPECT_EQ(test, 0, regmap_register_patch(map, patch,
						       ARRAY_SIZE(patch)));

	/* Sync the cache */
	regcache_mark_dirty(map);
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		data->written[i] = false;
	KUNIT_EXPECT_EQ(test, 0, regcache_sync(map));

	/* The patch should be on the device but not in the cache */
	for (i = 0; i < BLOCK_TEST_SIZE; i++) {
		KUNIT_EXPECT_EQ(test, 0, regmap_read(map, i, &val));
		KUNIT_EXPECT_EQ(test, val, rval[i]);

		switch (i) {
		case 2:
		case 5:
			KUNIT_EXPECT_EQ(test, true, data->written[i]);
			KUNIT_EXPECT_EQ(test, data->vals[i], rval[i] + 1);
			break;
		default:
			KUNIT_EXPECT_EQ(test, false, data->written[i]);
			KUNIT_EXPECT_EQ(test, data->vals[i], rval[i]);
			break;
		}
	}

	regmap_exit(map);
}
821
/*
 * Check regcache_drop_region(): after dropping registers 3..5 from
 * the cache a bulk read fetches only those registers from the device,
 * with everything else still coming from the cache.
 */
static void cache_drop(struct kunit *test)
{
	struct regcache_types *t = (struct regcache_types *)test->param_value;
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int rval[BLOCK_TEST_SIZE];
	int i;

	config = test_regmap_config;
	config.cache_type = t->type;
	config.num_reg_defaults = BLOCK_TEST_SIZE;

	map = gen_regmap(&config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	/* Ensure the data is read from the cache */
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		data->read[i] = false;
	KUNIT_EXPECT_EQ(test, 0, regmap_bulk_read(map, 0, rval,
						  BLOCK_TEST_SIZE));
	for (i = 0; i < BLOCK_TEST_SIZE; i++) {
		KUNIT_EXPECT_FALSE(test, data->read[i]);
		data->read[i] = false;
	}
	KUNIT_EXPECT_MEMEQ(test, data->vals, rval, sizeof(rval));

	/* Drop some registers */
	KUNIT_EXPECT_EQ(test, 0, regcache_drop_region(map, 3, 5));

	/* Reread and check only the dropped registers hit the device. */
	KUNIT_EXPECT_EQ(test, 0, regmap_bulk_read(map, 0, rval,
						  BLOCK_TEST_SIZE));
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_EXPECT_EQ(test, data->read[i], i >= 3 && i <= 5);
	KUNIT_EXPECT_MEMEQ(test, data->vals, rval, sizeof(rval));

	regmap_exit(map);
}
863
/*
 * Check regcache_reg_cached(): with no defaults nothing is cached
 * initially (and querying must not touch the device); after reading
 * every register all of them report as cached.
 */
static void cache_present(struct kunit *test)
{
	struct regcache_types *t = (struct regcache_types *)test->param_value;
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int val;
	int i;

	config = test_regmap_config;
	config.cache_type = t->type;

	map = gen_regmap(&config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	/* Clear read tracking before probing the cache */
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		data->read[i] = false;

	/* No defaults so no registers cached. */
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_ASSERT_FALSE(test, regcache_reg_cached(map, i));

	/* We didn't trigger any reads */
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_ASSERT_FALSE(test, data->read[i]);

	/* Fill the cache */
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_EXPECT_EQ(test, 0, regmap_read(map, i, &val));

	/* Now everything should be cached */
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_ASSERT_TRUE(test, regcache_reg_cached(map, i));

	regmap_exit(map);
}
902
903/* Check that caching the window register works with sync */
904static void cache_range_window_reg(struct kunit *test)
905{
906	struct regcache_types *t = (struct regcache_types *)test->param_value;
907	struct regmap *map;
908	struct regmap_config config;
909	struct regmap_ram_data *data;
910	unsigned int val;
911	int i;
912
913	config = test_regmap_config;
914	config.cache_type = t->type;
915	config.volatile_reg = test_range_window_volatile;
916	config.ranges = &test_range;
917	config.num_ranges = 1;
918	config.max_register = test_range.range_max;
919
920	map = gen_regmap(&config, &data);
921	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
922	if (IS_ERR(map))
923		return;
924
925	/* Write new values to the entire range */
926	for (i = test_range.range_min; i <= test_range.range_max; i++)
927		KUNIT_ASSERT_EQ(test, 0, regmap_write(map, i, 0));
928
929	val = data->vals[test_range.selector_reg] & test_range.selector_mask;
930	KUNIT_ASSERT_EQ(test, val, 2);
931
932	/* Write to the first register in the range to reset the page */
933	KUNIT_ASSERT_EQ(test, 0, regmap_write(map, test_range.range_min, 0));
934	val = data->vals[test_range.selector_reg] & test_range.selector_mask;
935	KUNIT_ASSERT_EQ(test, val, 0);
936
937	/* Trigger a cache sync */
938	regcache_mark_dirty(map);
939	KUNIT_ASSERT_EQ(test, 0, regcache_sync(map));
940
941	/* Write to the first register again, the page should be reset */
942	KUNIT_ASSERT_EQ(test, 0, regmap_write(map, test_range.range_min, 0));
943	val = data->vals[test_range.selector_reg] & test_range.selector_mask;
944	KUNIT_ASSERT_EQ(test, val, 0);
945
946	/* Trigger another cache sync */
947	regcache_mark_dirty(map);
948	KUNIT_ASSERT_EQ(test, 0, regcache_sync(map));
949
950	/* Write to the last register again, the page should be reset */
951	KUNIT_ASSERT_EQ(test, 0, regmap_write(map, test_range.range_max, 0));
952	val = data->vals[test_range.selector_reg] & test_range.selector_mask;
953	KUNIT_ASSERT_EQ(test, val, 2);
954}
955
/*
 * Test parameter for the raw (formatted I/O) tests: cache type plus
 * the value endianness to format with.
 */
struct raw_test_types {
	const char *name;	/* human readable name for test output */

	enum regcache_type cache_type;
	enum regmap_endian val_endian;
};
962
963static void raw_to_desc(const struct raw_test_types *t, char *desc)
964{
965	strcpy(desc, t->name);
966}
967
/* Every cache type / value endianness combination, including uncached */
static const struct raw_test_types raw_types_list[] = {
	{ "none-little",   REGCACHE_NONE,   REGMAP_ENDIAN_LITTLE },
	{ "none-big",      REGCACHE_NONE,   REGMAP_ENDIAN_BIG },
	{ "flat-little",   REGCACHE_FLAT,   REGMAP_ENDIAN_LITTLE },
	{ "flat-big",      REGCACHE_FLAT,   REGMAP_ENDIAN_BIG },
	{ "rbtree-little", REGCACHE_RBTREE, REGMAP_ENDIAN_LITTLE },
	{ "rbtree-big",    REGCACHE_RBTREE, REGMAP_ENDIAN_BIG },
	{ "maple-little",  REGCACHE_MAPLE,  REGMAP_ENDIAN_LITTLE },
	{ "maple-big",     REGCACHE_MAPLE,  REGMAP_ENDIAN_BIG },
};

KUNIT_ARRAY_PARAM(raw_test_types, raw_types_list, raw_to_desc);

/* As above, restricted to combinations with a real cache */
static const struct raw_test_types raw_cache_types_list[] = {
	{ "flat-little",   REGCACHE_FLAT,   REGMAP_ENDIAN_LITTLE },
	{ "flat-big",      REGCACHE_FLAT,   REGMAP_ENDIAN_BIG },
	{ "rbtree-little", REGCACHE_RBTREE, REGMAP_ENDIAN_LITTLE },
	{ "rbtree-big",    REGCACHE_RBTREE, REGMAP_ENDIAN_BIG },
	{ "maple-little",  REGCACHE_MAPLE,  REGMAP_ENDIAN_LITTLE },
	{ "maple-big",     REGCACHE_MAPLE,  REGMAP_ENDIAN_BIG },
};

KUNIT_ARRAY_PARAM(raw_test_cache_types, raw_cache_types_list, raw_to_desc);
991
/*
 * Base config for the raw tests: 16 bit registers and values with
 * little endian register formatting.  Value endianness and cache type
 * are filled in per test case by gen_raw_regmap().
 */
static const struct regmap_config raw_regmap_config = {
	.max_register = BLOCK_TEST_SIZE,

	.reg_format_endian = REGMAP_ENDIAN_LITTLE,
	.reg_bits = 16,
	.val_bits = 16,
};
999
1000static struct regmap *gen_raw_regmap(struct regmap_config *config,
1001				     struct raw_test_types *test_type,
1002				     struct regmap_ram_data **data)
1003{
1004	u16 *buf;
1005	struct regmap *ret;
1006	size_t size = (config->max_register + 1) * config->reg_bits / 8;
1007	int i;
1008	struct reg_default *defaults;
1009
1010	config->cache_type = test_type->cache_type;
1011	config->val_format_endian = test_type->val_endian;
1012	config->disable_locking = config->cache_type == REGCACHE_RBTREE ||
1013					config->cache_type == REGCACHE_MAPLE;
1014
1015	buf = kmalloc(size, GFP_KERNEL);
1016	if (!buf)
1017		return ERR_PTR(-ENOMEM);
1018
1019	get_random_bytes(buf, size);
1020
1021	*data = kzalloc(sizeof(**data), GFP_KERNEL);
1022	if (!(*data))
1023		return ERR_PTR(-ENOMEM);
1024	(*data)->vals = (void *)buf;
1025
1026	config->num_reg_defaults = config->max_register + 1;
1027	defaults = kcalloc(config->num_reg_defaults,
1028			   sizeof(struct reg_default),
1029			   GFP_KERNEL);
1030	if (!defaults)
1031		return ERR_PTR(-ENOMEM);
1032	config->reg_defaults = defaults;
1033
1034	for (i = 0; i < config->num_reg_defaults; i++) {
1035		defaults[i].reg = i;
1036		switch (test_type->val_endian) {
1037		case REGMAP_ENDIAN_LITTLE:
1038			defaults[i].def = le16_to_cpu(buf[i]);
1039			break;
1040		case REGMAP_ENDIAN_BIG:
1041			defaults[i].def = be16_to_cpu(buf[i]);
1042			break;
1043		default:
1044			return ERR_PTR(-EINVAL);
1045		}
1046	}
1047
1048	/*
1049	 * We use the defaults in the tests but they don't make sense
1050	 * to the core if there's no cache.
1051	 */
1052	if (config->cache_type == REGCACHE_NONE)
1053		config->num_reg_defaults = 0;
1054
1055	ret = regmap_init_raw_ram(config, *data);
1056	if (IS_ERR(ret)) {
1057		kfree(buf);
1058		kfree(*data);
1059	}
1060
1061	return ret;
1062}
1063
1064static void raw_read_defaults_single(struct kunit *test)
1065{
1066	struct raw_test_types *t = (struct raw_test_types *)test->param_value;
1067	struct regmap *map;
1068	struct regmap_config config;
1069	struct regmap_ram_data *data;
1070	unsigned int rval;
1071	int i;
1072
1073	config = raw_regmap_config;
1074
1075	map = gen_raw_regmap(&config, t, &data);
1076	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
1077	if (IS_ERR(map))
1078		return;
1079
1080	/* Check that we can read the defaults via the API */
1081	for (i = 0; i < config.max_register + 1; i++) {
1082		KUNIT_EXPECT_EQ(test, 0, regmap_read(map, i, &rval));
1083		KUNIT_EXPECT_EQ(test, config.reg_defaults[i].def, rval);
1084	}
1085
1086	regmap_exit(map);
1087}
1088
1089static void raw_read_defaults(struct kunit *test)
1090{
1091	struct raw_test_types *t = (struct raw_test_types *)test->param_value;
1092	struct regmap *map;
1093	struct regmap_config config;
1094	struct regmap_ram_data *data;
1095	u16 *rval;
1096	u16 def;
1097	size_t val_len;
1098	int i;
1099
1100	config = raw_regmap_config;
1101
1102	map = gen_raw_regmap(&config, t, &data);
1103	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
1104	if (IS_ERR(map))
1105		return;
1106
1107	val_len = sizeof(*rval) * (config.max_register + 1);
1108	rval = kmalloc(val_len, GFP_KERNEL);
1109	KUNIT_ASSERT_TRUE(test, rval != NULL);
1110	if (!rval)
1111		return;
1112
1113	/* Check that we can read the defaults via the API */
1114	KUNIT_EXPECT_EQ(test, 0, regmap_raw_read(map, 0, rval, val_len));
1115	for (i = 0; i < config.max_register + 1; i++) {
1116		def = config.reg_defaults[i].def;
1117		if (config.val_format_endian == REGMAP_ENDIAN_BIG) {
1118			KUNIT_EXPECT_EQ(test, def, be16_to_cpu(rval[i]));
1119		} else {
1120			KUNIT_EXPECT_EQ(test, def, le16_to_cpu(rval[i]));
1121		}
1122	}
1123
1124	kfree(rval);
1125	regmap_exit(map);
1126}
1127
1128static void raw_write_read_single(struct kunit *test)
1129{
1130	struct raw_test_types *t = (struct raw_test_types *)test->param_value;
1131	struct regmap *map;
1132	struct regmap_config config;
1133	struct regmap_ram_data *data;
1134	u16 val;
1135	unsigned int rval;
1136
1137	config = raw_regmap_config;
1138
1139	map = gen_raw_regmap(&config, t, &data);
1140	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
1141	if (IS_ERR(map))
1142		return;
1143
1144	get_random_bytes(&val, sizeof(val));
1145
1146	/* If we write a value to a register we can read it back */
1147	KUNIT_EXPECT_EQ(test, 0, regmap_write(map, 0, val));
1148	KUNIT_EXPECT_EQ(test, 0, regmap_read(map, 0, &rval));
1149	KUNIT_EXPECT_EQ(test, val, rval);
1150
1151	regmap_exit(map);
1152}
1153
1154static void raw_write(struct kunit *test)
1155{
1156	struct raw_test_types *t = (struct raw_test_types *)test->param_value;
1157	struct regmap *map;
1158	struct regmap_config config;
1159	struct regmap_ram_data *data;
1160	u16 *hw_buf;
1161	u16 val[2];
1162	unsigned int rval;
1163	int i;
1164
1165	config = raw_regmap_config;
1166
1167	map = gen_raw_regmap(&config, t, &data);
1168	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
1169	if (IS_ERR(map))
1170		return;
1171
1172	hw_buf = (u16 *)data->vals;
1173
1174	get_random_bytes(&val, sizeof(val));
1175
1176	/* Do a raw write */
1177	KUNIT_EXPECT_EQ(test, 0, regmap_raw_write(map, 2, val, sizeof(val)));
1178
1179	/* We should read back the new values, and defaults for the rest */
1180	for (i = 0; i < config.max_register + 1; i++) {
1181		KUNIT_EXPECT_EQ(test, 0, regmap_read(map, i, &rval));
1182
1183		switch (i) {
1184		case 2:
1185		case 3:
1186			if (config.val_format_endian == REGMAP_ENDIAN_BIG) {
1187				KUNIT_EXPECT_EQ(test, rval,
1188						be16_to_cpu(val[i % 2]));
1189			} else {
1190				KUNIT_EXPECT_EQ(test, rval,
1191						le16_to_cpu(val[i % 2]));
1192			}
1193			break;
1194		default:
1195			KUNIT_EXPECT_EQ(test, config.reg_defaults[i].def, rval);
1196			break;
1197		}
1198	}
1199
1200	/* The values should appear in the "hardware" */
1201	KUNIT_EXPECT_MEMEQ(test, &hw_buf[2], val, sizeof(val));
1202
1203	regmap_exit(map);
1204}
1205
/* Predicate used by the regmap config: matches only register 0 */
static bool reg_zero(struct device *dev, unsigned int reg)
{
	return !reg;
}
1210
/* Predicate used by the RAM backend: matches only register 0 */
static bool ram_reg_zero(struct regmap_ram_data *data, unsigned int reg)
{
	return !reg;
}
1215
/*
 * regmap_noinc_write() streams a block into one non-incrementing
 * (FIFO-style) register: afterwards that register must hold the last
 * element written and the neighbouring register must be untouched.
 */
static void raw_noinc_write(struct kunit *test)
{
	struct raw_test_types *t = (struct raw_test_types *)test->param_value;
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int val;
	u16 val_test, val_last;
	u16 val_array[BLOCK_TEST_SIZE];

	config = raw_regmap_config;
	/* Register 0 is volatile and the only noinc register */
	config.volatile_reg = reg_zero;
	config.writeable_noinc_reg = reg_zero;
	config.readable_noinc_reg = reg_zero;

	map = gen_raw_regmap(&config, t, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	/* Make the RAM backend treat register 0 as noinc as well */
	data->noinc_reg = ram_reg_zero;

	get_random_bytes(&val_array, sizeof(val_array));

	/* Decode the expected values in this case's bus endianness */
	if (config.val_format_endian == REGMAP_ENDIAN_BIG) {
		/* +100 keeps val_test distinct from val_array[1] (wraps in u16) */
		val_test = be16_to_cpu(val_array[1]) + 100;
		val_last = be16_to_cpu(val_array[BLOCK_TEST_SIZE - 1]);
	} else {
		val_test = le16_to_cpu(val_array[1]) + 100;
		val_last = le16_to_cpu(val_array[BLOCK_TEST_SIZE - 1]);
	}

	/* Put some data into the register following the noinc register */
	KUNIT_EXPECT_EQ(test, 0, regmap_write(map, 1, val_test));

	/* Write some data to the noinc register */
	KUNIT_EXPECT_EQ(test, 0, regmap_noinc_write(map, 0, val_array,
						    sizeof(val_array)));

	/* We should read back the last value written */
	KUNIT_EXPECT_EQ(test, 0, regmap_read(map, 0, &val));
	KUNIT_ASSERT_EQ(test, val_last, val);

	/* Make sure we didn't touch the register after the noinc register */
	KUNIT_EXPECT_EQ(test, 0, regmap_read(map, 1, &val));
	KUNIT_ASSERT_EQ(test, val_test, val);

	regmap_exit(map);
}
1265
/*
 * Writes done in cache-only mode (one raw write covering registers 2-3
 * plus one regular write to register 4) must be visible through the
 * cache but not reach the "hardware" until regcache_sync() runs.
 */
static void raw_sync(struct kunit *test)
{
	struct raw_test_types *t = (struct raw_test_types *)test->param_value;
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	u16 val[3];
	u16 *hw_buf;
	unsigned int rval;
	int i;

	config = raw_regmap_config;

	map = gen_raw_regmap(&config, t, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	hw_buf = (u16 *)data->vals;

	/* Generate values guaranteed to differ byte-wise from hw_buf[2..4] */
	get_changed_bytes(&hw_buf[2], &val[0], sizeof(val));

	/* Do a regular write and a raw write in cache only mode */
	regcache_cache_only(map, true);
	KUNIT_EXPECT_EQ(test, 0, regmap_raw_write(map, 2, val,
						  sizeof(u16) * 2));
	KUNIT_EXPECT_EQ(test, 0, regmap_write(map, 4, val[2]));

	/* We should read back the new values, and defaults for the rest */
	for (i = 0; i < config.max_register + 1; i++) {
		KUNIT_EXPECT_EQ(test, 0, regmap_read(map, i, &rval));

		switch (i) {
		case 2:
		case 3:
			/* Raw-written registers read back endian-decoded */
			if (config.val_format_endian == REGMAP_ENDIAN_BIG) {
				KUNIT_EXPECT_EQ(test, rval,
						be16_to_cpu(val[i - 2]));
			} else {
				KUNIT_EXPECT_EQ(test, rval,
						le16_to_cpu(val[i - 2]));
			}
			break;
		case 4:
			/* regmap_write() took a CPU-endian value as-is */
			KUNIT_EXPECT_EQ(test, rval, val[i - 2]);
			break;
		default:
			KUNIT_EXPECT_EQ(test, config.reg_defaults[i].def, rval);
			break;
		}
	}

	/*
	 * The value written via _write() was translated by the core,
	 * translate the original copy for comparison purposes.
	 */
	if (config.val_format_endian == REGMAP_ENDIAN_BIG)
		val[2] = cpu_to_be16(val[2]);
	else
		val[2] = cpu_to_le16(val[2]);

	/*
	 * The values should not appear in the "hardware".
	 * NOTE(review): after the byte swap above val[2] could, in
	 * principle, coincide with the hardware bytes again — confirm
	 * this cannot flake.
	 */
	KUNIT_EXPECT_MEMNEQ(test, &hw_buf[2], &val[0], sizeof(val));

	/* Clear write tracking so only the sync's accesses are counted */
	for (i = 0; i < config.max_register + 1; i++)
		data->written[i] = false;

	/* Do the sync */
	regcache_cache_only(map, false);
	regcache_mark_dirty(map);
	KUNIT_EXPECT_EQ(test, 0, regcache_sync(map));

	/* The values should now appear in the "hardware" */
	KUNIT_EXPECT_MEMEQ(test, &hw_buf[2], &val[0], sizeof(val));

	regmap_exit(map);
}
1343
/*
 * Accesses inside a paged (windowed) register range must go through the
 * selector register and the window, never directly to the virtual
 * addresses.
 */
static void raw_ranges(struct kunit *test)
{
	struct raw_test_types *t = (struct raw_test_types *)test->param_value;
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int val;
	int i;

	config = raw_regmap_config;
	config.volatile_reg = test_range_all_volatile;
	config.ranges = &test_range;
	config.num_ranges = 1;
	config.max_register = test_range.range_max;

	map = gen_raw_regmap(&config, t, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	/* Reset the page to a non-zero value to trigger a change */
	KUNIT_EXPECT_EQ(test, 0, regmap_write(map, test_range.selector_reg,
					      test_range.range_max));

	/* Check we set the page and use the window for writes */
	data->written[test_range.selector_reg] = false;
	data->written[test_range.window_start] = false;
	KUNIT_EXPECT_EQ(test, 0, regmap_write(map, test_range.range_min, 0));
	KUNIT_EXPECT_TRUE(test, data->written[test_range.selector_reg]);
	KUNIT_EXPECT_TRUE(test, data->written[test_range.window_start]);

	/* Same again one window further in, forcing a page change */
	data->written[test_range.selector_reg] = false;
	data->written[test_range.window_start] = false;
	KUNIT_EXPECT_EQ(test, 0, regmap_write(map,
					      test_range.range_min +
					      test_range.window_len,
					      0));
	KUNIT_EXPECT_TRUE(test, data->written[test_range.selector_reg]);
	KUNIT_EXPECT_TRUE(test, data->written[test_range.window_start]);

	/* Same for reads */
	data->written[test_range.selector_reg] = false;
	data->read[test_range.window_start] = false;
	KUNIT_EXPECT_EQ(test, 0, regmap_read(map, test_range.range_min, &val));
	KUNIT_EXPECT_TRUE(test, data->written[test_range.selector_reg]);
	KUNIT_EXPECT_TRUE(test, data->read[test_range.window_start]);

	data->written[test_range.selector_reg] = false;
	data->read[test_range.window_start] = false;
	KUNIT_EXPECT_EQ(test, 0, regmap_read(map,
					     test_range.range_min +
					     test_range.window_len,
					     &val));
	KUNIT_EXPECT_TRUE(test, data->written[test_range.selector_reg]);
	KUNIT_EXPECT_TRUE(test, data->read[test_range.window_start]);

	/*
	 * No physical access triggered in the virtual range.
	 * NOTE(review): loop stops before range_max, so the last virtual
	 * register is never checked — confirm this is intentional.
	 */
	for (i = test_range.range_min; i < test_range.range_max; i++) {
		KUNIT_EXPECT_FALSE(test, data->read[i]);
		KUNIT_EXPECT_FALSE(test, data->written[i]);
	}

	regmap_exit(map);
}
1408
/* All test cases, parameterised by cache type and/or value endianness */
static struct kunit_case regmap_test_cases[] = {
	/* Run with every cache type, including REGCACHE_NONE */
	KUNIT_CASE_PARAM(basic_read_write, regcache_types_gen_params),
	KUNIT_CASE_PARAM(bulk_write, regcache_types_gen_params),
	KUNIT_CASE_PARAM(bulk_read, regcache_types_gen_params),
	KUNIT_CASE_PARAM(write_readonly, regcache_types_gen_params),
	KUNIT_CASE_PARAM(read_writeonly, regcache_types_gen_params),
	KUNIT_CASE_PARAM(reg_defaults, regcache_types_gen_params),
	KUNIT_CASE_PARAM(reg_defaults_read_dev, regcache_types_gen_params),
	KUNIT_CASE_PARAM(register_patch, regcache_types_gen_params),
	KUNIT_CASE_PARAM(stride, regcache_types_gen_params),
	KUNIT_CASE_PARAM(basic_ranges, regcache_types_gen_params),
	KUNIT_CASE_PARAM(stress_insert, regcache_types_gen_params),
	/* Need a real cache (flat/rbtree/maple) */
	KUNIT_CASE_PARAM(cache_bypass, real_cache_types_gen_params),
	KUNIT_CASE_PARAM(cache_sync, real_cache_types_gen_params),
	KUNIT_CASE_PARAM(cache_sync_defaults, real_cache_types_gen_params),
	KUNIT_CASE_PARAM(cache_sync_readonly, real_cache_types_gen_params),
	KUNIT_CASE_PARAM(cache_sync_patch, real_cache_types_gen_params),
	/* Need a sparse cache (rbtree/maple) */
	KUNIT_CASE_PARAM(cache_drop, sparse_cache_types_gen_params),
	KUNIT_CASE_PARAM(cache_present, sparse_cache_types_gen_params),
	KUNIT_CASE_PARAM(cache_range_window_reg, real_cache_types_gen_params),

	/* Raw I/O tests, also parameterised by value endianness */
	KUNIT_CASE_PARAM(raw_read_defaults_single, raw_test_types_gen_params),
	KUNIT_CASE_PARAM(raw_read_defaults, raw_test_types_gen_params),
	KUNIT_CASE_PARAM(raw_write_read_single, raw_test_types_gen_params),
	KUNIT_CASE_PARAM(raw_write, raw_test_types_gen_params),
	KUNIT_CASE_PARAM(raw_noinc_write, raw_test_types_gen_params),
	/* These need a cache, so exclude REGCACHE_NONE */
	KUNIT_CASE_PARAM(raw_sync, raw_test_cache_types_gen_params),
	KUNIT_CASE_PARAM(raw_ranges, raw_test_cache_types_gen_params),
	{}
};
1439
/* Suite definition; kunit_test_suite() registers it with the framework */
static struct kunit_suite regmap_test_suite = {
	.name = "regmap",
	.test_cases = regmap_test_cases,
};
kunit_test_suite(regmap_test_suite);
1445
1446MODULE_LICENSE("GPL v2");
1447