// SPDX-License-Identifier: GPL-2.0-or-later

#include <linux/init.h>
#include <linux/log2.h>
#include <kunit/test.h>

#include <asm/guest-state-buffer.h>

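/*
 * Check that a newly allocated guest state buffer has its capacity
 * rounded up to a power of two and starts out holding only the header.
 */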
static void test_creating_buffer(struct kunit *test)
{
	struct kvmppc_gs_buff *gsb;
	size_t size = 0x100;

	gsb = kvmppc_gsb_new(size, 0, 0, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, gsb);

	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, gsb->hdr);

	KUNIT_EXPECT_EQ(test, gsb->capacity, roundup_pow_of_two(size));
	KUNIT_EXPECT_EQ(test, gsb->len, sizeof(__be32));

	kvmppc_gsb_free(gsb);
}

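/*
 * Add elements with the low-level __kvmppc_gse_put(), the typed
 * wrappers and a vector128 value, then iterate the buffer and verify
 * each element before resetting the buffer.
 */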
static void test_adding_element(struct kunit *test)
{
	const struct kvmppc_gs_elem *head, *curr;
	union {
		__vector128 v;
		u64 dw[2];
	} u;
	int rem;
	struct kvmppc_gs_buff *gsb;
	size_t size = 0x1000;
	int i, rc;
	u64 data;

	gsb = kvmppc_gsb_new(size, 0, 0, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, gsb);

	/* Single elements, direct use of __kvmppc_gse_put() */
	data = 0xdeadbeef;
	rc = __kvmppc_gse_put(gsb, KVMPPC_GSID_GPR(0), 8, &data);
	KUNIT_EXPECT_GE(test, rc, 0);

	head = kvmppc_gsb_data(gsb);
	KUNIT_EXPECT_EQ(test, kvmppc_gse_iden(head), KVMPPC_GSID_GPR(0));
	KUNIT_EXPECT_EQ(test, kvmppc_gse_len(head), 8);
	data = 0;
	memcpy(&data, kvmppc_gse_data(head), 8);
	KUNIT_EXPECT_EQ(test, data, 0xdeadbeef);

	/* Multiple elements, simple wrapper */
	rc = kvmppc_gse_put_u64(gsb, KVMPPC_GSID_GPR(1), 0xcafef00d);
	KUNIT_EXPECT_GE(test, rc, 0);

	u.dw[0] = 0x1;
	u.dw[1] = 0x2;
	rc = kvmppc_gse_put_vector128(gsb, KVMPPC_GSID_VSRS(0), &u.v);
	KUNIT_EXPECT_GE(test, rc, 0);
	u.dw[0] = 0x0;
	u.dw[1] = 0x0;

	kvmppc_gsb_for_each_elem(i, curr, gsb, rem) {
		switch (i) {
		case 0:
			KUNIT_EXPECT_EQ(test, kvmppc_gse_iden(curr),
					KVMPPC_GSID_GPR(0));
			KUNIT_EXPECT_EQ(test, kvmppc_gse_len(curr), 8);
			KUNIT_EXPECT_EQ(test, kvmppc_gse_get_be64(curr),
					0xdeadbeef);
			break;
		case 1:
			KUNIT_EXPECT_EQ(test, kvmppc_gse_iden(curr),
					KVMPPC_GSID_GPR(1));
			KUNIT_EXPECT_EQ(test, kvmppc_gse_len(curr), 8);
			KUNIT_EXPECT_EQ(test, kvmppc_gse_get_u64(curr),
					0xcafef00d);
			break;
		case 2:
			KUNIT_EXPECT_EQ(test, kvmppc_gse_iden(curr),
					KVMPPC_GSID_VSRS(0));
			KUNIT_EXPECT_EQ(test, kvmppc_gse_len(curr), 16);
			kvmppc_gse_get_vector128(curr, &u.v);
			KUNIT_EXPECT_EQ(test, u.dw[0], 0x1);
			KUNIT_EXPECT_EQ(test, u.dw[1], 0x2);
			break;
		}
	}
	KUNIT_EXPECT_EQ(test, i, 3);

	kvmppc_gsb_reset(gsb);
	KUNIT_EXPECT_EQ(test, kvmppc_gsb_nelems(gsb), 0);
	KUNIT_EXPECT_EQ(test, kvmppc_gsb_len(gsb),
			sizeof(struct kvmppc_gs_header));

	kvmppc_gsb_free(gsb);
}

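/* Parse a filled buffer and look an element up again by its identity. */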
static void test_gs_parsing(struct kunit *test)
{
	struct kvmppc_gs_elem *gse;
	struct kvmppc_gs_parser gsp = { 0 };
	struct kvmppc_gs_buff *gsb;
	size_t size = 0x1000;
	u64 tmp1, tmp2;

	gsb = kvmppc_gsb_new(size, 0, 0, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, gsb);

	tmp1 = 0xdeadbeefull;
	kvmppc_gse_put_u64(gsb, KVMPPC_GSID_GPR(0), tmp1);

	KUNIT_EXPECT_GE(test, kvmppc_gse_parse(&gsp, gsb), 0);

	gse = kvmppc_gsp_lookup(&gsp, KVMPPC_GSID_GPR(0));
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, gse);

	tmp2 = kvmppc_gse_get_u64(gse);
	KUNIT_EXPECT_EQ(test, tmp2, 0xdeadbeefull);

	kvmppc_gsb_free(gsb);
}

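/*
 * Exercise set/test/clear across the guest state identity ranges and
 * check that iterating a populated bitmap visits the same identities.
 */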
static void test_gs_bitmap(struct kunit *test)
{
	struct kvmppc_gs_bitmap gsbm = { 0 };
	struct kvmppc_gs_bitmap gsbm1 = { 0 };
	struct kvmppc_gs_bitmap gsbm2 = { 0 };
	u16 iden;
	int i, j;

	i = 0;
	for (u16 iden = KVMPPC_GSID_HOST_STATE_SIZE;
	     iden <= KVMPPC_GSID_PROCESS_TABLE; iden++) {
		kvmppc_gsbm_set(&gsbm, iden);
		kvmppc_gsbm_set(&gsbm1, iden);
		KUNIT_EXPECT_TRUE(test, kvmppc_gsbm_test(&gsbm, iden));
		kvmppc_gsbm_clear(&gsbm, iden);
		KUNIT_EXPECT_FALSE(test, kvmppc_gsbm_test(&gsbm, iden));
		i++;
	}

	for (u16 iden = KVMPPC_GSID_RUN_INPUT; iden <= KVMPPC_GSID_VPA;
	     iden++) {
		kvmppc_gsbm_set(&gsbm, iden);
		kvmppc_gsbm_set(&gsbm1, iden);
		KUNIT_EXPECT_TRUE(test, kvmppc_gsbm_test(&gsbm, iden));
		kvmppc_gsbm_clear(&gsbm, iden);
		KUNIT_EXPECT_FALSE(test, kvmppc_gsbm_test(&gsbm, iden));
		i++;
	}

	for (u16 iden = KVMPPC_GSID_GPR(0); iden <= KVMPPC_GSID_CTRL; iden++) {
		kvmppc_gsbm_set(&gsbm, iden);
		kvmppc_gsbm_set(&gsbm1, iden);
		KUNIT_EXPECT_TRUE(test, kvmppc_gsbm_test(&gsbm, iden));
		kvmppc_gsbm_clear(&gsbm, iden);
		KUNIT_EXPECT_FALSE(test, kvmppc_gsbm_test(&gsbm, iden));
		i++;
	}

	for (u16 iden = KVMPPC_GSID_CR; iden <= KVMPPC_GSID_PSPB; iden++) {
		kvmppc_gsbm_set(&gsbm, iden);
		kvmppc_gsbm_set(&gsbm1, iden);
		KUNIT_EXPECT_TRUE(test, kvmppc_gsbm_test(&gsbm, iden));
		kvmppc_gsbm_clear(&gsbm, iden);
		KUNIT_EXPECT_FALSE(test, kvmppc_gsbm_test(&gsbm, iden));
		i++;
	}

	for (u16 iden = KVMPPC_GSID_VSRS(0); iden <= KVMPPC_GSID_VSRS(63);
	     iden++) {
		kvmppc_gsbm_set(&gsbm, iden);
		kvmppc_gsbm_set(&gsbm1, iden);
		KUNIT_EXPECT_TRUE(test, kvmppc_gsbm_test(&gsbm, iden));
		kvmppc_gsbm_clear(&gsbm, iden);
		KUNIT_EXPECT_FALSE(test, kvmppc_gsbm_test(&gsbm, iden));
		i++;
	}

	for (u16 iden = KVMPPC_GSID_HDAR; iden <= KVMPPC_GSID_ASDR; iden++) {
		kvmppc_gsbm_set(&gsbm, iden);
		kvmppc_gsbm_set(&gsbm1, iden);
		KUNIT_EXPECT_TRUE(test, kvmppc_gsbm_test(&gsbm, iden));
		kvmppc_gsbm_clear(&gsbm, iden);
		KUNIT_EXPECT_FALSE(test, kvmppc_gsbm_test(&gsbm, iden));
		i++;
	}

	j = 0;
	kvmppc_gsbm_for_each(&gsbm1, iden) {
		kvmppc_gsbm_set(&gsbm2, iden);
		j++;
	}
	KUNIT_EXPECT_EQ(test, i, j);
	KUNIT_EXPECT_MEMEQ(test, &gsbm1, &gsbm2, sizeof(gsbm1));
}

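/* Backing data for the guest state message exercised by test_gs_msg(). */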
struct kvmppc_gs_msg_test1_data {
	u64 a;
	u32 b;
	struct kvmppc_gs_part_table c;
	struct kvmppc_gs_proc_table d;
	struct kvmppc_gs_buff_info e;
};

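/* Report the buffer space needed for every element the message can carry. */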
static size_t test1_get_size(struct kvmppc_gs_msg *gsm)
{
	size_t size = 0;
	u16 ids[] = {
		KVMPPC_GSID_PARTITION_TABLE,
		KVMPPC_GSID_PROCESS_TABLE,
		KVMPPC_GSID_RUN_INPUT,
		KVMPPC_GSID_GPR(0),
		KVMPPC_GSID_CR,
	};

	for (int i = 0; i < ARRAY_SIZE(ids); i++)
		size += kvmppc_gse_total_size(kvmppc_gsid_size(ids[i]));
	return size;
}

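/* Serialise the test data into the buffer for each included identity. */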
static int test1_fill_info(struct kvmppc_gs_buff *gsb,
			   struct kvmppc_gs_msg *gsm)
{
	struct kvmppc_gs_msg_test1_data *data = gsm->data;

	if (kvmppc_gsm_includes(gsm, KVMPPC_GSID_GPR(0)))
		kvmppc_gse_put_u64(gsb, KVMPPC_GSID_GPR(0), data->a);

	if (kvmppc_gsm_includes(gsm, KVMPPC_GSID_CR))
		kvmppc_gse_put_u32(gsb, KVMPPC_GSID_CR, data->b);

	if (kvmppc_gsm_includes(gsm, KVMPPC_GSID_PARTITION_TABLE))
		kvmppc_gse_put_part_table(gsb, KVMPPC_GSID_PARTITION_TABLE,
					  data->c);

	if (kvmppc_gsm_includes(gsm, KVMPPC_GSID_PROCESS_TABLE))
		kvmppc_gse_put_proc_table(gsb, KVMPPC_GSID_PROCESS_TABLE,
					  data->d);

	if (kvmppc_gsm_includes(gsm, KVMPPC_GSID_RUN_INPUT))
		kvmppc_gse_put_buff_info(gsb, KVMPPC_GSID_RUN_INPUT, data->e);

	return 0;
}

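/*
 * Parse the buffer and copy the GPR(0) and CR values back into the
 * test data.
 */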
static int test1_refresh_info(struct kvmppc_gs_msg *gsm,
			      struct kvmppc_gs_buff *gsb)
{
	struct kvmppc_gs_parser gsp = { 0 };
	struct kvmppc_gs_msg_test1_data *data = gsm->data;
	struct kvmppc_gs_elem *gse;
	int rc;

	rc = kvmppc_gse_parse(&gsp, gsb);
	if (rc < 0)
		return rc;

	gse = kvmppc_gsp_lookup(&gsp, KVMPPC_GSID_GPR(0));
	if (gse)
		data->a = kvmppc_gse_get_u64(gse);

	gse = kvmppc_gsp_lookup(&gsp, KVMPPC_GSID_CR);
	if (gse)
		data->b = kvmppc_gse_get_u32(gse);

	return 0;
}

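/* Hook the helpers above into the guest state message interface. */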
static struct kvmppc_gs_msg_ops gs_msg_test1_ops = {
	.get_size = test1_get_size,
	.fill_info = test1_fill_info,
	.refresh_info = test1_refresh_info,
};

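/*
 * Round trip a message: fill a buffer from the test data, zero the
 * data, then refresh it from the buffer and check the values return.
 */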
static void test_gs_msg(struct kunit *test)
{
	struct kvmppc_gs_msg_test1_data test1_data = {
		.a = 0xdeadbeef,
		.b = 0x1,
	};
	struct kvmppc_gs_msg *gsm;
	struct kvmppc_gs_buff *gsb;

	gsm = kvmppc_gsm_new(&gs_msg_test1_ops, &test1_data, GSM_SEND,
			     GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, gsm);

	gsb = kvmppc_gsb_new(kvmppc_gsm_size(gsm), 0, 0, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, gsb);

	kvmppc_gsm_include(gsm, KVMPPC_GSID_PARTITION_TABLE);
	kvmppc_gsm_include(gsm, KVMPPC_GSID_PROCESS_TABLE);
	kvmppc_gsm_include(gsm, KVMPPC_GSID_RUN_INPUT);
	kvmppc_gsm_include(gsm, KVMPPC_GSID_GPR(0));
	kvmppc_gsm_include(gsm, KVMPPC_GSID_CR);

	kvmppc_gsm_fill_info(gsm, gsb);

	memset(&test1_data, 0, sizeof(test1_data));

	kvmppc_gsm_refresh_info(gsm, gsb);
	KUNIT_EXPECT_EQ(test, test1_data.a, 0xdeadbeef);
	KUNIT_EXPECT_EQ(test, test1_data.b, 0x1);

	kvmppc_gsm_free(gsm);
}

static struct kunit_case guest_state_buffer_testcases[] = {
	KUNIT_CASE(test_creating_buffer),
	KUNIT_CASE(test_adding_element),
	KUNIT_CASE(test_gs_bitmap),
	KUNIT_CASE(test_gs_parsing),
	KUNIT_CASE(test_gs_msg),
	{}
};

static struct kunit_suite guest_state_buffer_test_suite = {
	.name = "guest_state_buffer_test",
	.test_cases = guest_state_buffer_testcases,
};

kunit_test_suites(&guest_state_buffer_test_suite);

MODULE_DESCRIPTION("KUnit tests for guest state buffer APIs");
MODULE_LICENSE("GPL");