1// SPDX-License-Identifier: GPL-2.0+
2/*
3 * (C) Copyright 2018 Simon Goldschmidt
4 */
5
6#include <dm.h>
7#include <lmb.h>
8#include <log.h>
9#include <malloc.h>
10#include <dm/test.h>
11#include <test/lib.h>
12#include <test/test.h>
13#include <test/ut.h>
14
15static inline bool lmb_is_nomap(struct lmb_property *m)
16{
17	return m->flags & LMB_NOMAP;
18}
19
20static int check_lmb(struct unit_test_state *uts, struct lmb *lmb,
21		     phys_addr_t ram_base, phys_size_t ram_size,
22		     unsigned long num_reserved,
23		     phys_addr_t base1, phys_size_t size1,
24		     phys_addr_t base2, phys_size_t size2,
25		     phys_addr_t base3, phys_size_t size3)
26{
27	if (ram_size) {
28		ut_asserteq(lmb->memory.cnt, 1);
29		ut_asserteq(lmb->memory.region[0].base, ram_base);
30		ut_asserteq(lmb->memory.region[0].size, ram_size);
31	}
32
33	ut_asserteq(lmb->reserved.cnt, num_reserved);
34	if (num_reserved > 0) {
35		ut_asserteq(lmb->reserved.region[0].base, base1);
36		ut_asserteq(lmb->reserved.region[0].size, size1);
37	}
38	if (num_reserved > 1) {
39		ut_asserteq(lmb->reserved.region[1].base, base2);
40		ut_asserteq(lmb->reserved.region[1].size, size2);
41	}
42	if (num_reserved > 2) {
43		ut_asserteq(lmb->reserved.region[2].base, base3);
44		ut_asserteq(lmb->reserved.region[2].size, size3);
45	}
46	return 0;
47}
48
/*
 * Assert that the lmb state matches the given expectation; see check_lmb()
 * for the parameter meaning. Requires a local 'uts' variable in scope.
 */
#define ASSERT_LMB(lmb, ram_base, ram_size, num_reserved, base1, size1, \
		   base2, size2, base3, size3) \
		   ut_assert(!check_lmb(uts, lmb, ram_base, ram_size, \
			     num_reserved, base1, size1, base2, size2, base3, \
			     size3))
54
/*
 * Test helper function that reserves 64 KiB somewhere in the simulated RAM and
 * then does some alloc + free tests.
 *
 * @uts:		unit test state, used by the ut_assert*() macros
 * @ram:		base address of the (upper) simulated RAM bank
 * @ram_size:		size of that bank
 * @ram0:		base address of an optional second RAM bank
 * @ram0_size:		size of the second bank; 0 means only one bank is used
 * @alloc_64k_addr:	address inside [@ram, @ram + @ram_size) at which a
 *			64 KiB block is reserved before the alloc/free checks
 * Return: 0 if ok, otherwise an assertion failure via ut_assert*()
 */
static int test_multi_alloc(struct unit_test_state *uts, const phys_addr_t ram,
			    const phys_size_t ram_size, const phys_addr_t ram0,
			    const phys_size_t ram0_size,
			    const phys_addr_t alloc_64k_addr)
{
	const phys_addr_t ram_end = ram + ram_size;
	const phys_addr_t alloc_64k_end = alloc_64k_addr + 0x10000;

	struct lmb lmb;
	long ret;
	phys_addr_t a, a2, b, b2, c, d;

	/* check for overflow */
	ut_assert(ram_end == 0 || ram_end > ram);
	ut_assert(alloc_64k_end > alloc_64k_addr);
	/* check input addresses + size */
	ut_assert(alloc_64k_addr >= ram + 8);
	ut_assert(alloc_64k_end <= ram_end - 8);

	lmb_init(&lmb);

	if (ram0_size) {
		ret = lmb_add(&lmb, ram0, ram0_size);
		ut_asserteq(ret, 0);
	}

	ret = lmb_add(&lmb, ram, ram_size);
	ut_asserteq(ret, 0);

	if (ram0_size) {
		ut_asserteq(lmb.memory.cnt, 2);
		ut_asserteq(lmb.memory.region[0].base, ram0);
		ut_asserteq(lmb.memory.region[0].size, ram0_size);
		ut_asserteq(lmb.memory.region[1].base, ram);
		ut_asserteq(lmb.memory.region[1].size, ram_size);
	} else {
		ut_asserteq(lmb.memory.cnt, 1);
		ut_asserteq(lmb.memory.region[0].base, ram);
		ut_asserteq(lmb.memory.region[0].size, ram_size);
	}

	/* reserve 64KiB somewhere */
	ret = lmb_reserve(&lmb, alloc_64k_addr, 0x10000);
	ut_asserteq(ret, 0);
	/*
	 * Note: the ram_base/ram_size 0 arguments below skip the memory-list
	 * check in check_lmb(), since one or two banks may be present here.
	 */
	ASSERT_LMB(&lmb, 0, 0, 1, alloc_64k_addr, 0x10000,
		   0, 0, 0, 0);

	/* allocate somewhere, should be at the end of RAM */
	a = lmb_alloc(&lmb, 4, 1);
	ut_asserteq(a, ram_end - 4);
	ASSERT_LMB(&lmb, 0, 0, 2, alloc_64k_addr, 0x10000,
		   ram_end - 4, 4, 0, 0);
	/* alloc below end of reserved region -> below reserved region */
	b = lmb_alloc_base(&lmb, 4, 1, alloc_64k_end);
	ut_asserteq(b, alloc_64k_addr - 4);
	/* b is adjacent to the 64 KiB block, so the two regions merge */
	ASSERT_LMB(&lmb, 0, 0, 2,
		   alloc_64k_addr - 4, 0x10000 + 4, ram_end - 4, 4, 0, 0);

	/* 2nd time */
	c = lmb_alloc(&lmb, 4, 1);
	ut_asserteq(c, ram_end - 8);
	ASSERT_LMB(&lmb, 0, 0, 2,
		   alloc_64k_addr - 4, 0x10000 + 4, ram_end - 8, 8, 0, 0);
	d = lmb_alloc_base(&lmb, 4, 1, alloc_64k_end);
	ut_asserteq(d, alloc_64k_addr - 8);
	ASSERT_LMB(&lmb, 0, 0, 2,
		   alloc_64k_addr - 8, 0x10000 + 8, ram_end - 8, 8, 0, 0);

	ret = lmb_free(&lmb, a, 4);
	ut_asserteq(ret, 0);
	ASSERT_LMB(&lmb, 0, 0, 2,
		   alloc_64k_addr - 8, 0x10000 + 8, ram_end - 8, 4, 0, 0);
	/* allocate again to ensure we get the same address */
	a2 = lmb_alloc(&lmb, 4, 1);
	ut_asserteq(a, a2);
	ASSERT_LMB(&lmb, 0, 0, 2,
		   alloc_64k_addr - 8, 0x10000 + 8, ram_end - 8, 8, 0, 0);
	ret = lmb_free(&lmb, a2, 4);
	ut_asserteq(ret, 0);
	ASSERT_LMB(&lmb, 0, 0, 2,
		   alloc_64k_addr - 8, 0x10000 + 8, ram_end - 8, 4, 0, 0);

	ret = lmb_free(&lmb, b, 4);
	ut_asserteq(ret, 0);
	/* freeing b splits the lower reservation into two regions */
	ASSERT_LMB(&lmb, 0, 0, 3,
		   alloc_64k_addr - 8, 4, alloc_64k_addr, 0x10000,
		   ram_end - 8, 4);
	/* allocate again to ensure we get the same address */
	b2 = lmb_alloc_base(&lmb, 4, 1, alloc_64k_end);
	ut_asserteq(b, b2);
	ASSERT_LMB(&lmb, 0, 0, 2,
		   alloc_64k_addr - 8, 0x10000 + 8, ram_end - 8, 4, 0, 0);
	ret = lmb_free(&lmb, b2, 4);
	ut_asserteq(ret, 0);
	ASSERT_LMB(&lmb, 0, 0, 3,
		   alloc_64k_addr - 8, 4, alloc_64k_addr, 0x10000,
		   ram_end - 8, 4);

	ret = lmb_free(&lmb, c, 4);
	ut_asserteq(ret, 0);
	ASSERT_LMB(&lmb, 0, 0, 2,
		   alloc_64k_addr - 8, 4, alloc_64k_addr, 0x10000, 0, 0);
	ret = lmb_free(&lmb, d, 4);
	ut_asserteq(ret, 0);
	/* only the original 64 KiB reservation must remain */
	ASSERT_LMB(&lmb, 0, 0, 1, alloc_64k_addr, 0x10000,
		   0, 0, 0, 0);

	/* the memory-region list must be unchanged by all of the above */
	if (ram0_size) {
		ut_asserteq(lmb.memory.cnt, 2);
		ut_asserteq(lmb.memory.region[0].base, ram0);
		ut_asserteq(lmb.memory.region[0].size, ram0_size);
		ut_asserteq(lmb.memory.region[1].base, ram);
		ut_asserteq(lmb.memory.region[1].size, ram_size);
	} else {
		ut_asserteq(lmb.memory.cnt, 1);
		ut_asserteq(lmb.memory.region[0].base, ram);
		ut_asserteq(lmb.memory.region[0].size, ram_size);
	}

	return 0;
}
180
181static int test_multi_alloc_512mb(struct unit_test_state *uts,
182				  const phys_addr_t ram)
183{
184	return test_multi_alloc(uts, ram, 0x20000000, 0, 0, ram + 0x10000000);
185}
186
187static int test_multi_alloc_512mb_x2(struct unit_test_state *uts,
188				     const phys_addr_t ram,
189				     const phys_addr_t ram0)
190{
191	return test_multi_alloc(uts, ram, 0x20000000, ram0, 0x20000000,
192				ram + 0x10000000);
193}
194
195/* Create a memory region with one reserved region and allocate */
196static int lib_test_lmb_simple(struct unit_test_state *uts)
197{
198	int ret;
199
200	/* simulate 512 MiB RAM beginning at 1GiB */
201	ret = test_multi_alloc_512mb(uts, 0x40000000);
202	if (ret)
203		return ret;
204
205	/* simulate 512 MiB RAM beginning at 1.5GiB */
206	return test_multi_alloc_512mb(uts, 0xE0000000);
207}
208LIB_TEST(lib_test_lmb_simple, 0);
209
210/* Create two memory regions with one reserved region and allocate */
211static int lib_test_lmb_simple_x2(struct unit_test_state *uts)
212{
213	int ret;
214
215	/* simulate 512 MiB RAM beginning at 2GiB and 1 GiB */
216	ret = test_multi_alloc_512mb_x2(uts, 0x80000000, 0x40000000);
217	if (ret)
218		return ret;
219
220	/* simulate 512 MiB RAM beginning at 3.5GiB and 1 GiB */
221	return test_multi_alloc_512mb_x2(uts, 0xE0000000, 0x40000000);
222}
223LIB_TEST(lib_test_lmb_simple_x2, 0);
224
225/* Simulate 512 MiB RAM, allocate some blocks that fit/don't fit */
226static int test_bigblock(struct unit_test_state *uts, const phys_addr_t ram)
227{
228	const phys_size_t ram_size = 0x20000000;
229	const phys_size_t big_block_size = 0x10000000;
230	const phys_addr_t ram_end = ram + ram_size;
231	const phys_addr_t alloc_64k_addr = ram + 0x10000000;
232	struct lmb lmb;
233	long ret;
234	phys_addr_t a, b;
235
236	/* check for overflow */
237	ut_assert(ram_end == 0 || ram_end > ram);
238
239	lmb_init(&lmb);
240
241	ret = lmb_add(&lmb, ram, ram_size);
242	ut_asserteq(ret, 0);
243
244	/* reserve 64KiB in the middle of RAM */
245	ret = lmb_reserve(&lmb, alloc_64k_addr, 0x10000);
246	ut_asserteq(ret, 0);
247	ASSERT_LMB(&lmb, ram, ram_size, 1, alloc_64k_addr, 0x10000,
248		   0, 0, 0, 0);
249
250	/* allocate a big block, should be below reserved */
251	a = lmb_alloc(&lmb, big_block_size, 1);
252	ut_asserteq(a, ram);
253	ASSERT_LMB(&lmb, ram, ram_size, 1, a,
254		   big_block_size + 0x10000, 0, 0, 0, 0);
255	/* allocate 2nd big block */
256	/* This should fail, printing an error */
257	b = lmb_alloc(&lmb, big_block_size, 1);
258	ut_asserteq(b, 0);
259	ASSERT_LMB(&lmb, ram, ram_size, 1, a,
260		   big_block_size + 0x10000, 0, 0, 0, 0);
261
262	ret = lmb_free(&lmb, a, big_block_size);
263	ut_asserteq(ret, 0);
264	ASSERT_LMB(&lmb, ram, ram_size, 1, alloc_64k_addr, 0x10000,
265		   0, 0, 0, 0);
266
267	/* allocate too big block */
268	/* This should fail, printing an error */
269	a = lmb_alloc(&lmb, ram_size, 1);
270	ut_asserteq(a, 0);
271	ASSERT_LMB(&lmb, ram, ram_size, 1, alloc_64k_addr, 0x10000,
272		   0, 0, 0, 0);
273
274	return 0;
275}
276
277static int lib_test_lmb_big(struct unit_test_state *uts)
278{
279	int ret;
280
281	/* simulate 512 MiB RAM beginning at 1GiB */
282	ret = test_bigblock(uts, 0x40000000);
283	if (ret)
284		return ret;
285
286	/* simulate 512 MiB RAM beginning at 1.5GiB */
287	return test_bigblock(uts, 0xE0000000);
288}
289LIB_TEST(lib_test_lmb_big, 0);
290
/*
 * Simulate 512 MiB RAM, allocate a block without previous reservation
 *
 * @uts:	unit test state, used by the ut_assert*() macros
 * @ram:	base address of the simulated RAM bank
 * @alloc_size:	number of bytes to allocate in each step
 * @align:	alignment requested from the allocator (power of two)
 * Return: 0 if ok, otherwise an assertion failure via ut_assert*()
 */
static int test_noreserved(struct unit_test_state *uts, const phys_addr_t ram,
			   const phys_addr_t alloc_size, const ulong align)
{
	const phys_size_t ram_size = 0x20000000;
	const phys_addr_t ram_end = ram + ram_size;
	struct lmb lmb;
	long ret;
	phys_addr_t a, b;
	/* alloc_size rounded up to the next multiple of align */
	const phys_addr_t alloc_size_aligned = (alloc_size + align - 1) &
		~(align - 1);

	/* check for overflow */
	ut_assert(ram_end == 0 || ram_end > ram);

	lmb_init(&lmb);

	ret = lmb_add(&lmb, ram, ram_size);
	ut_asserteq(ret, 0);
	ASSERT_LMB(&lmb, ram, ram_size, 0, 0, 0, 0, 0, 0, 0);

	/* allocate a block; it is placed at the (aligned) end of RAM */
	a = lmb_alloc(&lmb, alloc_size, align);
	ut_assert(a != 0);
	ASSERT_LMB(&lmb, ram, ram_size, 1, ram + ram_size - alloc_size_aligned,
		   alloc_size, 0, 0, 0, 0);
	/* allocate another block */
	b = lmb_alloc(&lmb, alloc_size, align);
	ut_assert(b != 0);
	if (alloc_size == alloc_size_aligned) {
		/* an aligned size makes the blocks adjacent: one region */
		ASSERT_LMB(&lmb, ram, ram_size, 1, ram + ram_size -
			   (alloc_size_aligned * 2), alloc_size * 2, 0, 0, 0,
			   0);
	} else {
		/* alignment padding leaves a gap: two separate regions */
		ASSERT_LMB(&lmb, ram, ram_size, 2, ram + ram_size -
			   (alloc_size_aligned * 2), alloc_size, ram + ram_size
			   - alloc_size_aligned, alloc_size, 0, 0);
	}
	/* and free them */
	ret = lmb_free(&lmb, b, alloc_size);
	ut_asserteq(ret, 0);
	ASSERT_LMB(&lmb, ram, ram_size, 1, ram + ram_size - alloc_size_aligned,
		   alloc_size, 0, 0, 0, 0);
	ret = lmb_free(&lmb, a, alloc_size);
	ut_asserteq(ret, 0);
	ASSERT_LMB(&lmb, ram, ram_size, 0, 0, 0, 0, 0, 0, 0);

	/* allocate a block with base; must land where 'a' did above */
	b = lmb_alloc_base(&lmb, alloc_size, align, ram_end);
	ut_assert(a == b);
	ASSERT_LMB(&lmb, ram, ram_size, 1, ram + ram_size - alloc_size_aligned,
		   alloc_size, 0, 0, 0, 0);
	/* and free it */
	ret = lmb_free(&lmb, b, alloc_size);
	ut_asserteq(ret, 0);
	ASSERT_LMB(&lmb, ram, ram_size, 0, 0, 0, 0, 0, 0, 0);

	return 0;
}
350
351static int lib_test_lmb_noreserved(struct unit_test_state *uts)
352{
353	int ret;
354
355	/* simulate 512 MiB RAM beginning at 1GiB */
356	ret = test_noreserved(uts, 0x40000000, 4, 1);
357	if (ret)
358		return ret;
359
360	/* simulate 512 MiB RAM beginning at 1.5GiB */
361	return test_noreserved(uts, 0xE0000000, 4, 1);
362}
363
364LIB_TEST(lib_test_lmb_noreserved, 0);
365
366static int lib_test_lmb_unaligned_size(struct unit_test_state *uts)
367{
368	int ret;
369
370	/* simulate 512 MiB RAM beginning at 1GiB */
371	ret = test_noreserved(uts, 0x40000000, 5, 8);
372	if (ret)
373		return ret;
374
375	/* simulate 512 MiB RAM beginning at 1.5GiB */
376	return test_noreserved(uts, 0xE0000000, 5, 8);
377}
378LIB_TEST(lib_test_lmb_unaligned_size, 0);
379
380/*
381 * Simulate a RAM that starts at 0 and allocate down to address 0, which must
382 * fail as '0' means failure for the lmb_alloc functions.
383 */
384static int lib_test_lmb_at_0(struct unit_test_state *uts)
385{
386	const phys_addr_t ram = 0;
387	const phys_size_t ram_size = 0x20000000;
388	struct lmb lmb;
389	long ret;
390	phys_addr_t a, b;
391
392	lmb_init(&lmb);
393
394	ret = lmb_add(&lmb, ram, ram_size);
395	ut_asserteq(ret, 0);
396
397	/* allocate nearly everything */
398	a = lmb_alloc(&lmb, ram_size - 4, 1);
399	ut_asserteq(a, ram + 4);
400	ASSERT_LMB(&lmb, ram, ram_size, 1, a, ram_size - 4,
401		   0, 0, 0, 0);
402	/* allocate the rest */
403	/* This should fail as the allocated address would be 0 */
404	b = lmb_alloc(&lmb, 4, 1);
405	ut_asserteq(b, 0);
406	/* check that this was an error by checking lmb */
407	ASSERT_LMB(&lmb, ram, ram_size, 1, a, ram_size - 4,
408		   0, 0, 0, 0);
409	/* check that this was an error by freeing b */
410	ret = lmb_free(&lmb, b, 4);
411	ut_asserteq(ret, -1);
412	ASSERT_LMB(&lmb, ram, ram_size, 1, a, ram_size - 4,
413		   0, 0, 0, 0);
414
415	ret = lmb_free(&lmb, a, ram_size - 4);
416	ut_asserteq(ret, 0);
417	ASSERT_LMB(&lmb, ram, ram_size, 0, 0, 0, 0, 0, 0, 0);
418
419	return 0;
420}
421LIB_TEST(lib_test_lmb_at_0, 0);
422
423/* Check that calling lmb_reserve with overlapping regions fails. */
424static int lib_test_lmb_overlapping_reserve(struct unit_test_state *uts)
425{
426	const phys_addr_t ram = 0x40000000;
427	const phys_size_t ram_size = 0x20000000;
428	struct lmb lmb;
429	long ret;
430
431	lmb_init(&lmb);
432
433	ret = lmb_add(&lmb, ram, ram_size);
434	ut_asserteq(ret, 0);
435
436	ret = lmb_reserve(&lmb, 0x40010000, 0x10000);
437	ut_asserteq(ret, 0);
438	ASSERT_LMB(&lmb, ram, ram_size, 1, 0x40010000, 0x10000,
439		   0, 0, 0, 0);
440	/* allocate overlapping region should fail */
441	ret = lmb_reserve(&lmb, 0x40011000, 0x10000);
442	ut_asserteq(ret, -1);
443	ASSERT_LMB(&lmb, ram, ram_size, 1, 0x40010000, 0x10000,
444		   0, 0, 0, 0);
445	/* allocate 3nd region */
446	ret = lmb_reserve(&lmb, 0x40030000, 0x10000);
447	ut_asserteq(ret, 0);
448	ASSERT_LMB(&lmb, ram, ram_size, 2, 0x40010000, 0x10000,
449		   0x40030000, 0x10000, 0, 0);
450	/* allocate 2nd region , This should coalesced all region into one */
451	ret = lmb_reserve(&lmb, 0x40020000, 0x10000);
452	ut_assert(ret >= 0);
453	ASSERT_LMB(&lmb, ram, ram_size, 1, 0x40010000, 0x30000,
454		   0, 0, 0, 0);
455
456	/* allocate 2nd region, which should be added as first region */
457	ret = lmb_reserve(&lmb, 0x40000000, 0x8000);
458	ut_assert(ret >= 0);
459	ASSERT_LMB(&lmb, ram, ram_size, 2, 0x40000000, 0x8000,
460		   0x40010000, 0x30000, 0, 0);
461
462	/* allocate 3rd region, coalesce with first and overlap with second */
463	ret = lmb_reserve(&lmb, 0x40008000, 0x10000);
464	ut_assert(ret >= 0);
465	ASSERT_LMB(&lmb, ram, ram_size, 1, 0x40000000, 0x40000,
466		   0, 0, 0, 0);
467	return 0;
468}
469LIB_TEST(lib_test_lmb_overlapping_reserve, 0);
470
/*
 * Simulate 512 MiB RAM, reserve 3 blocks, allocate addresses in between.
 * Expect addresses outside the memory range to fail.
 *
 * @uts:	unit test state, used by the ut_assert*() macros
 * @ram:	base address of the simulated RAM bank
 * Return: 0 if ok, otherwise an assertion failure via ut_assert*()
 */
static int test_alloc_addr(struct unit_test_state *uts, const phys_addr_t ram)
{
	const phys_size_t ram_size = 0x20000000;
	const phys_addr_t ram_end = ram + ram_size;
	/* three reservation points at 1/4, 2/4 and 3/4 of the bank */
	const phys_size_t alloc_addr_a = ram + 0x8000000;
	const phys_size_t alloc_addr_b = ram + 0x8000000 * 2;
	const phys_size_t alloc_addr_c = ram + 0x8000000 * 3;
	struct lmb lmb;
	long ret;
	phys_addr_t a, b, c, d, e;

	/* check for overflow */
	ut_assert(ram_end == 0 || ram_end > ram);

	lmb_init(&lmb);

	ret = lmb_add(&lmb, ram, ram_size);
	ut_asserteq(ret, 0);

	/*  reserve 3 blocks */
	ret = lmb_reserve(&lmb, alloc_addr_a, 0x10000);
	ut_asserteq(ret, 0);
	ret = lmb_reserve(&lmb, alloc_addr_b, 0x10000);
	ut_asserteq(ret, 0);
	ret = lmb_reserve(&lmb, alloc_addr_c, 0x10000);
	ut_asserteq(ret, 0);
	ASSERT_LMB(&lmb, ram, ram_size, 3, alloc_addr_a, 0x10000,
		   alloc_addr_b, 0x10000, alloc_addr_c, 0x10000);

	/* allocate blocks at fixed addresses, filling each gap in turn */
	a = lmb_alloc_addr(&lmb, ram, alloc_addr_a - ram);
	ut_asserteq(a, ram);
	ASSERT_LMB(&lmb, ram, ram_size, 3, ram, 0x8010000,
		   alloc_addr_b, 0x10000, alloc_addr_c, 0x10000);
	b = lmb_alloc_addr(&lmb, alloc_addr_a + 0x10000,
			   alloc_addr_b - alloc_addr_a - 0x10000);
	ut_asserteq(b, alloc_addr_a + 0x10000);
	ASSERT_LMB(&lmb, ram, ram_size, 2, ram, 0x10010000,
		   alloc_addr_c, 0x10000, 0, 0);
	c = lmb_alloc_addr(&lmb, alloc_addr_b + 0x10000,
			   alloc_addr_c - alloc_addr_b - 0x10000);
	ut_asserteq(c, alloc_addr_b + 0x10000);
	ASSERT_LMB(&lmb, ram, ram_size, 1, ram, 0x18010000,
		   0, 0, 0, 0);
	d = lmb_alloc_addr(&lmb, alloc_addr_c + 0x10000,
			   ram_end - alloc_addr_c - 0x10000);
	ut_asserteq(d, alloc_addr_c + 0x10000);
	/* now the whole bank is reserved as a single region */
	ASSERT_LMB(&lmb, ram, ram_size, 1, ram, ram_size,
		   0, 0, 0, 0);

	/* allocating anything else should fail */
	e = lmb_alloc(&lmb, 1, 1);
	ut_asserteq(e, 0);
	ASSERT_LMB(&lmb, ram, ram_size, 1, ram, ram_size,
		   0, 0, 0, 0);

	/* free the topmost quarter again */
	ret = lmb_free(&lmb, d, ram_end - alloc_addr_c - 0x10000);
	ut_asserteq(ret, 0);

	/* allocate at 3 points in free range */

	/* at the very end of RAM */
	d = lmb_alloc_addr(&lmb, ram_end - 4, 4);
	ut_asserteq(d, ram_end - 4);
	ASSERT_LMB(&lmb, ram, ram_size, 2, ram, 0x18010000,
		   d, 4, 0, 0);
	ret = lmb_free(&lmb, d, 4);
	ut_asserteq(ret, 0);
	ASSERT_LMB(&lmb, ram, ram_size, 1, ram, 0x18010000,
		   0, 0, 0, 0);

	/* in the middle of the free range */
	d = lmb_alloc_addr(&lmb, ram_end - 128, 4);
	ut_asserteq(d, ram_end - 128);
	ASSERT_LMB(&lmb, ram, ram_size, 2, ram, 0x18010000,
		   d, 4, 0, 0);
	ret = lmb_free(&lmb, d, 4);
	ut_asserteq(ret, 0);
	ASSERT_LMB(&lmb, ram, ram_size, 1, ram, 0x18010000,
		   0, 0, 0, 0);

	/* directly after the reserved region: merges with it */
	d = lmb_alloc_addr(&lmb, alloc_addr_c + 0x10000, 4);
	ut_asserteq(d, alloc_addr_c + 0x10000);
	ASSERT_LMB(&lmb, ram, ram_size, 1, ram, 0x18010004,
		   0, 0, 0, 0);
	ret = lmb_free(&lmb, d, 4);
	ut_asserteq(ret, 0);
	ASSERT_LMB(&lmb, ram, ram_size, 1, ram, 0x18010000,
		   0, 0, 0, 0);

	/* allocate at the bottom */
	ret = lmb_free(&lmb, a, alloc_addr_a - ram);
	ut_asserteq(ret, 0);
	ASSERT_LMB(&lmb, ram, ram_size, 1, ram + 0x8000000, 0x10010000,
		   0, 0, 0, 0);
	d = lmb_alloc_addr(&lmb, ram, 4);
	ut_asserteq(d, ram);
	ASSERT_LMB(&lmb, ram, ram_size, 2, d, 4,
		   ram + 0x8000000, 0x10010000, 0, 0);

	/* check that allocating outside memory fails */
	if (ram_end != 0) {
		ret = lmb_alloc_addr(&lmb, ram_end, 1);
		ut_asserteq(ret, 0);
	}
	if (ram != 0) {
		ret = lmb_alloc_addr(&lmb, ram - 1, 1);
		ut_asserteq(ret, 0);
	}

	return 0;
}
585
586static int lib_test_lmb_alloc_addr(struct unit_test_state *uts)
587{
588	int ret;
589
590	/* simulate 512 MiB RAM beginning at 1GiB */
591	ret = test_alloc_addr(uts, 0x40000000);
592	if (ret)
593		return ret;
594
595	/* simulate 512 MiB RAM beginning at 1.5GiB */
596	return test_alloc_addr(uts, 0xE0000000);
597}
598LIB_TEST(lib_test_lmb_alloc_addr, 0);
599
/*
 * Simulate 512 MiB RAM, reserve 3 blocks, check addresses in between
 *
 * @uts:	unit test state, used by the ut_assert*() macros
 * @ram:	base address of the simulated RAM bank
 * Return: 0 if ok, otherwise an assertion failure via ut_assert*()
 */
static int test_get_unreserved_size(struct unit_test_state *uts,
				    const phys_addr_t ram)
{
	const phys_size_t ram_size = 0x20000000;
	const phys_addr_t ram_end = ram + ram_size;
	/* three reservation points at 1/4, 2/4 and 3/4 of the bank */
	const phys_size_t alloc_addr_a = ram + 0x8000000;
	const phys_size_t alloc_addr_b = ram + 0x8000000 * 2;
	const phys_size_t alloc_addr_c = ram + 0x8000000 * 3;
	struct lmb lmb;
	long ret;
	phys_size_t s;

	/* check for overflow */
	ut_assert(ram_end == 0 || ram_end > ram);

	lmb_init(&lmb);

	ret = lmb_add(&lmb, ram, ram_size);
	ut_asserteq(ret, 0);

	/*  reserve 3 blocks */
	ret = lmb_reserve(&lmb, alloc_addr_a, 0x10000);
	ut_asserteq(ret, 0);
	ret = lmb_reserve(&lmb, alloc_addr_b, 0x10000);
	ut_asserteq(ret, 0);
	ret = lmb_reserve(&lmb, alloc_addr_c, 0x10000);
	ut_asserteq(ret, 0);
	ASSERT_LMB(&lmb, ram, ram_size, 3, alloc_addr_a, 0x10000,
		   alloc_addr_b, 0x10000, alloc_addr_c, 0x10000);

	/* free size from the bank start up to the first reservation */
	s = lmb_get_free_size(&lmb, ram);
	ut_asserteq(s, alloc_addr_a - ram);
	s = lmb_get_free_size(&lmb, ram + 0x10000);
	ut_asserteq(s, alloc_addr_a - ram - 0x10000);
	s = lmb_get_free_size(&lmb, alloc_addr_a - 4);
	ut_asserteq(s, 4);

	/* free size in the gap between the first two reservations */
	s = lmb_get_free_size(&lmb, alloc_addr_a + 0x10000);
	ut_asserteq(s, alloc_addr_b - alloc_addr_a - 0x10000);
	s = lmb_get_free_size(&lmb, alloc_addr_a + 0x20000);
	ut_asserteq(s, alloc_addr_b - alloc_addr_a - 0x20000);
	s = lmb_get_free_size(&lmb, alloc_addr_b - 4);
	ut_asserteq(s, 4);

	/* free size from the last reservation up to the end of the bank */
	s = lmb_get_free_size(&lmb, alloc_addr_c + 0x10000);
	ut_asserteq(s, ram_end - alloc_addr_c - 0x10000);
	s = lmb_get_free_size(&lmb, alloc_addr_c + 0x20000);
	ut_asserteq(s, ram_end - alloc_addr_c - 0x20000);
	s = lmb_get_free_size(&lmb, ram_end - 4);
	ut_asserteq(s, 4);

	return 0;
}
655
656static int lib_test_lmb_get_free_size(struct unit_test_state *uts)
657{
658	int ret;
659
660	/* simulate 512 MiB RAM beginning at 1GiB */
661	ret = test_get_unreserved_size(uts, 0x40000000);
662	if (ret)
663		return ret;
664
665	/* simulate 512 MiB RAM beginning at 1.5GiB */
666	return test_get_unreserved_size(uts, 0xE0000000);
667}
668LIB_TEST(lib_test_lmb_get_free_size, 0);
669
670#ifdef CONFIG_LMB_USE_MAX_REGIONS
/*
 * Fill both the memory and the reserved list up to CONFIG_LMB_MAX_REGIONS
 * entries and check that adding one more region to either list fails.
 */
static int lib_test_lmb_max_regions(struct unit_test_state *uts)
{
	const phys_addr_t ram = 0x00000000;
	/*
	 * All of 32bit memory space will contain regions for this test, so
	 * we need to scale ram_size (which in this case is the size of the lmb
	 * region) to match.
	 */
	const phys_size_t ram_size = ((0xFFFFFFFF >> CONFIG_LMB_MAX_REGIONS)
			+ 1) * CONFIG_LMB_MAX_REGIONS;
	const phys_size_t blk_size = 0x10000;
	phys_addr_t offset;
	struct lmb lmb;
	int ret, i;

	lmb_init(&lmb);

	/* both lists start empty, sized at the compile-time maximum */
	ut_asserteq(lmb.memory.cnt, 0);
	ut_asserteq(lmb.memory.max, CONFIG_LMB_MAX_REGIONS);
	ut_asserteq(lmb.reserved.cnt, 0);
	ut_asserteq(lmb.reserved.max, CONFIG_LMB_MAX_REGIONS);

	/*
	 * Add CONFIG_LMB_MAX_REGIONS memory regions; the factor of 2 in the
	 * offset leaves a gap so that adjacent regions cannot coalesce.
	 */
	for (i = 0; i < CONFIG_LMB_MAX_REGIONS; i++) {
		offset = ram + 2 * i * ram_size;
		ret = lmb_add(&lmb, offset, ram_size);
		ut_asserteq(ret, 0);
	}
	ut_asserteq(lmb.memory.cnt, CONFIG_LMB_MAX_REGIONS);
	ut_asserteq(lmb.reserved.cnt, 0);

	/*  error for the (CONFIG_LMB_MAX_REGIONS + 1) memory regions */
	offset = ram + 2 * (CONFIG_LMB_MAX_REGIONS + 1) * ram_size;
	ret = lmb_add(&lmb, offset, ram_size);
	ut_asserteq(ret, -1);

	ut_asserteq(lmb.memory.cnt, CONFIG_LMB_MAX_REGIONS);
	ut_asserteq(lmb.reserved.cnt, 0);

	/*  reserve CONFIG_LMB_MAX_REGIONS regions, again non-adjacent */
	for (i = 0; i < CONFIG_LMB_MAX_REGIONS; i++) {
		offset = ram + 2 * i * blk_size;
		ret = lmb_reserve(&lmb, offset, blk_size);
		ut_asserteq(ret, 0);
	}

	ut_asserteq(lmb.memory.cnt, CONFIG_LMB_MAX_REGIONS);
	ut_asserteq(lmb.reserved.cnt, CONFIG_LMB_MAX_REGIONS);

	/*  error for the (CONFIG_LMB_MAX_REGIONS + 1) reserved blocks */
	offset = ram + 2 * (CONFIG_LMB_MAX_REGIONS + 1) * blk_size;
	ret = lmb_reserve(&lmb, offset, blk_size);
	ut_asserteq(ret, -1);

	ut_asserteq(lmb.memory.cnt, CONFIG_LMB_MAX_REGIONS);
	ut_asserteq(lmb.reserved.cnt, CONFIG_LMB_MAX_REGIONS);

	/*  check the base address of each stored region */
	for (i = 0; i < CONFIG_LMB_MAX_REGIONS; i++)
		ut_asserteq(lmb.memory.region[i].base, ram + 2 * i * ram_size);

	for (i = 0; i < CONFIG_LMB_MAX_REGIONS; i++)
		ut_asserteq(lmb.reserved.region[i].base, ram + 2 * i * blk_size);

	return 0;
}
LIB_TEST(lib_test_lmb_max_regions, 0);
738#endif
739
/*
 * Check lmb_reserve_flags(): reservations carrying different flags must not
 * be merged or re-reserved, while same-flag adjacent regions coalesce.
 * (The asserts below show lmb_reserve_flags() returning the number of
 * existing regions the new one was merged with.)
 */
static int lib_test_lmb_flags(struct unit_test_state *uts)
{
	const phys_addr_t ram = 0x40000000;
	const phys_size_t ram_size = 0x20000000;
	struct lmb lmb;
	long ret;

	lmb_init(&lmb);

	ret = lmb_add(&lmb, ram, ram_size);
	ut_asserteq(ret, 0);

	/* reserve, same flag */
	ret = lmb_reserve_flags(&lmb, 0x40010000, 0x10000, LMB_NOMAP);
	ut_asserteq(ret, 0);
	ASSERT_LMB(&lmb, ram, ram_size, 1, 0x40010000, 0x10000,
		   0, 0, 0, 0);

	/* reserve again, same flag: accepted as a no-op */
	ret = lmb_reserve_flags(&lmb, 0x40010000, 0x10000, LMB_NOMAP);
	ut_asserteq(ret, 0);
	ASSERT_LMB(&lmb, ram, ram_size, 1, 0x40010000, 0x10000,
		   0, 0, 0, 0);

	/* reserve again, new flag: rejected */
	ret = lmb_reserve_flags(&lmb, 0x40010000, 0x10000, LMB_NONE);
	ut_asserteq(ret, -1);
	ASSERT_LMB(&lmb, ram, ram_size, 1, 0x40010000, 0x10000,
		   0, 0, 0, 0);

	ut_asserteq(lmb_is_nomap(&lmb.reserved.region[0]), 1);

	/* merge after */
	ret = lmb_reserve_flags(&lmb, 0x40020000, 0x10000, LMB_NOMAP);
	ut_asserteq(ret, 1);
	ASSERT_LMB(&lmb, ram, ram_size, 1, 0x40010000, 0x20000,
		   0, 0, 0, 0);

	/* merge before */
	ret = lmb_reserve_flags(&lmb, 0x40000000, 0x10000, LMB_NOMAP);
	ut_asserteq(ret, 1);
	ASSERT_LMB(&lmb, ram, ram_size, 1, 0x40000000, 0x30000,
		   0, 0, 0, 0);

	ut_asserteq(lmb_is_nomap(&lmb.reserved.region[0]), 1);

	/* an adjacent region with a different flag must not merge */
	ret = lmb_reserve_flags(&lmb, 0x40030000, 0x10000, LMB_NONE);
	ut_asserteq(ret, 0);
	ASSERT_LMB(&lmb, ram, ram_size, 2, 0x40000000, 0x30000,
		   0x40030000, 0x10000, 0, 0);

	ut_asserteq(lmb_is_nomap(&lmb.reserved.region[0]), 1);
	ut_asserteq(lmb_is_nomap(&lmb.reserved.region[1]), 0);

	/* test that old API use LMB_NONE (merges with the LMB_NONE region) */
	ret = lmb_reserve(&lmb, 0x40040000, 0x10000);
	ut_asserteq(ret, 1);
	ASSERT_LMB(&lmb, ram, ram_size, 2, 0x40000000, 0x30000,
		   0x40030000, 0x20000, 0, 0);

	ut_asserteq(lmb_is_nomap(&lmb.reserved.region[0]), 1);
	ut_asserteq(lmb_is_nomap(&lmb.reserved.region[1]), 0);

	/* two disjoint no-map regions with a one-block gap between them */
	ret = lmb_reserve_flags(&lmb, 0x40070000, 0x10000, LMB_NOMAP);
	ut_asserteq(ret, 0);
	ASSERT_LMB(&lmb, ram, ram_size, 3, 0x40000000, 0x30000,
		   0x40030000, 0x20000, 0x40070000, 0x10000);

	ret = lmb_reserve_flags(&lmb, 0x40050000, 0x10000, LMB_NOMAP);
	ut_asserteq(ret, 0);
	ASSERT_LMB(&lmb, ram, ram_size, 4, 0x40000000, 0x30000,
		   0x40030000, 0x20000, 0x40050000, 0x10000);

	/* merge with 2 adjacent regions */
	ret = lmb_reserve_flags(&lmb, 0x40060000, 0x10000, LMB_NOMAP);
	ut_asserteq(ret, 2);
	ASSERT_LMB(&lmb, ram, ram_size, 3, 0x40000000, 0x30000,
		   0x40030000, 0x20000, 0x40050000, 0x30000);

	ut_asserteq(lmb_is_nomap(&lmb.reserved.region[0]), 1);
	ut_asserteq(lmb_is_nomap(&lmb.reserved.region[1]), 0);
	ut_asserteq(lmb_is_nomap(&lmb.reserved.region[2]), 1);

	return 0;
}
LIB_TEST(lib_test_lmb_flags, 0);
826