// SPDX-License-Identifier: GPL-2.0+
/*
 * (C) Copyright 2018 Simon Goldschmidt
 */

#include <common.h>
#include <dm.h>
#include <lmb.h>
#include <log.h>
#include <malloc.h>
#include <dm/test.h>
#include <test/test.h>
#include <test/ut.h>

static inline bool lmb_is_nomap(struct lmb_property *m)
{
	return m->flags & LMB_NOMAP;
}

static int check_lmb(struct unit_test_state *uts, struct lmb *lmb,
		     phys_addr_t ram_base, phys_size_t ram_size,
		     unsigned long num_reserved,
		     phys_addr_t base1, phys_size_t size1,
		     phys_addr_t base2, phys_size_t size2,
		     phys_addr_t base3, phys_size_t size3)
{
	if (ram_size) {
		ut_asserteq(lmb->memory.cnt, 1);
		ut_asserteq(lmb->memory.region[0].base, ram_base);
		ut_asserteq(lmb->memory.region[0].size, ram_size);
	}

	ut_asserteq(lmb->reserved.cnt, num_reserved);
	if (num_reserved > 0) {
		ut_asserteq(lmb->reserved.region[0].base, base1);
		ut_asserteq(lmb->reserved.region[0].size, size1);
	}
	if (num_reserved > 1) {
		ut_asserteq(lmb->reserved.region[1].base, base2);
		ut_asserteq(lmb->reserved.region[1].size, size2);
	}
	if (num_reserved > 2) {
		ut_asserteq(lmb->reserved.region[2].base, base3);
		ut_asserteq(lmb->reserved.region[2].size, size3);
	}
	return 0;
}

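/*
 * Shorthand for check_lmb(): pass a ram_base/ram_size of 0 to skip the
 * memory-region checks, and 0/0 pairs for reserved regions beyond
 * num_reserved. Relies on the 'uts' variable of the enclosing test.
 */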
#define ASSERT_LMB(lmb, ram_base, ram_size, num_reserved, base1, size1, \
		   base2, size2, base3, size3) \
		   ut_assert(!check_lmb(uts, lmb, ram_base, ram_size, \
			     num_reserved, base1, size1, base2, size2, base3, \
			     size3))

/*
 * Test helper function that reserves 64 KiB somewhere in the simulated RAM and
 * then does some alloc + free tests.
 */
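/*
 * Sketch of the layout for the 512 MiB @ 1 GiB case (test_multi_alloc_512mb):
 *
 *   0x40000000            0x50000000 0x50010000            0x60000000
 *   |-------- free -----------|- 64 KiB reserved -|---- free ----| ram_end
 *
 * As the tests below show, lmb_alloc() takes blocks from the top of RAM,
 * while lmb_alloc_base() with a limit of alloc_64k_end places blocks
 * directly below the reserved region.
 */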
static int test_multi_alloc(struct unit_test_state *uts, const phys_addr_t ram,
			    const phys_size_t ram_size, const phys_addr_t ram0,
			    const phys_size_t ram0_size,
			    const phys_addr_t alloc_64k_addr)
{
	const phys_addr_t ram_end = ram + ram_size;
	const phys_addr_t alloc_64k_end = alloc_64k_addr + 0x10000;

	struct lmb lmb;
	long ret;
	phys_addr_t a, a2, b, b2, c, d;

	/* check for overflow */
	ut_assert(ram_end == 0 || ram_end > ram);
	ut_assert(alloc_64k_end > alloc_64k_addr);
	/* check input addresses + size */
	ut_assert(alloc_64k_addr >= ram + 8);
	ut_assert(alloc_64k_end <= ram_end - 8);

	lmb_init(&lmb);

	if (ram0_size) {
		ret = lmb_add(&lmb, ram0, ram0_size);
		ut_asserteq(ret, 0);
	}

	ret = lmb_add(&lmb, ram, ram_size);
	ut_asserteq(ret, 0);

	if (ram0_size) {
		ut_asserteq(lmb.memory.cnt, 2);
		ut_asserteq(lmb.memory.region[0].base, ram0);
		ut_asserteq(lmb.memory.region[0].size, ram0_size);
		ut_asserteq(lmb.memory.region[1].base, ram);
		ut_asserteq(lmb.memory.region[1].size, ram_size);
	} else {
		ut_asserteq(lmb.memory.cnt, 1);
		ut_asserteq(lmb.memory.region[0].base, ram);
		ut_asserteq(lmb.memory.region[0].size, ram_size);
	}

	/* reserve 64 KiB somewhere */
	ret = lmb_reserve(&lmb, alloc_64k_addr, 0x10000);
	ut_asserteq(ret, 0);
	ASSERT_LMB(&lmb, 0, 0, 1, alloc_64k_addr, 0x10000,
		   0, 0, 0, 0);

	/* allocate somewhere, should be at the end of RAM */
	a = lmb_alloc(&lmb, 4, 1);
	ut_asserteq(a, ram_end - 4);
	ASSERT_LMB(&lmb, 0, 0, 2, alloc_64k_addr, 0x10000,
		   ram_end - 4, 4, 0, 0);
	/* alloc below end of reserved region -> below reserved region */
	b = lmb_alloc_base(&lmb, 4, 1, alloc_64k_end);
	ut_asserteq(b, alloc_64k_addr - 4);
	ASSERT_LMB(&lmb, 0, 0, 2,
		   alloc_64k_addr - 4, 0x10000 + 4, ram_end - 4, 4, 0, 0);

	/* 2nd time */
	c = lmb_alloc(&lmb, 4, 1);
	ut_asserteq(c, ram_end - 8);
	ASSERT_LMB(&lmb, 0, 0, 2,
		   alloc_64k_addr - 4, 0x10000 + 4, ram_end - 8, 8, 0, 0);
	d = lmb_alloc_base(&lmb, 4, 1, alloc_64k_end);
	ut_asserteq(d, alloc_64k_addr - 8);
	ASSERT_LMB(&lmb, 0, 0, 2,
		   alloc_64k_addr - 8, 0x10000 + 8, ram_end - 8, 8, 0, 0);

	ret = lmb_free(&lmb, a, 4);
	ut_asserteq(ret, 0);
	ASSERT_LMB(&lmb, 0, 0, 2,
		   alloc_64k_addr - 8, 0x10000 + 8, ram_end - 8, 4, 0, 0);
	/* allocate again to ensure we get the same address */
	a2 = lmb_alloc(&lmb, 4, 1);
	ut_asserteq(a, a2);
	ASSERT_LMB(&lmb, 0, 0, 2,
		   alloc_64k_addr - 8, 0x10000 + 8, ram_end - 8, 8, 0, 0);
	ret = lmb_free(&lmb, a2, 4);
	ut_asserteq(ret, 0);
	ASSERT_LMB(&lmb, 0, 0, 2,
		   alloc_64k_addr - 8, 0x10000 + 8, ram_end - 8, 4, 0, 0);

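	/*
	 * Freeing b from the middle of the coalesced region splits it,
	 * leaving three reservations: d's block, the original 64 KiB
	 * region, and c's block at the end of RAM.
	 */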
	ret = lmb_free(&lmb, b, 4);
	ut_asserteq(ret, 0);
	ASSERT_LMB(&lmb, 0, 0, 3,
		   alloc_64k_addr - 8, 4, alloc_64k_addr, 0x10000,
		   ram_end - 8, 4);
	/* allocate again to ensure we get the same address */
	b2 = lmb_alloc_base(&lmb, 4, 1, alloc_64k_end);
	ut_asserteq(b, b2);
	ASSERT_LMB(&lmb, 0, 0, 2,
		   alloc_64k_addr - 8, 0x10000 + 8, ram_end - 8, 4, 0, 0);
	ret = lmb_free(&lmb, b2, 4);
	ut_asserteq(ret, 0);
	ASSERT_LMB(&lmb, 0, 0, 3,
		   alloc_64k_addr - 8, 4, alloc_64k_addr, 0x10000,
		   ram_end - 8, 4);

	ret = lmb_free(&lmb, c, 4);
	ut_asserteq(ret, 0);
	ASSERT_LMB(&lmb, 0, 0, 2,
		   alloc_64k_addr - 8, 4, alloc_64k_addr, 0x10000, 0, 0);
	ret = lmb_free(&lmb, d, 4);
	ut_asserteq(ret, 0);
	ASSERT_LMB(&lmb, 0, 0, 1, alloc_64k_addr, 0x10000,
		   0, 0, 0, 0);

	if (ram0_size) {
		ut_asserteq(lmb.memory.cnt, 2);
		ut_asserteq(lmb.memory.region[0].base, ram0);
		ut_asserteq(lmb.memory.region[0].size, ram0_size);
		ut_asserteq(lmb.memory.region[1].base, ram);
		ut_asserteq(lmb.memory.region[1].size, ram_size);
	} else {
		ut_asserteq(lmb.memory.cnt, 1);
		ut_asserteq(lmb.memory.region[0].base, ram);
		ut_asserteq(lmb.memory.region[0].size, ram_size);
	}

	return 0;
}

static int test_multi_alloc_512mb(struct unit_test_state *uts,
				  const phys_addr_t ram)
{
	return test_multi_alloc(uts, ram, 0x20000000, 0, 0, ram + 0x10000000);
}

static int test_multi_alloc_512mb_x2(struct unit_test_state *uts,
				     const phys_addr_t ram,
				     const phys_addr_t ram0)
{
	return test_multi_alloc(uts, ram, 0x20000000, ram0, 0x20000000,
				ram + 0x10000000);
}

/* Create a memory region with one reserved region and allocate */
static int lib_test_lmb_simple(struct unit_test_state *uts)
{
	int ret;

	/* simulate 512 MiB RAM beginning at 1 GiB */
	ret = test_multi_alloc_512mb(uts, 0x40000000);
	if (ret)
		return ret;

	/* simulate 512 MiB RAM beginning at 1.5 GiB */
	return test_multi_alloc_512mb(uts, 0xE0000000);
}

DM_TEST(lib_test_lmb_simple, UT_TESTF_SCAN_PDATA | UT_TESTF_SCAN_FDT);

/* Create two memory regions with one reserved region and allocate */
static int lib_test_lmb_simple_x2(struct unit_test_state *uts)
{
	int ret;

	/* simulate 512 MiB RAM beginning at 2 GiB and 1 GiB */
	ret = test_multi_alloc_512mb_x2(uts, 0x80000000, 0x40000000);
	if (ret)
		return ret;

	/* simulate 512 MiB RAM beginning at 3.5 GiB and 1 GiB */
	return test_multi_alloc_512mb_x2(uts, 0xE0000000, 0x40000000);
}

DM_TEST(lib_test_lmb_simple_x2, UT_TESTF_SCAN_PDATA | UT_TESTF_SCAN_FDT);

/* Simulate 512 MiB RAM, allocate some blocks that fit/don't fit */
static int test_bigblock(struct unit_test_state *uts, const phys_addr_t ram)
{
	const phys_size_t ram_size = 0x20000000;
	const phys_size_t big_block_size = 0x10000000;
	const phys_addr_t ram_end = ram + ram_size;
	const phys_addr_t alloc_64k_addr = ram + 0x10000000;
	struct lmb lmb;
	long ret;
	phys_addr_t a, b;

	/* check for overflow */
	ut_assert(ram_end == 0 || ram_end > ram);

	lmb_init(&lmb);

	ret = lmb_add(&lmb, ram, ram_size);
	ut_asserteq(ret, 0);

	/* reserve 64 KiB in the middle of RAM */
	ret = lmb_reserve(&lmb, alloc_64k_addr, 0x10000);
	ut_asserteq(ret, 0);
	ASSERT_LMB(&lmb, ram, ram_size, 1, alloc_64k_addr, 0x10000,
		   0, 0, 0, 0);

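	/*
	 * lmb allocates from the top of RAM downwards; the gap above the
	 * reservation is 64 KiB short of big_block_size, so the block must
	 * land directly below the reservation and coalesces with it.
	 */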
	/* allocate a big block, should be below reserved */
	a = lmb_alloc(&lmb, big_block_size, 1);
	ut_asserteq(a, ram);
	ASSERT_LMB(&lmb, ram, ram_size, 1, a,
		   big_block_size + 0x10000, 0, 0, 0, 0);
	/* allocate 2nd big block */
	/* This should fail, printing an error */
	b = lmb_alloc(&lmb, big_block_size, 1);
	ut_asserteq(b, 0);
	ASSERT_LMB(&lmb, ram, ram_size, 1, a,
		   big_block_size + 0x10000, 0, 0, 0, 0);

	ret = lmb_free(&lmb, a, big_block_size);
	ut_asserteq(ret, 0);
	ASSERT_LMB(&lmb, ram, ram_size, 1, alloc_64k_addr, 0x10000,
		   0, 0, 0, 0);

	/* try to allocate a block as big as the whole RAM */
	/* This should fail, printing an error */
	a = lmb_alloc(&lmb, ram_size, 1);
	ut_asserteq(a, 0);
	ASSERT_LMB(&lmb, ram, ram_size, 1, alloc_64k_addr, 0x10000,
		   0, 0, 0, 0);

	return 0;
}

static int lib_test_lmb_big(struct unit_test_state *uts)
{
	int ret;

	/* simulate 512 MiB RAM beginning at 1 GiB */
	ret = test_bigblock(uts, 0x40000000);
	if (ret)
		return ret;

	/* simulate 512 MiB RAM beginning at 1.5 GiB */
	return test_bigblock(uts, 0xE0000000);
}

DM_TEST(lib_test_lmb_big, UT_TESTF_SCAN_PDATA | UT_TESTF_SCAN_FDT);

/* Simulate 512 MiB RAM, allocate a block without previous reservation */
static int test_noreserved(struct unit_test_state *uts, const phys_addr_t ram,
			   const phys_addr_t alloc_size, const ulong align)
{
	const phys_size_t ram_size = 0x20000000;
	const phys_addr_t ram_end = ram + ram_size;
	struct lmb lmb;
	long ret;
	phys_addr_t a, b;
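	/*
	 * Round alloc_size up to the next multiple of align; this bitwise
	 * trick assumes align is a power of two.
	 */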
	const phys_addr_t alloc_size_aligned = (alloc_size + align - 1) &
		~(align - 1);

	/* check for overflow */
	ut_assert(ram_end == 0 || ram_end > ram);

	lmb_init(&lmb);

	ret = lmb_add(&lmb, ram, ram_size);
	ut_asserteq(ret, 0);
	ASSERT_LMB(&lmb, ram, ram_size, 0, 0, 0, 0, 0, 0, 0);

	/* allocate a block */
	a = lmb_alloc(&lmb, alloc_size, align);
	ut_assert(a != 0);
	ASSERT_LMB(&lmb, ram, ram_size, 1, ram + ram_size - alloc_size_aligned,
		   alloc_size, 0, 0, 0, 0);
	/* allocate another block */
	b = lmb_alloc(&lmb, alloc_size, align);
	ut_assert(b != 0);
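	/*
	 * With an aligned size the two blocks are adjacent and coalesce
	 * into one reserved region; otherwise the alignment padding leaves
	 * a gap, so two separate regions remain.
	 */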
	if (alloc_size == alloc_size_aligned) {
		ASSERT_LMB(&lmb, ram, ram_size, 1, ram + ram_size -
			   (alloc_size_aligned * 2), alloc_size * 2, 0, 0, 0,
			   0);
	} else {
		ASSERT_LMB(&lmb, ram, ram_size, 2, ram + ram_size -
			   (alloc_size_aligned * 2), alloc_size, ram + ram_size
			   - alloc_size_aligned, alloc_size, 0, 0);
	}
	/* and free them */
	ret = lmb_free(&lmb, b, alloc_size);
	ut_asserteq(ret, 0);
	ASSERT_LMB(&lmb, ram, ram_size, 1, ram + ram_size - alloc_size_aligned,
		   alloc_size, 0, 0, 0, 0);
	ret = lmb_free(&lmb, a, alloc_size);
	ut_asserteq(ret, 0);
	ASSERT_LMB(&lmb, ram, ram_size, 0, 0, 0, 0, 0, 0, 0);

	/* allocate a block with base */
	b = lmb_alloc_base(&lmb, alloc_size, align, ram_end);
	ut_assert(a == b);
	ASSERT_LMB(&lmb, ram, ram_size, 1, ram + ram_size - alloc_size_aligned,
		   alloc_size, 0, 0, 0, 0);
	/* and free it */
	ret = lmb_free(&lmb, b, alloc_size);
	ut_asserteq(ret, 0);
	ASSERT_LMB(&lmb, ram, ram_size, 0, 0, 0, 0, 0, 0, 0);

	return 0;
}

static int lib_test_lmb_noreserved(struct unit_test_state *uts)
{
	int ret;

	/* simulate 512 MiB RAM beginning at 1 GiB */
	ret = test_noreserved(uts, 0x40000000, 4, 1);
	if (ret)
		return ret;

	/* simulate 512 MiB RAM beginning at 1.5 GiB */
	return test_noreserved(uts, 0xE0000000, 4, 1);
}

DM_TEST(lib_test_lmb_noreserved, UT_TESTF_SCAN_PDATA | UT_TESTF_SCAN_FDT);

static int lib_test_lmb_unaligned_size(struct unit_test_state *uts)
{
	int ret;

	/* simulate 512 MiB RAM beginning at 1 GiB */
	ret = test_noreserved(uts, 0x40000000, 5, 8);
	if (ret)
		return ret;

	/* simulate 512 MiB RAM beginning at 1.5 GiB */
	return test_noreserved(uts, 0xE0000000, 5, 8);
}

DM_TEST(lib_test_lmb_unaligned_size, UT_TESTF_SCAN_PDATA | UT_TESTF_SCAN_FDT);

/*
 * Simulate a RAM that starts at 0 and allocate down to address 0, which must
 * fail as '0' means failure for the lmb_alloc functions.
 */
static int lib_test_lmb_at_0(struct unit_test_state *uts)
{
	const phys_addr_t ram = 0;
	const phys_size_t ram_size = 0x20000000;
	struct lmb lmb;
	long ret;
	phys_addr_t a, b;

	lmb_init(&lmb);

	ret = lmb_add(&lmb, ram, ram_size);
	ut_asserteq(ret, 0);

	/* allocate nearly everything */
	a = lmb_alloc(&lmb, ram_size - 4, 1);
	ut_asserteq(a, ram + 4);
	ASSERT_LMB(&lmb, ram, ram_size, 1, a, ram_size - 4,
		   0, 0, 0, 0);
	/* allocate the rest */
	/* This should fail as the allocated address would be 0 */
	b = lmb_alloc(&lmb, 4, 1);
	ut_asserteq(b, 0);
	/* check that this was an error by checking lmb */
	ASSERT_LMB(&lmb, ram, ram_size, 1, a, ram_size - 4,
		   0, 0, 0, 0);
	/* check that this was an error by freeing b */
	ret = lmb_free(&lmb, b, 4);
	ut_asserteq(ret, -1);
	ASSERT_LMB(&lmb, ram, ram_size, 1, a, ram_size - 4,
		   0, 0, 0, 0);

	ret = lmb_free(&lmb, a, ram_size - 4);
	ut_asserteq(ret, 0);
	ASSERT_LMB(&lmb, ram, ram_size, 0, 0, 0, 0, 0, 0, 0);

	return 0;
}

DM_TEST(lib_test_lmb_at_0, UT_TESTF_SCAN_PDATA | UT_TESTF_SCAN_FDT);

/* Check that calling lmb_reserve with overlapping regions fails. */
static int lib_test_lmb_overlapping_reserve(struct unit_test_state *uts)
{
	const phys_addr_t ram = 0x40000000;
	const phys_size_t ram_size = 0x20000000;
	struct lmb lmb;
	long ret;

	lmb_init(&lmb);

	ret = lmb_add(&lmb, ram, ram_size);
	ut_asserteq(ret, 0);

	ret = lmb_reserve(&lmb, 0x40010000, 0x10000);
	ut_asserteq(ret, 0);
	ASSERT_LMB(&lmb, ram, ram_size, 1, 0x40010000, 0x10000,
		   0, 0, 0, 0);
	/* reserving an overlapping region must fail */
	ret = lmb_reserve(&lmb, 0x40011000, 0x10000);
	ut_asserteq(ret, -1);
	ASSERT_LMB(&lmb, ram, ram_size, 1, 0x40010000, 0x10000,
		   0, 0, 0, 0);
	/* reserve 3rd region */
	ret = lmb_reserve(&lmb, 0x40030000, 0x10000);
	ut_asserteq(ret, 0);
	ASSERT_LMB(&lmb, ram, ram_size, 2, 0x40010000, 0x10000,
		   0x40030000, 0x10000, 0, 0);
	/* reserve 2nd region in the gap, coalescing all three */
	ret = lmb_reserve(&lmb, 0x40020000, 0x10000);
	ut_assert(ret >= 0);
	ASSERT_LMB(&lmb, ram, ram_size, 1, 0x40010000, 0x30000,
		   0, 0, 0, 0);

	return 0;
}

DM_TEST(lib_test_lmb_overlapping_reserve,
	UT_TESTF_SCAN_PDATA | UT_TESTF_SCAN_FDT);

/*
 * Simulate 512 MiB RAM, reserve 3 blocks, allocate addresses in between.
 * Expect addresses outside the memory range to fail.
 */
static int test_alloc_addr(struct unit_test_state *uts, const phys_addr_t ram)
{
	const phys_size_t ram_size = 0x20000000;
	const phys_addr_t ram_end = ram + ram_size;
	const phys_size_t alloc_addr_a = ram + 0x8000000;
	const phys_size_t alloc_addr_b = ram + 0x8000000 * 2;
	const phys_size_t alloc_addr_c = ram + 0x8000000 * 3;
	struct lmb lmb;
	long ret;
	phys_addr_t a, b, c, d, e;

	/* check for overflow */
	ut_assert(ram_end == 0 || ram_end > ram);

	lmb_init(&lmb);

	ret = lmb_add(&lmb, ram, ram_size);
	ut_asserteq(ret, 0);

	/* reserve 3 blocks */
	ret = lmb_reserve(&lmb, alloc_addr_a, 0x10000);
	ut_asserteq(ret, 0);
	ret = lmb_reserve(&lmb, alloc_addr_b, 0x10000);
	ut_asserteq(ret, 0);
	ret = lmb_reserve(&lmb, alloc_addr_c, 0x10000);
	ut_asserteq(ret, 0);
	ASSERT_LMB(&lmb, ram, ram_size, 3, alloc_addr_a, 0x10000,
		   alloc_addr_b, 0x10000, alloc_addr_c, 0x10000);
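	/*
	 * Reserved layout now (each [#] is a 64 KiB block, spaced
	 * 128 MiB apart):
	 *
	 *   ram     alloc_addr_a   alloc_addr_b   alloc_addr_c      ram_end
	 *   |-- free --[#]-- free ----[#]-- free ----[#]---- free ----|
	 */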

	/* allocate blocks */
	a = lmb_alloc_addr(&lmb, ram, alloc_addr_a - ram);
	ut_asserteq(a, ram);
	ASSERT_LMB(&lmb, ram, ram_size, 3, ram, 0x8010000,
		   alloc_addr_b, 0x10000, alloc_addr_c, 0x10000);
	b = lmb_alloc_addr(&lmb, alloc_addr_a + 0x10000,
			   alloc_addr_b - alloc_addr_a - 0x10000);
	ut_asserteq(b, alloc_addr_a + 0x10000);
	ASSERT_LMB(&lmb, ram, ram_size, 2, ram, 0x10010000,
		   alloc_addr_c, 0x10000, 0, 0);
	c = lmb_alloc_addr(&lmb, alloc_addr_b + 0x10000,
			   alloc_addr_c - alloc_addr_b - 0x10000);
	ut_asserteq(c, alloc_addr_b + 0x10000);
	ASSERT_LMB(&lmb, ram, ram_size, 1, ram, 0x18010000,
		   0, 0, 0, 0);
	d = lmb_alloc_addr(&lmb, alloc_addr_c + 0x10000,
			   ram_end - alloc_addr_c - 0x10000);
	ut_asserteq(d, alloc_addr_c + 0x10000);
	ASSERT_LMB(&lmb, ram, ram_size, 1, ram, ram_size,
		   0, 0, 0, 0);

	/* allocating anything else should fail */
	e = lmb_alloc(&lmb, 1, 1);
	ut_asserteq(e, 0);
	ASSERT_LMB(&lmb, ram, ram_size, 1, ram, ram_size,
		   0, 0, 0, 0);

	ret = lmb_free(&lmb, d, ram_end - alloc_addr_c - 0x10000);
	ut_asserteq(ret, 0);

	/* allocate at 3 points in the free range */

	d = lmb_alloc_addr(&lmb, ram_end - 4, 4);
	ut_asserteq(d, ram_end - 4);
	ASSERT_LMB(&lmb, ram, ram_size, 2, ram, 0x18010000,
		   d, 4, 0, 0);
	ret = lmb_free(&lmb, d, 4);
	ut_asserteq(ret, 0);
	ASSERT_LMB(&lmb, ram, ram_size, 1, ram, 0x18010000,
		   0, 0, 0, 0);

	d = lmb_alloc_addr(&lmb, ram_end - 128, 4);
	ut_asserteq(d, ram_end - 128);
	ASSERT_LMB(&lmb, ram, ram_size, 2, ram, 0x18010000,
		   d, 4, 0, 0);
	ret = lmb_free(&lmb, d, 4);
	ut_asserteq(ret, 0);
	ASSERT_LMB(&lmb, ram, ram_size, 1, ram, 0x18010000,
		   0, 0, 0, 0);

	d = lmb_alloc_addr(&lmb, alloc_addr_c + 0x10000, 4);
	ut_asserteq(d, alloc_addr_c + 0x10000);
	ASSERT_LMB(&lmb, ram, ram_size, 1, ram, 0x18010004,
		   0, 0, 0, 0);
	ret = lmb_free(&lmb, d, 4);
	ut_asserteq(ret, 0);
	ASSERT_LMB(&lmb, ram, ram_size, 1, ram, 0x18010000,
		   0, 0, 0, 0);

	/* allocate at the bottom */
	ret = lmb_free(&lmb, a, alloc_addr_a - ram);
	ut_asserteq(ret, 0);
	ASSERT_LMB(&lmb, ram, ram_size, 1, ram + 0x8000000, 0x10010000,
		   0, 0, 0, 0);
	d = lmb_alloc_addr(&lmb, ram, 4);
	ut_asserteq(d, ram);
	ASSERT_LMB(&lmb, ram, ram_size, 2, d, 4,
		   ram + 0x8000000, 0x10010000, 0, 0);

	/* check that allocating outside memory fails */
	if (ram_end != 0) {
		ret = lmb_alloc_addr(&lmb, ram_end, 1);
		ut_asserteq(ret, 0);
	}
	if (ram != 0) {
		ret = lmb_alloc_addr(&lmb, ram - 1, 1);
		ut_asserteq(ret, 0);
	}

	return 0;
}

static int lib_test_lmb_alloc_addr(struct unit_test_state *uts)
{
	int ret;

	/* simulate 512 MiB RAM beginning at 1 GiB */
	ret = test_alloc_addr(uts, 0x40000000);
	if (ret)
		return ret;

	/* simulate 512 MiB RAM beginning at 1.5 GiB */
	return test_alloc_addr(uts, 0xE0000000);
}

DM_TEST(lib_test_lmb_alloc_addr, UT_TESTF_SCAN_PDATA | UT_TESTF_SCAN_FDT);

/* Simulate 512 MiB RAM, reserve 3 blocks, check addresses in between */
static int test_get_unreserved_size(struct unit_test_state *uts,
				    const phys_addr_t ram)
{
	const phys_size_t ram_size = 0x20000000;
	const phys_addr_t ram_end = ram + ram_size;
	const phys_size_t alloc_addr_a = ram + 0x8000000;
	const phys_size_t alloc_addr_b = ram + 0x8000000 * 2;
	const phys_size_t alloc_addr_c = ram + 0x8000000 * 3;
	struct lmb lmb;
	long ret;
	phys_size_t s;

	/* check for overflow */
	ut_assert(ram_end == 0 || ram_end > ram);

	lmb_init(&lmb);

	ret = lmb_add(&lmb, ram, ram_size);
	ut_asserteq(ret, 0);

	/* reserve 3 blocks */
	ret = lmb_reserve(&lmb, alloc_addr_a, 0x10000);
	ut_asserteq(ret, 0);
	ret = lmb_reserve(&lmb, alloc_addr_b, 0x10000);
	ut_asserteq(ret, 0);
	ret = lmb_reserve(&lmb, alloc_addr_c, 0x10000);
	ut_asserteq(ret, 0);
	ASSERT_LMB(&lmb, ram, ram_size, 3, alloc_addr_a, 0x10000,
		   alloc_addr_b, 0x10000, alloc_addr_c, 0x10000);

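	/*
	 * For each free address probed below, lmb_get_free_size() returns
	 * the number of free bytes from that address up to the next
	 * reservation (or up to ram_end for the last gap).
	 */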
	/* check addresses in between blocks */
	s = lmb_get_free_size(&lmb, ram);
	ut_asserteq(s, alloc_addr_a - ram);
	s = lmb_get_free_size(&lmb, ram + 0x10000);
	ut_asserteq(s, alloc_addr_a - ram - 0x10000);
	s = lmb_get_free_size(&lmb, alloc_addr_a - 4);
	ut_asserteq(s, 4);

	s = lmb_get_free_size(&lmb, alloc_addr_a + 0x10000);
	ut_asserteq(s, alloc_addr_b - alloc_addr_a - 0x10000);
	s = lmb_get_free_size(&lmb, alloc_addr_a + 0x20000);
	ut_asserteq(s, alloc_addr_b - alloc_addr_a - 0x20000);
	s = lmb_get_free_size(&lmb, alloc_addr_b - 4);
	ut_asserteq(s, 4);

	s = lmb_get_free_size(&lmb, alloc_addr_c + 0x10000);
	ut_asserteq(s, ram_end - alloc_addr_c - 0x10000);
	s = lmb_get_free_size(&lmb, alloc_addr_c + 0x20000);
	ut_asserteq(s, ram_end - alloc_addr_c - 0x20000);
	s = lmb_get_free_size(&lmb, ram_end - 4);
	ut_asserteq(s, 4);

	return 0;
}

static int lib_test_lmb_get_free_size(struct unit_test_state *uts)
{
	int ret;

	/* simulate 512 MiB RAM beginning at 1 GiB */
	ret = test_get_unreserved_size(uts, 0x40000000);
	if (ret)
		return ret;

	/* simulate 512 MiB RAM beginning at 1.5 GiB */
	return test_get_unreserved_size(uts, 0xE0000000);
}

DM_TEST(lib_test_lmb_get_free_size,
	UT_TESTF_SCAN_PDATA | UT_TESTF_SCAN_FDT);

#ifdef CONFIG_LMB_USE_MAX_REGIONS
static int lib_test_lmb_max_regions(struct unit_test_state *uts)
{
	const phys_addr_t ram = 0x00000000;
	/*
	 * All of the 32-bit memory space will contain regions for this test,
	 * so we need to scale ram_size (which in this case is the size of
	 * the lmb region) to match.
	 */
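	/*
	 * For example, with CONFIG_LMB_MAX_REGIONS = 8 this gives
	 * ((0xFFFFFFFF >> 8) + 1) * 8 = 0x01000000 * 8 = 0x08000000
	 * (128 MiB) per region.
	 */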
	const phys_size_t ram_size = ((0xFFFFFFFF >> CONFIG_LMB_MAX_REGIONS)
			+ 1) * CONFIG_LMB_MAX_REGIONS;
	const phys_size_t blk_size = 0x10000;
	phys_addr_t offset;
	struct lmb lmb;
	int ret, i;

	lmb_init(&lmb);

	ut_asserteq(lmb.memory.cnt, 0);
	ut_asserteq(lmb.memory.max, CONFIG_LMB_MAX_REGIONS);
	ut_asserteq(lmb.reserved.cnt, 0);
	ut_asserteq(lmb.reserved.max, CONFIG_LMB_MAX_REGIONS);

	/* add CONFIG_LMB_MAX_REGIONS memory regions */
	for (i = 0; i < CONFIG_LMB_MAX_REGIONS; i++) {
		offset = ram + 2 * i * ram_size;
		ret = lmb_add(&lmb, offset, ram_size);
		ut_asserteq(ret, 0);
	}
	ut_asserteq(lmb.memory.cnt, CONFIG_LMB_MAX_REGIONS);
	ut_asserteq(lmb.reserved.cnt, 0);

	/* adding a (CONFIG_LMB_MAX_REGIONS + 1)th memory region must fail */
	offset = ram + 2 * (CONFIG_LMB_MAX_REGIONS + 1) * ram_size;
	ret = lmb_add(&lmb, offset, ram_size);
	ut_asserteq(ret, -1);

	ut_asserteq(lmb.memory.cnt, CONFIG_LMB_MAX_REGIONS);
	ut_asserteq(lmb.reserved.cnt, 0);

	/* reserve CONFIG_LMB_MAX_REGIONS regions */
	for (i = 0; i < CONFIG_LMB_MAX_REGIONS; i++) {
		offset = ram + 2 * i * blk_size;
		ret = lmb_reserve(&lmb, offset, blk_size);
		ut_asserteq(ret, 0);
	}

	ut_asserteq(lmb.memory.cnt, CONFIG_LMB_MAX_REGIONS);
	ut_asserteq(lmb.reserved.cnt, CONFIG_LMB_MAX_REGIONS);

	/* reserving a (CONFIG_LMB_MAX_REGIONS + 1)th region must fail */
	offset = ram + 2 * (CONFIG_LMB_MAX_REGIONS + 1) * blk_size;
	ret = lmb_reserve(&lmb, offset, blk_size);
	ut_asserteq(ret, -1);

	ut_asserteq(lmb.memory.cnt, CONFIG_LMB_MAX_REGIONS);
	ut_asserteq(lmb.reserved.cnt, CONFIG_LMB_MAX_REGIONS);

	/* check each region */
	for (i = 0; i < CONFIG_LMB_MAX_REGIONS; i++)
		ut_asserteq(lmb.memory.region[i].base, ram + 2 * i * ram_size);

	for (i = 0; i < CONFIG_LMB_MAX_REGIONS; i++)
		ut_asserteq(lmb.reserved.region[i].base, ram + 2 * i * blk_size);

	return 0;
}

DM_TEST(lib_test_lmb_max_regions,
	UT_TESTF_SCAN_PDATA | UT_TESTF_SCAN_FDT);
#endif

static int lib_test_lmb_flags(struct unit_test_state *uts)
{
	const phys_addr_t ram = 0x40000000;
	const phys_size_t ram_size = 0x20000000;
	struct lmb lmb;
	long ret;

	lmb_init(&lmb);

	ret = lmb_add(&lmb, ram, ram_size);
	ut_asserteq(ret, 0);

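	/*
	 * As the assertions below show, lmb_reserve_flags() returns the
	 * number of regions the request was coalesced with: 0 when a new
	 * region is added (or the range was already reserved with the same
	 * flags), 1 or 2 when merged with adjacent same-flag regions, and
	 * -1 on error, e.g. overlap with a region carrying different flags.
	 */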
	/* reserve, same flag */
	ret = lmb_reserve_flags(&lmb, 0x40010000, 0x10000, LMB_NOMAP);
	ut_asserteq(ret, 0);
	ASSERT_LMB(&lmb, ram, ram_size, 1, 0x40010000, 0x10000,
		   0, 0, 0, 0);

	/* reserve again, same flag */
	ret = lmb_reserve_flags(&lmb, 0x40010000, 0x10000, LMB_NOMAP);
	ut_asserteq(ret, 0);
	ASSERT_LMB(&lmb, ram, ram_size, 1, 0x40010000, 0x10000,
		   0, 0, 0, 0);

	/* reserve again, new flag */
	ret = lmb_reserve_flags(&lmb, 0x40010000, 0x10000, LMB_NONE);
	ut_asserteq(ret, -1);
	ASSERT_LMB(&lmb, ram, ram_size, 1, 0x40010000, 0x10000,
		   0, 0, 0, 0);

	ut_asserteq(lmb_is_nomap(&lmb.reserved.region[0]), 1);

	/* merge after */
	ret = lmb_reserve_flags(&lmb, 0x40020000, 0x10000, LMB_NOMAP);
	ut_asserteq(ret, 1);
	ASSERT_LMB(&lmb, ram, ram_size, 1, 0x40010000, 0x20000,
		   0, 0, 0, 0);

	/* merge before */
	ret = lmb_reserve_flags(&lmb, 0x40000000, 0x10000, LMB_NOMAP);
	ut_asserteq(ret, 1);
	ASSERT_LMB(&lmb, ram, ram_size, 1, 0x40000000, 0x30000,
		   0, 0, 0, 0);

	ut_asserteq(lmb_is_nomap(&lmb.reserved.region[0]), 1);

	ret = lmb_reserve_flags(&lmb, 0x40030000, 0x10000, LMB_NONE);
	ut_asserteq(ret, 0);
	ASSERT_LMB(&lmb, ram, ram_size, 2, 0x40000000, 0x30000,
		   0x40030000, 0x10000, 0, 0);

	ut_asserteq(lmb_is_nomap(&lmb.reserved.region[0]), 1);
	ut_asserteq(lmb_is_nomap(&lmb.reserved.region[1]), 0);

	/* test that the old API uses LMB_NONE */
	ret = lmb_reserve(&lmb, 0x40040000, 0x10000);
	ut_asserteq(ret, 1);
	ASSERT_LMB(&lmb, ram, ram_size, 2, 0x40000000, 0x30000,
		   0x40030000, 0x20000, 0, 0);

	ut_asserteq(lmb_is_nomap(&lmb.reserved.region[0]), 1);
	ut_asserteq(lmb_is_nomap(&lmb.reserved.region[1]), 0);

	ret = lmb_reserve_flags(&lmb, 0x40070000, 0x10000, LMB_NOMAP);
	ut_asserteq(ret, 0);
	ASSERT_LMB(&lmb, ram, ram_size, 3, 0x40000000, 0x30000,
		   0x40030000, 0x20000, 0x40070000, 0x10000);

	ret = lmb_reserve_flags(&lmb, 0x40050000, 0x10000, LMB_NOMAP);
	ut_asserteq(ret, 0);
	ASSERT_LMB(&lmb, ram, ram_size, 4, 0x40000000, 0x30000,
		   0x40030000, 0x20000, 0x40050000, 0x10000);

	/* merge with 2 adjacent regions */
	ret = lmb_reserve_flags(&lmb, 0x40060000, 0x10000, LMB_NOMAP);
	ut_asserteq(ret, 2);
	ASSERT_LMB(&lmb, ram, ram_size, 3, 0x40000000, 0x30000,
		   0x40030000, 0x20000, 0x40050000, 0x30000);

	ut_asserteq(lmb_is_nomap(&lmb.reserved.region[0]), 1);
	ut_asserteq(lmb_is_nomap(&lmb.reserved.region[1]), 0);
	ut_asserteq(lmb_is_nomap(&lmb.reserved.region[2]), 1);

	return 0;
}

DM_TEST(lib_test_lmb_flags,
	UT_TESTF_SCAN_PDATA | UT_TESTF_SCAN_FDT);