// SPDX-License-Identifier: GPL-2.0+
/*
 * (C) Copyright 2018 Simon Goldschmidt
 */

#include <alist.h>
#include <dm.h>
#include <lmb.h>
#include <log.h>
#include <malloc.h>
#include <dm/test.h>
#include <test/lib.h>
#include <test/test.h>
#include <test/ut.h>

static inline bool lmb_is_nomap(struct lmb_region *m)
{
	return m->flags & LMB_NOMAP;
}

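/*
 * Compare the free and used region lists against the expected RAM region and
 * up to three expected reservations; used via the ASSERT_LMB() macro below so
 * a mismatch fails the calling test.
 */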
static int check_lmb(struct unit_test_state *uts, struct alist *mem_lst,
		     struct alist *used_lst, phys_addr_t ram_base,
		     phys_size_t ram_size, unsigned long num_reserved,
		     phys_addr_t base1, phys_size_t size1,
		     phys_addr_t base2, phys_size_t size2,
		     phys_addr_t base3, phys_size_t size3)
{
	struct lmb_region *mem, *used;

	mem = mem_lst->data;
	used = used_lst->data;

	if (ram_size) {
		ut_asserteq(mem_lst->count, 1);
		ut_asserteq(mem[0].base, ram_base);
		ut_asserteq(mem[0].size, ram_size);
	}

	ut_asserteq(used_lst->count, num_reserved);
	if (num_reserved > 0) {
		ut_asserteq(used[0].base, base1);
		ut_asserteq(used[0].size, size1);
	}
	if (num_reserved > 1) {
		ut_asserteq(used[1].base, base2);
		ut_asserteq(used[1].size, size2);
	}
	if (num_reserved > 2) {
		ut_asserteq(used[2].base, base3);
		ut_asserteq(used[2].size, size3);
	}
	return 0;
}

#define ASSERT_LMB(mem_lst, used_lst, ram_base, ram_size, num_reserved, base1, size1, \
		   base2, size2, base3, size3) \
	ut_assert(!check_lmb(uts, mem_lst, used_lst, ram_base, ram_size, \
			     num_reserved, base1, size1, base2, size2, base3, \
			     size3))

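/*
 * Push a fresh lmb store for the test and hand back pointers to its free
 * (available_mem) and used (used_mem) region lists; each test ends with
 * lmb_pop() to restore the previous state.
 */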
static int setup_lmb_test(struct unit_test_state *uts, struct lmb *store,
			  struct alist **mem_lstp, struct alist **used_lstp)
{
	struct lmb *lmb;

	ut_assertok(lmb_push(store));
	lmb = lmb_get();
	*mem_lstp = &lmb->available_mem;
	*used_lstp = &lmb->used_mem;

	return 0;
}

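/* Reserve a region at a fixed address by allocating it via lmb_alloc_mem() */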
static int lmb_reserve(phys_addr_t addr, phys_size_t size, u32 flags)
{
	int err;

	err = lmb_alloc_mem(LMB_MEM_ALLOC_ADDR, 0, &addr, size, flags);
	if (err)
		return err;

	return 0;
}

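/* Allocate anywhere in free memory, returning 0 on failure */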
static phys_addr_t lmb_alloc(phys_size_t size, ulong align)
{
	int err;
	phys_addr_t addr;

	err = lmb_alloc_mem(LMB_MEM_ALLOC_ANY, align, &addr, size, LMB_NONE);
	if (err)
		return 0;

	return addr;
}

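/* Allocate below max_addr, returning 0 on failure */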
static phys_addr_t lmb_alloc_base(phys_size_t size, ulong align,
				  phys_addr_t max_addr, u32 flags)
{
	int err;
	phys_addr_t addr;

	addr = max_addr;
	err = lmb_alloc_mem(LMB_MEM_ALLOC_MAX, align, &addr, size, flags);
	if (err)
		return 0;

	return addr;
}

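/* Allocating at a fixed address is the same operation as reserving it */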
#define lmb_alloc_addr(addr, size, flags) lmb_reserve(addr, size, flags)

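/*
 * Add one or two RAM regions, reserve 64 KiB, then allocate and free small
 * blocks around the reservation, checking the region lists after each step.
 */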
static int test_multi_alloc(struct unit_test_state *uts, const phys_addr_t ram,
			    const phys_size_t ram_size, const phys_addr_t ram0,
			    const phys_size_t ram0_size,
			    const phys_addr_t alloc_64k_addr)
{
	const phys_addr_t ram_end = ram + ram_size;
	const phys_addr_t alloc_64k_end = alloc_64k_addr + 0x10000;

	long ret;
	struct alist *mem_lst, *used_lst;
	struct lmb_region *mem, *used;
	phys_addr_t a, a2, b, b2, c, d;
	struct lmb store;

	/* check for overflow */
	ut_assert(ram_end == 0 || ram_end > ram);
	ut_assert(alloc_64k_end > alloc_64k_addr);
	/* check input addresses + size */
	ut_assert(alloc_64k_addr >= ram + 8);
	ut_assert(alloc_64k_end <= ram_end - 8);

	ut_assertok(setup_lmb_test(uts, &store, &mem_lst, &used_lst));
	mem = mem_lst->data;
	used = used_lst->data;

	if (ram0_size) {
		ret = lmb_add(ram0, ram0_size);
		ut_asserteq(ret, 0);
	}

	ret = lmb_add(ram, ram_size);
	ut_asserteq(ret, 0);

	if (ram0_size) {
		ut_asserteq(mem_lst->count, 2);
		ut_asserteq(mem[0].base, ram0);
		ut_asserteq(mem[0].size, ram0_size);
		ut_asserteq(mem[1].base, ram);
		ut_asserteq(mem[1].size, ram_size);
	} else {
		ut_asserteq(mem_lst->count, 1);
		ut_asserteq(mem[0].base, ram);
		ut_asserteq(mem[0].size, ram_size);
	}

	/* reserve 64KiB somewhere */
	ret = lmb_reserve(alloc_64k_addr, 0x10000, LMB_NONE);
	ut_asserteq(ret, 0);
	ASSERT_LMB(mem_lst, used_lst, 0, 0, 1, alloc_64k_addr, 0x10000,
		   0, 0, 0, 0);

	/* allocate somewhere, should be at the end of RAM */
	a = lmb_alloc(4, 1);
	ut_asserteq(a, ram_end - 4);
	ASSERT_LMB(mem_lst, used_lst, 0, 0, 2, alloc_64k_addr, 0x10000,
		   ram_end - 4, 4, 0, 0);
	/* alloc below end of reserved region -> below reserved region */
	b = lmb_alloc_base(4, 1, alloc_64k_end, LMB_NONE);
	ut_asserteq(b, alloc_64k_addr - 4);
	ASSERT_LMB(mem_lst, used_lst, 0, 0, 2,
		   alloc_64k_addr - 4, 0x10000 + 4, ram_end - 4, 4, 0, 0);

	/* 2nd time */
	c = lmb_alloc(4, 1);
	ut_asserteq(c, ram_end - 8);
	ASSERT_LMB(mem_lst, used_lst, 0, 0, 2,
		   alloc_64k_addr - 4, 0x10000 + 4, ram_end - 8, 8, 0, 0);
	d = lmb_alloc_base(4, 1, alloc_64k_end, LMB_NONE);
	ut_asserteq(d, alloc_64k_addr - 8);
	ASSERT_LMB(mem_lst, used_lst, 0, 0, 2,
		   alloc_64k_addr - 8, 0x10000 + 8, ram_end - 8, 8, 0, 0);

	ret = lmb_free(a, 4, LMB_NONE);
	ut_asserteq(ret, 0);
	ASSERT_LMB(mem_lst, used_lst, 0, 0, 2,
		   alloc_64k_addr - 8, 0x10000 + 8, ram_end - 8, 4, 0, 0);
	/* allocate again to ensure we get the same address */
	a2 = lmb_alloc(4, 1);
	ut_asserteq(a, a2);
	ASSERT_LMB(mem_lst, used_lst, 0, 0, 2,
		   alloc_64k_addr - 8, 0x10000 + 8, ram_end - 8, 8, 0, 0);
	ret = lmb_free(a2, 4, LMB_NONE);
	ut_asserteq(ret, 0);
	ASSERT_LMB(mem_lst, used_lst, 0, 0, 2,
		   alloc_64k_addr - 8, 0x10000 + 8, ram_end - 8, 4, 0, 0);

	ret = lmb_free(b, 4, LMB_NONE);
	ut_asserteq(ret, 0);
	ASSERT_LMB(mem_lst, used_lst, 0, 0, 3,
		   alloc_64k_addr - 8, 4, alloc_64k_addr, 0x10000,
		   ram_end - 8, 4);
	/* allocate again to ensure we get the same address */
	b2 = lmb_alloc_base(4, 1, alloc_64k_end, LMB_NONE);
	ut_asserteq(b, b2);
	ASSERT_LMB(mem_lst, used_lst, 0, 0, 2,
		   alloc_64k_addr - 8, 0x10000 + 8, ram_end - 8, 4, 0, 0);
	ret = lmb_free(b2, 4, LMB_NONE);
	ut_asserteq(ret, 0);
	ASSERT_LMB(mem_lst, used_lst, 0, 0, 3,
		   alloc_64k_addr - 8, 4, alloc_64k_addr, 0x10000,
		   ram_end - 8, 4);

	ret = lmb_free(c, 4, LMB_NONE);
	ut_asserteq(ret, 0);
	ASSERT_LMB(mem_lst, used_lst, 0, 0, 2,
		   alloc_64k_addr - 8, 4, alloc_64k_addr, 0x10000, 0, 0);
	ret = lmb_free(d, 4, LMB_NONE);
	ut_asserteq(ret, 0);
	ASSERT_LMB(mem_lst, used_lst, 0, 0, 1, alloc_64k_addr, 0x10000,
		   0, 0, 0, 0);

	if (ram0_size) {
		ut_asserteq(mem_lst->count, 2);
		ut_asserteq(mem[0].base, ram0);
		ut_asserteq(mem[0].size, ram0_size);
		ut_asserteq(mem[1].base, ram);
		ut_asserteq(mem[1].size, ram_size);
	} else {
		ut_asserteq(mem_lst->count, 1);
		ut_asserteq(mem[0].base, ram);
		ut_asserteq(mem[0].size, ram_size);
	}

	lmb_pop(&store);

	return 0;
}

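/* Wrappers that feed canned 512 MiB RAM layouts to test_multi_alloc() */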
static int test_multi_alloc_512mb(struct unit_test_state *uts,
				  const phys_addr_t ram)
{
	return test_multi_alloc(uts, ram, 0x20000000, 0, 0, ram + 0x10000000);
}

static int test_multi_alloc_512mb_x2(struct unit_test_state *uts,
				     const phys_addr_t ram,
				     const phys_addr_t ram0)
{
	return test_multi_alloc(uts, ram, 0x20000000, ram0, 0x20000000,
				ram + 0x10000000);
}

/* Create a memory region with one reserved region and allocate */
static int lib_test_lmb_simple(struct unit_test_state *uts)
{
	int ret;

	/* simulate 512 MiB RAM beginning at 1GiB */
	ret = test_multi_alloc_512mb(uts, 0x40000000);
	if (ret)
		return ret;

	/* simulate 512 MiB RAM beginning at 1.5GiB */
	return test_multi_alloc_512mb(uts, 0xE0000000);
}
LIB_TEST(lib_test_lmb_simple, 0);

/* Create two memory regions with one reserved region and allocate */
static int lib_test_lmb_simple_x2(struct unit_test_state *uts)
{
	int ret;

	/* simulate 512 MiB RAM beginning at 2GiB and 1 GiB */
	ret = test_multi_alloc_512mb_x2(uts, 0x80000000, 0x40000000);
	if (ret)
		return ret;

	/* simulate 512 MiB RAM beginning at 3.5GiB and 1 GiB */
	return test_multi_alloc_512mb_x2(uts, 0xE0000000, 0x40000000);
}
LIB_TEST(lib_test_lmb_simple_x2, 0);

/* Simulate 512 MiB RAM, allocate some blocks that fit/don't fit */
static int test_bigblock(struct unit_test_state *uts, const phys_addr_t ram)
{
	const phys_size_t ram_size = 0x20000000;
	const phys_size_t big_block_size = 0x10000000;
	const phys_addr_t ram_end = ram + ram_size;
	const phys_addr_t alloc_64k_addr = ram + 0x10000000;
	struct alist *mem_lst, *used_lst;
	long ret;
	phys_addr_t a, b;
	struct lmb store;

	/* check for overflow */
	ut_assert(ram_end == 0 || ram_end > ram);

	ut_assertok(setup_lmb_test(uts, &store, &mem_lst, &used_lst));

	ret = lmb_add(ram, ram_size);
	ut_asserteq(ret, 0);

	/* reserve 64KiB in the middle of RAM */
	ret = lmb_reserve(alloc_64k_addr, 0x10000, LMB_NONE);
	ut_asserteq(ret, 0);
	ASSERT_LMB(mem_lst, used_lst, ram, ram_size, 1, alloc_64k_addr, 0x10000,
		   0, 0, 0, 0);

	/* allocate a big block, should be below reserved */
	a = lmb_alloc(big_block_size, 1);
	ut_asserteq(a, ram);
	ASSERT_LMB(mem_lst, used_lst, ram, ram_size, 1, a,
		   big_block_size + 0x10000, 0, 0, 0, 0);
	/* allocate 2nd big block */
	/* This should fail, printing an error */
	b = lmb_alloc(big_block_size, 1);
	ut_asserteq(b, 0);
	ASSERT_LMB(mem_lst, used_lst, ram, ram_size, 1, a,
		   big_block_size + 0x10000, 0, 0, 0, 0);

	ret = lmb_free(a, big_block_size, LMB_NONE);
	ut_asserteq(ret, 0);
	ASSERT_LMB(mem_lst, used_lst, ram, ram_size, 1, alloc_64k_addr, 0x10000,
		   0, 0, 0, 0);

	/* allocate too big block */
	/* This should fail, printing an error */
	a = lmb_alloc(ram_size, 1);
	ut_asserteq(a, 0);
	ASSERT_LMB(mem_lst, used_lst, ram, ram_size, 1, alloc_64k_addr, 0x10000,
		   0, 0, 0, 0);

	lmb_pop(&store);

	return 0;
}

static int lib_test_lmb_big(struct unit_test_state *uts)
{
	int ret;

	/* simulate 512 MiB RAM beginning at 1GiB */
	ret = test_bigblock(uts, 0x40000000);
	if (ret)
		return ret;

	/* simulate 512 MiB RAM beginning at 1.5GiB */
	return test_bigblock(uts, 0xE0000000);
}
LIB_TEST(lib_test_lmb_big, 0);

/* Simulate 512 MiB RAM, allocate a block without previous reservation */
static int test_noreserved(struct unit_test_state *uts, const phys_addr_t ram,
			   const phys_addr_t alloc_size, const ulong align)
{
	const phys_size_t ram_size = 0x20000000;
	const phys_addr_t ram_end = ram + ram_size;
	long ret;
	phys_addr_t a, b;
	struct lmb store;
	struct alist *mem_lst, *used_lst;
	const phys_addr_t alloc_size_aligned = (alloc_size + align - 1) &
		~(align - 1);

	/* check for overflow */
	ut_assert(ram_end == 0 || ram_end > ram);

	ut_assertok(setup_lmb_test(uts, &store, &mem_lst, &used_lst));

	ret = lmb_add(ram, ram_size);
	ut_asserteq(ret, 0);
	ASSERT_LMB(mem_lst, used_lst, ram, ram_size, 0, 0, 0, 0, 0, 0, 0);

	/* allocate a block */
	a = lmb_alloc(alloc_size, align);
	ut_assert(a != 0);
	ASSERT_LMB(mem_lst, used_lst, ram, ram_size, 1,
		   ram + ram_size - alloc_size_aligned, alloc_size, 0, 0, 0, 0);

	/* allocate another block */
	b = lmb_alloc(alloc_size, align);
	ut_assert(b != 0);
	if (alloc_size == alloc_size_aligned) {
		ASSERT_LMB(mem_lst, used_lst, ram, ram_size, 1, ram + ram_size -
			   (alloc_size_aligned * 2), alloc_size * 2, 0, 0, 0,
			   0);
	} else {
		ASSERT_LMB(mem_lst, used_lst, ram, ram_size, 2, ram + ram_size -
			   (alloc_size_aligned * 2), alloc_size, ram + ram_size
			   - alloc_size_aligned, alloc_size, 0, 0);
	}
	/* and free them */
	ret = lmb_free(b, alloc_size, LMB_NONE);
	ut_asserteq(ret, 0);
	ASSERT_LMB(mem_lst, used_lst, ram, ram_size, 1,
		   ram + ram_size - alloc_size_aligned,
		   alloc_size, 0, 0, 0, 0);
	ret = lmb_free(a, alloc_size, LMB_NONE);
	ut_asserteq(ret, 0);
	ASSERT_LMB(mem_lst, used_lst, ram, ram_size, 0, 0, 0, 0, 0, 0, 0);

	/* allocate a block with base */
	b = lmb_alloc_base(alloc_size, align, ram_end, LMB_NONE);
	ut_assert(a == b);
	ASSERT_LMB(mem_lst, used_lst, ram, ram_size, 1,
		   ram + ram_size - alloc_size_aligned,
		   alloc_size, 0, 0, 0, 0);
	/* and free it */
	ret = lmb_free(b, alloc_size, LMB_NONE);
	ut_asserteq(ret, 0);
	ASSERT_LMB(mem_lst, used_lst, ram, ram_size, 0, 0, 0, 0, 0, 0, 0);

	lmb_pop(&store);

	return 0;
}

static int lib_test_lmb_noreserved(struct unit_test_state *uts)
{
	int ret;

	/* simulate 512 MiB RAM beginning at 1GiB */
	ret = test_noreserved(uts, 0x40000000, 4, 1);
	if (ret)
		return ret;

	/* simulate 512 MiB RAM beginning at 1.5GiB */
	return test_noreserved(uts, 0xE0000000, 4, 1);
}
LIB_TEST(lib_test_lmb_noreserved, 0);

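/* As above, but with an allocation size that is not a multiple of the alignment */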
static int lib_test_lmb_unaligned_size(struct unit_test_state *uts)
{
	int ret;

	/* simulate 512 MiB RAM beginning at 1GiB */
	ret = test_noreserved(uts, 0x40000000, 5, 8);
	if (ret)
		return ret;

	/* simulate 512 MiB RAM beginning at 1.5GiB */
	return test_noreserved(uts, 0xE0000000, 5, 8);
}
LIB_TEST(lib_test_lmb_unaligned_size, 0);

/*
 * Simulate a RAM that starts at 0 and allocate down to address 0, which must
 * fail as '0' means failure for the lmb_alloc functions.
 */
static int lib_test_lmb_at_0(struct unit_test_state *uts)
{
	const phys_addr_t ram = 0;
	const phys_size_t ram_size = 0x20000000;
	struct lmb store;
	struct alist *mem_lst, *used_lst;
	long ret;
	phys_addr_t a, b;

	ut_assertok(setup_lmb_test(uts, &store, &mem_lst, &used_lst));

	ret = lmb_add(ram, ram_size);
	ut_asserteq(ret, 0);

	/* allocate nearly everything */
	a = lmb_alloc(ram_size - 4, 1);
	ut_asserteq(a, ram + 4);
	ASSERT_LMB(mem_lst, used_lst, ram, ram_size, 1, a, ram_size - 4,
		   0, 0, 0, 0);
	/* allocate the rest */
	/* This should fail as the allocated address would be 0 */
	b = lmb_alloc(4, 1);
	ut_asserteq(b, 0);
	/* check that this was an error by checking lmb */
	ASSERT_LMB(mem_lst, used_lst, ram, ram_size, 1, a, ram_size - 4,
		   0, 0, 0, 0);
	/* check that this was an error by freeing b */
	ret = lmb_free(b, 4, LMB_NONE);
	ut_asserteq(ret, -1);
	ASSERT_LMB(mem_lst, used_lst, ram, ram_size, 1, a, ram_size - 4,
		   0, 0, 0, 0);

	ret = lmb_free(a, ram_size - 4, LMB_NONE);
	ut_asserteq(ret, 0);
	ASSERT_LMB(mem_lst, used_lst, ram, ram_size, 0, 0, 0, 0, 0, 0, 0);

	lmb_pop(&store);

	return 0;
}
LIB_TEST(lib_test_lmb_at_0, 0);

/* Check that calling lmb_reserve with overlapping regions fails. */
static int lib_test_lmb_overlapping_reserve(struct unit_test_state *uts)
{
	const phys_addr_t ram = 0x40000000;
	const phys_size_t ram_size = 0x20000000;
	struct lmb store;
	struct alist *mem_lst, *used_lst;
	long ret;

	ut_assertok(setup_lmb_test(uts, &store, &mem_lst, &used_lst));

	ret = lmb_add(ram, ram_size);
	ut_asserteq(ret, 0);

	ret = lmb_reserve(0x40010000, 0x10000, LMB_NONE);
	ut_asserteq(ret, 0);
	ASSERT_LMB(mem_lst, used_lst, ram, ram_size, 1, 0x40010000, 0x10000,
		   0, 0, 0, 0);

	/* allocate overlapping region */
	ret = lmb_reserve(0x40011000, 0x10000, LMB_NONE);
	ut_asserteq(ret, 0);
	ASSERT_LMB(mem_lst, used_lst, ram, ram_size, 1, 0x40010000, 0x11000,
		   0, 0, 0, 0);
	/* allocate 2nd region */
	ret = lmb_reserve(0x40030000, 0x10000, LMB_NONE);
	ut_asserteq(ret, 0);
	ASSERT_LMB(mem_lst, used_lst, ram, ram_size, 2, 0x40010000, 0x11000,
		   0x40030000, 0x10000, 0, 0);
	/* allocate 3rd region, this should coalesce all regions into one */
	ret = lmb_reserve(0x40020000, 0x10000, LMB_NONE);
	ut_assert(ret >= 0);
	ASSERT_LMB(mem_lst, used_lst, ram, ram_size, 1, 0x40010000, 0x30000,
		   0, 0, 0, 0);

	/* allocate 2nd region, which should be added as first region */
	ret = lmb_reserve(0x40000000, 0x8000, LMB_NONE);
	ut_assert(ret >= 0);
	ASSERT_LMB(mem_lst, used_lst, ram, ram_size, 2, 0x40000000, 0x8000,
		   0x40010000, 0x30000, 0, 0);

	/* allocate 3rd region, coalesce with first and overlap with second */
	ret = lmb_reserve(0x40008000, 0x10000, LMB_NONE);
	ut_assert(ret >= 0);
	ASSERT_LMB(mem_lst, used_lst, ram, ram_size, 1, 0x40000000, 0x40000,
		   0, 0, 0, 0);

	/* try to allocate overlapping region with a different flag, should fail */
	ret = lmb_reserve(0x40008000, 0x1000, LMB_NOOVERWRITE);
	ut_asserteq(ret, -EEXIST);

	/* allocate another region at 0x40050000 with a different flag */
	ret = lmb_reserve(0x40050000, 0x10000, LMB_NOOVERWRITE);
	ut_asserteq(ret, 0);
	ASSERT_LMB(mem_lst, used_lst, ram, ram_size, 2, 0x40000000, 0x40000,
		   0x40050000, 0x10000, 0, 0);

	/*
	 * try to reserve a region adjacent to region 1 overlapping the 2nd region,
	 * should fail
	 */
	ret = lmb_reserve(0x40040000, 0x20000, LMB_NONE);
	ut_asserteq(ret, -EEXIST);

	/*
	 * try to reserve a region between the two regions, but without an overlap;
	 * should succeed, and the added region coalesces with region 1
	 */
	ret = lmb_reserve(0x40040000, 0x10000, LMB_NONE);
	ut_asserteq(ret, 0);
	ASSERT_LMB(mem_lst, used_lst, ram, ram_size, 2, 0x40000000, 0x50000,
		   0x40050000, 0x10000, 0, 0);

	/*
	 * try to reserve a region which overlaps with both the regions,
	 * should fail as the flags do not match
	 */
	ret = lmb_reserve(0x40020000, 0x80000, LMB_NONE);
	ut_asserteq(ret, -EEXIST);
	ASSERT_LMB(mem_lst, used_lst, ram, ram_size, 2, 0x40000000, 0x50000,
		   0x40050000, 0x10000, 0, 0);

	lmb_pop(&store);

	return 0;
}
LIB_TEST(lib_test_lmb_overlapping_reserve, 0);

/*
 * Simulate 512 MiB RAM, reserve 3 blocks, allocate addresses in between.
 * Expect addresses outside the memory range to fail.
 */
static int test_alloc_addr(struct unit_test_state *uts, const phys_addr_t ram)
{
	struct lmb store;
	struct alist *mem_lst, *used_lst;
	const phys_size_t ram_size = 0x20000000;
	const phys_addr_t ram_end = ram + ram_size;
	const phys_size_t alloc_addr_a = ram + 0x8000000;
	const phys_size_t alloc_addr_b = ram + 0x8000000 * 2;
	const phys_size_t alloc_addr_c = ram + 0x8000000 * 3;
	long ret;
	phys_addr_t a, b, c, d, e;

	/* check for overflow */
	ut_assert(ram_end == 0 || ram_end > ram);

	ut_assertok(setup_lmb_test(uts, &store, &mem_lst, &used_lst));

	ret = lmb_add(ram, ram_size);
	ut_asserteq(ret, 0);

	/* Try to allocate a page twice */
	b = lmb_alloc_addr(alloc_addr_a, 0x1000, LMB_NONE);
	ut_asserteq(b, 0);
	b = lmb_alloc_addr(alloc_addr_a, 0x1000, LMB_NOOVERWRITE);
	ut_asserteq(b, -EEXIST);
	b = lmb_alloc_addr(alloc_addr_a, 0x1000, LMB_NONE);
	ut_asserteq(b, 0);
	b = lmb_alloc_addr(alloc_addr_a, 0x2000, LMB_NONE);
	ut_asserteq(b, 0);
	ret = lmb_free(alloc_addr_a, 0x2000, LMB_NONE);
	ut_asserteq(ret, 0);
	b = lmb_alloc_addr(alloc_addr_a, 0x1000, LMB_NOOVERWRITE);
	ut_asserteq(b, 0);
	b = lmb_alloc_addr(alloc_addr_a, 0x1000, LMB_NONE);
	ut_asserteq(b, -EEXIST);
	b = lmb_alloc_addr(alloc_addr_a, 0x1000, LMB_NOOVERWRITE);
	ut_asserteq(b, -EEXIST);
	ret = lmb_free(alloc_addr_a, 0x1000, LMB_NONE);
	ut_asserteq(ret, 0);

	/*
	 * Add two regions with different flags, region1 and region2 with
	 * a gap between them.
	 * Try adding another region, adjacent to region 1 and overlapping
	 * region 2. Should fail.
	 */
	a = lmb_alloc_addr(alloc_addr_a, 0x1000, LMB_NONE);
	ut_asserteq(a, 0);

	b = lmb_alloc_addr(alloc_addr_a + 0x4000, 0x1000, LMB_NOOVERWRITE);
	ut_asserteq(b, 0);
	ASSERT_LMB(mem_lst, used_lst, ram, ram_size, 2, alloc_addr_a, 0x1000,
		   alloc_addr_a + 0x4000, 0x1000, 0, 0);

	c = lmb_alloc_addr(alloc_addr_a + 0x1000, 0x5000, LMB_NONE);
	ut_asserteq(c, -EEXIST);
	ASSERT_LMB(mem_lst, used_lst, ram, ram_size, 2, alloc_addr_a, 0x1000,
		   alloc_addr_a + 0x4000, 0x1000, 0, 0);

	ret = lmb_free(alloc_addr_a, 0x1000, LMB_NONE);
	ut_asserteq(ret, 0);
	ret = lmb_free(alloc_addr_a + 0x4000, 0x1000, LMB_NOOVERWRITE);
	ut_asserteq(ret, 0);

	/*
	 * Add two regions with the same flags (LMB_NONE), region1 and
	 * region2 with a gap between them.
	 * Try adding another region, adjacent to region 1 and overlapping
	 * region 2. Should succeed. All regions should coalesce into a
	 * single region.
	 */
	a = lmb_alloc_addr(alloc_addr_a, 0x1000, LMB_NONE);
	ut_asserteq(a, 0);

	b = lmb_alloc_addr(alloc_addr_a + 0x4000, 0x1000, LMB_NONE);
	ut_asserteq(b, 0);
	ASSERT_LMB(mem_lst, used_lst, ram, ram_size, 2, alloc_addr_a, 0x1000,
		   alloc_addr_a + 0x4000, 0x1000, 0, 0);

	c = lmb_alloc_addr(alloc_addr_a + 0x1000, 0x5000, LMB_NONE);
	ut_asserteq(c, 0);
	ASSERT_LMB(mem_lst, used_lst, ram, ram_size, 1, alloc_addr_a, 0x6000,
		   0, 0, 0, 0);

	ret = lmb_free(alloc_addr_a, 0x6000, LMB_NONE);
	ut_asserteq(ret, 0);

	/*
	 * Add two regions with the same flags (LMB_NOOVERWRITE), region1 and
	 * region2 with a gap between them.
	 * Try adding another region, adjacent to region 1 and overlapping
	 * region 2. Should fail.
	 */
	a = lmb_alloc_addr(alloc_addr_a, 0x1000, LMB_NOOVERWRITE);
	ut_asserteq(a, 0);

	b = lmb_alloc_addr(alloc_addr_a + 0x4000, 0x1000, LMB_NOOVERWRITE);
	ut_asserteq(b, 0);
	ASSERT_LMB(mem_lst, used_lst, ram, ram_size, 2, alloc_addr_a, 0x1000,
		   alloc_addr_a + 0x4000, 0x1000, 0, 0);

	c = lmb_alloc_addr(alloc_addr_a + 0x1000, 0x5000, LMB_NOOVERWRITE);
	ut_asserteq(c, -EEXIST);
	ASSERT_LMB(mem_lst, used_lst, ram, ram_size, 2, alloc_addr_a, 0x1000,
		   alloc_addr_a + 0x4000, 0x1000, 0, 0);

	ret = lmb_free(alloc_addr_a, 0x1000, LMB_NOOVERWRITE);
	ut_asserteq(ret, 0);
	ret = lmb_free(alloc_addr_a + 0x4000, 0x1000, LMB_NOOVERWRITE);
	ut_asserteq(ret, 0);

	/* reserve 3 blocks */
	ret = lmb_reserve(alloc_addr_a, 0x10000, LMB_NONE);
	ut_asserteq(ret, 0);
	ret = lmb_reserve(alloc_addr_b, 0x10000, LMB_NONE);
	ut_asserteq(ret, 0);
	ret = lmb_reserve(alloc_addr_c, 0x10000, LMB_NONE);
	ut_asserteq(ret, 0);
	ASSERT_LMB(mem_lst, used_lst, ram, ram_size, 3, alloc_addr_a, 0x10000,
		   alloc_addr_b, 0x10000, alloc_addr_c, 0x10000);

	/* allocate blocks */
	a = lmb_alloc_addr(ram, alloc_addr_a - ram, LMB_NONE);
	ut_asserteq(a, 0);
	ASSERT_LMB(mem_lst, used_lst, ram, ram_size, 3, ram, 0x8010000,
		   alloc_addr_b, 0x10000, alloc_addr_c, 0x10000);
	b = lmb_alloc_addr(alloc_addr_a + 0x10000,
			   alloc_addr_b - alloc_addr_a - 0x10000, LMB_NONE);
	ut_asserteq(b, 0);
	ASSERT_LMB(mem_lst, used_lst, ram, ram_size, 2, ram, 0x10010000,
		   alloc_addr_c, 0x10000, 0, 0);
	c = lmb_alloc_addr(alloc_addr_b + 0x10000,
			   alloc_addr_c - alloc_addr_b - 0x10000, LMB_NONE);
	ut_asserteq(c, 0);
	ASSERT_LMB(mem_lst, used_lst, ram, ram_size, 1, ram, 0x18010000,
		   0, 0, 0, 0);
	d = lmb_alloc_addr(alloc_addr_c + 0x10000,
			   ram_end - alloc_addr_c - 0x10000, LMB_NONE);
	ut_asserteq(d, 0);
	ASSERT_LMB(mem_lst, used_lst, ram, ram_size, 1, ram, ram_size,
		   0, 0, 0, 0);

	/* allocating anything else should fail */
	e = lmb_alloc(1, 1);
	ut_asserteq(e, 0);
	ASSERT_LMB(mem_lst, used_lst, ram, ram_size, 1, ram, ram_size,
		   0, 0, 0, 0);

	/* free the allocation from d */
	ret = lmb_free(alloc_addr_c + 0x10000, ram_end - alloc_addr_c - 0x10000,
		       LMB_NONE);
	ut_asserteq(ret, 0);

	/* allocate at 3 points in free range */

	d = lmb_alloc_addr(ram_end - 4, 4, LMB_NONE);
	ut_asserteq(d, 0);
	ASSERT_LMB(mem_lst, used_lst, ram, ram_size, 2, ram, 0x18010000,
		   ram_end - 4, 4, 0, 0);
	ret = lmb_free(ram_end - 4, 4, LMB_NONE);
	ut_asserteq(ret, 0);
	ASSERT_LMB(mem_lst, used_lst, ram, ram_size, 1, ram, 0x18010000,
		   0, 0, 0, 0);

	d = lmb_alloc_addr(ram_end - 128, 4, LMB_NONE);
	ut_asserteq(d, 0);
	ASSERT_LMB(mem_lst, used_lst, ram, ram_size, 2, ram, 0x18010000,
		   ram_end - 128, 4, 0, 0);
	ret = lmb_free(ram_end - 128, 4, LMB_NONE);
	ut_asserteq(ret, 0);
	ASSERT_LMB(mem_lst, used_lst, ram, ram_size, 1, ram, 0x18010000,
		   0, 0, 0, 0);

	d = lmb_alloc_addr(alloc_addr_c + 0x10000, 4, LMB_NONE);
	ut_asserteq(d, 0);
	ASSERT_LMB(mem_lst, used_lst, ram, ram_size, 1, ram, 0x18010004,
		   0, 0, 0, 0);
	ret = lmb_free(alloc_addr_c + 0x10000, 4, LMB_NONE);
	ut_asserteq(ret, 0);
	ASSERT_LMB(mem_lst, used_lst, ram, ram_size, 1, ram, 0x18010000,
		   0, 0, 0, 0);

	/* free the lowest region (allocated as a), then allocate at the bottom of RAM */
	ret = lmb_free(ram, alloc_addr_a - ram, LMB_NONE);
	ut_asserteq(ret, 0);
	ASSERT_LMB(mem_lst, used_lst, ram, ram_size, 1, ram + 0x8000000,
		   0x10010000, 0, 0, 0, 0);

	d = lmb_alloc_addr(ram, 4, LMB_NONE);
	ut_asserteq(d, 0);
	ASSERT_LMB(mem_lst, used_lst, ram, ram_size, 2, ram, 4,
		   ram + 0x8000000, 0x10010000, 0, 0);

	/* check that allocating outside memory fails */
	if (ram_end != 0) {
		ret = lmb_alloc_addr(ram_end, 1, LMB_NONE);
		ut_asserteq(ret, -EINVAL);
	}
	if (ram != 0) {
		ret = lmb_alloc_addr(ram - 1, 1, LMB_NONE);
		ut_asserteq(ret, -EINVAL);
	}

	lmb_pop(&store);

	return 0;
}

static int lib_test_lmb_alloc_addr(struct unit_test_state *uts)
{
	int ret;

	/* simulate 512 MiB RAM beginning at 1GiB */
	ret = test_alloc_addr(uts, 0x40000000);
	if (ret)
		return ret;

	/* simulate 512 MiB RAM beginning at 1.5GiB */
	return test_alloc_addr(uts, 0xE0000000);
}
LIB_TEST(lib_test_lmb_alloc_addr, 0);

/* Simulate 512 MiB RAM, reserve 3 blocks, check addresses in between */
static int test_get_unreserved_size(struct unit_test_state *uts,
				    const phys_addr_t ram)
{
	struct lmb store;
	struct alist *mem_lst, *used_lst;
	const phys_size_t ram_size = 0x20000000;
	const phys_addr_t ram_end = ram + ram_size;
	const phys_size_t alloc_addr_a = ram + 0x8000000;
	const phys_size_t alloc_addr_b = ram + 0x8000000 * 2;
	const phys_size_t alloc_addr_c = ram + 0x8000000 * 3;
	long ret;
	phys_size_t s;

	/* check for overflow */
	ut_assert(ram_end == 0 || ram_end > ram);
	ut_assertok(setup_lmb_test(uts, &store, &mem_lst, &used_lst));

	ret = lmb_add(ram, ram_size);
	ut_asserteq(ret, 0);

	/* reserve 3 blocks */
	ret = lmb_reserve(alloc_addr_a, 0x10000, LMB_NONE);
	ut_asserteq(ret, 0);
	ret = lmb_reserve(alloc_addr_b, 0x10000, LMB_NONE);
	ut_asserteq(ret, 0);
	ret = lmb_reserve(alloc_addr_c, 0x10000, LMB_NONE);
	ut_asserteq(ret, 0);
	ASSERT_LMB(mem_lst, used_lst, ram, ram_size, 3, alloc_addr_a, 0x10000,
		   alloc_addr_b, 0x10000, alloc_addr_c, 0x10000);

	/* check addresses in between blocks */
	s = lmb_get_free_size(ram);
	ut_asserteq(s, alloc_addr_a - ram);
	s = lmb_get_free_size(ram + 0x10000);
	ut_asserteq(s, alloc_addr_a - ram - 0x10000);
	s = lmb_get_free_size(alloc_addr_a - 4);
	ut_asserteq(s, 4);

	s = lmb_get_free_size(alloc_addr_a + 0x10000);
	ut_asserteq(s, alloc_addr_b - alloc_addr_a - 0x10000);
	s = lmb_get_free_size(alloc_addr_a + 0x20000);
	ut_asserteq(s, alloc_addr_b - alloc_addr_a - 0x20000);
	s = lmb_get_free_size(alloc_addr_b - 4);
	ut_asserteq(s, 4);

	s = lmb_get_free_size(alloc_addr_c + 0x10000);
	ut_asserteq(s, ram_end - alloc_addr_c - 0x10000);
	s = lmb_get_free_size(alloc_addr_c + 0x20000);
	ut_asserteq(s, ram_end - alloc_addr_c - 0x20000);
	s = lmb_get_free_size(ram_end - 4);
	ut_asserteq(s, 4);

	lmb_pop(&store);

	return 0;
}

static int lib_test_lmb_get_free_size(struct unit_test_state *uts)
{
	int ret;

	/* simulate 512 MiB RAM beginning at 1GiB */
	ret = test_get_unreserved_size(uts, 0x40000000);
	if (ret)
		return ret;

	/* simulate 512 MiB RAM beginning at 1.5GiB */
	return test_get_unreserved_size(uts, 0xE0000000);
}
LIB_TEST(lib_test_lmb_get_free_size, 0);

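/* Check that region flags (LMB_NOMAP vs LMB_NONE) control merging of reservations */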
static int lib_test_lmb_flags(struct unit_test_state *uts)
{
	struct lmb store;
	struct lmb_region *mem, *used;
	struct alist *mem_lst, *used_lst;
	const phys_addr_t ram = 0x40000000;
	const phys_size_t ram_size = 0x20000000;
	long ret;

	ut_assertok(setup_lmb_test(uts, &store, &mem_lst, &used_lst));
	mem = mem_lst->data;
	used = used_lst->data;

	ret = lmb_add(ram, ram_size);
	ut_asserteq(ret, 0);

	/* reserve, same flag */
	ret = lmb_reserve(0x40010000, 0x10000, LMB_NOMAP);
	ut_asserteq(ret, 0);
	ASSERT_LMB(mem_lst, used_lst, ram, ram_size, 1, 0x40010000, 0x10000,
		   0, 0, 0, 0);

	/* reserve again, same flag */
	ret = lmb_reserve(0x40010000, 0x10000, LMB_NOMAP);
	ut_asserteq(ret, -EEXIST);
	ASSERT_LMB(mem_lst, used_lst, ram, ram_size, 1, 0x40010000, 0x10000,
		   0, 0, 0, 0);

	/* reserve again, new flag */
	ret = lmb_reserve(0x40010000, 0x10000, LMB_NONE);
	ut_asserteq(ret, -EEXIST);
	ASSERT_LMB(mem_lst, used_lst, ram, ram_size, 1, 0x40010000, 0x10000,
		   0, 0, 0, 0);

	ut_asserteq(lmb_is_nomap(&used[0]), 1);

	/* merge after */
	ret = lmb_reserve(0x40020000, 0x10000, LMB_NOMAP);
	ut_asserteq(ret, 0);
	ASSERT_LMB(mem_lst, used_lst, ram, ram_size, 1, 0x40010000, 0x20000,
		   0, 0, 0, 0);

	/* merge before */
	ret = lmb_reserve(0x40000000, 0x10000, LMB_NOMAP);
	ut_asserteq(ret, 0);
	ASSERT_LMB(mem_lst, used_lst, ram, ram_size, 1, 0x40000000, 0x30000,
		   0, 0, 0, 0);

	ut_asserteq(lmb_is_nomap(&used[0]), 1);

	ret = lmb_reserve(0x40030000, 0x10000, LMB_NONE);
	ut_asserteq(ret, 0);
	ASSERT_LMB(mem_lst, used_lst, ram, ram_size, 2, 0x40000000, 0x30000,
		   0x40030000, 0x10000, 0, 0);

	ut_asserteq(lmb_is_nomap(&used[0]), 1);
	ut_asserteq(lmb_is_nomap(&used[1]), 0);

	/* test that the old API uses LMB_NONE */
	ret = lmb_reserve(0x40040000, 0x10000, LMB_NONE);
	ut_asserteq(ret, 0);
	ASSERT_LMB(mem_lst, used_lst, ram, ram_size, 2, 0x40000000, 0x30000,
		   0x40030000, 0x20000, 0, 0);

	ut_asserteq(lmb_is_nomap(&used[0]), 1);
	ut_asserteq(lmb_is_nomap(&used[1]), 0);

	ret = lmb_reserve(0x40070000, 0x10000, LMB_NOMAP);
	ut_asserteq(ret, 0);
	ASSERT_LMB(mem_lst, used_lst, ram, ram_size, 3, 0x40000000, 0x30000,
		   0x40030000, 0x20000, 0x40070000, 0x10000);

	ret = lmb_reserve(0x40050000, 0x10000, LMB_NOMAP);
	ut_asserteq(ret, 0);
	ASSERT_LMB(mem_lst, used_lst, ram, ram_size, 4, 0x40000000, 0x30000,
		   0x40030000, 0x20000, 0x40050000, 0x10000);

	/* merge with 2 adjacent regions */
	ret = lmb_reserve(0x40060000, 0x10000, LMB_NOMAP);
	ut_asserteq(ret, 0);
	ASSERT_LMB(mem_lst, used_lst, ram, ram_size, 3, 0x40000000, 0x30000,
		   0x40030000, 0x20000, 0x40050000, 0x30000);

	ut_asserteq(lmb_is_nomap(&used[0]), 1);
	ut_asserteq(lmb_is_nomap(&used[1]), 0);
	ut_asserteq(lmb_is_nomap(&used[2]), 1);

	lmb_pop(&store);

	return 0;
}
LIB_TEST(lib_test_lmb_flags, 0);