// SPDX-License-Identifier: GPL-2.0+
/*
 * Procedures for maintaining information about logical memory blocks.
 *
 * Peter Bergner, IBM Corp.	June 2001.
 * Copyright (C) 2001 Peter Bergner.
 */

#include <common.h>
#include <efi_loader.h>
#include <image.h>
#include <mapmem.h>
#include <lmb.h>
#include <log.h>
#include <malloc.h>

#include <asm/global_data.h>
#include <asm/sections.h>

DECLARE_GLOBAL_DATA_PTR;

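/*
 * Passing LMB_ALLOC_ANYWHERE as max_addr to lmb_alloc_base() places no
 * upper bound on where the allocation may be placed.
 */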
#define LMB_ALLOC_ANYWHERE	0

static void lmb_dump_region(struct lmb_region *rgn, char *name)
{
	unsigned long long base, size, end;
	enum lmb_flags flags;
	int i;

	printf(" %s.cnt = 0x%lx / max = 0x%lx\n", name, rgn->cnt, rgn->max);

	for (i = 0; i < rgn->cnt; i++) {
		base = rgn->region[i].base;
		size = rgn->region[i].size;
		end = base + size - 1;
		flags = rgn->region[i].flags;

		printf(" %s[%d]\t[0x%llx-0x%llx], 0x%08llx bytes flags: %x\n",
		       name, i, base, end, size, flags);
	}
}

void lmb_dump_all_force(struct lmb *lmb)
{
	printf("lmb_dump_all:\n");
	lmb_dump_region(&lmb->memory, "memory");
	lmb_dump_region(&lmb->reserved, "reserved");
}

void lmb_dump_all(struct lmb *lmb)
{
#ifdef DEBUG
	lmb_dump_all_force(lmb);
#endif
}

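/*
 * Check whether two address ranges overlap: returns non-zero if
 * [base1, base1 + size1) and [base2, base2 + size2) share any byte.
 */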
static long lmb_addrs_overlap(phys_addr_t base1, phys_size_t size1,
			      phys_addr_t base2, phys_size_t size2)
{
	const phys_addr_t base1_end = base1 + size1 - 1;
	const phys_addr_t base2_end = base2 + size2 - 1;

	return ((base1 <= base2_end) && (base2 <= base1_end));
}

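/*
 * Check whether two address ranges are adjacent: returns 1 if range 2
 * immediately follows range 1, -1 if range 1 immediately follows range 2,
 * and 0 otherwise.
 */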
static long lmb_addrs_adjacent(phys_addr_t base1, phys_size_t size1,
			       phys_addr_t base2, phys_size_t size2)
{
	if (base2 == base1 + size1)
		return 1;
	else if (base1 == base2 + size2)
		return -1;

	return 0;
}

static long lmb_regions_adjacent(struct lmb_region *rgn, unsigned long r1,
				 unsigned long r2)
{
	phys_addr_t base1 = rgn->region[r1].base;
	phys_size_t size1 = rgn->region[r1].size;
	phys_addr_t base2 = rgn->region[r2].base;
	phys_size_t size2 = rgn->region[r2].size;

	return lmb_addrs_adjacent(base1, size1, base2, size2);
}

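/* Delete entry r from the region table, shifting all later entries down */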
static void lmb_remove_region(struct lmb_region *rgn, unsigned long r)
{
	unsigned long i;

	for (i = r; i < rgn->cnt - 1; i++) {
		rgn->region[i].base = rgn->region[i + 1].base;
		rgn->region[i].size = rgn->region[i + 1].size;
		rgn->region[i].flags = rgn->region[i + 1].flags;
	}
	rgn->cnt--;
}

/* Assumption: base addr of region 1 < base addr of region 2 */
static void lmb_coalesce_regions(struct lmb_region *rgn, unsigned long r1,
				 unsigned long r2)
{
	rgn->region[r1].size += rgn->region[r2].size;
	lmb_remove_region(rgn, r2);
}

void lmb_init(struct lmb *lmb)
{
#if IS_ENABLED(CONFIG_LMB_USE_MAX_REGIONS)
	lmb->memory.max = CONFIG_LMB_MAX_REGIONS;
	lmb->reserved.max = CONFIG_LMB_MAX_REGIONS;
#else
	lmb->memory.max = CONFIG_LMB_MEMORY_REGIONS;
	lmb->reserved.max = CONFIG_LMB_RESERVED_REGIONS;
	lmb->memory.region = lmb->memory_regions;
	lmb->reserved.region = lmb->reserved_regions;
#endif
	lmb->memory.cnt = 0;
	lmb->reserved.cnt = 0;
}

void arch_lmb_reserve_generic(struct lmb *lmb, ulong sp, ulong end, ulong align)
{
	ulong bank_end;
	int bank;

	/*
	 * Reserve memory from aligned address below the bottom of U-Boot stack
	 * until end of U-Boot area using LMB to prevent U-Boot from overwriting
	 * that memory.
	 */
	debug("## Current stack ends at 0x%08lx ", sp);

	/* adjust sp by align bytes to be safe */
	sp -= align;
	for (bank = 0; bank < CONFIG_NR_DRAM_BANKS; bank++) {
		if (!gd->bd->bi_dram[bank].size ||
		    sp < gd->bd->bi_dram[bank].start)
			continue;
		/* Watch out for RAM at end of address space! */
		bank_end = gd->bd->bi_dram[bank].start +
			   gd->bd->bi_dram[bank].size - 1;
		if (sp > bank_end)
			continue;
		if (bank_end > end)
			bank_end = end - 1;

		lmb_reserve(lmb, sp, bank_end - sp + 1);

		if (gd->flags & GD_FLG_SKIP_RELOC)
			lmb_reserve(lmb, (phys_addr_t)(uintptr_t)_start,
				    gd->mon_len);

		break;
	}
}

/**
 * efi_lmb_reserve() - add reservations for EFI memory
 *
 * Add reservations for all EFI memory areas that are not
 * EFI_CONVENTIONAL_MEMORY.
 *
 * @lmb:	lmb environment
 * Return:	0 on success, 1 on failure
 */
static __maybe_unused int efi_lmb_reserve(struct lmb *lmb)
{
	struct efi_mem_desc *memmap = NULL, *map;
	efi_uintn_t i, map_size = 0;
	efi_status_t ret;

	ret = efi_get_memory_map_alloc(&map_size, &memmap);
	if (ret != EFI_SUCCESS)
		return 1;

	for (i = 0, map = memmap; i < map_size / sizeof(*map); ++map, ++i) {
		if (map->type != EFI_CONVENTIONAL_MEMORY) {
			lmb_reserve_flags(lmb,
					  map_to_sysmem((void *)(uintptr_t)
							map->physical_start),
					  map->num_pages * EFI_PAGE_SIZE,
					  map->type == EFI_RESERVED_MEMORY_TYPE
					  ? LMB_NOMAP : LMB_NONE);
		}
	}
	efi_free_pool(memmap);

	return 0;
}

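/*
 * Apply the standard reservations: arch and board hooks, /memreserve/
 * entries from the FDT, and the non-conventional EFI memory regions.
 */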
static void lmb_reserve_common(struct lmb *lmb, void *fdt_blob)
{
	arch_lmb_reserve(lmb);
	board_lmb_reserve(lmb);

	if (CONFIG_IS_ENABLED(OF_LIBFDT) && fdt_blob)
		boot_fdt_add_mem_rsv_regions(lmb, fdt_blob);

	if (CONFIG_IS_ENABLED(EFI_LOADER))
		efi_lmb_reserve(lmb);
}

/* Initialize the struct, add memory and call arch/board reserve functions */
void lmb_init_and_reserve(struct lmb *lmb, struct bd_info *bd, void *fdt_blob)
{
	int i;

	lmb_init(lmb);

	for (i = 0; i < CONFIG_NR_DRAM_BANKS; i++) {
		if (bd->bi_dram[i].size) {
			lmb_add(lmb, bd->bi_dram[i].start,
				bd->bi_dram[i].size);
		}
	}

	lmb_reserve_common(lmb, fdt_blob);
}


/* Initialize the struct, add a single memory range and call arch/board
 * reserve functions
 */
void lmb_init_and_reserve_range(struct lmb *lmb, phys_addr_t base,
				phys_size_t size, void *fdt_blob)
{
	lmb_init(lmb);
	lmb_add(lmb, base, size);
	lmb_reserve_common(lmb, fdt_blob);
}

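/*
 * A minimal usage sketch (illustrative only; the addresses and sizes below
 * are made-up example values, not taken from any real board):
 *
 *	struct lmb lmb;
 *	phys_addr_t addr;
 *
 *	lmb_init_and_reserve_range(&lmb, 0x80000000, 0x10000000, NULL);
 *	addr = lmb_alloc(&lmb, 0x100000, 0x1000);
 *	if (!addr)
 *		printf("LMB allocation failed\n");
 */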
/* This routine is called with relocation disabled. */
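/*
 * Returns 0 if the region was added (or was already present), > 0 if it
 * was coalesced with one or more existing regions, -1 on failure.
 */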
static long lmb_add_region_flags(struct lmb_region *rgn, phys_addr_t base,
				 phys_size_t size, enum lmb_flags flags)
{
	unsigned long coalesced = 0;
	long adjacent, i;

	if (rgn->cnt == 0) {
		rgn->region[0].base = base;
		rgn->region[0].size = size;
		rgn->region[0].flags = flags;
		rgn->cnt = 1;
		return 0;
	}

	/* First try and coalesce this LMB with another. */
	for (i = 0; i < rgn->cnt; i++) {
		phys_addr_t rgnbase = rgn->region[i].base;
		phys_size_t rgnsize = rgn->region[i].size;
		phys_size_t rgnflags = rgn->region[i].flags;
		phys_addr_t end = base + size - 1;
		phys_addr_t rgnend = rgnbase + rgnsize - 1;

		if (rgnbase <= base && end <= rgnend) {
			if (flags == rgnflags)
				/* Already have this region, so we're done */
				return 0;
			else
				/* region exists, but with different flags */
				return -1;
		}

		adjacent = lmb_addrs_adjacent(base, size, rgnbase, rgnsize);
		if (adjacent > 0) {
			if (flags != rgnflags)
				break;
			rgn->region[i].base -= size;
			rgn->region[i].size += size;
			coalesced++;
			break;
		} else if (adjacent < 0) {
			if (flags != rgnflags)
				break;
			rgn->region[i].size += size;
			coalesced++;
			break;
		} else if (lmb_addrs_overlap(base, size, rgnbase, rgnsize)) {
			/* regions overlap */
			return -1;
		}
	}

	if ((i < rgn->cnt - 1) && lmb_regions_adjacent(rgn, i, i + 1)) {
		if (rgn->region[i].flags == rgn->region[i + 1].flags) {
			lmb_coalesce_regions(rgn, i, i + 1);
			coalesced++;
		}
	}

	if (coalesced)
		return coalesced;
	if (rgn->cnt >= rgn->max)
		return -1;

	/* Couldn't coalesce the LMB, so add it to the sorted table. */
	for (i = rgn->cnt - 1; i >= 0; i--) {
		if (base < rgn->region[i].base) {
			rgn->region[i + 1].base = rgn->region[i].base;
			rgn->region[i + 1].size = rgn->region[i].size;
			rgn->region[i + 1].flags = rgn->region[i].flags;
		} else {
			rgn->region[i + 1].base = base;
			rgn->region[i + 1].size = size;
			rgn->region[i + 1].flags = flags;
			break;
		}
	}

	if (base < rgn->region[0].base) {
		rgn->region[0].base = base;
		rgn->region[0].size = size;
		rgn->region[0].flags = flags;
	}

	rgn->cnt++;

	return 0;
}

static long lmb_add_region(struct lmb_region *rgn, phys_addr_t base,
			   phys_size_t size)
{
	return lmb_add_region_flags(rgn, base, size, LMB_NONE);
}

/* This routine may be called with relocation disabled. */
long lmb_add(struct lmb *lmb, phys_addr_t base, phys_size_t size)
{
	struct lmb_region *_rgn = &(lmb->memory);

	return lmb_add_region(_rgn, base, size);
}

long lmb_free(struct lmb *lmb, phys_addr_t base, phys_size_t size)
{
	struct lmb_region *rgn = &(lmb->reserved);
	phys_addr_t rgnbegin, rgnend;
	phys_addr_t end = base + size - 1;
	int i;

	rgnbegin = rgnend = 0; /* suppress gcc warnings */

	/* Find the region where (base, size) belongs to */
	for (i = 0; i < rgn->cnt; i++) {
		rgnbegin = rgn->region[i].base;
		rgnend = rgnbegin + rgn->region[i].size - 1;

		if ((rgnbegin <= base) && (end <= rgnend))
			break;
	}

	/* Didn't find the region */
	if (i == rgn->cnt)
		return -1;

	/* Check to see if we are removing the entire region */
	if ((rgnbegin == base) && (rgnend == end)) {
		lmb_remove_region(rgn, i);
		return 0;
	}

	/* Check to see if the region is matching at the front */
	if (rgnbegin == base) {
		rgn->region[i].base = end + 1;
		rgn->region[i].size -= size;
		return 0;
	}

	/* Check to see if the region is matching at the end */
	if (rgnend == end) {
		rgn->region[i].size -= size;
		return 0;
	}

	/*
	 * We need to split the entry - adjust the current one to the
	 * beginning of the hole and add the region after the hole.
	 */
	rgn->region[i].size = base - rgn->region[i].base;
	return lmb_add_region_flags(rgn, end + 1, rgnend - end,
				    rgn->region[i].flags);
}

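/**
 * lmb_reserve_flags() - reserve one region with a specific flags bitfield
 *
 * @lmb:	the logical memory block struct
 * @base:	base address of the memory region
 * @size:	size of the memory region
 * @flags:	flags for the memory region
 * Return:	0 if OK, > 0 for coalesced region, or a negative error code
 */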
long lmb_reserve_flags(struct lmb *lmb, phys_addr_t base, phys_size_t size,
		       enum lmb_flags flags)
{
	struct lmb_region *_rgn = &(lmb->reserved);

	return lmb_add_region_flags(_rgn, base, size, flags);
}

long lmb_reserve(struct lmb *lmb, phys_addr_t base, phys_size_t size)
{
	return lmb_reserve_flags(lmb, base, size, LMB_NONE);
}

static long lmb_overlaps_region(struct lmb_region *rgn, phys_addr_t base,
				phys_size_t size)
{
	unsigned long i;

	for (i = 0; i < rgn->cnt; i++) {
		phys_addr_t rgnbase = rgn->region[i].base;
		phys_size_t rgnsize = rgn->region[i].size;

		if (lmb_addrs_overlap(base, size, rgnbase, rgnsize))
			break;
	}

	return (i < rgn->cnt) ? i : -1;
}

phys_addr_t lmb_alloc(struct lmb *lmb, phys_size_t size, ulong align)
{
	return lmb_alloc_base(lmb, size, align, LMB_ALLOC_ANYWHERE);
}

phys_addr_t lmb_alloc_base(struct lmb *lmb, phys_size_t size, ulong align,
			   phys_addr_t max_addr)
{
	phys_addr_t alloc;

	alloc = __lmb_alloc_base(lmb, size, align, max_addr);

	if (alloc == 0)
		printf("ERROR: Failed to allocate 0x%lx bytes below 0x%lx.\n",
		       (ulong)size, (ulong)max_addr);

	return alloc;
}

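/* Round addr down to a multiple of size; size must be a power of two */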
static phys_addr_t lmb_align_down(phys_addr_t addr, phys_size_t size)
{
	return addr & ~(size - 1);
}

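/*
 * Walk the memory regions from the top down and return the highest
 * aligned address below max_addr that does not overlap any reservation;
 * the chosen range is added to the reserved list before returning.
 */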
phys_addr_t __lmb_alloc_base(struct lmb *lmb, phys_size_t size, ulong align,
			     phys_addr_t max_addr)
{
	long i, rgn;
	phys_addr_t base = 0;
	phys_addr_t res_base;

	for (i = lmb->memory.cnt - 1; i >= 0; i--) {
		phys_addr_t lmbbase = lmb->memory.region[i].base;
		phys_size_t lmbsize = lmb->memory.region[i].size;

		if (lmbsize < size)
			continue;
		if (max_addr == LMB_ALLOC_ANYWHERE)
			base = lmb_align_down(lmbbase + lmbsize - size, align);
		else if (lmbbase < max_addr) {
			base = lmbbase + lmbsize;
			if (base < lmbbase)
				base = -1;
			base = min(base, max_addr);
			base = lmb_align_down(base - size, align);
		} else
			continue;

		while (base && lmbbase <= base) {
			rgn = lmb_overlaps_region(&lmb->reserved, base, size);
			if (rgn < 0) {
				/* This area isn't reserved, take it */
				if (lmb_add_region(&lmb->reserved, base,
						   size) < 0)
					return 0;
				return base;
			}
			res_base = lmb->reserved.region[rgn].base;
			if (res_base < size)
				break;
			base = lmb_align_down(res_base - size, align);
		}
	}
	return 0;
}

/*
 * Try to allocate a specific address range: must be in defined memory but not
 * reserved
 */
phys_addr_t lmb_alloc_addr(struct lmb *lmb, phys_addr_t base, phys_size_t size)
{
	long rgn;

	/* Check if the requested address is in one of the memory regions */
	rgn = lmb_overlaps_region(&lmb->memory, base, size);
	if (rgn >= 0) {
		/*
		 * Check if the requested end address is in the same memory
		 * region we found.
		 */
		if (lmb_addrs_overlap(lmb->memory.region[rgn].base,
				      lmb->memory.region[rgn].size,
				      base + size - 1, 1)) {
			/* ok, reserve the memory */
			if (lmb_reserve(lmb, base, size) >= 0)
				return base;
		}
	}
	return 0;
}

/* Return number of bytes from a given address that are free */
phys_size_t lmb_get_free_size(struct lmb *lmb, phys_addr_t addr)
{
	int i;
	long rgn;

	/* check if the requested address is in the memory regions */
	rgn = lmb_overlaps_region(&lmb->memory, addr, 1);
	if (rgn >= 0) {
		for (i = 0; i < lmb->reserved.cnt; i++) {
			if (addr < lmb->reserved.region[i].base) {
				/* first reserved range > requested address */
				return lmb->reserved.region[i].base - addr;
			}
			if (lmb->reserved.region[i].base +
			    lmb->reserved.region[i].size > addr) {
				/* requested addr is in this reserved range */
				return 0;
			}
		}
		/* if we come here: no reserved ranges above requested addr */
		return lmb->memory.region[lmb->memory.cnt - 1].base +
		       lmb->memory.region[lmb->memory.cnt - 1].size - addr;
	}
	return 0;
}

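/*
 * Return 1 if addr lies in a reserved region whose flags include all bits
 * in @flags, 0 otherwise.
 */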
int lmb_is_reserved_flags(struct lmb *lmb, phys_addr_t addr, int flags)
{
	int i;

	for (i = 0; i < lmb->reserved.cnt; i++) {
		phys_addr_t upper = lmb->reserved.region[i].base +
				    lmb->reserved.region[i].size - 1;

		if ((addr >= lmb->reserved.region[i].base) && (addr <= upper))
			return (lmb->reserved.region[i].flags & flags) == flags;
	}
	return 0;
}

int lmb_is_reserved(struct lmb *lmb, phys_addr_t addr)
{
	return lmb_is_reserved_flags(lmb, addr, LMB_NONE);
}

__weak void board_lmb_reserve(struct lmb *lmb)
{
	/* please define platform specific board_lmb_reserve() */
}

__weak void arch_lmb_reserve(struct lmb *lmb)
{
	/* please define platform specific arch_lmb_reserve() */
}