Lines matching refs: size
75 phys_size_t size1 = rgn[r1].size; in lmb_regions_check()
77 phys_size_t size2 = rgn[r2].size; in lmb_regions_check()
97 rgn[i].size = rgn[i + 1].size; in lmb_remove_region()
109 rgn[r1].size += rgn[r2].size; in lmb_coalesce_regions()
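
The fragments above are the list-maintenance helpers: lmb_regions_check() compares the bases and sizes of neighbouring entries, lmb_remove_region() closes the gap left by a deleted entry, and lmb_coalesce_regions() folds entry r2 into the adjacent entry r1. A minimal sketch of the remove/coalesce pattern, using a hypothetical fixed-size array in place of U-Boot's actual region list type:

    #include <stddef.h>

    struct region { unsigned long base, size; };

    /* Hypothetical region list: cnt used entries in a flat array. */
    struct rlist { struct region rgn[8]; size_t cnt; };

    /* Delete entry i by shifting the following entries down one slot. */
    static void remove_region(struct rlist *l, size_t i)
    {
        for (; i + 1 < l->cnt; i++)
            l->rgn[i] = l->rgn[i + 1];
        l->cnt--;
    }

    /* Merge adjacent entry r2 into r1, then drop r2. */
    static void coalesce_regions(struct rlist *l, size_t r1, size_t r2)
    {
        l->rgn[r1].size += l->rgn[r2].size;
        remove_region(l, r2);
    }
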
115 phys_addr_t base, phys_size_t size) in lmb_resize_regions() argument
136 rgnsize = rgn[idx].size; in lmb_resize_regions()
138 if (lmb_addrs_overlap(base, size, rgnbase, in lmb_resize_regions()
151 rgnend = rgn[idx_end].base + rgn[idx_end].size; in lmb_resize_regions()
152 mergeend = max(rgnend, (base + size)); in lmb_resize_regions()
155 rgn[idx_start].size = mergeend - mergebase; in lmb_resize_regions()
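
lmb_resize_regions() deals with a new range that overlaps existing entries: the fragments show the overlap test and the merge arithmetic, where the surviving entry is stretched from the lowest base involved to the highest end (mergeend = max(rgnend, base + size)). A rough sketch of that merge step over the same hypothetical flat array as above; dropping the now-redundant entries afterwards is the job of the remove helper shown earlier:

    #include <stddef.h>

    struct region { unsigned long base, size; };

    /*
     * Merge the incoming [base, base + size) with the overlapping run of
     * entries rgn[idx_start .. idx_end] and store the result in
     * rgn[idx_start]; the caller then removes idx_start + 1 .. idx_end.
     */
    static void merge_span(struct region *rgn, size_t idx_start,
                           size_t idx_end, unsigned long base,
                           unsigned long size)
    {
        unsigned long rgnend = rgn[idx_end].base + rgn[idx_end].size;
        unsigned long mergebase = rgn[idx_start].base < base ?
                                  rgn[idx_start].base : base;
        unsigned long mergeend = rgnend > base + size ? rgnend : base + size;

        rgn[idx_start].base = mergebase;
        rgn[idx_start].size = mergeend - mergebase;
    }
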
182 phys_size_t size, u32 flags) in lmb_add_region_flags() argument
194 phys_size_t rgnsize = rgn[i].size; in lmb_add_region_flags()
197 ret = lmb_addrs_adjacent(base, size, rgnbase, rgnsize); in lmb_add_region_flags()
201 rgn[i].base -= size; in lmb_add_region_flags()
202 rgn[i].size += size; in lmb_add_region_flags()
208 rgn[i].size += size; in lmb_add_region_flags()
211 } else if (lmb_addrs_overlap(base, size, rgnbase, rgnsize)) { in lmb_add_region_flags()
212 ret = lmb_resize_regions(lmb_rgn_lst, i, base, size); in lmb_add_region_flags()
229 phys_size_t rgnsize = rgn[i].size; in lmb_add_region_flags()
254 rgn[i].size = size; in lmb_add_region_flags()
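
lmb_add_region_flags() walks the list and, for each existing entry, decides whether the new range is immediately below it (grow the entry downwards), immediately above it (grow it upwards), overlapping (hand off to lmb_resize_regions()), or disjoint from everything (insert a fresh entry, the rgn[i].size = size fragment). A condensed sketch of just the adjacency handling, with hypothetical helper names; the real code also checks that the flags of both regions are compatible before merging:

    struct region { unsigned long base, size; };

    /*
     * Adjacency test in the convention the listing's lmb_addrs_adjacent()
     * appears to use: > 0 when range 1 ends exactly where range 2 starts,
     * < 0 for the mirror case, 0 otherwise.
     */
    static int adjacent(unsigned long b1, unsigned long s1,
                        unsigned long b2, unsigned long s2)
    {
        if (b1 + s1 == b2)
            return 1;
        if (b2 + s2 == b1)
            return -1;
        return 0;
    }

    /* Grow an existing entry instead of inserting a new one when possible. */
    static int try_merge(struct region *r, unsigned long base,
                         unsigned long size)
    {
        int adj = adjacent(base, size, r->base, r->size);

        if (adj > 0) {          /* new range sits just below the entry */
            r->base -= size;
            r->size += size;
            return 1;
        }
        if (adj < 0) {          /* new range sits just above the entry */
            r->size += size;
            return 1;
        }
        return 0;               /* caller tries the overlap/insert paths */
    }
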
266 phys_size_t size) in _lmb_free() argument
270 phys_addr_t end = base + size - 1; in _lmb_free()
281 rgnend = rgnbegin + rgn[i].size - 1; in _lmb_free()
300 rgn[i].size -= size; in _lmb_free()
306 rgn[i].size -= size; in _lmb_free()
314 rgn[i].size = base - rgn[i].base; in _lmb_free()
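
_lmb_free() removes [base, base + size - 1] from a list and has to handle four layouts: the range covers a whole entry, it sits at the front of an entry, it sits at the back, or it sits in the middle, in which case the entry is split (the rgn[i].size = base - rgn[i].base fragment keeps the head, and a new entry is inserted for the tail). A minimal sketch of that case split over a hypothetical flat array; the checks for a range that does not match any entry are omitted:

    #include <stddef.h>

    struct region { unsigned long base, size; };
    struct rlist { struct region rgn[8]; size_t cnt; };

    static void remove_region(struct rlist *l, size_t i)
    {
        for (; i + 1 < l->cnt; i++)
            l->rgn[i] = l->rgn[i + 1];
        l->cnt--;
    }

    /* Insert a new entry after slot i (assumes the array has room). */
    static void insert_after(struct rlist *l, size_t i, unsigned long base,
                             unsigned long size)
    {
        for (size_t j = l->cnt; j > i + 1; j--)
            l->rgn[j] = l->rgn[j - 1];
        l->rgn[i + 1].base = base;
        l->rgn[i + 1].size = size;
        l->cnt++;
    }

    /* Carve [base, base + size) out of entry i. */
    static void free_from(struct rlist *l, size_t i, unsigned long base,
                          unsigned long size)
    {
        struct region *r = &l->rgn[i];
        unsigned long rend = r->base + r->size;

        if (base == r->base && size == r->size) {
            remove_region(l, i);          /* exact match: drop the entry   */
        } else if (base == r->base) {
            r->base += size;              /* trim the front                */
            r->size -= size;
        } else if (base + size == rend) {
            r->size -= size;              /* trim the back                 */
        } else {
            unsigned long tail = rend - (base + size);

            r->size = base - r->base;     /* keep the head ...             */
            insert_after(l, i, base + size, tail); /* ... and add the tail */
        }
    }
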
346 phys_size_t size, u32 flags, bool alloc) in lmb_overlap_checks() argument
353 phys_size_t rgnsize = rgn[i].size; in lmb_overlap_checks()
356 if (lmb_addrs_overlap(base, size, rgnbase, rgnsize)) { in lmb_overlap_checks()
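
lmb_overlap_checks() scans a list for the first entry that conflicts with a candidate range and returns -1 when nothing conflicts, which is how lmb_reserve() uses it further down; the real function also applies flag-dependent rules for when an overlap is tolerated. A stripped-down sketch of the scan with the flag handling left out; the interval test is the usual half-open overlap check the listing calls lmb_addrs_overlap():

    #include <stddef.h>

    struct region { unsigned long base, size; };

    /* Half-open interval overlap: [b1, b1 + s1) vs [b2, b2 + s2). */
    static int addrs_overlap(unsigned long b1, unsigned long s1,
                             unsigned long b2, unsigned long s2)
    {
        return b1 < b2 + s2 && b2 < b1 + s1;
    }

    /* Index of the first entry overlapping [base, base + size), or -1. */
    static long overlap_check(const struct region *rgn, size_t cnt,
                              unsigned long base, unsigned long size)
    {
        for (size_t i = 0; i < cnt; i++)
            if (addrs_overlap(base, size, rgn[i].base, rgn[i].size))
                return (long)i;
        return -1;
    }
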
398 long io_lmb_add(struct lmb *io_lmb, phys_addr_t base, phys_size_t size) in io_lmb_add() argument
400 return lmb_add_region_flags(&io_lmb->available_mem, base, size, LMB_NONE); in io_lmb_add()
404 phys_addr_t io_lmb_alloc(struct lmb *io_lmb, phys_size_t size, ulong align) in io_lmb_alloc() argument
414 phys_size_t lmbsize = lmb_memory[i].size; in io_lmb_alloc()
416 if (lmbsize < size) in io_lmb_alloc()
418 base = ALIGN_DOWN(lmbbase + lmbsize - size, align); in io_lmb_alloc()
421 rgn = lmb_overlap_checks(&io_lmb->used_mem, base, size, in io_lmb_alloc()
426 size, LMB_NONE) < 0) in io_lmb_alloc()
433 if (res_base < size) in io_lmb_alloc()
435 base = ALIGN_DOWN(res_base - size, align); in io_lmb_alloc()
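
io_lmb_alloc() allocates from the top of each I/O region downwards: the first candidate is the highest aligned block that still fits (ALIGN_DOWN(lmbbase + lmbsize - size, align)), and when that candidate collides with an entry in the used list the search continues just below the colliding reservation (the res_base fragments). A small runnable example of the alignment arithmetic, assuming ALIGN_DOWN() rounds an address down to a power-of-two boundary:

    #include <stdio.h>

    /* Assumed behaviour of the listing's ALIGN_DOWN(): round x down to a
     * multiple of a, where a is a power of two. */
    #define ALIGN_DOWN(x, a)  ((x) & ~((unsigned long)(a) - 1))

    int main(void)
    {
        /* A region at 0x80000000 of 0x10000000 bytes; request 0x2000
         * bytes aligned to 0x1000. */
        unsigned long lmbbase = 0x80000000ul, lmbsize = 0x10000000ul;
        unsigned long size = 0x2000ul, align = 0x1000ul;
        unsigned long base = ALIGN_DOWN(lmbbase + lmbsize - size, align);

        /* Prints 0x8fffe000: the block is carved from the top of the
         * region.  If it overlapped a reservation starting at res_base,
         * the next candidate would be ALIGN_DOWN(res_base - size, align). */
        printf("candidate base = 0x%lx\n", base);
        return 0;
    }
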
441 long io_lmb_free(struct lmb *io_lmb, phys_addr_t base, phys_size_t size) in io_lmb_free() argument
443 return _lmb_free(&io_lmb->used_mem, base, size); in io_lmb_free()
454 static int lmb_map_update_notify(phys_addr_t addr, phys_size_t size, in lmb_map_update_notify() argument
459 return efi_map_update_notify(addr, size, op); in lmb_map_update_notify()
488 unsigned long long base, size, end; in lmb_dump_region() local
496 size = rgn[i].size; in lmb_dump_region()
497 end = base + size - 1; in lmb_dump_region()
501 name, i, base, end, size); in lmb_dump_region()
520 static long lmb_reserve(phys_addr_t base, phys_size_t size, u32 flags) in lmb_reserve() argument
525 if (lmb_overlap_checks(lmb_rgn_lst, base, size, flags, false) != -1) in lmb_reserve()
528 ret = lmb_add_region_flags(lmb_rgn_lst, base, size, flags); in lmb_reserve()
532 return lmb_map_update_notify(base, size, LMB_MAP_OP_RESERVE, flags); in lmb_reserve()
557 if (!gd->bd->bi_dram[bank].size || in lmb_reserve_uboot_region()
562 gd->bd->bi_dram[bank].size - 1; in lmb_reserve_uboot_region()
615 phys_size_t size; in lmb_add_memory() local
627 size = bd->bi_dram[i].size; in lmb_add_memory()
628 bank_end = bd->bi_dram[i].start + size; in lmb_add_memory()
630 if (size) { in lmb_add_memory()
631 lmb_add(bd->bi_dram[i].start, size); in lmb_add_memory()
639 lmb_reserve(bd->bi_dram[i].start, size, in lmb_add_memory()
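
lmb_add_memory() feeds each non-empty DRAM bank from the board info into the available-memory list and keeps boot-time allocations below ram_top by reserving the slice of a bank that lies above it. A hedged sketch of just the above-ram_top computation, with a hypothetical dram_bank type standing in for bd->bi_dram; the real function handles the reservation flags, notification and edge cases differently:

    struct dram_bank { unsigned long start, size; };
    struct range { unsigned long base, size; };

    /*
     * Return the part of a bank that lies above ram_top, i.e. what the
     * caller would reserve as no-overwrite so that allocations stay
     * below ram_top.  A zero-sized range means nothing needs reserving.
     */
    static struct range slice_above_ram_top(struct dram_bank bank,
                                            unsigned long ram_top)
    {
        struct range r = { 0, 0 };
        unsigned long bank_end = bank.start + bank.size;

        if (!bank.size || bank_end <= ram_top)
            return r;               /* empty bank, or fully below ram_top */

        r.base = bank.start > ram_top ? bank.start : ram_top;
        r.size = bank_end - r.base;
        return r;
    }
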
649 long lmb_add(phys_addr_t base, phys_size_t size) in lmb_add() argument
654 ret = lmb_add_region_flags(lmb_rgn_lst, base, size, LMB_NONE); in lmb_add()
658 return lmb_map_update_notify(base, size, LMB_MAP_OP_ADD, LMB_NONE); in lmb_add()
661 long lmb_free(phys_addr_t base, phys_size_t size, u32 flags) in lmb_free() argument
665 ret = _lmb_free(&lmb.used_mem, base, size); in lmb_free()
669 return lmb_map_update_notify(base, size, LMB_MAP_OP_FREE, flags); in lmb_free()
672 static int _lmb_alloc_base(phys_size_t size, ulong align, in _lmb_alloc_base() argument
686 phys_size_t lmbsize = lmb_memory[i].size; in _lmb_alloc_base()
688 if (lmbsize < size) in _lmb_alloc_base()
692 base = ALIGN_DOWN(lmbbase + lmbsize - size, align); in _lmb_alloc_base()
698 base = ALIGN_DOWN(base - size, align); in _lmb_alloc_base()
704 rgn = lmb_overlap_checks(&lmb.used_mem, base, size, in _lmb_alloc_base()
709 size, flags)) in _lmb_alloc_base()
712 ret = lmb_map_update_notify(base, size, in _lmb_alloc_base()
722 if (res_base < size) in _lmb_alloc_base()
724 base = ALIGN_DOWN(res_base - size, align); in _lmb_alloc_base()
729 __func__, (ulong)size, (ulong)max_addr); in _lmb_alloc_base()
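
_lmb_alloc_base() is the general allocator behind these fragments: it walks the available regions starting from the highest one, clamps the candidate below max_addr, aligns it down, and checks it against the used list; when a reservation is in the way it retries just below that reservation, and only once a clear candidate is found does it record the allocation and fire the map-update notification. A condensed sketch of the search over hypothetical flat arrays, treating max_addr as an exclusive upper bound and leaving out the flag and notification handling:

    #include <stddef.h>

    struct region { unsigned long base, size; };

    #define ALIGN_DOWN(x, a)  ((x) & ~((unsigned long)(a) - 1))

    static int overlaps(unsigned long b1, unsigned long s1,
                        unsigned long b2, unsigned long s2)
    {
        return b1 < b2 + s2 && b2 < b1 + s1;
    }

    /* Index of the first used entry overlapping [base, base + size), or -1. */
    static long first_conflict(const struct region *used, size_t nused,
                               unsigned long base, unsigned long size)
    {
        for (size_t i = 0; i < nused; i++)
            if (overlaps(base, size, used[i].base, used[i].size))
                return (long)i;
        return -1;
    }

    /*
     * Top-down search: walk the memory regions from the highest one, clamp
     * below max_addr, and step below any reservation that is in the way.
     * Returns the chosen base, or 0 on failure (standing in for the real
     * code's error handling).
     */
    static unsigned long alloc_base(const struct region *mem, size_t nmem,
                                    const struct region *used, size_t nused,
                                    unsigned long size, unsigned long align,
                                    unsigned long max_addr)
    {
        for (size_t i = nmem; i-- > 0; ) {
            unsigned long lmbbase = mem[i].base, lmbsize = mem[i].size;
            unsigned long top = lmbbase + lmbsize;
            unsigned long base;

            if (lmbsize < size)
                continue;
            if (top > max_addr)
                top = max_addr;
            if (top < lmbbase + size)
                continue;            /* region lies above max_addr      */

            base = ALIGN_DOWN(top - size, align);
            while (base >= lmbbase) {
                long c = first_conflict(used, nused, base, size);

                if (c < 0)
                    return base;     /* free, aligned block found       */
                if (used[c].base < size)
                    break;           /* cannot step any lower           */
                base = ALIGN_DOWN(used[c].base - size, align);
            }
        }
        return 0;
    }
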
734 static int _lmb_alloc_addr(phys_addr_t base, phys_size_t size, u32 flags) in _lmb_alloc_addr() argument
740 rgn = lmb_overlap_checks(&lmb.available_mem, base, size, in _lmb_alloc_addr()
748 lmb_memory[rgn].size, in _lmb_alloc_addr()
749 base + size - 1, 1)) in _lmb_alloc_addr()
751 return lmb_reserve(base, size, flags); in _lmb_alloc_addr()
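
_lmb_alloc_addr() serves requests for a fixed address: the fragments show it finding the available region that overlaps the start of the range, checking that the last byte (base + size - 1) still falls inside that same region, and only then calling lmb_reserve(). The equivalent containment test, as a minimal sketch:

    struct region { unsigned long base, size; };

    /* True when [base, base + size - 1] lies entirely inside region r,
     * the condition _lmb_alloc_addr() establishes before reserving. */
    static int range_in_region(const struct region *r, unsigned long base,
                               unsigned long size)
    {
        return base >= r->base &&
               base + size - 1 <= r->base + r->size - 1;
    }
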
758 phys_size_t size, u32 flags) in lmb_alloc_mem() argument
762 if (!size) in lmb_alloc_mem()
773 ret = _lmb_alloc_base(size, align, addr, flags); in lmb_alloc_mem()
776 ret = _lmb_alloc_addr(*addr, size, flags); in lmb_alloc_mem()
804 lmb_used[i].size > addr) { in lmb_get_free_size()
811 lmb_memory[lmb.available_mem.count - 1].size - addr; in lmb_get_free_size()
823 lmb_used[i].size - 1; in lmb_is_reserved_flags()
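
The last fragments come from the query helpers: lmb_get_free_size() reports how many bytes are free starting at an address (the gap up to the next reservation, or up to the end of the highest memory region when nothing is reserved above it), and lmb_is_reserved_flags() tests whether an address falls inside a reservation carrying particular flags. A compact sketch of both queries, assuming the used list is sorted by base address; the exact flag comparison in the real code may differ:

    #include <stddef.h>

    struct region { unsigned long base, size, flags; };

    /* Free bytes starting at addr: the gap up to the next reservation, or
     * up to mem_end (exclusive end of the highest memory region). */
    static unsigned long free_size_at(const struct region *used, size_t nused,
                                      unsigned long addr, unsigned long mem_end)
    {
        for (size_t i = 0; i < nused; i++) {
            if (addr >= used[i].base &&
                addr <= used[i].base + used[i].size - 1)
                return 0;                    /* addr is already reserved  */
            if (addr < used[i].base)
                return used[i].base - addr;  /* gap up to the next entry  */
        }
        return mem_end - addr;               /* nothing reserved above    */
    }

    /* Does addr fall inside a reservation carrying any of 'flags'?
     * (simplified flag test) */
    static int is_reserved_flags(const struct region *used, size_t nused,
                                 unsigned long addr, unsigned long flags)
    {
        for (size_t i = 0; i < nused; i++) {
            unsigned long end = used[i].base + used[i].size - 1;

            if (addr >= used[i].base && addr <= end)
                return !!(used[i].flags & flags);
        }
        return 0;
    }
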