Lines Matching refs:size
31 _kernel_aspace.size = KERNEL_ASPACE_SIZE; in vmm_init_preheap()
44 return (vaddr >= aspace->base && vaddr <= aspace->base + aspace->size - 1); in is_inside_aspace()
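The containment test on line 44 is written against an inclusive end address. A minimal sketch of why, with a stand-in struct reduced to the two fields the listing actually uses (this is not the kernel's full vmm_aspace_t):

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>

    typedef uintptr_t vaddr_t;

    struct aspace {
        vaddr_t base;
        size_t size;
    };

    /* base + size - 1 is the last valid address; comparing against the
     * inclusive end avoids computing base + size, which wraps to 0 when
     * the aspace runs to the very top of the address space. */
    static bool is_inside(const struct aspace *as, vaddr_t va)
    {
        return va >= as->base && va <= as->base + as->size - 1;
    }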
47 static bool is_region_inside_aspace(const vmm_aspace_t *aspace, vaddr_t vaddr, size_t size) { in is_region_inside_aspace() argument
52 if (size == 0) in is_region_inside_aspace()
56 if (vaddr + size - 1 < vaddr) in is_region_inside_aspace()
60 if (vaddr + size - 1 > aspace->base + aspace->size - 1) in is_region_inside_aspace()
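is_region_inside_aspace extends the same idea to a range. In the sketch below, the size == 0 and wraparound guards come straight from lines 52-60; the return values and the starting-point check are assumptions about lines the search did not match:

    static bool is_region_inside(const struct aspace *as,
                                 vaddr_t va, size_t size)
    {
        /* the start must itself be inside (assumed elided check) */
        if (!is_inside(as, va))
            return false;

        /* a zero-sized region degenerates to the point test (assumed) */
        if (size == 0)
            return true;

        /* va + size - 1 < va exactly when the range wraps the top of
         * the address space (line 56) */
        if (va + size - 1 < va)
            return false;

        /* the inclusive end must not pass the aspace's inclusive end
         * (line 60) */
        if (va + size - 1 > as->base + as->size - 1)
            return false;

        return true;
    }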
66 static size_t trim_to_aspace(const vmm_aspace_t *aspace, vaddr_t vaddr, size_t size) { in trim_to_aspace() argument
69 if (size == 0) in trim_to_aspace()
70 return size; in trim_to_aspace()
77 if (offset + size < offset) in trim_to_aspace()
78 size = ULONG_MAX - offset - 1; in trim_to_aspace()
82 if (offset + size >= aspace->size - 1) in trim_to_aspace()
83 size = aspace->size - offset; in trim_to_aspace()
87 return size; in trim_to_aspace()
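trim_to_aspace clamps a caller-supplied range so later code can assume it fits: first against arithmetic wraparound (lines 77-78), then against the end of the aspace (lines 82-83). A sketch reusing the stand-ins above; note that the second comparison, as written, also fires when the range stops exactly one byte short of the aspace end and then grows size by one, a case page-aligned inputs can never reach:

    #include <limits.h>     /* ULONG_MAX, plus the stand-ins above */

    static size_t trim_sketch(const struct aspace *as, vaddr_t va,
                              size_t size)
    {
        if (size == 0)
            return 0;

        size_t offset = va - as->base;   /* start relative to the aspace */

        /* if offset + size wraps, pull size back below the wrap point */
        if (offset + size < offset)
            size = ULONG_MAX - offset - 1;

        /* clamp the range to end no later than the aspace does */
        if (offset + size >= as->size - 1)
            size = as->size - offset;

        return size;
    }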
90 static vmm_region_t *alloc_region_struct(const char *name, vaddr_t base, size_t size, in alloc_region_struct() argument
100 r->size = size; in alloc_region_struct()
115 aspace, aspace->base, aspace->size, r, r->base, r->size); in add_region_to_aspace()
118 if (r->size == 0 || !is_region_inside_aspace(aspace, r->base, r->size)) { in add_region_to_aspace()
123 vaddr_t r_end = r->base + r->size - 1; in add_region_to_aspace()
137 if (r->base > last->base + last->size - 1) { in add_region_to_aspace()
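add_region_to_aspace rejects invalid regions (line 118), computes the region's inclusive end (line 123), and keeps the list sorted by base address, with a fast path that appends when the new region starts past the current tail (line 137). A stand-in sketch of that invariant using a singly linked list in place of LK's list_node plumbing:

    struct region {
        struct region *next;
        vaddr_t base;
        size_t size;
    };

    /* Insert r into a list kept sorted by base, refusing overlaps. */
    static bool add_region_sorted(struct region **head, struct region *r)
    {
        vaddr_t r_end = r->base + r->size - 1;      /* inclusive, line 123 */

        struct region **pp = head;
        for (; *pp; pp = &(*pp)->next) {
            if (r->base > (*pp)->base + (*pp)->size - 1)
                continue;                           /* r lies past *pp */
            if (r_end < (*pp)->base)
                break;                              /* r fits before *pp */
            return false;                           /* ranges overlap */
        }
        r->next = *pp;                              /* append or splice in */
        *pp = r;
        return true;
    }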
159 vaddr_t align, size_t size, uint arch_mmu_flags) { in arch_mmu_pick_spot() argument
169 vaddr_t *pva, vaddr_t align, size_t size, in check_gap() argument
177 gap_beg = prev->base + prev->size; in check_gap()
186 if (gap_beg == (aspace->base + aspace->size)) in check_gap()
188 gap_end = aspace->base + aspace->size - 1; in check_gap()
193 align, size, arch_mmu_flags); in check_gap()
197 if (*pva < gap_end && ((gap_end - *pva + 1) >= size)) { in check_gap()
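check_gap measures the hole between the end of one region and the start of the next, falling back to the aspace edges when a neighbor is missing (lines 177-188), then asks arch_mmu_pick_spot for a candidate address and tests it against the inclusive gap end (line 197). A condensed sketch with the arch hook reduced to plain power-of-two alignment; align is assumed to be 1 << align_pow2:

    /* align must be a power of two */
    #define ALIGN_UP(x, a)  (((x) + ((a) - 1)) & ~((a) - 1))

    static bool check_gap_sketch(vaddr_t gap_beg, vaddr_t gap_end,
                                 vaddr_t *pva, vaddr_t align, size_t size)
    {
        if (gap_beg > gap_end)              /* no gap at all */
            return false;

        /* stand-in for arch_mmu_pick_spot (line 159), which may also
         * move or veto spots for architecture-specific reasons */
        vaddr_t spot = ALIGN_UP(gap_beg, align);
        if (spot < gap_beg)                 /* alignment wrapped around */
            return false;

        /* line 197: fits if the inclusive gap end leaves enough room */
        if (spot < gap_end && (size_t)(gap_end - spot + 1) >= size) {
            *pva = spot;
            return true;
        }
        return false;
    }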
210 static vaddr_t alloc_spot(vmm_aspace_t *aspace, size_t size, uint8_t align_pow2, in alloc_spot() argument
213 DEBUG_ASSERT(size > 0 && IS_PAGE_ALIGNED(size)); in alloc_spot()
215 LTRACEF("aspace %p size 0x%zx align %hhu\n", aspace, size, align_pow2); in alloc_spot()
227 &spot, align, size, arch_mmu_flags)) in alloc_spot()
234 &spot, align, size, arch_mmu_flags)) in alloc_spot()
248 static vmm_region_t *alloc_region(vmm_aspace_t *aspace, const char *name, size_t size, in alloc_region() argument
252 vmm_region_t *r = alloc_region_struct(name, vaddr, size, region_flags, arch_mmu_flags); in alloc_region()
268 vaddr = alloc_spot(aspace, size, align_pow2, arch_mmu_flags, &before); in alloc_region()
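alloc_spot (line 210) walks the sorted region list and takes the first gap that satisfies the request; alloc_region (line 248) then stamps the winning address into the freshly built region struct (lines 252, 268) and links it into the aspace. A sketch of the walk, reusing the stand-ins above; the edge handling that the real check_gap does internally is hoisted into the caller here, and the (vaddr_t)-1 failure sentinel is an assumption, since the listing never shows the real one:

    static vaddr_t alloc_spot_sketch(const struct aspace *as,
                                     const struct region *head,
                                     size_t size, vaddr_t align)
    {
        vaddr_t spot;
        vaddr_t gap_beg = as->base;

        for (const struct region *r = head; r; r = r->next) {
            /* the gap ends just before this region begins */
            if (r->base > gap_beg &&
                check_gap_sketch(gap_beg, r->base - 1, &spot, align, size))
                return spot;
            gap_beg = r->base + r->size;    /* exclusive end, line 177 */
        }

        /* final gap up to the aspace end (line 188), unless the last
         * region already touches the end (line 186) */
        if (gap_beg != as->base + as->size &&
            check_gap_sketch(gap_beg, as->base + as->size - 1,
                             &spot, align, size))
            return spot;

        return (vaddr_t)-1;   /* assumed failure sentinel */
    }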
288 status_t vmm_reserve_space(vmm_aspace_t *aspace, const char *name, size_t size, vaddr_t vaddr) { in vmm_reserve_space() argument
289 LTRACEF("aspace %p name '%s' size 0x%zx vaddr 0x%lx\n", aspace, name, size, vaddr); in vmm_reserve_space()
293 DEBUG_ASSERT(IS_PAGE_ALIGNED(size)); in vmm_reserve_space()
300 if (size == 0) in vmm_reserve_space()
302 if (!IS_PAGE_ALIGNED(vaddr) || !IS_PAGE_ALIGNED(size)) in vmm_reserve_space()
309 size = trim_to_aspace(aspace, vaddr, size); in vmm_reserve_space()
318 vmm_region_t *r = alloc_region(aspace, name, size, vaddr, 0, in vmm_reserve_space()
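A hypothetical call, using the four-argument signature shown on line 288; the kernel/vm.h header and the vmm_get_kernel_aspace() accessor are assumed from LK, and the name and addresses are invented:

    #include <kernel/vm.h>   /* assumed home of the vmm_* API */

    static status_t reserve_boot_window(void)
    {
        /* carve a 1 MB hole out of the kernel aspace so later vmm
         * allocations cannot land there; both vaddr and size must be
         * page aligned (line 302), and size is trimmed to the aspace
         * edge (line 309) */
        return vmm_reserve_space(vmm_get_kernel_aspace(), "boot_window",
                                 0x100000,       /* size, invented  */
                                 0xffe00000UL);  /* vaddr, invented */
    }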
325 status_t vmm_alloc_physical(vmm_aspace_t *aspace, const char *name, size_t size, in vmm_alloc_physical() argument
330 aspace, name, size, ptr ? *ptr : 0, paddr, vmm_flags, arch_mmu_flags); in vmm_alloc_physical()
334 DEBUG_ASSERT(IS_PAGE_ALIGNED(size)); in vmm_alloc_physical()
341 if (size == 0) in vmm_alloc_physical()
343 if (!IS_PAGE_ALIGNED(paddr) || !IS_PAGE_ALIGNED(size)) in vmm_alloc_physical()
360 vmm_region_t *r = alloc_region(aspace, name, size, vaddr, align_log2, vmm_flags, in vmm_alloc_physical()
372 int err = arch_mmu_map(&aspace->arch_aspace, r->base, paddr, size / PAGE_SIZE, arch_mmu_flags); in vmm_alloc_physical()
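vmm_alloc_physical reserves a region and then maps it one-to-one onto the caller's physical range (line 372). A hypothetical use mapping device registers uncached; the middle parameters (void **ptr, uint8_t align_log2) are assumed from LK's vm.h, since the listing only shows the first half of the signature, and the peripheral address is invented:

    #include <kernel/vm.h>

    static void *map_uart(void)
    {
        void *regs = NULL;
        status_t err = vmm_alloc_physical(vmm_get_kernel_aspace(), "uart0",
                                          PAGE_SIZE,      /* one page */
                                          &regs, 0,       /* no alignment */
                                          0xf9000000UL,   /* invented paddr */
                                          0,              /* vmm flags */
                                          ARCH_MMU_FLAG_UNCACHED_DEVICE);
        return (err < 0) ? NULL : regs;
    }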
382 status_t vmm_alloc_contiguous(vmm_aspace_t *aspace, const char *name, size_t size, void **ptr, in vmm_alloc_contiguous() argument
387 aspace, name, size, ptr ? *ptr : 0, align_pow2, vmm_flags, arch_mmu_flags); in vmm_alloc_contiguous()
391 size = ROUNDUP(size, PAGE_SIZE); in vmm_alloc_contiguous()
392 if (size == 0) in vmm_alloc_contiguous()
416 size_t count = pmm_alloc_contiguous(size / PAGE_SIZE, align_pow2, &pa, &page_list); in vmm_alloc_contiguous()
417 if (count < size / PAGE_SIZE) { in vmm_alloc_contiguous()
426 vmm_region_t *r = alloc_region(aspace, name, size, vaddr, align_pow2, vmm_flags, in vmm_alloc_contiguous()
438 arch_mmu_map(&aspace->arch_aspace, r->base, pa, size / PAGE_SIZE, arch_mmu_flags); in vmm_alloc_contiguous()
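vmm_alloc_contiguous asks the PMM for one physically contiguous run (line 416), backs out if it got fewer pages than requested (line 417), and maps the run with a single arch_mmu_map call (line 438). A hypothetical DMA-style allocation, with the tail of the signature assumed from LK's vm.h:

    #include <kernel/vm.h>

    static void *alloc_dma_buffer(size_t len)
    {
        void *buf = NULL;
        /* len is rounded up to whole pages internally (line 391) */
        status_t err = vmm_alloc_contiguous(vmm_get_kernel_aspace(),
                                            "dma_buf", len, &buf,
                                            0,   /* natural alignment */
                                            0,   /* vmm flags */
                                            ARCH_MMU_FLAG_CACHED);
        return (err < 0) ? NULL : buf;
    }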
456 status_t vmm_alloc(vmm_aspace_t *aspace, const char *name, size_t size, void **ptr, in vmm_alloc() argument
461 aspace, name, size, ptr ? *ptr : 0, align_pow2, vmm_flags, arch_mmu_flags); in vmm_alloc()
465 size = ROUNDUP(size, PAGE_SIZE); in vmm_alloc()
466 if (size == 0) in vmm_alloc()
490 size_t count = pmm_alloc_pages(size / PAGE_SIZE, &page_list); in vmm_alloc()
491 DEBUG_ASSERT(count <= size); in vmm_alloc()
492 if (count < size / PAGE_SIZE) { in vmm_alloc()
493 LTRACEF("failed to allocate enough pages (asked for %zu, got %zu)\n", size / PAGE_SIZE, count); in vmm_alloc()
502 vmm_region_t *r = alloc_region(aspace, name, size, vaddr, align_pow2, vmm_flags, in vmm_alloc()
519 DEBUG_ASSERT(va <= r->base + r->size - 1); in vmm_alloc()
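vmm_alloc takes whatever pages the PMM hands back, contiguous or not (line 490), and maps them one at a time, with the assert on line 519 keeping the loop inside the region. Note that the assert on line 491 compares a page count against a byte count, a far weaker bound than the count <= size / PAGE_SIZE relation the check on line 492 actually depends on. A hypothetical call, signature again assumed from LK's vm.h:

    #include <kernel/vm.h>

    static void *alloc_scratch(void)
    {
        void *va = NULL;
        status_t err = vmm_alloc(vmm_get_kernel_aspace(), "scratch",
                                 4 * PAGE_SIZE, &va,
                                 0,   /* align_pow2 */
                                 0,   /* vmm flags */
                                 ARCH_MMU_FLAG_PERM_NO_EXECUTE);
        return (err < 0) ? NULL : va;
    }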
552 if ((vaddr >= r->base) && (vaddr <= r->base + r->size - 1)) in vmm_find_region()
572 arch_mmu_unmap(&aspace->arch_aspace, r->base, r->size / PAGE_SIZE); in vmm_free_region()
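Lookup and teardown pair up: vmm_find_region matches any address inside a region's inclusive range (line 552), and vmm_free_region relies on that before unmapping the region's whole page span (line 572), so a mapping can be released by any interior address, not just its base. A hypothetical follow-on to the vmm_alloc sketch above:

    #include <kernel/vm.h>

    static void free_scratch(void *scratch)
    {
        /* any interior address resolves to the owning region (line 552) */
        vmm_free_region(vmm_get_kernel_aspace(), (vaddr_t)scratch + 16);
    }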
601 aspace->size = KERNEL_ASPACE_SIZE; in vmm_create_aspace()
604 aspace->size = USER_ASPACE_SIZE; in vmm_create_aspace()
608 err = arch_mmu_init_aspace(&aspace->arch_aspace, aspace->base, aspace->size, in vmm_create_aspace()
645 arch_mmu_unmap(&aspace->arch_aspace, r->base, r->size / PAGE_SIZE); in vmm_free_aspace()
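vmm_create_aspace sizes the new space for kernel or user use (lines 601-604) and hands it to the arch layer (line 608); vmm_free_aspace unmaps every region still present at teardown (line 645). A hypothetical round trip; the flags value and VMM_ASPACE_FLAG_KERNEL are assumed from LK's vm.h:

    #include <kernel/vm.h>

    static void aspace_round_trip(void)
    {
        vmm_aspace_t *as = NULL;

        /* flags = 0 is assumed to select the user layout (line 604);
         * VMM_ASPACE_FLAG_KERNEL would pick the kernel one (line 601) */
        if (vmm_create_aspace(&as, "proc0", 0) < 0)
            return;

        /* ... create mappings, use them, tear them down ... */

        vmm_free_aspace(as);   /* unmaps any regions still present */
    }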
701 r, r->name, r->base, r->base + r->size - 1, r->size, r->flags, r->arch_mmu_flags); in dump_region()
706 a, a->name, a->base, a->base + a->size - 1, a->size, a->flags); in dump_aspace()