Lines matching refs:slab (each entry: source line number, matching code, enclosing function):
139 (&slab->memusage[((rt_uintptr_t)(addr) - slab->heap_start) >> RT_MM_PAGE_BITS])
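
Line 139 is the body of the btokup() macro: it maps any address inside the heap to that page's slot in the memusage array by subtracting heap_start and shifting by RT_MM_PAGE_BITS, i.e. one bookkeeping entry per page. A minimal standalone sketch of the same computation, assuming 4 KiB pages and a simplified usage struct (the real allocator records each page's type and size in this slot; names here are illustrative):

    /* btokup() sketch: one bookkeeping slot per heap page.
     * PAGE_BITS and struct usage are illustrative assumptions. */
    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_BITS 12u                 /* assume 4 KiB pages */

    struct usage { uint32_t type : 2, size : 30; };

    static struct usage *btokup(struct usage *memusage,
                                uintptr_t heap_start, void *addr)
    {
        /* byte offset into the heap, divided by the page size */
        return &memusage[((uintptr_t)addr - heap_start) >> PAGE_BITS];
    }

    int main(void)
    {
        static struct usage memusage[16];
        uintptr_t heap_start = 0x100000;
        void *p = (void *)(heap_start + 3 * (1u << PAGE_BITS) + 42);

        /* prints 3: the address lies in the heap's fourth page */
        printf("page index = %td\n",
               btokup(memusage, heap_start, p) - memusage);
        return 0;
    }
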
220 struct rt_slab *slab = (struct rt_slab *)m; in rt_slab_page_alloc() local
225 for (prev = &slab->page_list; (b = *prev) != RT_NULL; prev = &(b->next)) in rt_slab_page_alloc()
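
Lines 220-225 are the head of rt_slab_page_alloc(): a first-fit scan of the free-page list, walked through a pointer-to-pointer (prev) so the matching node can be unlinked or shrunk without a special case for the list head. A runnable sketch of that scan; the types, the page size, and the split strategy (hand out the head of the run, move the header forward) are illustrative assumptions, since the listing only shows the loop header:

    /* first-fit page allocation over a singly linked free list */
    #include <stddef.h>

    #define PAGE_SIZE 4096            /* assumed page size */

    struct page_head
    {
        struct page_head *next;
        size_t            page;       /* pages in this free run */
    };

    static void *page_alloc(struct page_head **page_list, size_t npages)
    {
        struct page_head *b, **prev, *n;

        for (prev = page_list; (b = *prev) != NULL; prev = &(b->next))
        {
            if (b->page > npages)
            {
                /* split: the remainder starts npages further on and
                 * replaces b in the list; the run's head is returned */
                n       = (struct page_head *)((char *)b + npages * PAGE_SIZE);
                n->next = b->next;
                n->page = b->page - npages;
                *prev   = n;
                return b;
            }
            if (b->page == npages)
            {
                *prev = b->next;      /* exact fit: unlink the run */
                return b;
            }
        }
        return NULL;                  /* no run is large enough */
    }

    int main(void)
    {
        static _Alignas(PAGE_SIZE) char heap[8 * PAGE_SIZE];
        struct page_head *list = (struct page_head *)heap;
        void *a, *b;

        list->next = NULL;
        list->page = 8;               /* one 8-page run */

        a = page_alloc(&list, 3);     /* splits: returns heap */
        b = page_alloc(&list, 5);     /* exact fit: empties the list */
        return !(a == heap && b == heap + 3 * PAGE_SIZE && list == NULL);
    }
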
261 struct rt_slab *slab = (struct rt_slab *)m; in rt_slab_page_free() local
269 for (prev = &slab->page_list; (b = *prev) != RT_NULL; prev = &(b->next)) in rt_slab_page_free()
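
Lines 261-269 open rt_slab_page_free(): the same prev/b walk, now used to insert the freed run into the address-ordered list and coalesce it with any touching neighbour. The listing shows only the loop header, so the merge details below are a hedged sketch (reusing struct page_head and PAGE_SIZE from the sketch above):

    /* address-ordered free with two-sided coalescing */
    static void page_free(struct page_head **page_list,
                          void *addr, size_t npages)
    {
        struct page_head *b, **prev;
        struct page_head *n = (struct page_head *)addr;

        n->page = npages;
        n->next = NULL;

        for (prev = page_list; (b = *prev) != NULL; prev = &(b->next))
        {
            /* n starts where b ends: grow b, then see whether the
             * grown run now also touches b->next and absorb it too */
            if ((char *)b + b->page * PAGE_SIZE == (char *)n)
            {
                b->page += npages;
                if ((char *)b + b->page * PAGE_SIZE == (char *)b->next)
                {
                    b->page += b->next->page;
                    b->next  = b->next->next;
                }
                return;
            }
            /* b starts where n ends: n swallows b, takes its place */
            if ((char *)n + npages * PAGE_SIZE == (char *)b)
            {
                n->page = b->page + npages;
                n->next = b->next;
                *prev   = n;
                return;
            }
            if ((char *)b > (char *)n)
                break;                /* passed the spot: no neighbour */
        }

        n->next = b;                  /* b is NULL at the list tail */
        *prev   = n;
    }
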
304 static void rt_slab_page_init(struct rt_slab *slab, void *addr, rt_size_t npages) in rt_slab_page_init() argument
309 slab->page_list = RT_NULL; in rt_slab_page_init()
310 rt_slab_page_free((rt_slab_t)(&slab->parent), addr, npages); in rt_slab_page_init()
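
Lines 304-310 show that initialization is just "free the whole heap": with page_list set to RT_NULL, the very first rt_slab_page_free() installs a single run covering all npages. In terms of the sketches above:

    /* seed the page allocator: an empty list plus one free of the
     * whole region yields a single npages-long run */
    static void page_init(struct page_head **page_list,
                          void *heap, size_t npages)
    {
        *page_list = NULL;                  /* nothing free yet */
        page_free(page_list, heap, npages); /* one run covers it all */
    }
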
328 struct rt_slab *slab; in rt_slab_init() local
330 slab = (struct rt_slab *)RT_ALIGN((rt_uintptr_t)begin_addr, RT_ALIGN_SIZE); in rt_slab_init()
331 start_addr = (rt_uintptr_t)slab + sizeof(*slab); in rt_slab_init()
347 rt_memset(slab, 0, sizeof(*slab)); in rt_slab_init()
349 rt_object_init(&(slab->parent.parent), RT_Object_Class_Memory, name); in rt_slab_init()
350 slab->parent.algorithm = "slab"; in rt_slab_init()
351 slab->parent.address = begin_align; in rt_slab_init()
352 slab->parent.total = limsize; in rt_slab_init()
353 slab->parent.used = 0; in rt_slab_init()
354 slab->parent.max = 0; in rt_slab_init()
355 slab->heap_start = begin_align; in rt_slab_init()
356 slab->heap_end = end_align; in rt_slab_init()
359 rt_slab_page_init(slab, (void *)slab->heap_start, npages); in rt_slab_init()
362 slab->zone_size = ZALLOC_MIN_ZONE_SIZE; in rt_slab_init()
363 while (slab->zone_size < ZALLOC_MAX_ZONE_SIZE && (slab->zone_size << 1) < (limsize / 1024)) in rt_slab_init()
364 slab->zone_size <<= 1; in rt_slab_init()
366 slab->zone_limit = slab->zone_size / 4; in rt_slab_init()
367 if (slab->zone_limit > ZALLOC_ZONE_LIMIT) in rt_slab_init()
368 slab->zone_limit = ZALLOC_ZONE_LIMIT; in rt_slab_init()
370 slab->zone_page_cnt = slab->zone_size / RT_MM_PAGE_SIZE; in rt_slab_init()
373 slab->zone_size, slab->zone_page_cnt); in rt_slab_init()
378 slab->memusage = rt_slab_page_alloc((rt_slab_t)(&slab->parent), limsize / RT_MM_PAGE_SIZE); in rt_slab_init()
381 (rt_uintptr_t)slab->memusage, limsize); in rt_slab_init()
382 return &slab->parent; in rt_slab_init()
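
Lines 328-382 are rt_slab_init(): align the region, size the zones against the heap, then grab the memusage array itself from the freshly seeded page allocator (line 378). The zone sizing at lines 362-370 is worth a worked example. The ZALLOC_* values below are assumptions typical for this allocator family, not read from the listing; check slab.c for the real constants:

    /* worked example of the zone sizing loop (lines 362-370) */
    #include <stdio.h>
    #include <stddef.h>

    #define ZALLOC_ZONE_LIMIT    (16 * 1024)   /* assumed values;   */
    #define ZALLOC_MIN_ZONE_SIZE (32 * 1024)   /* see slab.c for    */
    #define ZALLOC_MAX_ZONE_SIZE (128 * 1024)  /* the real constants */
    #define PAGE_SIZE            4096          /* assumed page size */

    int main(void)
    {
        size_t limsize   = 4 * 1024 * 1024;    /* say, a 4 MiB heap */
        size_t zone_size = ZALLOC_MIN_ZONE_SIZE;
        size_t zone_limit;

        /* grow zones with the heap, capped at the maximum zone size
         * and at roughly 1/1024 of the heap */
        while (zone_size < ZALLOC_MAX_ZONE_SIZE &&
               (zone_size << 1) < (limsize / 1024))
            zone_size <<= 1;

        /* chunks up to a quarter of a zone are served from zones,
         * capped at ZALLOC_ZONE_LIMIT (lines 366-368) */
        zone_limit = zone_size / 4;
        if (zone_limit > ZALLOC_ZONE_LIMIT)
            zone_limit = ZALLOC_ZONE_LIMIT;

        /* 4 MiB heap: limsize/1024 == 4096, so doubling to 64 KiB is
         * never allowed and zone_size stays 32 KiB; zone_limit is
         * 8 KiB; 8 pages per zone */
        printf("zone_size=%zu zone_limit=%zu pages=%zu\n",
               zone_size, zone_limit, zone_size / PAGE_SIZE);
        return 0;
    }
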
395 struct rt_slab *slab = (struct rt_slab *)m; in rt_slab_detach() local
397 RT_ASSERT(slab != RT_NULL); in rt_slab_detach()
398 RT_ASSERT(rt_object_get_type(&slab->parent.parent) == RT_Object_Class_Memory); in rt_slab_detach()
399 RT_ASSERT(rt_object_is_systemobject(&slab->parent.parent)); in rt_slab_detach()
401 rt_object_detach(&(slab->parent.parent)); in rt_slab_detach()
496 struct rt_slab *slab = (struct rt_slab *)m; in rt_slab_alloc() local
506 if (size >= slab->zone_limit) in rt_slab_alloc()
522 ((rt_uintptr_t)chunk - slab->heap_start) >> RT_MM_PAGE_BITS); in rt_slab_alloc()
524 slab->parent.used += size; in rt_slab_alloc()
525 if (slab->parent.used > slab->parent.max) in rt_slab_alloc()
526 slab->parent.max = slab->parent.used; in rt_slab_alloc()
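
Lines 496-526 begin rt_slab_alloc(). Anything at or above zone_limit (line 506) bypasses the zones entirely: the size is rounded up to whole pages, the pages come straight from rt_slab_page_alloc(), and line 522 records the page count in the per-page usage slot so rt_slab_free() can later size the block from its address alone. A sketch of that branch, building on the earlier sketches; the PAGE_TYPE_LARGE tag and the extern names are illustrative:

    /* large-allocation branch: whole pages, tagged in memusage */
    #include <stddef.h>
    #include <stdint.h>

    #define PAGE_SIZE 4096
    #define PAGE_BITS 12u

    enum { PAGE_TYPE_FREE, PAGE_TYPE_SMALL, PAGE_TYPE_LARGE };
    struct usage { uint32_t type : 2, size : 30; };

    /* provided by the earlier page-allocator sketch */
    struct page_head;
    void *page_alloc(struct page_head **page_list, size_t npages);
    extern struct page_head *page_list;
    extern struct usage     *memusage;
    extern uintptr_t         heap_start;

    void *alloc_large(size_t size)
    {
        void *chunk;
        struct usage *kup;

        /* round up to a whole number of pages */
        size = (size + PAGE_SIZE - 1) & ~(size_t)(PAGE_SIZE - 1);

        chunk = page_alloc(&page_list, size / PAGE_SIZE);
        if (chunk == NULL)
            return NULL;

        /* line 522 in spirit: the page index keys the usage slot */
        kup = &memusage[((uintptr_t)chunk - heap_start) >> PAGE_BITS];
        kup->type = PAGE_TYPE_LARGE;
        kup->size = (uint32_t)(size / PAGE_SIZE);
        return chunk;
    }
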
543 if ((z = slab->zone_array[zi]) != RT_NULL) in rt_slab_alloc()
550 slab->zone_array[zi] = z->z_next; in rt_slab_alloc()
574 slab->parent.used += z->z_chunksize; in rt_slab_alloc()
575 if (slab->parent.used > slab->parent.max) in rt_slab_alloc()
576 slab->parent.max = slab->parent.used; in rt_slab_alloc()
592 if ((z = slab->zone_free) != RT_NULL) in rt_slab_alloc()
595 slab->zone_free = z->z_next; in rt_slab_alloc()
596 -- slab->zone_free_cnt; in rt_slab_alloc()
601 z = rt_slab_page_alloc(m, slab->zone_size / RT_MM_PAGE_SIZE); in rt_slab_alloc()
611 for (off = 0, kup = btokup(z); off < slab->zone_page_cnt; off ++) in rt_slab_alloc()
637 z->z_nmax = (slab->zone_size - off) / size; in rt_slab_alloc()
646 z->z_next = slab->zone_array[zi]; in rt_slab_alloc()
647 slab->zone_array[zi] = z; in rt_slab_alloc()
649 slab->parent.used += z->z_chunksize; in rt_slab_alloc()
650 if (slab->parent.used > slab->parent.max) in rt_slab_alloc()
651 slab->parent.max = slab->parent.used; in rt_slab_alloc()
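
Below zone_limit, allocation is served from zones (lines 543-651): zone_array[zi] chains zones whose chunk size matches bucket zi; a hit pops one chunk, unlinking the zone from its bucket when it becomes full (line 550); on a miss, a cached zone is taken from zone_free (lines 592-596) or a fresh one is carved out of zone_size / RT_MM_PAGE_SIZE pages (line 601), holding (zone_size - off) / size chunks after its header (line 637). A simplified sketch of the hit path; the field names mirror the listing, but the struct layout and the order of the two chunk sources are illustrative:

    /* zone hit path: pop one chunk from the bucket's first zone */
    #include <stddef.h>
    #include <stdint.h>

    struct chunk { struct chunk *c_next; };

    struct zone
    {
        struct zone  *z_next;       /* next zone in this bucket */
        int           z_nfree;      /* chunks still available */
        int           z_nmax;       /* total chunks in the zone */
        int           z_uindex;     /* last never-used chunk handed out */
        size_t        z_chunksize;
        struct chunk *z_freechunk;  /* chunks threaded back by free() */
        uint8_t      *z_baseptr;    /* first chunk in the zone */
    };

    void *zone_alloc(struct zone **bucket)
    {
        struct zone *z = *bucket;
        struct chunk *c;

        if (z == NULL)
            return NULL;            /* miss: caller carves a new zone */

        /* zone becomes full after this chunk: unlink it (line 550) */
        if (--z->z_nfree == 0)
            *bucket = z->z_next;

        /* prefer the never-used tail of the zone; once exhausted,
         * recycle chunks from the free list */
        if (z->z_uindex + 1 != z->z_nmax)
        {
            z->z_uindex += 1;
            return z->z_baseptr + (size_t)z->z_uindex * z->z_chunksize;
        }
        c = z->z_freechunk;
        z->z_freechunk = c->c_next;
        return c;
    }
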
674 struct rt_slab *slab = (struct rt_slab *)m; in rt_slab_realloc() local
743 struct rt_slab *slab = (struct rt_slab *)m; in rt_slab_free() local
756 ((rt_uintptr_t)(addr) - slab->heap_start) >> RT_MM_PAGE_BITS); in rt_slab_free()
770 slab->parent.used -= size * RT_MM_PAGE_SIZE; in rt_slab_free()
790 slab->parent.used -= z->z_chunksize; in rt_slab_free()
798 z->z_next = slab->zone_array[z->z_zoneindex]; in rt_slab_free()
799 slab->zone_array[z->z_zoneindex] = z; in rt_slab_free()
809 (z->z_next || slab->zone_array[z->z_zoneindex] != z)) in rt_slab_free()
817 for (pz = &slab->zone_array[z->z_zoneindex]; z != *pz; pz = &(*pz)->z_next) in rt_slab_free()
825 z->z_next = slab->zone_free; in rt_slab_free()
826 slab->zone_free = z; in rt_slab_free()
828 ++ slab->zone_free_cnt; in rt_slab_free()
831 if (slab->zone_free_cnt > ZONE_RELEASE_THRESH) in rt_slab_free()
835 z = slab->zone_free; in rt_slab_free()
836 slab->zone_free = z->z_next; in rt_slab_free()
837 -- slab->zone_free_cnt; in rt_slab_free()
840 for (i = 0, kup = btokup(z); i < slab->zone_page_cnt; i ++) in rt_slab_free()
848 rt_slab_page_free(m, z, slab->zone_size / RT_MM_PAGE_SIZE); in rt_slab_free()
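
rt_slab_free() (lines 743-848) runs the allocation paths backwards. For a large block, line 756 recovers the page index from the address and line 770 returns size * RT_MM_PAGE_SIZE bytes of accounting before the pages go back to the page allocator. For a chunk, the block is threaded back onto its zone's free list; a zone going from full to non-full rejoins zone_array (lines 798-799); a zone that becomes entirely free, provided it is not the only zone left in its bucket (line 809), is unlinked (line 817) and parked on zone_free (lines 825-828); and once zone_free_cnt exceeds ZONE_RELEASE_THRESH, one parked zone's pages are handed back (lines 831-848). A sketch of the chunk path, reusing the illustrative struct zone / struct chunk above; ZONE_RELEASE_THRESH's value is not shown in the listing, so 2 is an assumption here:

    /* chunk free path: recycle into the zone, park entirely-free
     * zones, and release them past a threshold */
    #define ZONE_RELEASE_THRESH 2       /* assumed value */

    void zone_free_chunk(struct zone **bucket, struct zone **zone_free,
                         int *zone_free_cnt, struct zone *z, void *ptr)
    {
        /* thread the chunk back through the zone's free list */
        struct chunk *c = ptr;
        c->c_next = z->z_freechunk;
        z->z_freechunk = c;

        /* full -> non-full: the zone can serve allocations again */
        if (z->z_nfree++ == 0)
        {
            z->z_next = *bucket;
            *bucket = z;
        }

        /* entirely free, and not the only zone left in its bucket
         * (line 809): unlink it and park it on zone_free */
        if (z->z_nfree == z->z_nmax && (z->z_next || *bucket != z))
        {
            struct zone **pz;

            for (pz = bucket; z != *pz; pz = &(*pz)->z_next)
                ;                       /* line 817: find z in its bucket */
            *pz = z->z_next;

            z->z_next = *zone_free;
            *zone_free = z;
            ++*zone_free_cnt;

            /* past the threshold, give one parked zone's pages back
             * to the page allocator (lines 835-848) */
            if (*zone_free_cnt > ZONE_RELEASE_THRESH)
            {
                struct zone *victim = *zone_free;
                *zone_free = victim->z_next;
                --*zone_free_cnt;
                /* ...mark victim's pages free (line 840) and call the
                 * page_free() sketch on victim (line 848) */
            }
        }
    }
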