Lines matching refs:slab

Each entry below is a cross-reference hit for the identifier slab: the source line number, the matching code, the enclosing function, and whether the hit is a function argument or a local variable.
221 struct kmem_cache_node *n, struct slab *slab,
241 #define MAKE_LIST(cachep, listp, slab, nodeid) \ argument
244 list_splice(&get_node(cachep, nodeid)->slab, listp); \
375 const struct slab *slab, unsigned int idx) in index_to_obj() argument
377 return slab->s_mem + cache->size * idx; in index_to_obj()
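The two lines above are the whole of index_to_obj(): objects in a slab sit at fixed strides from the s_mem base, so addressing is pure arithmetic. A minimal userspace sketch of that scheme, with simplified stand-in types rather than the kernel's real structures:

#include <stddef.h>

/* Simplified stand-ins for the kernel's types (illustrative only). */
struct cache  { size_t size; };   /* object stride, like kmem_cache.size     */
struct slab_s { void *s_mem; };   /* base of the object area, like slab.s_mem */

/* Mirror of index_to_obj(): the i-th object lives at s_mem + size * i. */
static void *index_to_obj(const struct cache *c, const struct slab_s *s,
                          unsigned int idx)
{
        return (char *)s->s_mem + c->size * idx;
}

/* Inverse, like obj_to_index(): recover the index from an object pointer. */
static unsigned int obj_to_index(const struct cache *c, const struct slab_s *s,
                                 void *obj)
{
        return (unsigned int)(((char *)obj - (char *)s->s_mem) / c->size);
}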
552 struct slab *slab, void *objp) in cache_free_pfmemalloc() argument
558 slab_node = slab_nid(slab); in cache_free_pfmemalloc()
1352 static struct slab *kmem_getpages(struct kmem_cache *cachep, gfp_t flags, in kmem_getpages()
1356 struct slab *slab; in kmem_getpages() local
1366 slab = folio_slab(folio); in kmem_getpages()
1368 account_slab(slab, cachep->gfporder, cachep, flags); in kmem_getpages()
1374 slab_set_pfmemalloc(slab); in kmem_getpages()
1376 return slab; in kmem_getpages()
1382 static void kmem_freepages(struct kmem_cache *cachep, struct slab *slab) in kmem_freepages() argument
1385 struct folio *folio = slab_folio(slab); in kmem_freepages()
1388 __slab_clear_pfmemalloc(slab); in kmem_freepages()
1397 unaccount_slab(slab, order, cachep); in kmem_freepages()
1404 struct slab *slab; in kmem_rcu_free() local
1406 slab = container_of(head, struct slab, rcu_head); in kmem_rcu_free()
1407 cachep = slab->slab_cache; in kmem_rcu_free()
1409 kmem_freepages(cachep, slab); in kmem_rcu_free()
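kmem_rcu_free() shows the standard RCU teardown pattern: the callback receives only the embedded rcu_head, and container_of() walks back to the enclosing struct slab before the pages are released. A self-contained sketch of that recovery step (userspace, simplified types; the grace-period machinery itself is elided):

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct rcu_head { struct rcu_head *next; };   /* simplified stand-in */
struct slab_s {
        void *slab_cache;
        struct rcu_head rcu_head;             /* embedded callback anchor */
};

/* The callback sees only the rcu_head; container_of() finds the slab. */
static void rcu_free(struct rcu_head *head)
{
        struct slab_s *s = container_of(head, struct slab_s, rcu_head);
        printf("freeing slab at %p\n", (void *)s);
}

int main(void)
{
        struct slab_s s = { 0 };
        rcu_free(&s.rcu_head);    /* normally invoked after a grace period */
        return 0;
}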
1542 struct slab *slab = virt_to_slab(objp); in check_poison_obj() local
1545 objnr = obj_to_index(cachep, slab, objp); in check_poison_obj()
1547 objp = index_to_obj(cachep, slab, objnr - 1); in check_poison_obj()
1553 objp = index_to_obj(cachep, slab, objnr + 1); in check_poison_obj()
1564 struct slab *slab) in slab_destroy_debugcheck() argument
1569 poison_obj(cachep, slab->freelist - obj_offset(cachep), in slab_destroy_debugcheck()
1574 void *objp = index_to_obj(cachep, slab, i); in slab_destroy_debugcheck()
1590 struct slab *slab) in slab_destroy_debugcheck() argument
1604 static void slab_destroy(struct kmem_cache *cachep, struct slab *slab) in slab_destroy() argument
1608 freelist = slab->freelist; in slab_destroy()
1609 slab_destroy_debugcheck(cachep, slab); in slab_destroy()
1611 call_rcu(&slab->rcu_head, kmem_rcu_free); in slab_destroy()
1613 kmem_freepages(cachep, slab); in slab_destroy()
1629 struct slab *slab, *n; in slabs_destroy() local
1631 list_for_each_entry_safe(slab, n, list, slab_list) { in slabs_destroy()
1632 list_del(&slab->slab_list); in slabs_destroy()
1633 slab_destroy(cachep, slab); in slabs_destroy()
2184 struct slab *slab; in drain_freelist() local
2196 slab = list_entry(p, struct slab, slab_list); in drain_freelist()
2197 list_del(&slab->slab_list); in drain_freelist()
2206 slab_destroy(cache, slab); in drain_freelist()
2283 struct slab *slab, int colour_off, in alloc_slabmgmt() argument
2287 void *addr = slab_address(slab); in alloc_slabmgmt()
2289 slab->s_mem = addr + colour_off; in alloc_slabmgmt()
2290 slab->active = 0; in alloc_slabmgmt()
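alloc_slabmgmt() applies cache colouring: s_mem starts colour_off bytes into the slab's pages so that objects from successive slabs land on different hardware cache lines. A hedged sketch of how a cache could cycle through its colours; the field names and their placement here are illustrative, not the kernel's exact layout:

#include <stddef.h>

struct cache {
        size_t colour;       /* number of distinct colours in the slack space */
        size_t colour_off;   /* step between colours, typically a cacheline   */
        size_t colour_next;  /* next colour to hand out (illustrative field)  */
};

/* Pick the offset for the next slab, cycling through available colours. */
static size_t next_colour_offset(struct cache *c)
{
        size_t off = c->colour_next * c->colour_off;

        if (++c->colour_next >= c->colour)
                c->colour_next = 0;
        return off;          /* slab->s_mem = page_base + off */
}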
2307 static inline freelist_idx_t get_free_obj(struct slab *slab, unsigned int idx) in get_free_obj() argument
2309 return ((freelist_idx_t *) slab->freelist)[idx]; in get_free_obj()
2312 static inline void set_free_obj(struct slab *slab, in set_free_obj() argument
2315 ((freelist_idx_t *)(slab->freelist))[idx] = val; in set_free_obj()
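get_free_obj()/set_free_obj() reveal the freelist representation: not a linked list, but a flat array of freelist_idx_t object indices hanging off slab->freelist. A compact sketch with simplified types (the kernel picks a char- or short-sized index depending on objects per slab):

#include <stdint.h>

typedef uint16_t freelist_idx_t;   /* small index type, as in the kernel */

struct slab_s {
        void *freelist;            /* points at an array of freelist_idx_t */
        unsigned int active;       /* objects currently handed out         */
};

static freelist_idx_t get_free_obj(const struct slab_s *s, unsigned int idx)
{
        return ((freelist_idx_t *)s->freelist)[idx];
}

static void set_free_obj(struct slab_s *s, unsigned int idx, freelist_idx_t val)
{
        ((freelist_idx_t *)s->freelist)[idx] = val;
}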
2318 static void cache_init_objs_debug(struct kmem_cache *cachep, struct slab *slab) in cache_init_objs_debug() argument
2324 void *objp = index_to_obj(cachep, slab, i); in cache_init_objs_debug()
2408 static void swap_free_obj(struct slab *slab, unsigned int a, unsigned int b) in swap_free_obj() argument
2410 swap(((freelist_idx_t *) slab->freelist)[a], in swap_free_obj()
2411 ((freelist_idx_t *) slab->freelist)[b]); in swap_free_obj()
2418 static bool shuffle_freelist(struct kmem_cache *cachep, struct slab *slab) in shuffle_freelist() argument
2435 slab->freelist = index_to_obj(cachep, slab, objfreelist) + in shuffle_freelist()
2446 set_free_obj(slab, i, i); in shuffle_freelist()
2452 swap_free_obj(slab, i, rand); in shuffle_freelist()
2456 set_free_obj(slab, i, next_random_slot(&state)); in shuffle_freelist()
2460 set_free_obj(slab, cachep->num - 1, objfreelist); in shuffle_freelist()
2466 struct slab *slab) in shuffle_freelist() argument
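shuffle_freelist() first writes the identity permutation (set_free_obj(slab, i, i)) and then applies a Fisher-Yates shuffle via swap_free_obj(), so objects are handed out in randomized order. A minimal userspace sketch of the same shuffle; rand() stands in for the kernel's per-cache random state:

#include <stdint.h>
#include <stdlib.h>

typedef uint16_t freelist_idx_t;

static void swap_idx(freelist_idx_t *fl, unsigned int a, unsigned int b)
{
        freelist_idx_t tmp = fl[a];
        fl[a] = fl[b];
        fl[b] = tmp;
}

/* Fill 0..n-1 in order, then Fisher-Yates shuffle from the back. */
static void shuffle_freelist(freelist_idx_t *fl, unsigned int n)
{
        unsigned int i;

        if (n == 0)
                return;
        for (i = 0; i < n; i++)
                fl[i] = (freelist_idx_t)i;
        for (i = n - 1; i > 0; i--)
                swap_idx(fl, i, (unsigned int)(rand() % (i + 1)));
}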
2473 struct slab *slab) in cache_init_objs() argument
2479 cache_init_objs_debug(cachep, slab); in cache_init_objs()
2482 shuffled = shuffle_freelist(cachep, slab); in cache_init_objs()
2485 slab->freelist = index_to_obj(cachep, slab, cachep->num - 1) + in cache_init_objs()
2490 objp = index_to_obj(cachep, slab, i); in cache_init_objs()
2501 set_free_obj(slab, i, i); in cache_init_objs()
2505 static void *slab_get_obj(struct kmem_cache *cachep, struct slab *slab) in slab_get_obj() argument
2509 objp = index_to_obj(cachep, slab, get_free_obj(slab, slab->active)); in slab_get_obj()
2510 slab->active++; in slab_get_obj()
2516 struct slab *slab, void *objp) in slab_put_obj() argument
2518 unsigned int objnr = obj_to_index(cachep, slab, objp); in slab_put_obj()
2523 for (i = slab->active; i < cachep->num; i++) { in slab_put_obj()
2524 if (get_free_obj(slab, i) == objnr) { in slab_put_obj()
2531 slab->active--; in slab_put_obj()
2532 if (!slab->freelist) in slab_put_obj()
2533 slab->freelist = objp + obj_offset(cachep); in slab_put_obj()
2535 set_free_obj(slab, slab->active, objnr); in slab_put_obj()
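slab_get_obj() and slab_put_obj() treat the index array as a stack: active counts objects handed out, allocation reads the index at slot active and advances, and free retreats active and writes the object's index back (the loop over slots >= active in the listing is a debug-build double-free scan). A sketch of the pair:

#include <assert.h>
#include <stdint.h>

typedef uint16_t freelist_idx_t;

struct slab_s {
        freelist_idx_t *freelist;  /* index array                  */
        unsigned int active;       /* objects currently allocated  */
        unsigned int num;          /* objects per slab             */
};

/* Allocation: take the index stored at slot 'active', then advance. */
static unsigned int slab_get_obj(struct slab_s *s)
{
        assert(s->active < s->num);
        return s->freelist[s->active++];
}

/* Free: retreat 'active' and record the returned object's index there.
 * A debug build would first scan slots >= active for objnr to catch
 * double frees, as the listed loop does. */
static void slab_put_obj(struct slab_s *s, unsigned int objnr)
{
        assert(s->active > 0);
        s->freelist[--s->active] = (freelist_idx_t)objnr;
}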
2542 static struct slab *cache_grow_begin(struct kmem_cache *cachep, in cache_grow_begin()
2550 struct slab *slab; in cache_grow_begin() local
2570 slab = kmem_getpages(cachep, local_flags, nodeid); in cache_grow_begin()
2571 if (!slab) in cache_grow_begin()
2574 slab_node = slab_nid(slab); in cache_grow_begin()
2593 kasan_poison_slab(slab); in cache_grow_begin()
2596 freelist = alloc_slabmgmt(cachep, slab, offset, in cache_grow_begin()
2601 slab->slab_cache = cachep; in cache_grow_begin()
2602 slab->freelist = freelist; in cache_grow_begin()
2604 cache_init_objs(cachep, slab); in cache_grow_begin()
2609 return slab; in cache_grow_begin()
2612 kmem_freepages(cachep, slab); in cache_grow_begin()
2619 static void cache_grow_end(struct kmem_cache *cachep, struct slab *slab) in cache_grow_end() argument
2626 if (!slab) in cache_grow_end()
2629 INIT_LIST_HEAD(&slab->slab_list); in cache_grow_end()
2630 n = get_node(cachep, slab_nid(slab)); in cache_grow_end()
2634 if (!slab->active) { in cache_grow_end()
2635 list_add_tail(&slab->slab_list, &n->slabs_free); in cache_grow_end()
2638 fixup_slab_list(cachep, n, slab, &list); in cache_grow_end()
2641 n->free_objects += cachep->num - slab->active; in cache_grow_end()
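Growth is deliberately two-phased: cache_grow_begin() allocates pages and initializes objects with no node lock held, and cache_grow_end() takes list_lock only long enough to publish the slab on the right list and account its free objects. A structural sketch of that lock-narrowing pattern, with a pthread mutex standing in for n->list_lock (shape only, not the kernel's code):

#include <pthread.h>
#include <stdlib.h>

struct node {
        pthread_mutex_t list_lock;
        unsigned int free_objects;
};

struct slab_s { unsigned int num, active; };

/* Phase 1: expensive work (page allocation, object init), no lock held. */
static struct slab_s *grow_begin(void)
{
        struct slab_s *s = calloc(1, sizeof(*s));
        if (s)
                s->num = 32;   /* illustrative objects-per-slab */
        return s;
}

/* Phase 2: briefly take the node lock just to publish the new slab. */
static void grow_end(struct node *n, struct slab_s *s)
{
        if (!s)
                return;
        pthread_mutex_lock(&n->list_lock);
        /* list_add_tail(&s->slab_list, &n->slabs_free) would go here */
        n->free_objects += s->num - s->active;
        pthread_mutex_unlock(&n->list_lock);
}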
2689 struct slab *slab; in cache_free_debugcheck() local
2695 slab = virt_to_slab(objp); in cache_free_debugcheck()
2705 objnr = obj_to_index(cachep, slab, objp); in cache_free_debugcheck()
2708 BUG_ON(objp != index_to_obj(cachep, slab, objnr)); in cache_free_debugcheck()
2738 struct kmem_cache_node *n, struct slab *slab, in fixup_slab_list() argument
2742 list_del(&slab->slab_list); in fixup_slab_list()
2743 if (slab->active == cachep->num) { in fixup_slab_list()
2744 list_add(&slab->slab_list, &n->slabs_full); in fixup_slab_list()
2749 void **objp = slab->freelist; in fixup_slab_list()
2755 slab->freelist = NULL; in fixup_slab_list()
2758 list_add(&slab->slab_list, &n->slabs_partial); in fixup_slab_list()
2762 static noinline struct slab *get_valid_first_slab(struct kmem_cache_node *n, in get_valid_first_slab()
2763 struct slab *slab, bool pfmemalloc) in get_valid_first_slab() argument
2765 if (!slab) in get_valid_first_slab()
2769 return slab; in get_valid_first_slab()
2771 if (!slab_test_pfmemalloc(slab)) in get_valid_first_slab()
2772 return slab; in get_valid_first_slab()
2776 slab_clear_pfmemalloc(slab); in get_valid_first_slab()
2777 return slab; in get_valid_first_slab()
2781 list_del(&slab->slab_list); in get_valid_first_slab()
2782 if (!slab->active) { in get_valid_first_slab()
2783 list_add_tail(&slab->slab_list, &n->slabs_free); in get_valid_first_slab()
2786 list_add_tail(&slab->slab_list, &n->slabs_partial); in get_valid_first_slab()
2788 list_for_each_entry(slab, &n->slabs_partial, slab_list) { in get_valid_first_slab()
2789 if (!slab_test_pfmemalloc(slab)) in get_valid_first_slab()
2790 return slab; in get_valid_first_slab()
2794 list_for_each_entry(slab, &n->slabs_free, slab_list) { in get_valid_first_slab()
2795 if (!slab_test_pfmemalloc(slab)) { in get_valid_first_slab()
2797 return slab; in get_valid_first_slab()
2804 static struct slab *get_first_slab(struct kmem_cache_node *n, bool pfmemalloc) in get_first_slab()
2806 struct slab *slab; in get_first_slab() local
2809 slab = list_first_entry_or_null(&n->slabs_partial, struct slab, in get_first_slab()
2811 if (!slab) { in get_first_slab()
2813 slab = list_first_entry_or_null(&n->slabs_free, struct slab, in get_first_slab()
2815 if (slab) in get_first_slab()
2820 slab = get_valid_first_slab(n, slab, pfmemalloc); in get_first_slab()
2822 return slab; in get_first_slab()
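get_first_slab() prefers slabs_partial and only then falls back to slabs_free, so partially used slabs fill up before untouched ones are dirtied. A sketch of that preference over circular lists, mirroring list_first_entry_or_null():

#include <stddef.h>

struct list_head { struct list_head *next, *prev; };
struct slab_s    { struct list_head slab_list; };

#define list_entry(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

/* NULL if the circular list is empty, else its first entry. */
static struct slab_s *first_slab_or_null(struct list_head *head)
{
        if (head->next == head)
                return NULL;
        return list_entry(head->next, struct slab_s, slab_list);
}

/* Prefer a partial slab; only then fall back to a completely free one. */
static struct slab_s *get_first_slab(struct list_head *partial,
                                     struct list_head *free)
{
        struct slab_s *s = first_slab_or_null(partial);
        return s ? s : first_slab_or_null(free);
}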
2828 struct slab *slab; in cache_alloc_pfmemalloc() local
2836 slab = get_first_slab(n, true); in cache_alloc_pfmemalloc()
2837 if (!slab) { in cache_alloc_pfmemalloc()
2842 obj = slab_get_obj(cachep, slab); in cache_alloc_pfmemalloc()
2845 fixup_slab_list(cachep, n, slab, &list); in cache_alloc_pfmemalloc()
2858 struct array_cache *ac, struct slab *slab, int batchcount) in alloc_block() argument
2864 BUG_ON(slab->active >= cachep->num); in alloc_block()
2866 while (slab->active < cachep->num && batchcount--) { in alloc_block()
2871 ac->entry[ac->avail++] = slab_get_obj(cachep, slab); in alloc_block()
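alloc_block() bulk-moves objects from one slab into the per-CPU array cache, bounded by both the slab's capacity and the requested batchcount, and hands the unfilled remainder back to the caller. A self-contained sketch of the loop (the types and the objs array are illustrative, standing in for slab_get_obj()):

struct slab_sketch {
        void **objs;           /* illustrative: preresolved object pointers */
        unsigned int active;   /* objects already handed out                */
        unsigned int num;      /* objects per slab                          */
};

struct array_cache_sketch {
        unsigned int avail;    /* objects currently in the per-CPU cache */
        void *entry[64];       /* illustrative fixed capacity            */
};

/* Move up to 'batchcount' objects from the slab into the array cache,
 * mirroring the alloc_block() loop; returns what is left of the batch. */
static int alloc_block(struct array_cache_sketch *ac, struct slab_sketch *s,
                       int batchcount)
{
        while (s->active < s->num && batchcount--)
                ac->entry[ac->avail++] = s->objs[s->active++];
        return batchcount;
}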
2884 struct slab *slab; in cache_alloc_refill() local
2917 slab = get_first_slab(n, false); in cache_alloc_refill()
2918 if (!slab) in cache_alloc_refill()
2923 batchcount = alloc_block(cachep, ac, slab, batchcount); in cache_alloc_refill()
2924 fixup_slab_list(cachep, n, slab, &list); in cache_alloc_refill()
2943 slab = cache_grow_begin(cachep, gfp_exact_node(flags), node); in cache_alloc_refill()
2950 if (!ac->avail && slab) in cache_alloc_refill()
2951 alloc_block(cachep, ac, slab, batchcount); in cache_alloc_refill()
2952 cache_grow_end(cachep, slab); in cache_alloc_refill()
3077 struct slab *slab; in fallback_alloc() local
3113 slab = cache_grow_begin(cache, flags, numa_mem_id()); in fallback_alloc()
3114 cache_grow_end(cache, slab); in fallback_alloc()
3115 if (slab) { in fallback_alloc()
3116 nid = slab_nid(slab); in fallback_alloc()
3140 struct slab *slab; in ____cache_alloc_node() local
3151 slab = get_first_slab(n, false); in ____cache_alloc_node()
3152 if (!slab) in ____cache_alloc_node()
3161 BUG_ON(slab->active == cachep->num); in ____cache_alloc_node()
3163 obj = slab_get_obj(cachep, slab); in ____cache_alloc_node()
3166 fixup_slab_list(cachep, n, slab, &list); in ____cache_alloc_node()
3174 slab = cache_grow_begin(cachep, gfp_exact_node(flags), nodeid); in ____cache_alloc_node()
3175 if (slab) { in ____cache_alloc_node()
3177 obj = slab_get_obj(cachep, slab); in ____cache_alloc_node()
3179 cache_grow_end(cachep, slab); in ____cache_alloc_node()
3279 struct slab *slab; in free_block() local
3285 struct slab *slab; in free_block() local
3289 slab = virt_to_slab(objp); in free_block()
3290 list_del(&slab->slab_list); in free_block()
3292 slab_put_obj(cachep, slab, objp); in free_block()
3296 if (slab->active == 0) { in free_block()
3297 list_add(&slab->slab_list, &n->slabs_free); in free_block()
3304 list_add_tail(&slab->slab_list, &n->slabs_partial); in free_block()
3311 slab = list_last_entry(&n->slabs_free, struct slab, slab_list); in free_block()
3312 list_move(&slab->slab_list, list); in free_block()
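free_block() returns a batch of objects and re-files each slab: empty slabs (active == 0) go to slabs_free, the rest to the tail of slabs_partial, and a trailing loop (lines 3311-3312 above) peels wholly free slabs onto a destroy list while free_objects exceeds free_limit. A sketch of that trimming step with illustrative counters:

struct node_sketch {
        unsigned int free_objects;   /* free objects across the node's slabs */
        unsigned int free_limit;     /* allowed surplus before reaping       */
        unsigned int free_slabs;     /* slabs currently on the free list     */
        unsigned int objs_per_slab;
};

/* How many wholly free slabs should be handed to slab_destroy()?
 * The kernel moves each one from slabs_free onto a local destroy list. */
static unsigned int trim_free_slabs(struct node_sketch *n)
{
        unsigned int reaped = 0;

        while (n->free_objects > n->free_limit && n->free_slabs > 0) {
                n->free_objects -= n->objs_per_slab;
                n->free_slabs--;
                reaped++;
        }
        return reaped;
}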
3348 struct slab *slab; in cache_flusharray() local
3350 list_for_each_entry(slab, &n->slabs_free, slab_list) { in cache_flusharray()
3351 BUG_ON(slab->active); in cache_flusharray()
3428 struct slab *slab = virt_to_slab(objp); in ___cache_free() local
3430 if (unlikely(slab_test_pfmemalloc(slab))) { in ___cache_free()
3431 cache_free_pfmemalloc(cachep, slab, objp); in ___cache_free()
3546 void __kmem_obj_info(struct kmem_obj_info *kpp, void *object, struct slab *slab) in __kmem_obj_info() argument
3553 kpp->kp_slab = slab; in __kmem_obj_info()
3554 cachep = slab->slab_cache; in __kmem_obj_info()
3558 slab = virt_to_slab(objp); in __kmem_obj_info()
3559 objnr = obj_to_index(cachep, slab, objp); in __kmem_obj_info()
3560 objp = index_to_obj(cachep, slab, objnr); in __kmem_obj_info()
4019 const struct slab *slab, bool to_user) in __check_heap_object() argument
4028 cachep = slab->slab_cache; in __check_heap_object()
4029 objnr = obj_to_index(cachep, slab, (void *)ptr); in __check_heap_object()
4036 offset = ptr - index_to_obj(cachep, slab, objnr) - obj_offset(cachep); in __check_heap_object()
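__check_heap_object() is the usercopy hardening hook: it maps ptr to its containing object with obj_to_index(), computes the offset within that object, and (in the code just past the lines shown) rejects copies that would overrun it. A sketch of the underlying bounds test:

#include <stdbool.h>
#include <stddef.h>

/* Would copying [ptr, ptr + len) stay inside one object of 'obj_size'
 * bytes? 'base' is the object's start, as index_to_obj() computes it. */
static bool usercopy_in_bounds(const char *base, size_t obj_size,
                               const char *ptr, size_t len)
{
        size_t offset = (size_t)(ptr - base);

        return offset < obj_size && len <= obj_size - offset;
}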