Lines Matching refs:slab

207 	struct slab **slab;  member
499 static __always_inline void slab_lock(struct slab *slab) in slab_lock() argument
501 struct page *page = slab_page(slab); in slab_lock()
507 static __always_inline void slab_unlock(struct slab *slab) in slab_unlock() argument
509 struct page *page = slab_page(slab); in slab_unlock()
522 static inline bool __cmpxchg_double_slab(struct kmem_cache *s, struct slab *slab, in __cmpxchg_double_slab() argument
532 if (cmpxchg_double(&slab->freelist, &slab->counters, in __cmpxchg_double_slab()
539 slab_lock(slab); in __cmpxchg_double_slab()
540 if (slab->freelist == freelist_old && in __cmpxchg_double_slab()
541 slab->counters == counters_old) { in __cmpxchg_double_slab()
542 slab->freelist = freelist_new; in __cmpxchg_double_slab()
543 slab->counters = counters_new; in __cmpxchg_double_slab()
544 slab_unlock(slab); in __cmpxchg_double_slab()
547 slab_unlock(slab); in __cmpxchg_double_slab()
560 static inline bool cmpxchg_double_slab(struct kmem_cache *s, struct slab *slab, in cmpxchg_double_slab() argument
568 if (cmpxchg_double(&slab->freelist, &slab->counters, in cmpxchg_double_slab()
578 slab_lock(slab); in cmpxchg_double_slab()
579 if (slab->freelist == freelist_old && in cmpxchg_double_slab()
580 slab->counters == counters_old) { in cmpxchg_double_slab()
581 slab->freelist = freelist_new; in cmpxchg_double_slab()
582 slab->counters = counters_new; in cmpxchg_double_slab()
583 slab_unlock(slab); in cmpxchg_double_slab()
587 slab_unlock(slab); in cmpxchg_double_slab()
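
The __cmpxchg_double_slab() and cmpxchg_double_slab() hits above all follow one pattern: replace the (freelist, counters) pair only if both still hold their expected values, using a lockless double-word cmpxchg when available and otherwise falling back to a compare-and-store under the slab lock. In SLUB, counters overlays the inuse/objects/frozen fields, which is why it has to change together with freelist. Below is a minimal userspace sketch of the locked fallback path only; struct slab_like and the pthread mutex are stand-ins for the kernel's struct slab and bit spinlock, not the real API.

/*
 * Userspace sketch of the locked fallback in __cmpxchg_double_slab():
 * update (freelist, counters) as a pair only when both still match the
 * expected old values.  Names and types here are illustrative.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct slab_like {
	void *freelist;			/* first free object in the slab */
	unsigned long counters;		/* packed inuse/objects/frozen */
	pthread_mutex_t lock;		/* stands in for the slab bit spinlock */
};

static bool cmpxchg_double_slab_slow(struct slab_like *slab,
				     void *freelist_old, unsigned long counters_old,
				     void *freelist_new, unsigned long counters_new)
{
	bool ok = false;

	pthread_mutex_lock(&slab->lock);
	if (slab->freelist == freelist_old && slab->counters == counters_old) {
		slab->freelist = freelist_new;
		slab->counters = counters_new;
		ok = true;
	}
	pthread_mutex_unlock(&slab->lock);
	return ok;
}

int main(void)
{
	static int obj;
	struct slab_like s = {
		.freelist = &obj, .counters = 1,
		.lock = PTHREAD_MUTEX_INITIALIZER,
	};

	/* Succeeds: both fields still match the expected old values. */
	printf("first swap:  %d\n",
	       cmpxchg_double_slab_slow(&s, &obj, 1, NULL, 2));
	/* Fails: freelist is now NULL rather than &obj. */
	printf("second swap: %d\n",
	       cmpxchg_double_slab_slow(&s, &obj, 1, NULL, 3));
	return 0;
}
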
606 struct slab *slab) in __fill_map() argument
608 void *addr = slab_address(slab); in __fill_map()
611 bitmap_zero(obj_map, slab->objects); in __fill_map()
613 for (p = slab->freelist; p; p = get_freepointer(s, p)) in __fill_map()
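
The __fill_map() hits show the freelist walk that turns a slab's linked free objects into a bitmap: every object reached through get_freepointer() gets its bit set, so the remaining zero bits identify allocated objects, and the bit index is recovered from the pointer's offset within the slab. A standalone sketch of that walk, assuming purely for illustration that the free pointer sits at offset 0 of each free object (SLUB computes the offset per cache):

/*
 * Sketch of the __fill_map() idea: walk the slab's freelist and mark
 * every free object in a map, leaving allocated objects unmarked.
 */
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

#define OBJ_SIZE   32
#define NR_OBJECTS 8

static unsigned char slab_mem[NR_OBJECTS * OBJ_SIZE];

static void *get_freepointer(void *object)
{
	void *next;

	memcpy(&next, object, sizeof(next));	/* free pointer at offset 0 */
	return next;
}

static void fill_map(bool *obj_map, void *freelist)
{
	memset(obj_map, 0, NR_OBJECTS * sizeof(*obj_map));
	for (void *p = freelist; p; p = get_freepointer(p))
		obj_map[((unsigned char *)p - slab_mem) / OBJ_SIZE] = true;
}

int main(void)
{
	bool obj_map[NR_OBJECTS];

	/* Chain objects 2 -> 5 -> 7 onto a fake freelist. */
	void *freelist = &slab_mem[2 * OBJ_SIZE];
	void *o5 = &slab_mem[5 * OBJ_SIZE], *o7 = &slab_mem[7 * OBJ_SIZE];
	void *nil = NULL;

	memcpy(&slab_mem[2 * OBJ_SIZE], &o5, sizeof(o5));
	memcpy(&slab_mem[5 * OBJ_SIZE], &o7, sizeof(o7));
	memcpy(&slab_mem[7 * OBJ_SIZE], &nil, sizeof(nil));

	fill_map(obj_map, freelist);
	for (int i = 0; i < NR_OBJECTS; i++)
		printf("object %d: %s\n", i, obj_map[i] ? "free" : "in use");
	return 0;
}
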
687 struct slab *slab, void *object) in check_valid_pointer() argument
694 base = slab_address(slab); in check_valid_pointer()
697 if (object < base || object >= base + slab->objects * s->size || in check_valid_pointer()
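
check_valid_pointer() rejects any object pointer that does not fall inside the slab's payload; the hit above is truncated, but the visible part already shows the range test against base + slab->objects * s->size. The sketch below shows a bounds-plus-stride validation in that spirit; the stride (object-boundary) test is my addition to make the helper complete, and the predicate is not claimed to match the kernel's exact condition.

/*
 * Sketch of a freelist-pointer sanity check: the object must lie inside
 * the slab's payload and sit on an object-size boundary.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

static bool valid_object_pointer(const char *base, unsigned int objects,
				 size_t size, const void *object)
{
	const char *p = object;

	if (!object)			/* a NULL freelist terminator is fine */
		return true;
	if (p < base || p >= base + (size_t)objects * size)
		return false;		/* outside the slab */
	return (size_t)(p - base) % size == 0;	/* not pointing mid-object */
}

int main(void)
{
	static char slab[8 * 64];	/* 8 objects of 64 bytes */

	printf("%d\n", valid_object_pointer(slab, 8, 64, slab + 3 * 64)); /* 1 */
	printf("%d\n", valid_object_pointer(slab, 8, 64, slab + 100));    /* 0: mid-object */
	printf("%d\n", valid_object_pointer(slab, 8, 64, slab + 8 * 64)); /* 0: past the end */
	return 0;
}
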
825 static void print_slab_info(const struct slab *slab) in print_slab_info() argument
827 struct folio *folio = (struct folio *)slab_folio(slab); in print_slab_info()
830 slab, slab->objects, slab->inuse, slab->freelist, in print_slab_info()
913 static void print_trailer(struct kmem_cache *s, struct slab *slab, u8 *p) in print_trailer() argument
916 u8 *addr = slab_address(slab); in print_trailer()
920 print_slab_info(slab); in print_trailer()
955 static void object_err(struct kmem_cache *s, struct slab *slab, in object_err() argument
962 print_trailer(s, slab, object); in object_err()
966 static bool freelist_corrupted(struct kmem_cache *s, struct slab *slab, in freelist_corrupted() argument
970 !check_valid_pointer(s, slab, nextfree) && freelist) { in freelist_corrupted()
971 object_err(s, slab, *freelist, "Freechain corrupt"); in freelist_corrupted()
980 static __printf(3, 4) void slab_err(struct kmem_cache *s, struct slab *slab, in slab_err() argument
993 print_slab_info(slab); in slab_err()
1032 static int check_bytes_and_report(struct kmem_cache *s, struct slab *slab, in check_bytes_and_report() argument
1038 u8 *addr = slab_address(slab); in check_bytes_and_report()
1057 print_trailer(s, slab, object); in check_bytes_and_report()
1104 static int check_pad_bytes(struct kmem_cache *s, struct slab *slab, u8 *p) in check_pad_bytes() argument
1121 return check_bytes_and_report(s, slab, p, "Object padding", in check_pad_bytes()
1126 static void slab_pad_check(struct kmem_cache *s, struct slab *slab) in slab_pad_check() argument
1138 start = slab_address(slab); in slab_pad_check()
1139 length = slab_size(slab); in slab_pad_check()
1154 slab_err(s, slab, "Padding overwritten. 0x%p-0x%p @offset=%tu", in slab_pad_check()
1161 static int check_object(struct kmem_cache *s, struct slab *slab, in check_object() argument
1169 if (!check_bytes_and_report(s, slab, object, "Left Redzone", in check_object()
1173 if (!check_bytes_and_report(s, slab, object, "Right Redzone", in check_object()
1181 !check_bytes_and_report(s, slab, object, in check_object()
1189 check_bytes_and_report(s, slab, p, "Alignment padding", in check_object()
1197 (!check_bytes_and_report(s, slab, p, "Poison", p, in check_object()
1199 !check_bytes_and_report(s, slab, p, "End Poison", in check_object()
1205 check_pad_bytes(s, slab, p); in check_object()
1216 if (!check_valid_pointer(s, slab, get_freepointer(s, p))) { in check_object()
1217 object_err(s, slab, p, "Freepointer corrupt"); in check_object()
1229 static int check_slab(struct kmem_cache *s, struct slab *slab) in check_slab() argument
1233 if (!folio_test_slab(slab_folio(slab))) { in check_slab()
1234 slab_err(s, slab, "Not a valid slab page"); in check_slab()
1238 maxobj = order_objects(slab_order(slab), s->size); in check_slab()
1239 if (slab->objects > maxobj) { in check_slab()
1240 slab_err(s, slab, "objects %u > max %u", in check_slab()
1241 slab->objects, maxobj); in check_slab()
1244 if (slab->inuse > slab->objects) { in check_slab()
1245 slab_err(s, slab, "inuse %u > max %u", in check_slab()
1246 slab->inuse, slab->objects); in check_slab()
1250 slab_pad_check(s, slab); in check_slab()
1258 static int on_freelist(struct kmem_cache *s, struct slab *slab, void *search) in on_freelist() argument
1265 fp = slab->freelist; in on_freelist()
1266 while (fp && nr <= slab->objects) { in on_freelist()
1269 if (!check_valid_pointer(s, slab, fp)) { in on_freelist()
1271 object_err(s, slab, object, in on_freelist()
1275 slab_err(s, slab, "Freepointer corrupt"); in on_freelist()
1276 slab->freelist = NULL; in on_freelist()
1277 slab->inuse = slab->objects; in on_freelist()
1288 max_objects = order_objects(slab_order(slab), s->size); in on_freelist()
1292 if (slab->objects != max_objects) { in on_freelist()
1293 slab_err(s, slab, "Wrong number of objects. Found %d but should be %d", in on_freelist()
1294 slab->objects, max_objects); in on_freelist()
1295 slab->objects = max_objects; in on_freelist()
1298 if (slab->inuse != slab->objects - nr) { in on_freelist()
1299 slab_err(s, slab, "Wrong object count. Counter is %d but counted were %d", in on_freelist()
1300 slab->inuse, slab->objects - nr); in on_freelist()
1301 slab->inuse = slab->objects - nr; in on_freelist()
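
The on_freelist() hits show SLUB's counter cross-check: walk the freelist, count the free objects while bounding the walk by the object count (so a corrupt, cyclic list cannot spin forever), and compare the result against the recorded inuse value, repairing the counter and reporting when they disagree. A simplified standalone sketch of that check; struct fake_slab and the repair message format are illustrative stand-ins:

/*
 * Sketch of the consistency check in on_freelist(): count freelist
 * entries (bounded by the total object count) and compare against the
 * recorded in-use counter, fixing it up on mismatch.
 */
#include <stdbool.h>
#include <stdio.h>

struct fake_object {
	struct fake_object *next;	/* embedded free pointer */
};

struct fake_slab {
	struct fake_object *freelist;
	unsigned int objects;		/* total objects in the slab */
	unsigned int inuse;		/* allocator's idea of allocated objects */
};

static bool check_counters(struct fake_slab *slab)
{
	unsigned int nr = 0;
	struct fake_object *fp = slab->freelist;

	while (fp && nr <= slab->objects) {	/* bound the walk */
		fp = fp->next;
		nr++;
	}
	if (slab->inuse != slab->objects - nr) {
		fprintf(stderr, "Wrong object count. Counter is %u but counted were %u\n",
			slab->inuse, slab->objects - nr);
		slab->inuse = slab->objects - nr;	/* repair, as the kernel does */
		return false;
	}
	return true;
}

int main(void)
{
	struct fake_object o[4] = { { &o[1] }, { &o[2] }, { NULL }, { NULL } };
	struct fake_slab slab = { .freelist = &o[0], .objects = 4, .inuse = 3 };

	/* Three objects are chained free, so inuse should be 1, not 3. */
	printf("consistent: %d\n", check_counters(&slab));
	printf("consistent after repair: %d\n", check_counters(&slab));
	return 0;
}
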
1307 static void trace(struct kmem_cache *s, struct slab *slab, void *object, in trace() argument
1314 object, slab->inuse, in trace()
1315 slab->freelist); in trace()
1329 struct kmem_cache_node *n, struct slab *slab) in add_full() argument
1335 list_add(&slab->slab_list, &n->full); in add_full()
1338 static void remove_full(struct kmem_cache *s, struct kmem_cache_node *n, struct slab *slab) in remove_full() argument
1344 list_del(&slab->slab_list); in remove_full()
1394 void setup_slab_debug(struct kmem_cache *s, struct slab *slab, void *addr) in setup_slab_debug() argument
1400 memset(kasan_reset_tag(addr), POISON_INUSE, slab_size(slab)); in setup_slab_debug()
1405 struct slab *slab, void *object) in alloc_consistency_checks() argument
1407 if (!check_slab(s, slab)) in alloc_consistency_checks()
1410 if (!check_valid_pointer(s, slab, object)) { in alloc_consistency_checks()
1411 object_err(s, slab, object, "Freelist Pointer check fails"); in alloc_consistency_checks()
1415 if (!check_object(s, slab, object, SLUB_RED_INACTIVE)) in alloc_consistency_checks()
1422 struct slab *slab, void *object, int orig_size) in alloc_debug_processing() argument
1425 if (!alloc_consistency_checks(s, slab, object)) in alloc_debug_processing()
1430 trace(s, slab, object, 1); in alloc_debug_processing()
1436 if (folio_test_slab(slab_folio(slab))) { in alloc_debug_processing()
1443 slab->inuse = slab->objects; in alloc_debug_processing()
1444 slab->freelist = NULL; in alloc_debug_processing()
1450 struct slab *slab, void *object, unsigned long addr) in free_consistency_checks() argument
1452 if (!check_valid_pointer(s, slab, object)) { in free_consistency_checks()
1453 slab_err(s, slab, "Invalid object pointer 0x%p", object); in free_consistency_checks()
1457 if (on_freelist(s, slab, object)) { in free_consistency_checks()
1458 object_err(s, slab, object, "Object already free"); in free_consistency_checks()
1462 if (!check_object(s, slab, object, SLUB_RED_ACTIVE)) in free_consistency_checks()
1465 if (unlikely(s != slab->slab_cache)) { in free_consistency_checks()
1466 if (!folio_test_slab(slab_folio(slab))) { in free_consistency_checks()
1467 slab_err(s, slab, "Attempt to free object(0x%p) outside of slab", in free_consistency_checks()
1469 } else if (!slab->slab_cache) { in free_consistency_checks()
1474 object_err(s, slab, object, in free_consistency_checks()
1697 void setup_slab_debug(struct kmem_cache *s, struct slab *slab, void *addr) {} in setup_slab_debug() argument
1700 struct slab *slab, void *object, int orig_size) { return true; } in alloc_debug_processing() argument
1703 struct slab *slab, void *head, void *tail, int *bulk_cnt, in free_debug_processing() argument
1706 static inline void slab_pad_check(struct kmem_cache *s, struct slab *slab) {} in slab_pad_check() argument
1707 static inline int check_object(struct kmem_cache *s, struct slab *slab, in check_object() argument
1713 struct slab *slab) {} in add_full() argument
1715 struct slab *slab) {} in remove_full() argument
1735 static bool freelist_corrupted(struct kmem_cache *s, struct slab *slab, in freelist_corrupted() argument
1843 static inline struct slab *alloc_slab_page(gfp_t flags, int node, in alloc_slab_page()
1847 struct slab *slab; in alloc_slab_page() local
1858 slab = folio_slab(folio); in alloc_slab_page()
1863 slab_set_pfmemalloc(slab); in alloc_slab_page()
1865 return slab; in alloc_slab_page()
1910 static void *next_freelist_entry(struct kmem_cache *s, struct slab *slab, in next_freelist_entry() argument
1932 static bool shuffle_freelist(struct kmem_cache *s, struct slab *slab) in shuffle_freelist() argument
1939 if (slab->objects < 2 || !s->random_seq) in shuffle_freelist()
1945 page_limit = slab->objects * s->size; in shuffle_freelist()
1946 start = fixup_red_left(s, slab_address(slab)); in shuffle_freelist()
1949 cur = next_freelist_entry(s, slab, &pos, start, page_limit, in shuffle_freelist()
1952 slab->freelist = cur; in shuffle_freelist()
1954 for (idx = 1; idx < slab->objects; idx++) { in shuffle_freelist()
1955 next = next_freelist_entry(s, slab, &pos, start, page_limit, in shuffle_freelist()
1971 static inline bool shuffle_freelist(struct kmem_cache *s, struct slab *slab) in shuffle_freelist() argument
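
The shuffle_freelist()/next_freelist_entry() hits belong to CONFIG_SLAB_FREELIST_RANDOM: a new slab's free objects are linked in randomized order rather than address order. The kernel draws positions from a precomputed per-cache random sequence (s->random_seq); the sketch below swaps that for a direct Fisher-Yates shuffle of the object indices, which is a simplification, not the kernel's scheme.

/*
 * Sketch of the idea behind shuffle_freelist(): chain a fresh slab's
 * objects in random order so the allocation order is unpredictable.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define OBJ_SIZE   32
#define NR_OBJECTS 8

static unsigned char slab_mem[NR_OBJECTS * OBJ_SIZE];

static void set_freepointer(void *object, void *next)
{
	memcpy(object, &next, sizeof(next));	/* free pointer at offset 0 */
}

static void *shuffle_freelist(void)
{
	unsigned int idx[NR_OBJECTS];

	for (unsigned int i = 0; i < NR_OBJECTS; i++)
		idx[i] = i;
	for (unsigned int i = NR_OBJECTS - 1; i > 0; i--) {	/* Fisher-Yates */
		unsigned int j = (unsigned int)rand() % (i + 1);
		unsigned int tmp = idx[i];

		idx[i] = idx[j];
		idx[j] = tmp;
	}

	/* Chain the objects in shuffled order; the last one terminates the list. */
	for (unsigned int i = 0; i + 1 < NR_OBJECTS; i++)
		set_freepointer(&slab_mem[idx[i] * OBJ_SIZE],
				&slab_mem[idx[i + 1] * OBJ_SIZE]);
	set_freepointer(&slab_mem[idx[NR_OBJECTS - 1] * OBJ_SIZE], NULL);

	return &slab_mem[idx[0] * OBJ_SIZE];	/* new head of the freelist */
}

int main(void)
{
	srand(1);
	void *head = shuffle_freelist();

	printf("freelist starts at object %ld\n",
	       (long)(((unsigned char *)head - slab_mem) / OBJ_SIZE));
	return 0;
}
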
1977 static struct slab *allocate_slab(struct kmem_cache *s, gfp_t flags, int node) in allocate_slab()
1979 struct slab *slab; in allocate_slab() local
1998 slab = alloc_slab_page(alloc_gfp, node, oo); in allocate_slab()
1999 if (unlikely(!slab)) { in allocate_slab()
2006 slab = alloc_slab_page(alloc_gfp, node, oo); in allocate_slab()
2007 if (unlikely(!slab)) in allocate_slab()
2012 slab->objects = oo_objects(oo); in allocate_slab()
2013 slab->inuse = 0; in allocate_slab()
2014 slab->frozen = 0; in allocate_slab()
2016 account_slab(slab, oo_order(oo), s, flags); in allocate_slab()
2018 slab->slab_cache = s; in allocate_slab()
2020 kasan_poison_slab(slab); in allocate_slab()
2022 start = slab_address(slab); in allocate_slab()
2024 setup_slab_debug(s, slab, start); in allocate_slab()
2026 shuffle = shuffle_freelist(s, slab); in allocate_slab()
2031 slab->freelist = start; in allocate_slab()
2032 for (idx = 0, p = start; idx < slab->objects - 1; idx++) { in allocate_slab()
2041 return slab; in allocate_slab()
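
The two alloc_slab_page() calls inside allocate_slab() show its order fallback: first try the cache's preferred, higher page order for better packing, and if that fails, retry with the minimum order the cache can still work with (the kernel also relaxes the GFP flags so the opportunistic attempt does not try too hard, which the sketch omits). A sketch of that fallback with malloc() standing in for the page allocator and hypothetical order values:

/*
 * Sketch of allocate_slab()'s order fallback: prefer a large backing
 * allocation, fall back to the minimum workable order on failure.
 */
#include <stdio.h>
#include <stdlib.h>

#define PAGE_SIZE 4096u

static void *alloc_pages_of_order(unsigned int order)
{
	/* Pretend high-order allocations fail under memory pressure. */
	if (order > 1)
		return NULL;
	return malloc((size_t)PAGE_SIZE << order);
}

static void *allocate_slab_backing(unsigned int preferred_order,
				   unsigned int min_order,
				   unsigned int *order_used)
{
	void *mem = alloc_pages_of_order(preferred_order);

	if (!mem) {
		/* Fall back to the smallest order the cache can use. */
		mem = alloc_pages_of_order(min_order);
		*order_used = min_order;
	} else {
		*order_used = preferred_order;
	}
	return mem;
}

int main(void)
{
	unsigned int order;
	void *mem = allocate_slab_backing(3, 0, &order);

	if (mem) {
		printf("got a slab of order %u\n", order);
		free(mem);
	}
	return 0;
}
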
2044 static struct slab *new_slab(struct kmem_cache *s, gfp_t flags, int node) in new_slab()
2055 static void __free_slab(struct kmem_cache *s, struct slab *slab) in __free_slab() argument
2057 struct folio *folio = slab_folio(slab); in __free_slab()
2061 __slab_clear_pfmemalloc(slab); in __free_slab()
2068 unaccount_slab(slab, order, s); in __free_slab()
2074 struct slab *slab = container_of(h, struct slab, rcu_head); in rcu_free_slab() local
2076 __free_slab(slab->slab_cache, slab); in rcu_free_slab()
2079 static void free_slab(struct kmem_cache *s, struct slab *slab) in free_slab() argument
2084 slab_pad_check(s, slab); in free_slab()
2085 for_each_object(p, s, slab_address(slab), slab->objects) in free_slab()
2086 check_object(s, slab, p, SLUB_RED_INACTIVE); in free_slab()
2090 call_rcu(&slab->rcu_head, rcu_free_slab); in free_slab()
2092 __free_slab(s, slab); in free_slab()
2095 static void discard_slab(struct kmem_cache *s, struct slab *slab) in discard_slab() argument
2097 dec_slabs_node(s, slab_nid(slab), slab->objects); in discard_slab()
2098 free_slab(s, slab); in discard_slab()
2105 __add_partial(struct kmem_cache_node *n, struct slab *slab, int tail) in __add_partial() argument
2109 list_add_tail(&slab->slab_list, &n->partial); in __add_partial()
2111 list_add(&slab->slab_list, &n->partial); in __add_partial()
2115 struct slab *slab, int tail) in add_partial() argument
2118 __add_partial(n, slab, tail); in add_partial()
2122 struct slab *slab) in remove_partial() argument
2125 list_del(&slab->slab_list); in remove_partial()
2136 struct kmem_cache_node *n, struct slab *slab, int orig_size) in alloc_single_from_partial() argument
2142 object = slab->freelist; in alloc_single_from_partial()
2143 slab->freelist = get_freepointer(s, object); in alloc_single_from_partial()
2144 slab->inuse++; in alloc_single_from_partial()
2146 if (!alloc_debug_processing(s, slab, object, orig_size)) { in alloc_single_from_partial()
2147 remove_partial(n, slab); in alloc_single_from_partial()
2151 if (slab->inuse == slab->objects) { in alloc_single_from_partial()
2152 remove_partial(n, slab); in alloc_single_from_partial()
2153 add_full(s, n, slab); in alloc_single_from_partial()
2165 struct slab *slab, int orig_size) in alloc_single_from_new_slab() argument
2167 int nid = slab_nid(slab); in alloc_single_from_new_slab()
2173 object = slab->freelist; in alloc_single_from_new_slab()
2174 slab->freelist = get_freepointer(s, object); in alloc_single_from_new_slab()
2175 slab->inuse = 1; in alloc_single_from_new_slab()
2177 if (!alloc_debug_processing(s, slab, object, orig_size)) in alloc_single_from_new_slab()
2187 if (slab->inuse == slab->objects) in alloc_single_from_new_slab()
2188 add_full(s, n, slab); in alloc_single_from_new_slab()
2190 add_partial(n, slab, DEACTIVATE_TO_HEAD); in alloc_single_from_new_slab()
2192 inc_slabs_node(s, nid, slab->objects); in alloc_single_from_new_slab()
2205 struct kmem_cache_node *n, struct slab *slab, in acquire_slab() argument
2210 struct slab new; in acquire_slab()
2219 freelist = slab->freelist; in acquire_slab()
2220 counters = slab->counters; in acquire_slab()
2223 new.inuse = slab->objects; in acquire_slab()
2232 if (!__cmpxchg_double_slab(s, slab, in acquire_slab()
2238 remove_partial(n, slab); in acquire_slab()
2244 static void put_cpu_partial(struct kmem_cache *s, struct slab *slab, int drain);
2246 static inline void put_cpu_partial(struct kmem_cache *s, struct slab *slab, in put_cpu_partial() argument
2249 static inline bool pfmemalloc_match(struct slab *slab, gfp_t gfpflags);
2257 struct slab *slab, *slab2; in get_partial_node() local
2272 list_for_each_entry_safe(slab, slab2, &n->partial, slab_list) { in get_partial_node()
2275 if (!pfmemalloc_match(slab, pc->flags)) in get_partial_node()
2279 object = alloc_single_from_partial(s, n, slab, in get_partial_node()
2286 t = acquire_slab(s, n, slab, object == NULL); in get_partial_node()
2291 *pc->slab = slab; in get_partial_node()
2295 put_cpu_partial(s, slab, 0); in get_partial_node()
2474 static void deactivate_slab(struct kmem_cache *s, struct slab *slab, in deactivate_slab() argument
2478 struct kmem_cache_node *n = get_node(s, slab_nid(slab)); in deactivate_slab()
2484 struct slab new; in deactivate_slab()
2485 struct slab old; in deactivate_slab()
2487 if (slab->freelist) { in deactivate_slab()
2506 if (freelist_corrupted(s, slab, &freelist_iter, nextfree)) in deactivate_slab()
2529 old.freelist = READ_ONCE(slab->freelist); in deactivate_slab()
2530 old.counters = READ_ONCE(slab->counters); in deactivate_slab()
2558 if (!cmpxchg_double_slab(s, slab, in deactivate_slab()
2569 add_partial(n, slab, tail); in deactivate_slab()
2574 discard_slab(s, slab); in deactivate_slab()
2582 static void __unfreeze_partials(struct kmem_cache *s, struct slab *partial_slab) in __unfreeze_partials()
2585 struct slab *slab, *slab_to_discard = NULL; in __unfreeze_partials() local
2589 struct slab new; in __unfreeze_partials()
2590 struct slab old; in __unfreeze_partials()
2592 slab = partial_slab; in __unfreeze_partials()
2593 partial_slab = slab->next; in __unfreeze_partials()
2595 n2 = get_node(s, slab_nid(slab)); in __unfreeze_partials()
2606 old.freelist = slab->freelist; in __unfreeze_partials()
2607 old.counters = slab->counters; in __unfreeze_partials()
2615 } while (!__cmpxchg_double_slab(s, slab, in __unfreeze_partials()
2621 slab->next = slab_to_discard; in __unfreeze_partials()
2622 slab_to_discard = slab; in __unfreeze_partials()
2624 add_partial(n, slab, DEACTIVATE_TO_TAIL); in __unfreeze_partials()
2633 slab = slab_to_discard; in __unfreeze_partials()
2637 discard_slab(s, slab); in __unfreeze_partials()
2647 struct slab *partial_slab; in unfreeze_partials()
2662 struct slab *partial_slab; in unfreeze_partials_cpu()
2678 static void put_cpu_partial(struct kmem_cache *s, struct slab *slab, int drain) in put_cpu_partial() argument
2680 struct slab *oldslab; in put_cpu_partial()
2681 struct slab *slab_to_unfreeze = NULL; in put_cpu_partial()
2705 slab->slabs = slabs; in put_cpu_partial()
2706 slab->next = oldslab; in put_cpu_partial()
2708 this_cpu_write(s->cpu_slab->partial, slab); in put_cpu_partial()
2729 struct slab *slab; in flush_slab() local
2734 slab = c->slab; in flush_slab()
2737 c->slab = NULL; in flush_slab()
2743 if (slab) { in flush_slab()
2744 deactivate_slab(s, slab, freelist); in flush_slab()
2753 struct slab *slab = c->slab; in __flush_cpu_slab() local
2755 c->slab = NULL; in __flush_cpu_slab()
2759 if (slab) { in __flush_cpu_slab()
2760 deactivate_slab(s, slab, freelist); in __flush_cpu_slab()
2789 if (c->slab) in flush_cpu_slab()
2799 return c->slab || slub_percpu_partial(c); in has_cpu_slab()
2868 static inline int node_match(struct slab *slab, int node) in node_match() argument
2871 if (node != NUMA_NO_NODE && slab_nid(slab) != node) in node_match()
2878 static int count_free(struct slab *slab) in count_free() argument
2880 return slab->objects - slab->inuse; in count_free()
2890 struct slab *slab, void *head, void *tail, int *bulk_cnt, in free_debug_processing() argument
2898 if (!check_slab(s, slab)) in free_debug_processing()
2902 if (slab->inuse < *bulk_cnt) { in free_debug_processing()
2903 slab_err(s, slab, "Slab has %d allocated objects but %d are to be freed\n", in free_debug_processing()
2904 slab->inuse, *bulk_cnt); in free_debug_processing()
2914 if (!free_consistency_checks(s, slab, object, addr)) in free_debug_processing()
2920 trace(s, slab, object, 0); in free_debug_processing()
2933 slab_err(s, slab, "Bulk free expected %d objects but found %d\n", in free_debug_processing()
2949 int (*get_count)(struct slab *)) in count_partial() argument
2953 struct slab *slab; in count_partial() local
2956 list_for_each_entry(slab, &n->partial, slab_list) in count_partial()
2957 x += get_count(slab); in count_partial()
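
count_partial() sums a per-slab counting callback, such as count_free() here or count_inuse()/count_total() further down, over a node's partial list; in the kernel the walk uses list_for_each_entry() under the node's list_lock. The sketch below keeps only the callback-driven summation, over a plain singly linked list with illustrative types:

/*
 * Sketch of the count_partial() pattern: walk a partial list and sum a
 * per-slab callback.
 */
#include <stdio.h>

struct fake_slab {
	struct fake_slab *next;
	unsigned int objects;
	unsigned int inuse;
};

static unsigned long count_free(const struct fake_slab *slab)
{
	return slab->objects - slab->inuse;
}

static unsigned long count_inuse(const struct fake_slab *slab)
{
	return slab->inuse;
}

static unsigned long count_partial(const struct fake_slab *partial,
				   unsigned long (*get_count)(const struct fake_slab *))
{
	unsigned long x = 0;

	for (const struct fake_slab *slab = partial; slab; slab = slab->next)
		x += get_count(slab);
	return x;
}

int main(void)
{
	struct fake_slab b = { NULL, 16, 10 };
	struct fake_slab a = { &b,   16,  4 };

	printf("free objects on partial list:   %lu\n", count_partial(&a, count_free));
	printf("in-use objects on partial list: %lu\n", count_partial(&a, count_inuse));
	return 0;
}
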
3003 static inline bool pfmemalloc_match(struct slab *slab, gfp_t gfpflags) in pfmemalloc_match() argument
3005 if (unlikely(slab_test_pfmemalloc(slab))) in pfmemalloc_match()
3020 static inline void *get_freelist(struct kmem_cache *s, struct slab *slab) in get_freelist() argument
3022 struct slab new; in get_freelist()
3029 freelist = slab->freelist; in get_freelist()
3030 counters = slab->counters; in get_freelist()
3035 new.inuse = slab->objects; in get_freelist()
3038 } while (!__cmpxchg_double_slab(s, slab, in get_freelist()
3069 struct slab *slab; in ___slab_alloc() local
3077 slab = READ_ONCE(c->slab); in ___slab_alloc()
3078 if (!slab) { in ___slab_alloc()
3090 if (unlikely(!node_match(slab, node))) { in ___slab_alloc()
3108 if (unlikely(!pfmemalloc_match(slab, gfpflags))) in ___slab_alloc()
3113 if (unlikely(slab != c->slab)) { in ___slab_alloc()
3121 freelist = get_freelist(s, slab); in ___slab_alloc()
3124 c->slab = NULL; in ___slab_alloc()
3142 VM_BUG_ON(!c->slab->frozen); in ___slab_alloc()
3151 if (slab != c->slab) { in ___slab_alloc()
3156 c->slab = NULL; in ___slab_alloc()
3160 deactivate_slab(s, slab, freelist); in ___slab_alloc()
3166 if (unlikely(c->slab)) { in ___slab_alloc()
3176 slab = c->slab = slub_percpu_partial(c); in ___slab_alloc()
3177 slub_set_percpu_partial(c, slab); in ___slab_alloc()
3186 pc.slab = &slab; in ___slab_alloc()
3193 slab = new_slab(s, gfpflags, node); in ___slab_alloc()
3196 if (unlikely(!slab)) { in ___slab_alloc()
3204 freelist = alloc_single_from_new_slab(s, slab, orig_size); in ___slab_alloc()
3219 freelist = slab->freelist; in ___slab_alloc()
3220 slab->freelist = NULL; in ___slab_alloc()
3221 slab->inuse = slab->objects; in ___slab_alloc()
3222 slab->frozen = 1; in ___slab_alloc()
3224 inc_slabs_node(s, slab_nid(slab), slab->objects); in ___slab_alloc()
3240 if (unlikely(!pfmemalloc_match(slab, gfpflags))) { in ___slab_alloc()
3245 deactivate_slab(s, slab, get_freepointer(s, freelist)); in ___slab_alloc()
3252 if (unlikely(c->slab)) { in ___slab_alloc()
3254 struct slab *flush_slab = c->slab; in ___slab_alloc()
3256 c->slab = NULL; in ___slab_alloc()
3268 c->slab = slab; in ___slab_alloc()
3303 struct slab *slab; in __slab_alloc_node() local
3341 slab = c->slab; in __slab_alloc_node()
3344 unlikely(!object || !slab || !node_match(slab, node))) { in __slab_alloc_node()
3382 struct slab *slab; in __slab_alloc_node() local
3386 pc.slab = &slab; in __slab_alloc_node()
3393 slab = new_slab(s, gfpflags, node); in __slab_alloc_node()
3394 if (unlikely(!slab)) { in __slab_alloc_node()
3399 object = alloc_single_from_new_slab(s, slab, orig_size); in __slab_alloc_node()
3506 struct kmem_cache *s, struct slab *slab, in free_to_partial_list() argument
3510 struct kmem_cache_node *n = get_node(s, slab_nid(slab)); in free_to_partial_list()
3511 struct slab *slab_free = NULL; in free_to_partial_list()
3521 if (free_debug_processing(s, slab, head, tail, &cnt, addr, handle)) { in free_to_partial_list()
3522 void *prior = slab->freelist; in free_to_partial_list()
3525 slab->inuse -= cnt; in free_to_partial_list()
3527 slab->freelist = head; in free_to_partial_list()
3534 if (slab->inuse == 0 && n->nr_partial >= s->min_partial) in free_to_partial_list()
3535 slab_free = slab; in free_to_partial_list()
3539 remove_full(s, n, slab); in free_to_partial_list()
3541 add_partial(n, slab, DEACTIVATE_TO_TAIL); in free_to_partial_list()
3545 remove_partial(n, slab); in free_to_partial_list()
3574 static void __slab_free(struct kmem_cache *s, struct slab *slab, in __slab_free() argument
3581 struct slab new; in __slab_free()
3592 free_to_partial_list(s, slab, head, tail, cnt, addr); in __slab_free()
3601 prior = slab->freelist; in __slab_free()
3602 counters = slab->counters; in __slab_free()
3621 n = get_node(s, slab_nid(slab)); in __slab_free()
3635 } while (!cmpxchg_double_slab(s, slab, in __slab_free()
3653 put_cpu_partial(s, slab, 1); in __slab_free()
3668 remove_full(s, n, slab); in __slab_free()
3669 add_partial(n, slab, DEACTIVATE_TO_TAIL); in __slab_free()
3680 remove_partial(n, slab); in __slab_free()
3684 remove_full(s, n, slab); in __slab_free()
3689 discard_slab(s, slab); in __slab_free()
3709 struct slab *slab, void *head, void *tail, in do_slab_free() argument
3730 if (unlikely(slab != c->slab)) { in do_slab_free()
3731 __slab_free(s, slab, head, tail_obj, cnt, addr); in do_slab_free()
3752 if (unlikely(slab != c->slab)) { in do_slab_free()
3769 struct slab *slab, void *head, void *tail, in do_slab_free() argument
3774 __slab_free(s, slab, head, tail_obj, cnt, addr); in do_slab_free()
3778 static __fastpath_inline void slab_free(struct kmem_cache *s, struct slab *slab, in slab_free() argument
3782 memcg_slab_free_hook(s, slab, p, cnt); in slab_free()
3788 do_slab_free(s, slab, head, tail, cnt, addr); in slab_free()
3814 struct slab *slab; member
3848 df->slab = NULL; in build_detached_freelist()
3852 df->slab = folio_slab(folio); in build_detached_freelist()
3853 df->s = df->slab->slab_cache; in build_detached_freelist()
3855 df->slab = folio_slab(folio); in build_detached_freelist()
3873 if (df->slab == virt_to_slab(object)) { in build_detached_freelist()
3902 if (!df.slab) in kmem_cache_free_bulk()
3905 slab_free(df.s, df.slab, df.freelist, df.tail, &p[size], df.cnt, in kmem_cache_free_bulk()
4234 struct slab *slab; in early_kmem_cache_node_alloc() local
4239 slab = new_slab(kmem_cache_node, GFP_NOWAIT, node); in early_kmem_cache_node_alloc()
4241 BUG_ON(!slab); in early_kmem_cache_node_alloc()
4242 inc_slabs_node(kmem_cache_node, slab_nid(slab), slab->objects); in early_kmem_cache_node_alloc()
4243 if (slab_nid(slab) != node) { in early_kmem_cache_node_alloc()
4248 n = slab->freelist; in early_kmem_cache_node_alloc()
4255 slab->freelist = get_freepointer(kmem_cache_node, n); in early_kmem_cache_node_alloc()
4256 slab->inuse = 1; in early_kmem_cache_node_alloc()
4259 inc_slabs_node(kmem_cache_node, node, slab->objects); in early_kmem_cache_node_alloc()
4265 __add_partial(n, slab, DEACTIVATE_TO_HEAD); in early_kmem_cache_node_alloc()
4546 static void list_slab_objects(struct kmem_cache *s, struct slab *slab, in list_slab_objects() argument
4550 void *addr = slab_address(slab); in list_slab_objects()
4553 slab_err(s, slab, text, s->name); in list_slab_objects()
4556 __fill_map(object_map, s, slab); in list_slab_objects()
4558 for_each_object(p, s, addr, slab->objects) { in list_slab_objects()
4577 struct slab *slab, *h; in free_partial() local
4581 list_for_each_entry_safe(slab, h, &n->partial, slab_list) { in free_partial()
4582 if (!slab->inuse) { in free_partial()
4583 remove_partial(n, slab); in free_partial()
4584 list_add(&slab->slab_list, &discard); in free_partial()
4586 list_slab_objects(s, slab, in free_partial()
4592 list_for_each_entry_safe(slab, h, &discard, slab_list) in free_partial()
4593 discard_slab(s, slab); in free_partial()
4626 void __kmem_obj_info(struct kmem_obj_info *kpp, void *object, struct slab *slab) in __kmem_obj_info() argument
4633 struct kmem_cache *s = slab->slab_cache; in __kmem_obj_info()
4637 kpp->kp_slab = slab; in __kmem_obj_info()
4639 base = slab_address(slab); in __kmem_obj_info()
4646 objnr = obj_to_index(s, slab, objp); in __kmem_obj_info()
4650 if (WARN_ON_ONCE(objp < base || objp >= base + slab->objects * s->size in __kmem_obj_info()
4726 const struct slab *slab, bool to_user) in __check_heap_object() argument
4735 s = slab->slab_cache; in __check_heap_object()
4738 if (ptr < slab_address(slab)) in __check_heap_object()
4746 offset = (ptr - slab_address(slab)) % s->size; in __check_heap_object()
4782 struct slab *slab; in __kmem_cache_do_shrink() local
4783 struct slab *t; in __kmem_cache_do_shrink()
4802 list_for_each_entry_safe(slab, t, &n->partial, slab_list) { in __kmem_cache_do_shrink()
4803 int free = slab->objects - slab->inuse; in __kmem_cache_do_shrink()
4811 if (free == slab->objects) { in __kmem_cache_do_shrink()
4812 list_move(&slab->slab_list, &discard); in __kmem_cache_do_shrink()
4814 dec_slabs_node(s, node, slab->objects); in __kmem_cache_do_shrink()
4816 list_move(&slab->slab_list, promote + free - 1); in __kmem_cache_do_shrink()
4829 list_for_each_entry_safe(slab, t, &discard, slab_list) in __kmem_cache_do_shrink()
4830 free_slab(s, slab); in __kmem_cache_do_shrink()
4986 struct slab *p; in bootstrap()
5112 static int count_inuse(struct slab *slab) in count_inuse() argument
5114 return slab->inuse; in count_inuse()
5117 static int count_total(struct slab *slab) in count_total() argument
5119 return slab->objects; in count_total()
5124 static void validate_slab(struct kmem_cache *s, struct slab *slab, in validate_slab() argument
5128 void *addr = slab_address(slab); in validate_slab()
5130 if (!check_slab(s, slab) || !on_freelist(s, slab, NULL)) in validate_slab()
5134 __fill_map(obj_map, s, slab); in validate_slab()
5135 for_each_object(p, s, addr, slab->objects) { in validate_slab()
5139 if (!check_object(s, slab, p, val)) in validate_slab()
5148 struct slab *slab; in validate_slab_node() local
5153 list_for_each_entry(slab, &n->partial, slab_list) { in validate_slab_node()
5154 validate_slab(s, slab, obj_map); in validate_slab_node()
5166 list_for_each_entry(slab, &n->full, slab_list) { in validate_slab_node()
5167 validate_slab(s, slab, obj_map); in validate_slab_node()
5351 struct slab *slab, enum track_item alloc, in process_slab() argument
5354 void *addr = slab_address(slab); in process_slab()
5358 __fill_map(obj_map, s, slab); in process_slab()
5360 for_each_object(p, s, addr, slab->objects) in process_slab()
5404 struct slab *slab; in show_slab_objects() local
5406 slab = READ_ONCE(c->slab); in show_slab_objects()
5407 if (!slab) in show_slab_objects()
5410 node = slab_nid(slab); in show_slab_objects()
5412 x = slab->objects; in show_slab_objects()
5414 x = slab->inuse; in show_slab_objects()
5422 slab = slub_percpu_partial_read_once(c); in show_slab_objects()
5423 if (slab) { in show_slab_objects()
5424 node = slab_nid(slab); in show_slab_objects()
5430 x = slab->slabs; in show_slab_objects()
5637 struct slab *slab; in slabs_cpu_partial_show() local
5639 slab = slub_percpu_partial(per_cpu_ptr(s->cpu_slab, cpu)); in slabs_cpu_partial_show()
5641 if (slab) in slabs_cpu_partial_show()
5642 slabs += slab->slabs; in slabs_cpu_partial_show()
5652 struct slab *slab; in slabs_cpu_partial_show() local
5654 slab = slub_percpu_partial(per_cpu_ptr(s->cpu_slab, cpu)); in slabs_cpu_partial_show()
5655 if (slab) { in slabs_cpu_partial_show()
5656 slabs = READ_ONCE(slab->slabs); in slabs_cpu_partial_show()
6398 struct slab *slab; in slab_debug_trace_open() local
6404 list_for_each_entry(slab, &n->partial, slab_list) in slab_debug_trace_open()
6405 process_slab(t, s, slab, alloc, obj_map); in slab_debug_trace_open()
6406 list_for_each_entry(slab, &n->full, slab_list) in slab_debug_trace_open()
6407 process_slab(t, s, slab, alloc, obj_map); in slab_debug_trace_open()