/linux-6.3-rc2/mm/
slab.h
    9:   struct slab {
    153: static inline void slab_set_pfmemalloc(struct slab *slab)
    158: static inline void slab_clear_pfmemalloc(struct slab *slab)
    168: static inline void *slab_address(const struct slab *slab)
    173: static inline int slab_nid(const struct slab *slab)
    193: static inline int slab_order(const struct slab *slab)
    198: static inline size_t slab_size(const struct slab *slab)
    534: struct slab *slab;  (in memcg_slab_post_alloc_hook())
    640: struct slab *slab;  (in virt_to_cache())
    897: const struct slab *slab, bool to_user);
    [all …]
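The slab.h helpers above hide the folio that backs each slab. As an illustration of how they compose, here is a minimal sketch of how slab_size() can be derived from slab_order(), assuming the mm/slab.h definitions where slab_order() returns the compound order of the backing folio:

    /* Sketch: slab_size() in terms of slab_order(), per mm/slab.h.
     * A slab occupies 2^order physically contiguous pages. */
    static inline size_t slab_size_sketch(const struct slab *slab)
    {
            return PAGE_SIZE << slab_order(slab);
    }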
slub.c
    207:  struct slab **slab;  (member)
    830:  slab, slab->objects, slab->inuse, slab->freelist,  (in print_slab_info())
    1847: struct slab *slab;  (in alloc_slab_page())
    1979: struct slab *slab;  (in allocate_slab())
    2729: struct slab *slab;  (in flush_slab())
    2734: slab = c->slab;  (in flush_slab())
    2753: struct slab *slab = c->slab;  (in __flush_cpu_slab())
    3186: pc.slab = &slab;  (in ___slab_alloc())
    3268: c->slab = slab;  (in ___slab_alloc())
    3341: slab = c->slab;  (in __slab_alloc_node())
    [all …]
slab.c
    1356: struct slab *slab;  (in kmem_getpages())
    1404: struct slab *slab;  (in kmem_rcu_free())
    2184: struct slab *slab;  (in drain_freelist())
    2550: struct slab *slab;  (in cache_grow_begin())
    2689: struct slab *slab;  (in cache_free_debugcheck())
    2806: struct slab *slab;  (in get_first_slab())
    2828: struct slab *slab;  (in cache_alloc_pfmemalloc())
    2884: struct slab *slab;  (in cache_alloc_refill())
    3077: struct slab *slab;  (in fallback_alloc())
    3140: struct slab *slab;  (in ____cache_alloc_node())
    [all …]
slob.c
    108: static inline int slob_page_free(struct slab *slab)
    110: return PageSlobFree(slab_page(slab));  (in slob_page_free())
    113: static void set_slob_page_free(struct slab *slab, struct list_head *list)
    115: list_add(&slab->slab_list, list);  (in set_slob_page_free())
    116: __SetPageSlobFree(slab_page(slab));  (in set_slob_page_free())
    119: static inline void clear_slob_page_free(struct slab *slab)
    121: list_del(&slab->slab_list);  (in clear_slob_page_free())
    305: struct slab *sp;  (in slob_alloc())
    386: struct slab *sp;  (in slob_free())
    466: void __kmem_obj_info(struct kmem_obj_info *kpp, void *object, struct slab *slab)
    [all …]
slab_common.c
    551:  static void kmem_obj_info(struct kmem_obj_info *kpp, void *object, struct slab *slab)
    553:  if (__kfence_obj_info(kpp, object, slab))  (in kmem_obj_info())
    555:  __kmem_obj_info(kpp, object, slab);  (in kmem_obj_info())
    577:  struct slab *slab;  (in kmem_dump_obj())
    583:  slab = virt_to_slab(object);  (in kmem_dump_obj())
    584:  if (WARN_ON_ONCE(!slab)) {  (in kmem_dump_obj())
    588:  kmem_obj_info(&kp, object, slab);  (in kmem_dump_obj())
    1003: struct slab *slab;  (in kfree())
    1017: slab = folio_slab(folio);  (in kfree())
    1018: s = slab->slab_cache;  (in kfree())
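The kfree() hits above show the pointer-to-cache lookup in slab_common.c. A simplified sketch of that resolution path (hedged; the real kfree() also handles NULL pointers, large kmalloc folios, and KASAN hooks):

    /* Resolve an allocated object back to its kmem_cache, as kfree()
     * does: pointer -> folio -> slab -> slab_cache. Returns NULL for
     * non-slab (large kmalloc) folios. Sketch only. */
    static struct kmem_cache *cache_of_object(const void *object)
    {
            struct folio *folio = virt_to_folio(object);

            if (!folio_test_slab(folio))
                    return NULL;    /* large kmalloc allocation */
            return folio_slab(folio)->slab_cache;
    }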
/linux-6.3-rc2/include/linux/
slub_def.h
    52:  struct slab *slab;  /* The slab from which we are allocating */  (member)
    54:  struct slab *partial;  /* Partially allocated frozen slabs */
    166: static inline void *nearest_obj(struct kmem_cache *cache, const struct slab *slab,
    168: void *object = x - (x - slab_address(slab)) % cache->size;  (in nearest_obj())
    169: void *last_object = slab_address(slab) +  (in nearest_obj())
    170: (slab->objects - 1) * cache->size;  (in nearest_obj())
    186: const struct slab *slab, void *obj)  (in obj_to_index())
    190: return __obj_to_index(cache, slab_address(slab), obj);  (in obj_to_index())
    194: const struct slab *slab)  (in objs_per_slab())
    196: return slab->objects;  (in objs_per_slab())
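The nearest_obj() hits above round an interior pointer down to the start of its object with modulo arithmetic. A standalone userspace illustration of the same arithmetic, with hypothetical values for the slab base, object size, and object count:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uintptr_t base = 0x1000;            /* plays slab_address(slab) */
            size_t size = 192;                  /* plays cache->size */
            unsigned int objects = 21;          /* plays slab->objects */
            uintptr_t x = base + 2 * size + 40; /* points inside object 2 */

            /* round down to the containing object, clamp to the last one */
            uintptr_t obj = x - (x - base) % size;
            uintptr_t last = base + (uintptr_t)(objects - 1) * size;
            if (obj > last)
                    obj = last;

            printf("object start %#lx, index %lu\n",
                   (unsigned long)obj, (unsigned long)((obj - base) / size));
            return 0;
    }

This prints "object start 0x1180, index 2": the stray pointer 40 bytes into the third object is snapped back to that object's start.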
slab_def.h
    91:  static inline void *nearest_obj(struct kmem_cache *cache, const struct slab *slab,
    94:  void *object = x - (x - slab->s_mem) % cache->size;  (in nearest_obj())
    95:  void *last_object = slab->s_mem + (cache->num - 1) * cache->size;  (in nearest_obj())
    110: const struct slab *slab, void *obj)  (in obj_to_index())
    112: u32 offset = (obj - slab->s_mem);  (in obj_to_index())
    117: const struct slab *slab)  (in objs_per_slab())
    119: if (is_kfence_address(slab_address(slab)))  (in objs_per_slab())
kfence.h
    220: bool __kfence_obj_info(struct kmem_obj_info *kpp, void *object, struct slab *slab);
    242: static inline bool __kfence_obj_info(struct kmem_obj_info *kpp, void *object, struct slab *slab)
kasan.h
    13:  struct slab;
    123: void __kasan_poison_slab(struct slab *slab);
    124: static __always_inline void kasan_poison_slab(struct slab *slab)
    127: __kasan_poison_slab(slab);  (in kasan_poison_slab())
    242: static inline void kasan_poison_slab(struct slab *slab) {}
/linux-6.3-rc2/tools/mm/
slabinfo.c
    54:   struct slabinfo *slab;  (member)
    1143: a->slab = s;  (in link_slabs())
    1248: slab->alias = 0;  (in read_slab_dir())
    1249: slab->refs = 0;  (in read_slab_dir())
    1303: slab++;  (in read_slab_dir())
    1324: for (slab = slabinfo; (slab < slabinfo + slabs) &&  (in output_slabs())
    1327: if (slab->alias)  (in output_slabs())
    1342: slab_debug(slab);  (in output_slabs())
    1344: ops(slab);  (in output_slabs())
    1346: slabcache(slab);  (in output_slabs())
    [all …]
/linux-6.3-rc2/Documentation/ABI/testing/
sysfs-kernel-slab
    1:   What: /sys/kernel/slab
    13:  What: /sys/kernel/slab/<cache>/aliases
    22:  What: /sys/kernel/slab/<cache>/align
    31:  What: /sys/kernel/slab/<cache>/alloc_calls
    131: What: /sys/kernel/slab/<cache>/ctor
    139: new slab is allocated.
    242: a frozen slab (i.e. a remote cpu slab). It can be written to
    338: What: /sys/kernel/slab/<cache>/order
    346: number of objects per slab. If a slab cannot be allocated
    384: when a new slab is allocated.
    [all …]
/linux-6.3-rc2/mm/kasan/
common.c
    33:  struct slab *kasan_addr_to_slab(const void *addr)
    125: void __kasan_poison_slab(struct slab *slab)
    127: struct page *page = slab_page(slab);  (in __kasan_poison_slab())
    292: struct slab *slab = folio_slab(folio);  (in __kasan_slab_free_mempool())
    294: ____kasan_slab_free(slab->slab_cache, ptr, ip, false, false);  (in __kasan_slab_free_mempool())
    424: struct slab *slab;  (in __kasan_krealloc())
    436: slab = virt_to_slab(object);  (in __kasan_krealloc())
    439: if (unlikely(!slab))  (in __kasan_krealloc())
    442: return ____kasan_kmalloc(slab->slab_cache, object, size, flags);  (in __kasan_krealloc())
report.c
    440: struct slab *slab;  (in complete_report_info())
    448: slab = kasan_addr_to_slab(addr);  (in complete_report_info())
    449: if (slab) {  (in complete_report_info())
    450: info->cache = slab->slab_cache;  (in complete_report_info())
    451: info->object = nearest_obj(info->cache, slab, addr);  (in complete_report_info())
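The complete_report_info() hits show how a faulting address is attributed to a slab object. A condensed sketch of just that step (field names follow the snippet above; the real function in mm/kasan/report.c handles many more cases):

    /* Attribute a bad access address to its cache and object:
     * address -> slab -> slab_cache -> nearest object start. */
    static void attribute_addr(struct kasan_report_info *info, void *addr)
    {
            struct slab *slab = kasan_addr_to_slab(addr);

            if (slab) {
                    info->cache = slab->slab_cache;
                    info->object = nearest_obj(info->cache, slab, addr);
            }
    }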
generic.c
    476: struct slab *slab = kasan_addr_to_slab(addr);  (in __kasan_record_aux_stack())
    481: if (is_kfence_address(addr) || !slab)  (in __kasan_record_aux_stack())
    484: cache = slab->slab_cache;  (in __kasan_record_aux_stack())
    485: object = nearest_obj(cache, slab, addr);  (in __kasan_record_aux_stack())
/linux-6.3-rc2/tools/cgroup/
memcg_slabinfo.py
    73:  for slab in list_for_each_entry('struct slab', n.partial.address_of_(),
    75:  nr_objs += fn(slab)
    79:  def count_free(slab):
    80:  return slab.objects - slab.inuse
    195: for slab in for_each_slab(prog):
    196: objcg_vec_raw = slab.memcg_data.value_()
    199: cache = slab.slab_cache
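count_free() in the drgn script above computes the free objects per slab. The same arithmetic expressed in kernel C terms, as a sketch using the struct slab field names shown:

    /* Free objects in a SLUB slab: capacity minus objects in use. */
    static inline unsigned int slab_free_objects(const struct slab *slab)
    {
            return slab->objects - slab->inuse;
    }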
/linux-6.3-rc2/lib/
sg_pool.c
    13:  struct kmem_cache *slab;  (member)
    150: sgp->slab = kmem_cache_create(sgp->name, size, 0,  (in sg_pool_init())
    152: if (!sgp->slab) {  (in sg_pool_init())
    159: sgp->slab);  (in sg_pool_init())
    174: kmem_cache_destroy(sgp->slab);  (in sg_pool_init())
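sg_pool_init() above follows the standard lifecycle for a private cache: kmem_cache_create() at init, kmem_cache_destroy() on the error or teardown path. A minimal hedged sketch of that pattern; the cache name, object size, and flags here are illustrative, not taken from sg_pool.c:

    #include <linux/init.h>
    #include <linux/slab.h>

    static struct kmem_cache *example_cache;

    static int __init example_init(void)
    {
            /* name, object size, alignment, flags, constructor */
            example_cache = kmem_cache_create("example_cache", 128, 0,
                                              SLAB_HWCACHE_ALIGN, NULL);
            if (!example_cache)
                    return -ENOMEM;
            return 0;
    }

    static void example_teardown(void)
    {
            kmem_cache_destroy(example_cache);
    }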
/linux-6.3-rc2/Documentation/mm/
slub.rst
    38:  slub_debug=<Debug-Options>,<slab name1>,<slab name2>,...
    57:  caused higher minimum slab orders
    105: /sys/kernel/slab/<slab name>/
    120: used on the wrong slab.
    159: slab in order for the allocation order to be acceptable. In
    161: allocations on a slab without consulting centralized resources
    172: of a slab cache with large object sizes into one high order
    230: BUG <slab cache affected>: <What went wrong>
    234: INFO: Slab <address> <slab information>
    242: set for the slab. slub_debug sets that option)
    [all …]
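Line 38 of slub.rst gives the boot-parameter syntax. As a concrete illustration (hedged: option letters per slub.rst, where F enables sanity checks and Z enables red zoning; dentry is just an example cache name):

    slub_debug=FZ,dentry

This enables consistency checks and red zoning for the dentry cache only, leaving all other caches at full speed.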
/linux-6.3-rc2/Documentation/translations/zh_CN/dev-tools/
kasan.rst
    64:  Generic KASAN supports detection in all of slab, page_alloc, vmap, vmalloc, stack and global memory
    67:  Software tag-based KASAN supports slab, page_alloc, vmalloc and stack memory.
    69:  Hardware tag-based KASAN supports slab, page_alloc and non-executable vmalloc memory.
    71:  For slab, both software KASAN modes support the SLUB and SLAB allocators, while hardware tag-based
    89:  To include alloc and free stack traces of affected slab objects in reports, enable
    140: BUG: KASAN: slab-out-of-bounds in kmalloc_oob_right+0xa8/0xbc [test_kasan]
    196: flags: 0x200000000000100(slab)
    211: stack trace, a stack trace of where the accessed memory was allocated (for accesses to slab objects), and the object's
    213: a description of the slab object, and information about the accessed memory page.
    231: Note that KASAN bug titles (such as ``slab-out-of-bounds`` or ``use-after-free``)
    [all …]
/linux-6.3-rc2/mm/kfence/
core.c
    360: struct slab *slab;  (in kfence_guarded_alloc())
    428: slab = virt_to_slab((void *)meta->addr);  (in kfence_guarded_alloc())
    429: slab->slab_cache = cache;  (in kfence_guarded_alloc())
    431: slab->objects = 1;  (in kfence_guarded_alloc())
    433: slab->s_mem = addr;  (in kfence_guarded_alloc())
    559: struct slab *slab = page_slab(&pages[i]);  (in kfence_init_pool())
    568: __folio_set_slab(slab_folio(slab));  (in kfence_init_pool())
    636: struct slab *slab = virt_to_slab(p);  (in kfence_init_pool_early())
    638: if (!slab)  (in kfence_init_pool_early())
    641: slab->memcg_data = 0;  (in kfence_init_pool_early())
    [all …]
/linux-6.3-rc2/net/dccp/
ccid.c
    81: struct kmem_cache *slab;  (in ccid_kmem_cache_create())
    88: slab = kmem_cache_create(slab_name_fmt, sizeof(struct ccid) + obj_size, 0,  (in ccid_kmem_cache_create())
    90: return slab;  (in ccid_kmem_cache_create())
    93: static void ccid_kmem_cache_destroy(struct kmem_cache *slab)
    95: kmem_cache_destroy(slab);  (in ccid_kmem_cache_destroy())
/linux-6.3-rc2/tools/perf/Documentation/
perf-kmem.txt
    47: Sort the output (default: 'frag,hit,bytes' for slab and 'bytes,hit'
    49: pingpong, frag' for slab and 'page, callsite, bytes, hit, order,
    51: mode selection options - i.e. --slab, --page, --alloc and/or --caller.
    60: --slab::
/linux-6.3-rc2/include/net/
request_sock.h
    30:  struct kmem_cache *slab;  (member)
    92:  req = kmem_cache_alloc(ops->slab, GFP_ATOMIC | __GFP_NOWARN);  (in reqsk_alloc())
    98:  kmem_cache_free(ops->slab, req);  (in reqsk_alloc())
    123: kmem_cache_free(req->rsk_ops->slab, req);  (in __reqsk_free())
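reqsk_alloc() and __reqsk_free() above show a common pattern: the ops structure carries its own kmem_cache, and each object remembers its ops so the free path can find the cache again. A trimmed sketch, simplified from the snippets with error handling and refcounting omitted:

    /* Allocate from the per-protocol cache carried in the ops struct,
     * and free back to it via the pointer saved in the object. */
    static struct request_sock *reqsk_alloc_sketch(const struct request_sock_ops *ops)
    {
            struct request_sock *req;

            req = kmem_cache_alloc(ops->slab, GFP_ATOMIC | __GFP_NOWARN);
            if (req)
                    req->rsk_ops = ops;  /* lets the free path find the cache */
            return req;
    }

    static void reqsk_free_sketch(struct request_sock *req)
    {
            kmem_cache_free(req->rsk_ops->slab, req);
    }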
/linux-6.3-rc2/tools/testing/selftests/cgroup/
test_kmem.c
    165: long current, slab, anon, file, kernel_stack, pagetables, percpu, sock, sum;  (in test_kmem_memcg_deletion())
    183: slab = cg_read_key_long(parent, "memory.stat", "slab ");  (in test_kmem_memcg_deletion())
    190: if (current < 0 || slab < 0 || anon < 0 || file < 0 ||  (in test_kmem_memcg_deletion())
    194: sum = slab + anon + file + kernel_stack + pagetables + percpu + sock;  (in test_kmem_memcg_deletion())
    200: printf("slab = %ld\n", slab);  (in test_kmem_memcg_deletion())
/linux-6.3-rc2/tools/testing/scatterlist/
Makefile
    17: … $(OFILES) scatterlist.c linux/scatterlist.h linux/highmem.h linux/kmemleak.h linux/slab.h asm/io.h
    31: @touch linux/slab.h
/linux-6.3-rc2/Documentation/translations/zh_CN/mm/
split_page_table_lock.rst
    62: make sure the architecture does not use the slab allocator to allocate page tables: slab uses page->slab_cache for its pages