/mm/kasan/
generic.c
  206  kasan_quarantine_remove_cache(cache);  in kasan_cache_shrink()
  211  if (!__kmem_cache_empty(cache))  in kasan_cache_shutdown()
  212  kasan_quarantine_remove_cache(cache);  in kasan_cache_shutdown()
  381  cache->kasan_info.alloc_meta_offset = 0;  in kasan_cache_create()
  399  if ((cache->flags & SLAB_TYPESAFE_BY_RCU) || cache->ctor ||  in kasan_cache_create()
  400  slub_debug_orig_size(cache)) {  in kasan_cache_create()
  424  cache->object_size;  in kasan_cache_create()
  447  optimal_size = cache->object_size + optimal_redzone(cache->object_size);  in kasan_cache_create()
  534  struct kmem_cache *cache;  in kasan_record_aux_stack() local
  541  cache = slab->slab_cache;  in kasan_record_aux_stack()
  [all …]
|
common.c
  158  kasan_unpoison(object, cache->object_size, false);  in __kasan_unpoison_new_object()
  177  static inline u8 assign_tag(struct kmem_cache *cache,  in assign_tag() argument
  187  if (!cache->ctor && !(cache->flags & SLAB_TYPESAFE_BY_RCU))  in assign_tag()
  203  kasan_init_object_meta(cache, object);  in __kasan_init_slab_obj()
  243  kasan_save_free_info(cache, tagged_object);  in poison_slab_object()
  251  return check_slab_allocation(cache, object, ip);  in __kasan_slab_pre_free()
  275  poison_slab_object(cache, object, init);  in __kasan_slab_free()
  282  if (kasan_quarantine_put(cache, object))  in __kasan_slab_free()
  326  kasan_unpoison(object, cache->object_size, init);  in unpoison_slab_object()
  330  kasan_save_alloc_info(cache, object, flags);  in unpoison_slab_object()
  [all …]
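
Read together, the common.c hits outline the generic slab-free hook sequence: a pre-free sanity check in __kasan_slab_pre_free(), poisoning of the object (which also records the free stack trace), and finally an attempt to park the object in the quarantine rather than returning it to the allocator straight away. Below is a minimal standalone model of just that ordering; the structures and helper bodies are simplified stand-ins for illustration, not the kernel's actual implementations.

#include <stdbool.h>
#include <stdio.h>

/* Simplified stand-ins for kernel structures; illustrative only. */
struct cache  { unsigned int object_size; bool use_quarantine; };
struct object { unsigned char poisoned; void *free_stack; };

/* Reject e.g. a double free: the object must not already be poisoned. */
static bool check_slab_allocation(struct cache *c, struct object *o)
{
        (void)c;
        return !o->poisoned;
}

/* Poison the object and record where it was freed ("save free info"). */
static void poison_slab_object(struct cache *c, struct object *o)
{
        (void)c;
        o->poisoned = 1;
        o->free_stack = __builtin_return_address(0);
}

/* Decide whether to hold the object in quarantine instead of freeing it now. */
static bool quarantine_put(struct cache *c, struct object *o)
{
        (void)o;
        return c->use_quarantine;
}

/* Mirrors the ordering seen in __kasan_slab_pre_free()/__kasan_slab_free(). */
static bool model_slab_free(struct cache *c, struct object *o)
{
        if (!check_slab_allocation(c, o))
                return true;            /* bad free: report and skip the free */
        poison_slab_object(c, o);       /* poison + record the free stack     */
        return quarantine_put(c, o);    /* true: object parked, free deferred */
}

int main(void)
{
        struct cache c = { .object_size = 128, .use_quarantine = true };
        struct object o = { 0 };

        printf("free deferred: %d\n", model_slab_free(&c, &o));
        return 0;
}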
|
quarantine.c
  145  void *object = qlink_to_object(qlink, cache);  in qlink_free()
  159  if (slab_want_init_on_free(cache) &&  in qlink_free()
  160  cache->kasan_info.free_meta_offset == 0  in qlink_free()
  163  ___cache_free(cache, object, _THIS_IP_);  in qlink_free()
  176  cache ? cache : qlink_to_cache(qlink);  in qlist_free_all()
  292  struct kmem_cache *cache)  in qlist_move_cache() argument
  305  if (obj_cache == cache)  in qlist_move_cache()
  316  struct kmem_cache *cache = arg;  in __per_cpu_remove_cache() local
  322  qlist_move_cache(q, &sq->qlist, cache);  in __per_cpu_remove_cache()
  364  qlist_free_all(&to_free, cache);  in kasan_quarantine_remove_cache()
  [all …]
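
The quarantine.c hits show how a dying cache is handled: qlist_move_cache() walks the quarantine and separates out every entry owned by the given cache, after which kasan_quarantine_remove_cache() frees that separated list. The following is a minimal standalone sketch of that move-by-owner operation on a singly linked list; the types and the way ownership is recorded are assumptions for illustration, not the kernel's qlist layout.

#include <stdio.h>

/* Illustrative stand-ins: a cache identity and a quarantine link/list. */
struct cache { const char *name; };
struct qlink { struct qlink *next; const struct cache *cache; };
struct qlist { struct qlink *head; };

static void qlist_put(struct qlist *q, struct qlink *l)
{
        l->next = q->head;
        q->head = l;
}

/* Move every link owned by @cache from @from onto @to (cf. qlist_move_cache()). */
static void move_cache(struct qlist *from, struct qlist *to, const struct cache *cache)
{
        struct qlink *curr = from->head, *next;

        from->head = NULL;
        while (curr) {
                next = curr->next;
                if (curr->cache == cache)
                        qlist_put(to, curr);    /* belongs to the cache being removed */
                else
                        qlist_put(from, curr);  /* stays quarantined                  */
                curr = next;
        }
}

int main(void)
{
        struct cache cache_a = { "a" }, cache_b = { "b" };
        struct qlink la = { .cache = &cache_a }, lb = { .cache = &cache_b };
        struct qlist q = { 0 }, to_free = { 0 };

        qlist_put(&q, &la);
        qlist_put(&q, &lb);
        move_cache(&q, &to_free, &cache_a);

        printf("to_free holds an object of cache %s; quarantine still holds cache %s\n",
               to_free.head->cache->name, q.head->cache->name);
        return 0;
}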
|
kasan_test_c.c
   963  struct kmem_cache *cache;  in kmem_cache_oob() local
   985  struct kmem_cache *cache;  in kmem_cache_double_free() local
  1006  struct kmem_cache *cache;  in kmem_cache_invalid_free() local
  1035  struct kmem_cache *cache;  in kmem_cache_rcu_uaf() local
  1078  struct kmem_cache *cache;  in kmem_cache_double_destroy() local
  1091  struct kmem_cache *cache;  in kmem_cache_accounted() local
  1115  struct kmem_cache *cache;  in kmem_cache_bulk() local
  1162  struct kmem_cache *cache;  in mempool_prepare_slab() local
  1178  return cache;  in mempool_prepare_slab()
  1248  struct kmem_cache *cache;  in mempool_slab_oob_right() local
  [all …]
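
These hits are the KUnit test cases that exercise cache-backed allocations (out-of-bounds, double free, invalid free, RCU use-after-free, and so on). As a rough idea of their shape, here is a hedged sketch of a cache out-of-bounds test written for the context of this test file; it is not a copy of the file's kmem_cache_oob(), and the size, the exact faulting access, and the cleanup details are assumptions, but KUNIT_EXPECT_KASAN_FAIL() is the suite's own macro for asserting that an access produces a KASAN report.

/* Hedged sketch, assuming the kasan_test_c.c context (KUnit + KASAN test helpers). */
static void kmem_cache_oob_sketch(struct kunit *test)
{
        struct kmem_cache *cache;
        size_t size = 200;
        char *p;

        cache = kmem_cache_create("test_cache", size, 0, 0, NULL);
        KUNIT_ASSERT_NOT_ERR_OR_NULL(test, cache);

        p = kmem_cache_alloc(cache, GFP_KERNEL);
        if (!p) {
                kunit_err(test, "Allocation failed: %s\n", __func__);
                kmem_cache_destroy(cache);
                return;
        }

        /* One byte past the object; volatile keeps the access from being optimized out. */
        KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)p)[size] = 'x');

        kmem_cache_free(cache, p);
        kmem_cache_destroy(cache);
}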
|
kasan.h
  218  struct kmem_cache *cache;  member
  359  size_t kasan_get_alloc_size(void *object, struct kmem_cache *cache);
  376  void kasan_print_aux_stacks(struct kmem_cache *cache, const void *object);
  388  struct kasan_alloc_meta *kasan_get_alloc_meta(struct kmem_cache *cache,
  390  struct kasan_free_meta *kasan_get_free_meta(struct kmem_cache *cache,
  392  void kasan_init_object_meta(struct kmem_cache *cache, const void *object);
  400  void kasan_save_alloc_info(struct kmem_cache *cache, void *object, gfp_t flags);
  401  void kasan_save_free_info(struct kmem_cache *cache, void *object);
  404  bool kasan_quarantine_put(struct kmem_cache *cache, void *object);
  406  void kasan_quarantine_remove_cache(struct kmem_cache *cache);
  [all …]
|
tags.c
   97  static void save_stack_info(struct kmem_cache *cache, void *object,  in save_stack_info() argument
  128  entry->size = cache->object_size;  in save_stack_info()
  140  void kasan_save_alloc_info(struct kmem_cache *cache, void *object, gfp_t flags)  in kasan_save_alloc_info() argument
  142  save_stack_info(cache, object, flags, false);  in kasan_save_alloc_info()
  145  void kasan_save_free_info(struct kmem_cache *cache, void *object)  in kasan_save_free_info() argument
  147  save_stack_info(cache, object, 0, true);  in kasan_save_free_info()
|
report_generic.c
   46  size_t kasan_get_alloc_size(void *object, struct kmem_cache *cache)  in kasan_get_alloc_size() argument
   61  while (size < cache->object_size) {  in kasan_get_alloc_size()
   71  return cache->object_size;  in kasan_get_alloc_size()
  168  if (!info->cache || !info->object)  in kasan_complete_mode_report_info()
  171  alloc_meta = kasan_get_alloc_meta(info->cache, info->object);  in kasan_complete_mode_report_info()
  178  free_meta = kasan_get_free_meta(info->cache, info->object);  in kasan_complete_mode_report_info()
  189  void kasan_print_aux_stacks(struct kmem_cache *cache, const void *object)  in kasan_print_aux_stacks() argument
  193  alloc_meta = kasan_get_alloc_meta(cache, object);  in kasan_print_aux_stacks()
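
kasan_get_alloc_size() above walks the object's shadow memory granule by granule (the `while (size < cache->object_size)` loop) to recover how large the original allocation was. The standalone userspace model below illustrates the idea using the generic-mode shadow encoding, where a shadow byte of 0 means a fully accessible 8-byte granule, 1..7 means a partially accessible granule, and other values mean poisoned; the handmade shadow array is purely for demonstration, the kernel reads real shadow memory instead.

#include <stdio.h>

#define GRANULE 8  /* generic KASAN: one shadow byte covers 8 bytes of memory */

/*
 * Standalone model of the granule walk in kasan_get_alloc_size(): keep
 * adding fully accessible granules, stop at a partial or poisoned one.
 */
static size_t model_get_alloc_size(const signed char *shadow, size_t object_size)
{
        size_t size = 0;

        while (size < object_size) {
                signed char s = shadow[size / GRANULE];

                if (s == 0)
                        size += GRANULE;        /* whole granule accessible  */
                else if (s > 0 && s < GRANULE)
                        return size + s;        /* partially accessible tail */
                else
                        return size;            /* poisoned: allocation ends */
        }
        return object_size;
}

int main(void)
{
        signed char shadow[32];
        size_t i;

        /* 203 usable bytes in a 256-byte object: 25 full granules + 3 bytes. */
        for (i = 0; i < 32; i++)
                shadow[i] = (i < 25) ? 0 : -1;
        shadow[25] = 3;

        printf("detected alloc size: %zu\n", model_get_alloc_size(shadow, 256));
        return 0;
}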
|
report.c
  302  info->object, info->cache->name, info->cache->object_size);  in describe_object_addr()
  347  kasan_print_aux_stacks(info->cache, info->object);  in describe_object_stacks()
  381  if (info->cache && info->object) {  in print_address_description()
  502  info->cache = slab->slab_cache;  in complete_report_info()
  503  info->object = nearest_obj(info->cache, slab, addr);  in complete_report_info()
  506  info->alloc_size = kasan_get_alloc_size(info->object, info->cache);  in complete_report_info()
  509  info->alloc_size = info->cache->object_size;  in complete_report_info()
  511  info->cache = info->object = NULL;  in complete_report_info()
|
report_hw_tags.c
   30  size_t kasan_get_alloc_size(void *object, struct kmem_cache *cache)  in kasan_get_alloc_size() argument
   45  while (size < cache->object_size) {  in kasan_get_alloc_size()
   54  return cache->object_size;  in kasan_get_alloc_size()
|
report_sw_tags.c
   48  size_t kasan_get_alloc_size(void *object, struct kmem_cache *cache)  in kasan_get_alloc_size() argument
   63  while (size < cache->object_size) {  in kasan_get_alloc_size()
   71  return cache->object_size;  in kasan_get_alloc_size()
|
report_tags.c
   37  if ((!info->cache || !info->object) && !info->bug_type) {  in kasan_complete_mode_report_info()
   63  info->cache->object_size != entry->size)  in kasan_complete_mode_report_info()
|
/mm/
mm_slot.h
   24  static inline void *mm_slot_alloc(struct kmem_cache *cache)  in mm_slot_alloc() argument
   26  if (!cache)	/* initialization failed */  in mm_slot_alloc()
   28  return kmem_cache_zalloc(cache, GFP_KERNEL);  in mm_slot_alloc()
   31  static inline void mm_slot_free(struct kmem_cache *cache, void *objp)  in mm_slot_free() argument
   33  kmem_cache_free(cache, objp);  in mm_slot_free()
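
mm_slot_alloc() and mm_slot_free() are thin wrappers around kmem_cache_zalloc()/kmem_cache_free() that tolerate a cache whose creation failed at init time. A hedged usage sketch follows; the subsystem, structure, and function names are invented for illustration and do not come from mm/, but the pattern of embedding struct mm_slot in a larger per-mm structure backed by a dedicated cache is how callers such as KSM and khugepaged use these helpers.

/* Hedged sketch, assuming the mm/ in-tree context ("mm_slot.h" and <linux/slab.h>). */
struct example_mm_slot {
        struct mm_slot slot;    /* embedded generic per-mm slot                 */
        int private_state;      /* subsystem-specific data (illustrative field) */
};

static struct kmem_cache *example_slot_cache;

static int __init example_slots_init(void)
{
        example_slot_cache = KMEM_CACHE(example_mm_slot, 0);
        return example_slot_cache ? 0 : -ENOMEM;
}

static struct example_mm_slot *example_get_slot(void)
{
        /* Returns NULL both on allocation failure and if the cache never got created. */
        return mm_slot_alloc(example_slot_cache);
}

static void example_put_slot(struct example_mm_slot *s)
{
        mm_slot_free(example_slot_cache, s);
}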
|
slab.h
  304  static inline void *nearest_obj(struct kmem_cache *cache,  in nearest_obj() argument
  307  void *object = x - (x - slab_address(slab)) % cache->size;  in nearest_obj()
  309  (slab->objects - 1) * cache->size;  in nearest_obj()
  312  result = fixup_red_left(cache, result);  in nearest_obj()
  317  static inline unsigned int __obj_to_index(const struct kmem_cache *cache,  in __obj_to_index() argument
  321  cache->reciprocal_size);  in __obj_to_index()
  324  static inline unsigned int obj_to_index(const struct kmem_cache *cache,  in obj_to_index() argument
  329  return __obj_to_index(cache, slab_address(slab), obj);  in obj_to_index()
  332  static inline int objs_per_slab(const struct kmem_cache *cache,  in objs_per_slab() argument
  598  void ___cache_free(struct kmem_cache *cache, void *x, unsigned long addr);
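
nearest_obj() rounds an arbitrary pointer inside a slab down to the start of the object containing it (objects sit at a fixed stride of cache->size from the slab base), clamps to the last object, and applies fixup_red_left(); obj_to_index() converts an object pointer to its index using a reciprocal divide. The standalone model below reproduces only the basic arithmetic visible in the hits: it keeps the clamp but omits fixup_red_left() and the reciprocal-divide optimisation, and the types are simplified assumptions.

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

/* Simplified cache description: object stride and object count per slab. */
struct cache_model { size_t size; unsigned int objects; };

/* Round an interior pointer down to its object start, clamped to the last object. */
static uintptr_t model_nearest_obj(const struct cache_model *c, uintptr_t slab_base, uintptr_t x)
{
        uintptr_t object = x - (x - slab_base) % c->size;
        uintptr_t last   = slab_base + (uintptr_t)(c->objects - 1) * c->size;

        return object > last ? last : object;
}

/* Convert an object start address to its index within the slab. */
static unsigned int model_obj_to_index(const struct cache_model *c, uintptr_t slab_base, uintptr_t obj)
{
        return (unsigned int)((obj - slab_base) / c->size);
}

int main(void)
{
        struct cache_model c = { .size = 256, .objects = 16 };
        uintptr_t base = 0x10000, ptr = base + 3 * 256 + 40;   /* inside object 3 */
        uintptr_t obj = model_nearest_obj(&c, base, ptr);

        printf("object start %#lx, index %u\n",
               (unsigned long)obj, model_obj_to_index(&c, base, obj));
        return 0;
}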
|
Kconfig
   26  bool "Compressed cache for swap pages"
   31  A lightweight compressed cache for swap pages. It takes
   39  bool "Enable the compressed cache for swap pages by default"
   42  If selected, the compressed cache for swap pages will be enabled
  133  Selects the default allocator for the compressed cache for
  223  cache layout), which makes such heap attacks easier to exploit
  225  can usually only damage objects in the same cache. To disable
  241  Many kernel heap attacks try to target slab cache metadata and
  311  utilization of a direct-mapped memory-side-cache. See section
  321  While the randomization improves cache utilization it may
  [all …]
|
Kconfig.debug
   56  will still exist (with SYSFS enabled), it will not provide e.g. cache
   81  Make SLAB_TYPESAFE_BY_RCU caches behave approximately as if the cache
|
slub.c
  4736  void ___cache_free(struct kmem_cache *cache, void *x, unsigned long addr)  in ___cache_free() argument
  4738  do_slab_free(cache, virt_to_slab(x), x, x, 1, addr);  in ___cache_free()
|
/mm/kfence/
core.c
   472  meta->addr = ALIGN_DOWN(meta->addr, cache->align);  in kfence_guarded_alloc()
   480  WRITE_ONCE(meta->cache, cache);  in kfence_guarded_alloc()
   489  slab->slab_cache = cache;  in kfence_guarded_alloc()
   500  if (unlikely(slab_want_init_on_alloc(gfp, cache)))  in kfence_guarded_alloc()
   502  if (cache->ctor)  in kfence_guarded_alloc()
   503  cache->ctor(addr);  in kfence_guarded_alloc()
   548  init = slab_want_init_on_free(meta->cache);  in kfence_guarded_free()
  1030  in_use = meta->cache == s && kfence_obj_allocated(meta);  in kfence_shutdown_cache()
  1060  if (meta->cache == s && meta->state == KFENCE_OBJECT_FREED)  in kfence_shutdown_cache()
  1061  meta->cache = NULL;  in kfence_shutdown_cache()
  [all …]
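
The kfence_guarded_alloc() hits show the per-object setup order: the object address is aligned down to the cache's alignment, the owning cache is recorded in the metadata and in slab->slab_cache, the memory is zeroed when init-on-alloc applies, and the constructor runs last. The standalone model below mirrors only that address computation and ordering; placing the object at the end of its page is the right-aligned case (real KFENCE also left-aligns some allocations to catch the other out-of-bounds direction), and the field names and sizes are illustrative assumptions.

#include <stdbool.h>
#include <stdint.h>
#include <string.h>
#include <stdio.h>

/* Illustrative model of the object set-up visible in kfence_guarded_alloc(). */
struct cache_model {
        size_t size, align;
        bool init_on_alloc;
        void (*ctor)(void *);
};

static uintptr_t model_guarded_alloc(struct cache_model *c, uintptr_t page_end, void *backing)
{
        /* Right-aligned placement, then rounded down to honour the cache alignment. */
        uintptr_t addr = (page_end - c->size) & ~(uintptr_t)(c->align - 1);

        if (c->init_on_alloc)
                memset(backing, 0, c->size);    /* honour init-on-alloc        */
        if (c->ctor)
                c->ctor(backing);               /* run the constructor last    */
        return addr;
}

static void example_ctor(void *obj)
{
        ((unsigned char *)obj)[0] = 0xaa;       /* pretend constructor work    */
}

int main(void)
{
        unsigned char backing[128];
        struct cache_model c = { .size = 100, .align = 64,
                                 .init_on_alloc = true, .ctor = example_ctor };

        printf("object addr %#lx\n",
               (unsigned long)model_guarded_alloc(&c, 0x2000, backing));
        return 0;
}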
|
report.c
  139  const struct kmem_cache *const cache = meta->cache;  in kfence_print_object() local
  150  size, (cache && cache->name) ? cache->name : "<destroyed>");  in kfence_print_object()
  320  kpp->kp_slab_cache = meta->cache;  in __kfence_obj_info()
|
kfence.h
   88  struct kmem_cache *cache;  member