/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_SLAB_DEF_H
#define	_LINUX_SLAB_DEF_H

#include <linux/kfence.h>
#include <linux/reciprocal_div.h>

/*
 * Definitions unique to the original Linux SLAB allocator.
 */

struct kmem_cache {
	struct array_cache __percpu *cpu_cache;

/* 1) Cache tunables. Protected by slab_mutex */
	unsigned int batchcount;
	unsigned int limit;
	unsigned int shared;

	unsigned int size;
	struct reciprocal_value reciprocal_buffer_size;
/* 2) touched by every alloc & free from the backend */

	slab_flags_t flags;		/* constant flags */
	unsigned int num;		/* # of objs per slab */

/* 3) cache_grow/shrink */
	/* order of pgs per slab (2^n) */
	unsigned int gfporder;

	/* force GFP flags, e.g. GFP_DMA */
	gfp_t allocflags;

	size_t colour;			/* cache colouring range */
	unsigned int colour_off;	/* colour offset */
	unsigned int freelist_size;

	/* constructor func */
	void (*ctor)(void *obj);

/* 4) cache creation/removal */
	const char *name;
	struct list_head list;
	int refcount;
	int object_size;
	int align;

/* 5) statistics */
#ifdef CONFIG_DEBUG_SLAB
	unsigned long num_active;
	unsigned long num_allocations;
	unsigned long high_mark;
	unsigned long grown;
	unsigned long reaped;
	unsigned long errors;
	unsigned long max_freeable;
	unsigned long node_allocs;
	unsigned long node_frees;
	unsigned long node_overflow;
	atomic_t allochit;
	atomic_t allocmiss;
	atomic_t freehit;
	atomic_t freemiss;

	/*
	 * If debugging is enabled, then the allocator can add additional
	 * fields and/or padding to every object. 'size' contains the total
	 * object size including these internal fields, while 'obj_offset'
	 * and 'object_size' contain the offset to the user object and its
	 * size.
	 */
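	/*
	 * Illustrative example (sizes are an assumption, not dictated by
	 * this header): with SLAB_RED_ZONE a 64-byte user object ends up
	 * bracketed by red-zone words, so 'obj_offset' points past the
	 * leading red zone and 'size' grows correspondingly larger than
	 * 'object_size'.
	 */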
	int obj_offset;
#endif /* CONFIG_DEBUG_SLAB */

#ifdef CONFIG_KASAN_GENERIC
	struct kasan_cache kasan_info;
#endif

#ifdef CONFIG_SLAB_FREELIST_RANDOM
	unsigned int *random_seq;
#endif

#ifdef CONFIG_HARDENED_USERCOPY
	unsigned int useroffset;	/* Usercopy region offset */
	unsigned int usersize;		/* Usercopy region size */
#endif

	struct kmem_cache_node *node[MAX_NUMNODES];
};
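
/*
 * Illustrative sketch (hypothetical cache and struct names, not part of
 * this header): the fields above are populated by the slab core when a
 * cache is created, e.g.:
 *
 *	struct foo {
 *		int a;
 *		struct list_head link;
 *	};
 *
 *	struct kmem_cache *foo_cache;
 *
 *	foo_cache = kmem_cache_create("foo", sizeof(struct foo), 0,
 *				      SLAB_HWCACHE_ALIGN, NULL);
 *
 * "foo" becomes ->name and sizeof(struct foo) seeds ->object_size; the
 * allocator then derives ->size, ->num and ->gfporder from them.
 */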
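
/*
 * Map an arbitrary pointer inside a slab back to the start of the object
 * containing it; pointers past the final object are clamped to the last
 * object of the slab.
 */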
static inline void *nearest_obj(struct kmem_cache *cache, const struct slab *slab,
				void *x)
{
	void *object = x - (x - slab->s_mem) % cache->size;
	void *last_object = slab->s_mem + (cache->num - 1) * cache->size;

	if (unlikely(object > last_object))
		return last_object;
	else
		return object;
}

/*
 * We want to avoid an expensive divide: (offset / cache->size).
 * Using the fact that size is a constant for a particular cache,
 * we can replace (offset / cache->size) by
 * reciprocal_divide(offset, cache->reciprocal_buffer_size).
 */
static inline unsigned int obj_to_index(const struct kmem_cache *cache,
					const struct slab *slab, void *obj)
{
	u32 offset = (obj - slab->s_mem);
	return reciprocal_divide(offset, cache->reciprocal_buffer_size);
}
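
/*
 * The inverse mapping needs no division at all: object index 'idx' lives
 * at slab->s_mem + idx * cache->size. A minimal sketch (hypothetical
 * helper name, not declared by this header):
 *
 *	static inline void *index_to_obj_example(const struct kmem_cache *cache,
 *						 const struct slab *slab,
 *						 unsigned int idx)
 *	{
 *		return slab->s_mem + cache->size * idx;
 *	}
 */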
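
/*
 * A slab whose memory comes from the KFENCE pool backs exactly one
 * object, so report 1 for it; otherwise return the cache-wide
 * objects-per-slab count.
 */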
static inline int objs_per_slab(const struct kmem_cache *cache,
				const struct slab *slab)
{
	if (is_kfence_address(slab_address(slab)))
		return 1;
	return cache->num;
}

#endif	/* _LINUX_SLAB_DEF_H */