/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Written by Mark Hemment, 1996 (markhe@nextd.demon.co.uk).
 *
 * (C) SGI 2006, Christoph Lameter
 *	Cleaned up and restructured to ease the addition of alternative
 *	implementations of SLAB allocators.
 * (C) Linux Foundation 2008-2013
 *	Unified interface for all slab allocators
 */

#ifndef _LINUX_SLAB_H
#define _LINUX_SLAB_H

#include <linux/gfp.h>
#include <linux/overflow.h>
#include <linux/types.h>
#include <linux/workqueue.h>
#include <linux/percpu-refcount.h>


/*
 * Flags to pass to kmem_cache_create().
 * The ones marked DEBUG are only valid if CONFIG_DEBUG_SLAB is set.
 */
/* DEBUG: Perform (expensive) checks on alloc/free */
#define SLAB_CONSISTENCY_CHECKS	((slab_flags_t __force)0x00000100U)
/* DEBUG: Red zone objs in a cache */
#define SLAB_RED_ZONE		((slab_flags_t __force)0x00000400U)
/* DEBUG: Poison objects */
#define SLAB_POISON		((slab_flags_t __force)0x00000800U)
/* Indicate a kmalloc slab */
#define SLAB_KMALLOC		((slab_flags_t __force)0x00001000U)
/* Align objs on cache lines */
#define SLAB_HWCACHE_ALIGN	((slab_flags_t __force)0x00002000U)
/* Use GFP_DMA memory */
#define SLAB_CACHE_DMA		((slab_flags_t __force)0x00004000U)
/* Use GFP_DMA32 memory */
#define SLAB_CACHE_DMA32	((slab_flags_t __force)0x00008000U)
/* DEBUG: Store the last owner for bug hunting */
#define SLAB_STORE_USER		((slab_flags_t __force)0x00010000U)
/* Panic if kmem_cache_create() fails */
#define SLAB_PANIC		((slab_flags_t __force)0x00040000U)
/*
 * SLAB_TYPESAFE_BY_RCU - **WARNING** READ THIS!
 *
 * This delays freeing the SLAB page by a grace period; it does _NOT_
 * delay object freeing. This means that if you do kmem_cache_free()
 * that memory location is free to be reused at any time. Thus it may
 * be possible to see another object there in the same RCU grace period.
 *
 * This feature only ensures the memory location backing the object
 * stays valid; the trick to using this is relying on an independent
 * object validation pass. Something like:
 *
 *	rcu_read_lock()
 * again:
 *	obj = lockless_lookup(key);
 *	if (obj) {
 *		if (!try_get_ref(obj))	// might fail for free objects
 *			goto again;
 *
 *		if (obj->key != key) {	// not the object we expected
 *			put_ref(obj);
 *			goto again;
 *		}
 *	}
 *	rcu_read_unlock();
 *
 * This is useful if we need to approach a kernel structure obliquely,
 * from its address obtained without the usual locking. We can lock
 * the structure to stabilize it and check it's still at the given address,
 * only if we can be sure that the memory has not been meanwhile reused
 * for some other kind of object (which our subsystem's lock might corrupt).
 *
 * rcu_read_lock before reading the address, then rcu_read_unlock after
 * taking the spinlock within the structure expected at that address.
 *
 * Note that it is not possible to acquire a lock within a structure
 * allocated with SLAB_TYPESAFE_BY_RCU without first acquiring a reference
 * as described above. The reason is that SLAB_TYPESAFE_BY_RCU pages
 * are not zeroed before being given to the slab, which means that any
 * locks must be initialized after each and every kmem_cache_alloc().
 * Alternatively, make the ctor passed to kmem_cache_create() initialize
 * the locks at page-allocation time, as is done in __i915_request_ctor(),
 * sighand_ctor(), and anon_vma_ctor(). Such a ctor permits readers
 * to safely acquire those ctor-initialized locks under rcu_read_lock()
 * protection.
 *
 * Note that SLAB_TYPESAFE_BY_RCU was originally named SLAB_DESTROY_BY_RCU.
 */
/* Defer freeing slabs to RCU */
#define SLAB_TYPESAFE_BY_RCU	((slab_flags_t __force)0x00080000U)
/* Spread some memory over cpuset */
#define SLAB_MEM_SPREAD		((slab_flags_t __force)0x00100000U)
/* Trace allocations and frees */
#define SLAB_TRACE		((slab_flags_t __force)0x00200000U)

/* Flag to prevent checks on free */
#ifdef CONFIG_DEBUG_OBJECTS
# define SLAB_DEBUG_OBJECTS	((slab_flags_t __force)0x00400000U)
#else
# define SLAB_DEBUG_OBJECTS	0
#endif

/* Avoid kmemleak tracing */
#define SLAB_NOLEAKTRACE	((slab_flags_t __force)0x00800000U)

/* Fault injection mark */
#ifdef CONFIG_FAILSLAB
# define SLAB_FAILSLAB		((slab_flags_t __force)0x02000000U)
#else
# define SLAB_FAILSLAB		0
#endif
/* Account to memcg */
#ifdef CONFIG_MEMCG_KMEM
# define SLAB_ACCOUNT		((slab_flags_t __force)0x04000000U)
#else
# define SLAB_ACCOUNT		0
#endif

#ifdef CONFIG_KASAN_GENERIC
#define SLAB_KASAN		((slab_flags_t __force)0x08000000U)
#else
#define SLAB_KASAN		0
#endif

/*
 * Ignore user specified debugging flags.
 * Intended for caches created for self-tests so they have only flags
 * specified in the code and other flags are ignored.
 */
#define SLAB_NO_USER_FLAGS	((slab_flags_t __force)0x10000000U)

#ifdef CONFIG_KFENCE
#define SLAB_SKIP_KFENCE	((slab_flags_t __force)0x20000000U)
#else
#define SLAB_SKIP_KFENCE	0
#endif

/* The following flags affect the page allocator grouping pages by mobility */
/* Objects are reclaimable */
#ifndef CONFIG_SLUB_TINY
#define SLAB_RECLAIM_ACCOUNT	((slab_flags_t __force)0x00020000U)
#else
#define SLAB_RECLAIM_ACCOUNT	((slab_flags_t __force)0)
#endif
#define SLAB_TEMPORARY		SLAB_RECLAIM_ACCOUNT	/* Objects are short-lived */

/*
 * ZERO_SIZE_PTR will be returned for zero sized kmalloc requests.
 *
 * Dereferencing ZERO_SIZE_PTR will lead to a distinct access fault.
 *
 * ZERO_SIZE_PTR can be passed to kfree though in the same way that NULL can.
 * Both make kfree a no-op.
 */
#define ZERO_SIZE_PTR ((void *)16)

#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= \
				(unsigned long)ZERO_SIZE_PTR)
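
/*
 * For illustration (not part of this API): a zero-sized request yields
 * ZERO_SIZE_PTR rather than NULL, so callers can tell "allocated
 * nothing" apart from "allocation failed", and kfree() accepts either:
 *
 *	void *p = kmalloc(0, GFP_KERNEL);	// p == ZERO_SIZE_PTR
 *	if (ZERO_OR_NULL_PTR(p))
 *		...				// do not dereference p
 *	kfree(p);				// no-op, like kfree(NULL)
 */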

#include <linux/kasan.h>

struct list_lru;
struct mem_cgroup;
/*
 * struct kmem_cache related prototypes
 */
void __init kmem_cache_init(void);
bool slab_is_available(void);

struct kmem_cache *kmem_cache_create(const char *name, unsigned int size,
			unsigned int align, slab_flags_t flags,
			void (*ctor)(void *));
struct kmem_cache *kmem_cache_create_usercopy(const char *name,
			unsigned int size, unsigned int align,
			slab_flags_t flags,
			unsigned int useroffset, unsigned int usersize,
			void (*ctor)(void *));
void kmem_cache_destroy(struct kmem_cache *s);
int kmem_cache_shrink(struct kmem_cache *s);

/*
 * Please use this macro to create slab caches. Simply specify the
 * name of the structure and maybe some flags that are listed above.
 *
 * The alignment of the struct determines object alignment. If you
 * e.g. add ____cacheline_aligned_in_smp to the struct declaration
 * then the objects will be properly aligned in SMP configurations.
 */
#define KMEM_CACHE(__struct, __flags)					\
		kmem_cache_create(#__struct, sizeof(struct __struct),	\
			__alignof__(struct __struct), (__flags), NULL)
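
/*
 * A minimal usage sketch; "my_struct" and "my_cache" are made-up names
 * for illustration:
 *
 *	struct my_struct {
 *		int id;
 *	} ____cacheline_aligned_in_smp;
 *
 *	static struct kmem_cache *my_cache;
 *
 *	my_cache = KMEM_CACHE(my_struct, SLAB_HWCACHE_ALIGN | SLAB_PANIC);
 */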

/*
 * To whitelist a single field for copying to/from usercopy, use this
 * macro instead of KMEM_CACHE() above.
 */
#define KMEM_CACHE_USERCOPY(__struct, __flags, __field)			\
		kmem_cache_create_usercopy(#__struct,			\
			sizeof(struct __struct),			\
			__alignof__(struct __struct), (__flags),	\
			offsetof(struct __struct, __field),		\
			sizeof_field(struct __struct, __field), NULL)
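
/*
 * Hypothetical example: only the "data" field of the made-up struct
 * below is whitelisted, so hardened usercopy will reject copies to or
 * from user space that touch any other part of the object:
 *
 *	struct my_req {
 *		spinlock_t lock;
 *		char data[64];
 *	};
 *
 *	cache = KMEM_CACHE_USERCOPY(my_req, 0, data);
 */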

/*
 * Common kmalloc functions provided by all allocators
 */
void * __must_check krealloc(const void *objp, size_t new_size, gfp_t flags) __realloc_size(2);
void kfree(const void *objp);
void kfree_sensitive(const void *objp);
size_t __ksize(const void *objp);

/**
 * ksize - Report actual allocation size of associated object
 *
 * @objp: Pointer returned from a prior kmalloc()-family allocation.
 *
 * This should not be used for writing beyond the originally requested
 * allocation size. Either use krealloc() or round up the allocation size
 * with kmalloc_size_roundup() prior to allocation. If this is used to
 * access beyond the originally requested allocation size, UBSAN_BOUNDS
 * and/or FORTIFY_SOURCE may trip, since they only know about the
 * originally allocated size via the __alloc_size attribute.
 */
size_t ksize(const void *objp);
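
/*
 * For example (bucket sizes are configuration dependent and shown here
 * only for illustration):
 *
 *	p = kmalloc(126, GFP_KERNEL);
 *	ksize(p);	// typically 128: the bucket size, not 126
 *
 * Reading ksize(p) is fine, but writing into bytes 126..127 here may
 * trip FORTIFY_SOURCE and/or UBSAN_BOUNDS; see kmalloc_size_roundup()
 * below for the supported way to use the whole bucket.
 */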

#ifdef CONFIG_PRINTK
bool kmem_valid_obj(void *object);
void kmem_dump_obj(void *object);
#endif

/*
 * Some archs want to perform DMA into kmalloc caches and need a guaranteed
 * alignment larger than the alignment of a 64-bit integer.
 * Setting ARCH_DMA_MINALIGN in arch headers allows that.
 */
#if defined(ARCH_DMA_MINALIGN) && ARCH_DMA_MINALIGN > 8
#define ARCH_KMALLOC_MINALIGN ARCH_DMA_MINALIGN
#define KMALLOC_MIN_SIZE ARCH_DMA_MINALIGN
#define KMALLOC_SHIFT_LOW ilog2(ARCH_DMA_MINALIGN)
#else
#define ARCH_KMALLOC_MINALIGN __alignof__(unsigned long long)
#endif

/*
 * Setting ARCH_SLAB_MINALIGN in arch headers allows a different alignment.
 * Intended for arches that get misalignment faults even for 64 bit integer
 * aligned buffers.
 */
#ifndef ARCH_SLAB_MINALIGN
#define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
#endif

/*
 * Arches can define this function if they want to decide the minimum slab
 * alignment at runtime. The value returned by the function must be a power
 * of two and >= ARCH_SLAB_MINALIGN.
 */
#ifndef arch_slab_minalign
static inline unsigned int arch_slab_minalign(void)
{
	return ARCH_SLAB_MINALIGN;
}
#endif

/*
 * kmem_cache_alloc and friends return pointers aligned to ARCH_SLAB_MINALIGN.
 * kmalloc and friends return pointers aligned to both ARCH_KMALLOC_MINALIGN
 * and ARCH_SLAB_MINALIGN, but here we only assume the former alignment.
 */
#define __assume_kmalloc_alignment __assume_aligned(ARCH_KMALLOC_MINALIGN)
#define __assume_slab_alignment __assume_aligned(ARCH_SLAB_MINALIGN)
#define __assume_page_alignment __assume_aligned(PAGE_SIZE)

/*
 * Kmalloc array related definitions
 */

#ifdef CONFIG_SLAB
/*
 * SLAB and SLUB directly allocate requests fitting into an order-1 page
 * (PAGE_SIZE*2). Larger requests are passed to the page allocator.
 */
#define KMALLOC_SHIFT_HIGH	(PAGE_SHIFT + 1)
#define KMALLOC_SHIFT_MAX	(MAX_ORDER + PAGE_SHIFT - 1)
#ifndef KMALLOC_SHIFT_LOW
#define KMALLOC_SHIFT_LOW	5
#endif
#endif

#ifdef CONFIG_SLUB
#define KMALLOC_SHIFT_HIGH	(PAGE_SHIFT + 1)
#define KMALLOC_SHIFT_MAX	(MAX_ORDER + PAGE_SHIFT - 1)
#ifndef KMALLOC_SHIFT_LOW
#define KMALLOC_SHIFT_LOW	3
#endif
#endif

#ifdef CONFIG_SLOB
/*
 * SLOB passes all requests larger than one page to the page allocator.
 * No kmalloc array is necessary since objects of different sizes can
 * be allocated from the same page.
 */
#define KMALLOC_SHIFT_HIGH	PAGE_SHIFT
#define KMALLOC_SHIFT_MAX	(MAX_ORDER + PAGE_SHIFT - 1)
#ifndef KMALLOC_SHIFT_LOW
#define KMALLOC_SHIFT_LOW	3
#endif
#endif

/* Maximum allocatable size */
#define KMALLOC_MAX_SIZE	(1UL << KMALLOC_SHIFT_MAX)
/* Maximum size for which we actually use a slab cache */
#define KMALLOC_MAX_CACHE_SIZE	(1UL << KMALLOC_SHIFT_HIGH)
/* Maximum order allocatable via the slab allocator */
#define KMALLOC_MAX_ORDER	(KMALLOC_SHIFT_MAX - PAGE_SHIFT)

/*
 * Kmalloc subsystem.
 */
#ifndef KMALLOC_MIN_SIZE
#define KMALLOC_MIN_SIZE (1 << KMALLOC_SHIFT_LOW)
#endif

/*
 * This restriction comes from the byte-sized index implementation.
 * Page size is normally 2^12 bytes and, in this case, if we want to use
 * a byte-sized index which can represent 2^8 entries, the size of the object
 * should be equal to or greater than 2^12 / 2^8 = 2^4 = 16.
 * If the minimum size of kmalloc is less than 16, we use it as the minimum
 * object size and give up on using the byte-sized index.
 */
#define SLAB_OBJ_MIN_SIZE	(KMALLOC_MIN_SIZE < 16 ? \
				(KMALLOC_MIN_SIZE) : 16)

/*
 * Whenever changing this, take care that kmalloc_type() and
 * create_kmalloc_caches() still work as intended.
 *
 * KMALLOC_NORMAL can contain only unaccounted objects whereas KMALLOC_CGROUP
 * is for accounted but unreclaimable and non-dma objects. All the other
 * kmem caches can have both accounted and unaccounted objects.
 */
enum kmalloc_cache_type {
	KMALLOC_NORMAL = 0,
#ifndef CONFIG_ZONE_DMA
	KMALLOC_DMA = KMALLOC_NORMAL,
#endif
#ifndef CONFIG_MEMCG_KMEM
	KMALLOC_CGROUP = KMALLOC_NORMAL,
#endif
#ifdef CONFIG_SLUB_TINY
	KMALLOC_RECLAIM = KMALLOC_NORMAL,
#else
	KMALLOC_RECLAIM,
#endif
#ifdef CONFIG_ZONE_DMA
	KMALLOC_DMA,
#endif
#ifdef CONFIG_MEMCG_KMEM
	KMALLOC_CGROUP,
#endif
	NR_KMALLOC_TYPES
};

#ifndef CONFIG_SLOB
extern struct kmem_cache *
kmalloc_caches[NR_KMALLOC_TYPES][KMALLOC_SHIFT_HIGH + 1];

/*
 * Define gfp bits that should not be set for KMALLOC_NORMAL.
 */
#define KMALLOC_NOT_NORMAL_BITS					\
	(__GFP_RECLAIMABLE |					\
	(IS_ENABLED(CONFIG_ZONE_DMA)   ? __GFP_DMA : 0) |	\
	(IS_ENABLED(CONFIG_MEMCG_KMEM) ? __GFP_ACCOUNT : 0))

static __always_inline enum kmalloc_cache_type kmalloc_type(gfp_t flags)
{
	/*
	 * The most common case is KMALLOC_NORMAL, so test for it
	 * with a single branch for all the relevant flags.
	 */
	if (likely((flags & KMALLOC_NOT_NORMAL_BITS) == 0))
		return KMALLOC_NORMAL;

	/*
	 * At least one of the flags has to be set. Their priorities in
	 * decreasing order are:
	 *  1) __GFP_DMA
	 *  2) __GFP_RECLAIMABLE
	 *  3) __GFP_ACCOUNT
	 */
	if (IS_ENABLED(CONFIG_ZONE_DMA) && (flags & __GFP_DMA))
		return KMALLOC_DMA;
	if (!IS_ENABLED(CONFIG_MEMCG_KMEM) || (flags & __GFP_RECLAIMABLE))
		return KMALLOC_RECLAIM;
	else
		return KMALLOC_CGROUP;
}
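
/*
 * For example, assuming both CONFIG_ZONE_DMA and CONFIG_MEMCG_KMEM are
 * enabled, the priorities above give:
 *
 *	kmalloc_type(GFP_KERNEL)                     == KMALLOC_NORMAL
 *	kmalloc_type(GFP_KERNEL | __GFP_RECLAIMABLE) == KMALLOC_RECLAIM
 *	kmalloc_type(GFP_KERNEL | __GFP_ACCOUNT)     == KMALLOC_CGROUP
 *	kmalloc_type(GFP_DMA)                        == KMALLOC_DMA
 */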

/*
 * Figure out which kmalloc slab an allocation of a certain size
 * belongs to.
 * 0 = zero alloc
 * 1 =  65 .. 96 bytes
 * 2 = 129 .. 192 bytes
 * n = 2^(n-1)+1 .. 2^n
 *
 * Note: __kmalloc_index() is compile-time optimized, and not runtime optimized;
 * typical usage is via kmalloc_index() and therefore evaluated at compile-time.
 * Callers where !size_is_constant should only be test modules, where runtime
 * overheads of __kmalloc_index() can be tolerated. Also see kmalloc_slab().
 */
static __always_inline unsigned int __kmalloc_index(size_t size,
						    bool size_is_constant)
{
	if (!size)
		return 0;

	if (size <= KMALLOC_MIN_SIZE)
		return KMALLOC_SHIFT_LOW;

	if (KMALLOC_MIN_SIZE <= 32 && size > 64 && size <= 96)
		return 1;
	if (KMALLOC_MIN_SIZE <= 64 && size > 128 && size <= 192)
		return 2;
	if (size <=          8) return 3;
	if (size <=         16) return 4;
	if (size <=         32) return 5;
	if (size <=         64) return 6;
	if (size <=        128) return 7;
	if (size <=        256) return 8;
	if (size <=        512) return 9;
	if (size <=       1024) return 10;
	if (size <=   2 * 1024) return 11;
	if (size <=   4 * 1024) return 12;
	if (size <=   8 * 1024) return 13;
	if (size <=  16 * 1024) return 14;
	if (size <=  32 * 1024) return 15;
	if (size <=  64 * 1024) return 16;
	if (size <= 128 * 1024) return 17;
	if (size <= 256 * 1024) return 18;
	if (size <= 512 * 1024) return 19;
	if (size <= 1024 * 1024) return 20;
	if (size <= 2 * 1024 * 1024) return 21;

	if (!IS_ENABLED(CONFIG_PROFILE_ALL_BRANCHES) && size_is_constant)
		BUILD_BUG_ON_MSG(1, "unexpected size in kmalloc_index()");
	else
		BUG();

	/* Will never be reached. Needed because the compiler may complain */
	return -1;
}
static_assert(PAGE_SHIFT <= 20);
#define kmalloc_index(s) __kmalloc_index(s, true)
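
/*
 * For example, with KMALLOC_SHIFT_LOW == 3 (an 8-byte minimum, the
 * SLUB default on most configurations):
 *
 *	kmalloc_index(8)    == 3	// smallest cache
 *	kmalloc_index(24)   == 5	// rounds up to the 32-byte cache
 *	kmalloc_index(96)   == 1	// the special 96-byte cache
 *	kmalloc_index(4096) == 12
 */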
#endif /* !CONFIG_SLOB */

void *__kmalloc(size_t size, gfp_t flags) __assume_kmalloc_alignment __alloc_size(1);

/**
 * kmem_cache_alloc - Allocate an object
 * @cachep: The cache to allocate from.
 * @flags: See kmalloc().
 *
 * Allocate an object from this cache.
 * See kmem_cache_zalloc() for a shortcut of adding __GFP_ZERO to flags.
 *
 * Return: pointer to the new object or %NULL in case of error
 */
void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags) __assume_slab_alignment __malloc;
void *kmem_cache_alloc_lru(struct kmem_cache *s, struct list_lru *lru,
			   gfp_t gfpflags) __assume_slab_alignment __malloc;
void kmem_cache_free(struct kmem_cache *s, void *objp);
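
/*
 * A typical cache lifecycle, sketched with made-up names for
 * illustration:
 *
 *	cache = KMEM_CACHE(my_struct, 0);
 *	...
 *	obj = kmem_cache_alloc(cache, GFP_KERNEL);
 *	if (!obj)
 *		return -ENOMEM;
 *	...
 *	kmem_cache_free(cache, obj);
 *	kmem_cache_destroy(cache);
 */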

/*
 * Bulk allocation and freeing operations. These are accelerated in an
 * allocator specific way to avoid taking locks repeatedly or building
 * metadata structures unnecessarily.
 *
 * Note that interrupts must be enabled when calling these functions.
 */
void kmem_cache_free_bulk(struct kmem_cache *s, size_t size, void **p);
int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size, void **p);
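
/*
 * Sketch of a bulk cycle (illustrative only). kmem_cache_alloc_bulk()
 * returns the number of objects allocated, which is 0 on failure:
 *
 *	void *objs[16];
 *
 *	if (!kmem_cache_alloc_bulk(s, GFP_KERNEL, ARRAY_SIZE(objs), objs))
 *		return -ENOMEM;
 *	...
 *	kmem_cache_free_bulk(s, ARRAY_SIZE(objs), objs);
 */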

/*
 * Caller must not use kfree_bulk() on memory not originally allocated
 * by kmalloc(), because the SLOB allocator cannot handle this.
 */
static __always_inline void kfree_bulk(size_t size, void **p)
{
	kmem_cache_free_bulk(NULL, size, p);
}

void *__kmalloc_node(size_t size, gfp_t flags, int node) __assume_kmalloc_alignment
							 __alloc_size(1);
void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t flags, int node) __assume_slab_alignment
									 __malloc;

void *kmalloc_trace(struct kmem_cache *s, gfp_t flags, size_t size)
		    __assume_kmalloc_alignment __alloc_size(3);

void *kmalloc_node_trace(struct kmem_cache *s, gfp_t gfpflags,
			 int node, size_t size) __assume_kmalloc_alignment
						__alloc_size(4);
void *kmalloc_large(size_t size, gfp_t flags) __assume_page_alignment
					      __alloc_size(1);

void *kmalloc_large_node(size_t size, gfp_t flags, int node) __assume_page_alignment
							     __alloc_size(1);

/**
 * kmalloc - allocate kernel memory
 * @size: how many bytes of memory are required.
 * @flags: describe the allocation context
 *
 * kmalloc is the normal method of allocating memory
 * for objects smaller than page size in the kernel.
 *
 * The allocated object address is aligned to at least ARCH_KMALLOC_MINALIGN
 * bytes. For @size of power of two bytes, the alignment is also guaranteed
 * to be at least the size.
 *
 * The @flags argument may be one of the GFP flags defined at
 * include/linux/gfp.h and described at
 * :ref:`Documentation/core-api/mm-api.rst <mm-api-gfp-flags>`
 *
 * The recommended usage of the @flags is described at
 * :ref:`Documentation/core-api/memory-allocation.rst <memory_allocation>`
 *
 * Below is a brief outline of the most useful GFP flags
 *
 * %GFP_KERNEL
 *	Allocate normal kernel RAM. May sleep.
 *
 * %GFP_NOWAIT
 *	Allocation will not sleep.
 *
 * %GFP_ATOMIC
 *	Allocation will not sleep. May use emergency pools.
 *
 * Also it is possible to set different flags by OR'ing
 * in one or more of the following additional @flags:
 *
 * %__GFP_ZERO
 *	Zero the allocated memory before returning. Also see kzalloc().
 *
 * %__GFP_HIGH
 *	This allocation has high priority and may use emergency pools.
 *
 * %__GFP_NOFAIL
 *	Indicate that this allocation is in no way allowed to fail
 *	(think twice before using).
 *
 * %__GFP_NORETRY
 *	If memory is not immediately available,
 *	then give up at once.
 *
 * %__GFP_NOWARN
 *	If allocation fails, don't issue any warnings.
 *
 * %__GFP_RETRY_MAYFAIL
 *	Try really hard to succeed the allocation but fail
 *	eventually.
 */
#ifndef CONFIG_SLOB
static __always_inline __alloc_size(1) void *kmalloc(size_t size, gfp_t flags)
{
	if (__builtin_constant_p(size) && size) {
		unsigned int index;

		if (size > KMALLOC_MAX_CACHE_SIZE)
			return kmalloc_large(size, flags);

		index = kmalloc_index(size);
		return kmalloc_trace(
				kmalloc_caches[kmalloc_type(flags)][index],
				flags, size);
	}
	return __kmalloc(size, flags);
}
#else
static __always_inline __alloc_size(1) void *kmalloc(size_t size, gfp_t flags)
{
	if (__builtin_constant_p(size) && size > KMALLOC_MAX_CACHE_SIZE)
		return kmalloc_large(size, flags);

	return __kmalloc(size, flags);
}
#endif
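
/*
 * The canonical pattern, for illustration ("struct foo" is made up):
 *
 *	struct foo *f = kmalloc(sizeof(*f), GFP_KERNEL);
 *	if (!f)
 *		return -ENOMEM;
 *
 * A constant @size lets the compiler pick the cache at build time via
 * kmalloc_index(); non-constant sizes are resolved at runtime through
 * __kmalloc().
 */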

#ifndef CONFIG_SLOB
static __always_inline __alloc_size(1) void *kmalloc_node(size_t size, gfp_t flags, int node)
{
	if (__builtin_constant_p(size) && size) {
		unsigned int index;

		if (size > KMALLOC_MAX_CACHE_SIZE)
			return kmalloc_large_node(size, flags, node);

		index = kmalloc_index(size);
		return kmalloc_node_trace(
				kmalloc_caches[kmalloc_type(flags)][index],
				flags, node, size);
	}
	return __kmalloc_node(size, flags, node);
}
#else
static __always_inline __alloc_size(1) void *kmalloc_node(size_t size, gfp_t flags, int node)
{
	if (__builtin_constant_p(size) && size > KMALLOC_MAX_CACHE_SIZE)
		return kmalloc_large_node(size, flags, node);

	return __kmalloc_node(size, flags, node);
}
#endif

/**
 * kmalloc_array - allocate memory for an array.
 * @n: number of elements.
 * @size: element size.
 * @flags: the type of memory to allocate (see kmalloc).
 */
static inline __alloc_size(1, 2) void *kmalloc_array(size_t n, size_t size, gfp_t flags)
{
	size_t bytes;

	if (unlikely(check_mul_overflow(n, size, &bytes)))
		return NULL;
	if (__builtin_constant_p(n) && __builtin_constant_p(size))
		return kmalloc(bytes, flags);
	return __kmalloc(bytes, flags);
}

/**
 * krealloc_array - reallocate memory for an array.
 * @p: pointer to the memory chunk to reallocate
 * @new_n: new number of elements to alloc
 * @new_size: new size of a single member of the array
 * @flags: the type of memory to allocate (see kmalloc)
 */
static inline __realloc_size(2, 3) void * __must_check krealloc_array(void *p,
								       size_t new_n,
								       size_t new_size,
								       gfp_t flags)
{
	size_t bytes;

	if (unlikely(check_mul_overflow(new_n, new_size, &bytes)))
		return NULL;

	return krealloc(p, bytes, flags);
}

/**
 * kcalloc - allocate memory for an array. The memory is set to zero.
 * @n: number of elements.
 * @size: element size.
 * @flags: the type of memory to allocate (see kmalloc).
 */
static inline __alloc_size(1, 2) void *kcalloc(size_t n, size_t size, gfp_t flags)
{
	return kmalloc_array(n, size, flags | __GFP_ZERO);
}
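
/*
 * For example (hypothetical caller): both calls below are overflow
 * checked, unlike an open-coded kmalloc(n * size, ...). If
 * nr_entries * sizeof(*tbl) would overflow size_t, they return NULL
 * instead of allocating a too-short buffer:
 *
 *	tbl = kmalloc_array(nr_entries, sizeof(*tbl), GFP_KERNEL);
 *	tbl = kcalloc(nr_entries, sizeof(*tbl), GFP_KERNEL);	// zeroed
 */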

void *__kmalloc_node_track_caller(size_t size, gfp_t flags, int node,
				  unsigned long caller) __alloc_size(1);
#define kmalloc_node_track_caller(size, flags, node) \
	__kmalloc_node_track_caller(size, flags, node, \
				    _RET_IP_)

/*
 * kmalloc_track_caller is a special version of kmalloc that records the
 * calling function of the routine calling it for slab leak tracking instead
 * of just the calling function (confusing, eh?).
 * It's useful when the call to kmalloc comes from a widely-used standard
 * allocator where we care about the real place the memory allocation
 * request comes from.
 */
#define kmalloc_track_caller(size, flags) \
	__kmalloc_node_track_caller(size, flags, \
				    NUMA_NO_NODE, _RET_IP_)

static inline __alloc_size(1, 2) void *kmalloc_array_node(size_t n, size_t size, gfp_t flags,
							   int node)
{
	size_t bytes;

	if (unlikely(check_mul_overflow(n, size, &bytes)))
		return NULL;
	if (__builtin_constant_p(n) && __builtin_constant_p(size))
		return kmalloc_node(bytes, flags, node);
	return __kmalloc_node(bytes, flags, node);
}

static inline __alloc_size(1, 2) void *kcalloc_node(size_t n, size_t size, gfp_t flags, int node)
{
	return kmalloc_array_node(n, size, flags | __GFP_ZERO, node);
}

/*
 * Shortcuts
 */
static inline void *kmem_cache_zalloc(struct kmem_cache *k, gfp_t flags)
{
	return kmem_cache_alloc(k, flags | __GFP_ZERO);
}

/**
 * kzalloc - allocate memory. The memory is set to zero.
 * @size: how many bytes of memory are required.
 * @flags: the type of memory to allocate (see kmalloc).
 */
static inline __alloc_size(1) void *kzalloc(size_t size, gfp_t flags)
{
	return kmalloc(size, flags | __GFP_ZERO);
}

/**
 * kzalloc_node - allocate zeroed memory from a particular memory node.
 * @size: how many bytes of memory are required.
 * @flags: the type of memory to allocate (see kmalloc).
 * @node: memory node from which to allocate
 */
static inline __alloc_size(1) void *kzalloc_node(size_t size, gfp_t flags, int node)
{
	return kmalloc_node(size, flags | __GFP_ZERO, node);
}

extern void *kvmalloc_node(size_t size, gfp_t flags, int node) __alloc_size(1);
static inline __alloc_size(1) void *kvmalloc(size_t size, gfp_t flags)
{
	return kvmalloc_node(size, flags, NUMA_NO_NODE);
}
static inline __alloc_size(1) void *kvzalloc_node(size_t size, gfp_t flags, int node)
{
	return kvmalloc_node(size, flags | __GFP_ZERO, node);
}
static inline __alloc_size(1) void *kvzalloc(size_t size, gfp_t flags)
{
	return kvmalloc(size, flags | __GFP_ZERO);
}

static inline __alloc_size(1, 2) void *kvmalloc_array(size_t n, size_t size, gfp_t flags)
{
	size_t bytes;

	if (unlikely(check_mul_overflow(n, size, &bytes)))
		return NULL;

	return kvmalloc(bytes, flags);
}

static inline __alloc_size(1, 2) void *kvcalloc(size_t n, size_t size, gfp_t flags)
{
	return kvmalloc_array(n, size, flags | __GFP_ZERO);
}

extern void *kvrealloc(const void *p, size_t oldsize, size_t newsize, gfp_t flags)
		       __realloc_size(3);
extern void kvfree(const void *addr);
extern void kvfree_sensitive(const void *addr, size_t len);
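
/*
 * kvmalloc() attempts a kmalloc() first and falls back to vmalloc()
 * when the request is too large or physically contiguous memory is not
 * available, so the result must be freed with kvfree(), never plain
 * kfree(). A sketch, with made-up names:
 *
 *	buf = kvmalloc_array(nr_entries, entry_size, GFP_KERNEL);
 *	if (!buf)
 *		return -ENOMEM;
 *	...
 *	kvfree(buf);
 */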

unsigned int kmem_cache_size(struct kmem_cache *s);

/**
 * kmalloc_size_roundup - Report allocation bucket size for the given size
 *
 * @size: Number of bytes to round up from.
 *
 * This returns the number of bytes that would be available in a kmalloc()
 * allocation of @size bytes. For example, a 126 byte request would be
 * rounded up to the next sized kmalloc bucket, 128 bytes. (This is strictly
 * for the general-purpose kmalloc()-based allocations, and is not for the
 * pre-sized kmem_cache_alloc()-based allocations.)
 *
 * Use this to kmalloc() the full bucket size ahead of time instead of using
 * ksize() to query the size after an allocation.
 */
size_t kmalloc_size_roundup(size_t size);
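
/*
 * A sketch of the intended pattern (sizes illustrative): round up
 * first, then allocate, so every byte of the bucket is inside the
 * requested size that bounds checking knows about:
 *
 *	size_t want = kmalloc_size_roundup(126);	// e.g. 128
 *	buf = kmalloc(want, GFP_KERNEL);
 *	if (buf)
 *		...	// all @want bytes are usable
 */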

void __init kmem_cache_init_late(void);

#if defined(CONFIG_SMP) && defined(CONFIG_SLAB)
int slab_prepare_cpu(unsigned int cpu);
int slab_dead_cpu(unsigned int cpu);
#else
#define slab_prepare_cpu	NULL
#define slab_dead_cpu		NULL
#endif

#endif	/* _LINUX_SLAB_H */