// SPDX-License-Identifier: GPL-2.0
/*
 * This file contains core generic KASAN code.
 *
 * Copyright (c) 2014 Samsung Electronics Co., Ltd.
 * Author: Andrey Ryabinin <ryabinin.a.a@gmail.com>
 *
 * Some code borrowed from https://github.com/xairy/kasan-prototype by
 * Andrey Konovalov <andreyknvl@gmail.com>
 */

#include <linux/export.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/kasan.h>
#include <linux/kernel.h>
#include <linux/kfence.h>
#include <linux/kmemleak.h>
#include <linux/linkage.h>
#include <linux/memblock.h>
#include <linux/memory.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/printk.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/slab.h>
#include <linux/stacktrace.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/vmalloc.h>
#include <linux/bug.h>

#include "kasan.h"
#include "../slab.h"

/*
 * All functions below are always inlined so that the compiler can
 * perform better optimizations in each of __asan_loadX/__asan_storeX
 * depending on the memory access size X.
 */

static __always_inline bool memory_is_poisoned_1(unsigned long addr)
{
	s8 shadow_value = *(s8 *)kasan_mem_to_shadow((void *)addr);

	if (unlikely(shadow_value)) {
		s8 last_accessible_byte = addr & KASAN_GRANULE_MASK;
		return unlikely(last_accessible_byte >= shadow_value);
	}

	return false;
}
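
/*
 * A worked example of the check above (a sketch, not normative): each
 * KASAN_GRANULE_SIZE (8) bytes of memory map to one shadow byte. Shadow
 * 0 means the whole granule is accessible; a value N in 1..7 means only
 * the first N bytes are. So for an access at an address with offset 5
 * within its granule and a shadow value of 4, last_accessible_byte is
 * 5 >= 4 and the access is reported as bad.
 */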

static __always_inline bool memory_is_poisoned_2_4_8(unsigned long addr,
						unsigned long size)
{
	u8 *shadow_addr = (u8 *)kasan_mem_to_shadow((void *)addr);

	/*
	 * The access crosses an 8(shadow size)-byte boundary. Such an
	 * access maps into two shadow bytes, so we need to check them both.
	 */
	if (unlikely(((addr + size - 1) & KASAN_GRANULE_MASK) < size - 1))
		return *shadow_addr || memory_is_poisoned_1(addr + size - 1);

	return memory_is_poisoned_1(addr + size - 1);
}
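
/*
 * Boundary-crossing example (a sketch): a 4-byte access starting at
 * offset 6 within a granule touches bytes 6..9, i.e. two granules.
 * Then (addr + size - 1) & KASAN_GRANULE_MASK == 1, which is less than
 * size - 1 == 3, so both the first shadow byte and the shadow byte
 * covering the last accessed byte get checked.
 */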

static __always_inline bool memory_is_poisoned_16(unsigned long addr)
{
	u16 *shadow_addr = (u16 *)kasan_mem_to_shadow((void *)addr);

	/* An unaligned 16-byte access maps into 3 shadow bytes. */
	if (unlikely(!IS_ALIGNED(addr, KASAN_GRANULE_SIZE)))
		return *shadow_addr || memory_is_poisoned_1(addr + 15);

	return *shadow_addr;
}

static __always_inline unsigned long bytes_is_nonzero(const u8 *start,
					size_t size)
{
	while (size) {
		if (unlikely(*start))
			return (unsigned long)start;
		start++;
		size--;
	}

	return 0;
}

static __always_inline unsigned long memory_is_nonzero(const void *start,
						const void *end)
{
	unsigned int words;
	unsigned long ret;
	unsigned int prefix = (unsigned long)start % 8;

	if (end - start <= 16)
		return bytes_is_nonzero(start, end - start);

	if (prefix) {
		prefix = 8 - prefix;
		ret = bytes_is_nonzero(start, prefix);
		if (unlikely(ret))
			return ret;
		start += prefix;
	}

	words = (end - start) / 8;
	while (words) {
		if (unlikely(*(u64 *)start))
			return bytes_is_nonzero(start, 8);
		start += 8;
		words--;
	}

	return bytes_is_nonzero(start, (end - start) % 8);
}
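
/*
 * Scan example (a sketch): for a 27-byte shadow range whose start
 * address has start % 8 == 5, the code above checks 3 unaligned
 * lead-in bytes one at a time, then 3 aligned u64 words (24 bytes),
 * leaving no tail. When a word is nonzero, its 8 bytes are re-checked
 * individually so that the exact address of the first nonzero shadow
 * byte is returned.
 */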

static __always_inline bool memory_is_poisoned_n(unsigned long addr,
						size_t size)
{
	unsigned long ret;

	ret = memory_is_nonzero(kasan_mem_to_shadow((void *)addr),
			kasan_mem_to_shadow((void *)addr + size - 1) + 1);

	if (unlikely(ret)) {
		unsigned long last_byte = addr + size - 1;
		s8 *last_shadow = (s8 *)kasan_mem_to_shadow((void *)last_byte);

		if (unlikely(ret != (unsigned long)last_shadow ||
			((long)(last_byte & KASAN_GRANULE_MASK) >= *last_shadow)))
			return true;
	}
	return false;
}

static __always_inline bool memory_is_poisoned(unsigned long addr, size_t size)
{
	if (__builtin_constant_p(size)) {
		switch (size) {
		case 1:
			return memory_is_poisoned_1(addr);
		case 2:
		case 4:
		case 8:
			return memory_is_poisoned_2_4_8(addr, size);
		case 16:
			return memory_is_poisoned_16(addr);
		default:
			BUILD_BUG();
		}
	}

	return memory_is_poisoned_n(addr, size);
}

static __always_inline bool check_region_inline(unsigned long addr,
						size_t size, bool write,
						unsigned long ret_ip)
{
	if (!kasan_arch_is_ready())
		return true;

	if (unlikely(size == 0))
		return true;

	if (unlikely(addr + size < addr))
		return !kasan_report(addr, size, write, ret_ip);

	if (unlikely(!addr_has_metadata((void *)addr)))
		return !kasan_report(addr, size, write, ret_ip);

	if (likely(!memory_is_poisoned(addr, size)))
		return true;

	return !kasan_report(addr, size, write, ret_ip);
}

bool kasan_check_range(unsigned long addr, size_t size, bool write,
				unsigned long ret_ip)
{
	return check_region_inline(addr, size, write, ret_ip);
}
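
/*
 * Besides the compiler-emitted __asan_loadX/__asan_storeX hooks below,
 * kasan_check_range() is the entry point used by the kernel's explicit
 * checks, e.g. the instrumented memset()/memcpy()/memmove() wrappers
 * (a note on the generic KASAN design; those call sites live outside
 * this file).
 */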

bool kasan_byte_accessible(const void *addr)
{
	s8 shadow_byte;

	if (!kasan_arch_is_ready())
		return true;

	shadow_byte = READ_ONCE(*(s8 *)kasan_mem_to_shadow(addr));

	return shadow_byte >= 0 && shadow_byte < KASAN_GRANULE_SIZE;
}

void kasan_cache_shrink(struct kmem_cache *cache)
{
	kasan_quarantine_remove_cache(cache);
}

void kasan_cache_shutdown(struct kmem_cache *cache)
{
	if (!__kmem_cache_empty(cache))
		kasan_quarantine_remove_cache(cache);
}

static void register_global(struct kasan_global *global)
{
	size_t aligned_size = round_up(global->size, KASAN_GRANULE_SIZE);

	kasan_unpoison(global->beg, global->size, false);

	kasan_poison(global->beg + aligned_size,
		     global->size_with_redzone - aligned_size,
		     KASAN_GLOBAL_REDZONE, false);
}
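
/*
 * Resulting layout of a registered global (a sketch):
 *
 *   beg .. beg + size                         : accessible (unpoisoned)
 *   beg + aligned_size .. beg + size_with_redzone : KASAN_GLOBAL_REDZONE
 *
 * A partially used last granule (size not a multiple of
 * KASAN_GRANULE_SIZE) is encoded by the shadow value that
 * kasan_unpoison() writes for it.
 */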

void __asan_register_globals(struct kasan_global *globals, size_t size)
{
	int i;

	for (i = 0; i < size; i++)
		register_global(&globals[i]);
}
EXPORT_SYMBOL(__asan_register_globals);

void __asan_unregister_globals(struct kasan_global *globals, size_t size)
{
}
EXPORT_SYMBOL(__asan_unregister_globals);

#define DEFINE_ASAN_LOAD_STORE(size)					\
	void __asan_load##size(unsigned long addr)			\
	{								\
		check_region_inline(addr, size, false, _RET_IP_);	\
	}								\
	EXPORT_SYMBOL(__asan_load##size);				\
	__alias(__asan_load##size)					\
	void __asan_load##size##_noabort(unsigned long);		\
	EXPORT_SYMBOL(__asan_load##size##_noabort);			\
	void __asan_store##size(unsigned long addr)			\
	{								\
		check_region_inline(addr, size, true, _RET_IP_);	\
	}								\
	EXPORT_SYMBOL(__asan_store##size);				\
	__alias(__asan_store##size)					\
	void __asan_store##size##_noabort(unsigned long);		\
	EXPORT_SYMBOL(__asan_store##size##_noabort)

DEFINE_ASAN_LOAD_STORE(1);
DEFINE_ASAN_LOAD_STORE(2);
DEFINE_ASAN_LOAD_STORE(4);
DEFINE_ASAN_LOAD_STORE(8);
DEFINE_ASAN_LOAD_STORE(16);
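
/*
 * With outline instrumentation the compiler emits a call before each
 * memory access; conceptually (a sketch of the generated code, not
 * literal compiler output):
 *
 *	int x = *p;
 * becomes:
 *	__asan_load4((unsigned long)p);
 *	int x = *p;
 *
 * The _noabort aliases exist because -fsanitize=kernel-address emits
 * calls to the noabort variants.
 */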

void __asan_loadN(unsigned long addr, size_t size)
{
	kasan_check_range(addr, size, false, _RET_IP_);
}
EXPORT_SYMBOL(__asan_loadN);

__alias(__asan_loadN)
void __asan_loadN_noabort(unsigned long, size_t);
EXPORT_SYMBOL(__asan_loadN_noabort);

void __asan_storeN(unsigned long addr, size_t size)
{
	kasan_check_range(addr, size, true, _RET_IP_);
}
EXPORT_SYMBOL(__asan_storeN);

__alias(__asan_storeN)
void __asan_storeN_noabort(unsigned long, size_t);
EXPORT_SYMBOL(__asan_storeN_noabort);

/* to shut up compiler complaints */
void __asan_handle_no_return(void) {}
EXPORT_SYMBOL(__asan_handle_no_return);

/* Emitted by compiler to poison alloca()ed objects. */
void __asan_alloca_poison(unsigned long addr, size_t size)
{
	size_t rounded_up_size = round_up(size, KASAN_GRANULE_SIZE);
	size_t padding_size = round_up(size, KASAN_ALLOCA_REDZONE_SIZE) -
			rounded_up_size;
	size_t rounded_down_size = round_down(size, KASAN_GRANULE_SIZE);

	const void *left_redzone = (const void *)(addr -
			KASAN_ALLOCA_REDZONE_SIZE);
	const void *right_redzone = (const void *)(addr + rounded_up_size);

	WARN_ON(!IS_ALIGNED(addr, KASAN_ALLOCA_REDZONE_SIZE));

	kasan_unpoison((const void *)(addr + rounded_down_size),
			size - rounded_down_size, false);
	kasan_poison(left_redzone, KASAN_ALLOCA_REDZONE_SIZE,
		     KASAN_ALLOCA_LEFT, false);
	kasan_poison(right_redzone, padding_size + KASAN_ALLOCA_REDZONE_SIZE,
		     KASAN_ALLOCA_RIGHT, false);
}
EXPORT_SYMBOL(__asan_alloca_poison);
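
/*
 * Resulting stack layout around an alloca()ed object (a sketch):
 *
 *   [ KASAN_ALLOCA_LEFT ][ object (size bytes) ][ KASAN_ALLOCA_RIGHT ]
 *                        ^addr                 ^addr + rounded_up_size
 *
 * Both redzones are KASAN_ALLOCA_REDZONE_SIZE bytes; the padding up to
 * that alignment is folded into the right redzone.
 */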

/* Emitted by compiler to unpoison alloca()ed areas when the stack unwinds. */
void __asan_allocas_unpoison(const void *stack_top, const void *stack_bottom)
{
	if (unlikely(!stack_top || stack_top > stack_bottom))
		return;

	kasan_unpoison(stack_top, stack_bottom - stack_top, false);
}
EXPORT_SYMBOL(__asan_allocas_unpoison);

/* Emitted by the compiler to [un]poison local variables. */
#define DEFINE_ASAN_SET_SHADOW(byte)					\
	void __asan_set_shadow_##byte(const void *addr, size_t size)	\
	{								\
		__memset((void *)addr, 0x##byte, size);			\
	}								\
	EXPORT_SYMBOL(__asan_set_shadow_##byte)

DEFINE_ASAN_SET_SHADOW(00);
DEFINE_ASAN_SET_SHADOW(f1);
DEFINE_ASAN_SET_SHADOW(f2);
DEFINE_ASAN_SET_SHADOW(f3);
DEFINE_ASAN_SET_SHADOW(f5);
DEFINE_ASAN_SET_SHADOW(f8);
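
/*
 * These helpers write whole spans of shadow memory directly: 0x00
 * marks memory as fully accessible, while 0xf1/0xf2/0xf3 are the stack
 * redzone markers placed to the left of, between, and to the right of
 * stack variables (the remaining values follow the same compiler ABI;
 * see the KASAN_* shadow value definitions in kasan.h).
 */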

/* Only allow cache merging when no per-object metadata is present. */
slab_flags_t kasan_never_merge(void)
{
	if (!kasan_requires_meta())
		return 0;
	return SLAB_KASAN;
}

/*
 * Adaptive redzone policy taken from the userspace AddressSanitizer
 * runtime. For larger allocations larger redzones are used.
 */
static inline unsigned int optimal_redzone(unsigned int object_size)
{
	return
		object_size <= 64        - 16   ? 16 :
		object_size <= 128       - 32   ? 32 :
		object_size <= 512       - 64   ? 64 :
		object_size <= 4096      - 128  ? 128 :
		object_size <= (1 << 14) - 256  ? 256 :
		object_size <= (1 << 15) - 512  ? 512 :
		object_size <= (1 << 16) - 1024 ? 1024 : 2048;
}
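
/*
 * The thresholds above work out to (object_size -> redzone bytes):
 * <= 48 -> 16, <= 96 -> 32, <= 448 -> 64, <= 3968 -> 128,
 * <= 16128 -> 256, <= 32256 -> 512, <= 64512 -> 1024, else 2048.
 */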

void kasan_cache_create(struct kmem_cache *cache, unsigned int *size,
			slab_flags_t *flags)
{
	unsigned int ok_size;
	unsigned int optimal_size;

	if (!kasan_requires_meta())
		return;

	/*
	 * SLAB_KASAN is used to mark caches that are sanitized by KASAN
	 * and that thus have per-object metadata.
	 * Currently this flag is used in two places:
	 * 1. In slab_ksize() to account for per-object metadata when
	 *    calculating the size of the accessible memory within the object.
	 * 2. In slab_common.c via kasan_never_merge() to prevent merging of
	 *    caches with per-object metadata.
	 */
	*flags |= SLAB_KASAN;

	ok_size = *size;

	/* Add alloc meta into redzone. */
	cache->kasan_info.alloc_meta_offset = *size;
	*size += sizeof(struct kasan_alloc_meta);

	/*
	 * If alloc meta doesn't fit, don't add it.
	 * This can only happen with SLAB, as it has KMALLOC_MAX_SIZE equal
	 * to KMALLOC_MAX_CACHE_SIZE and doesn't fall back to page_alloc for
	 * larger sizes.
	 */
	if (*size > KMALLOC_MAX_SIZE) {
		cache->kasan_info.alloc_meta_offset = 0;
		*size = ok_size;
		/* Continue, since free meta might still fit. */
	}

	/*
	 * Add free meta into redzone when it's not possible to store
	 * it in the object. This is the case when:
	 * 1. Object is SLAB_TYPESAFE_BY_RCU, which means that it can
	 *    be touched after it was freed, or
	 * 2. Object has a constructor, which means it's expected to
	 *    retain its content until the next allocation, or
	 * 3. Object is too small.
	 * Otherwise cache->kasan_info.free_meta_offset = 0 is implied.
	 */
	if ((cache->flags & SLAB_TYPESAFE_BY_RCU) || cache->ctor ||
	    cache->object_size < sizeof(struct kasan_free_meta)) {
		ok_size = *size;

		cache->kasan_info.free_meta_offset = *size;
		*size += sizeof(struct kasan_free_meta);

		/* If free meta doesn't fit, don't add it. */
		if (*size > KMALLOC_MAX_SIZE) {
			cache->kasan_info.free_meta_offset = KASAN_NO_FREE_META;
			*size = ok_size;
		}
	}

	/* Calculate size with optimal redzone. */
	optimal_size = cache->object_size + optimal_redzone(cache->object_size);
	/* Limit it with KMALLOC_MAX_SIZE (relevant for SLAB only). */
	if (optimal_size > KMALLOC_MAX_SIZE)
		optimal_size = KMALLOC_MAX_SIZE;
	/* Use optimal size if the size with added metas is not large enough. */
	if (*size < optimal_size)
		*size = optimal_size;
}
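
/*
 * Resulting slab object layout with metadata in the redzone (a sketch;
 * either meta may be absent depending on the checks above):
 *
 *   [ object_size ][ alloc meta ][ free meta ][ rest of redzone ]
 *                  ^alloc_meta_offset
 *
 * with free meta stored inside the object instead (offset 0) when none
 * of the SLAB_TYPESAFE_BY_RCU/constructor/too-small conditions apply.
 */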

struct kasan_alloc_meta *kasan_get_alloc_meta(struct kmem_cache *cache,
					      const void *object)
{
	if (!cache->kasan_info.alloc_meta_offset)
		return NULL;
	return (void *)object + cache->kasan_info.alloc_meta_offset;
}

struct kasan_free_meta *kasan_get_free_meta(struct kmem_cache *cache,
					    const void *object)
{
	BUILD_BUG_ON(sizeof(struct kasan_free_meta) > 32);
	if (cache->kasan_info.free_meta_offset == KASAN_NO_FREE_META)
		return NULL;
	return (void *)object + cache->kasan_info.free_meta_offset;
}

void kasan_init_object_meta(struct kmem_cache *cache, const void *object)
{
	struct kasan_alloc_meta *alloc_meta;

	alloc_meta = kasan_get_alloc_meta(cache, object);
	if (alloc_meta)
		__memset(alloc_meta, 0, sizeof(*alloc_meta));
}

size_t kasan_metadata_size(struct kmem_cache *cache, bool in_object)
{
	struct kasan_cache *info = &cache->kasan_info;

	if (!kasan_requires_meta())
		return 0;

	if (in_object)
		return (info->free_meta_offset ?
			0 : sizeof(struct kasan_free_meta));
	else
		return (info->alloc_meta_offset ?
			sizeof(struct kasan_alloc_meta) : 0) +
			((info->free_meta_offset &&
			  info->free_meta_offset != KASAN_NO_FREE_META) ?
			 sizeof(struct kasan_free_meta) : 0);
}

static void __kasan_record_aux_stack(void *addr, bool can_alloc)
{
	struct slab *slab = kasan_addr_to_slab(addr);
	struct kmem_cache *cache;
	struct kasan_alloc_meta *alloc_meta;
	void *object;

	if (is_kfence_address(addr) || !slab)
		return;

	cache = slab->slab_cache;
	object = nearest_obj(cache, slab, addr);
	alloc_meta = kasan_get_alloc_meta(cache, object);
	if (!alloc_meta)
		return;

	alloc_meta->aux_stack[1] = alloc_meta->aux_stack[0];
	alloc_meta->aux_stack[0] = kasan_save_stack(GFP_NOWAIT, can_alloc);
}

void kasan_record_aux_stack(void *addr)
{
	return __kasan_record_aux_stack(addr, true);
}

void kasan_record_aux_stack_noalloc(void *addr)
{
	return __kasan_record_aux_stack(addr, false);
}
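
/*
 * The two aux stacks above keep the most recent asynchronous hand-offs
 * of an object, e.g. the points where a work item or an RCU callback
 * was queued; callers that must not allocate use the _noalloc variant
 * so that saving the stack does not grow the stack depot (the call
 * sites live outside this file).
 */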

void kasan_save_alloc_info(struct kmem_cache *cache, void *object, gfp_t flags)
{
	struct kasan_alloc_meta *alloc_meta;

	alloc_meta = kasan_get_alloc_meta(cache, object);
	if (alloc_meta)
		kasan_set_track(&alloc_meta->alloc_track, flags);
}

void kasan_save_free_info(struct kmem_cache *cache, void *object)
{
	struct kasan_free_meta *free_meta;

	free_meta = kasan_get_free_meta(cache, object);
	if (!free_meta)
		return;

	kasan_set_track(&free_meta->free_track, GFP_NOWAIT);
	/* The object was freed and has free track set. */
	*(u8 *)kasan_mem_to_shadow(object) = KASAN_SLAB_FREETRACK;
}