/mm/kmsan/
shadow.c
        85  void *shadow;  in kmsan_get_shadow_origin_ptr() (local)
        98  if (!shadow)  in kmsan_get_shadow_origin_ptr()
       101  ret.shadow = shadow;  in kmsan_get_shadow_origin_ptr()
       108  ret.shadow = dummy_store_page;  in kmsan_get_shadow_origin_ptr()
       112  ret.shadow = dummy_load_page;  in kmsan_get_shadow_origin_ptr()
       173  struct page *shadow, *origin;  in kmsan_alloc_page() (local)
       180  shadow = shadow_page_for(page);  in kmsan_alloc_page()
       278  void *shadow, *origin;  in kmsan_init_alloc_meta_for_range() (local)
       289  shadow_p = virt_to_page((char *)shadow + addr);  in kmsan_init_alloc_meta_for_range()
       302  set_no_shadow_origin_page(&shadow[i]);  in kmsan_setup_meta()
       [all …]
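
    The kmsan_get_shadow_origin_ptr() hits show KMSAN's central lookup: every byte
    of tracked memory has a shadow byte (zero means initialized) plus an origin id
    recording where an uninitialized value came from, and untracked addresses fall
    back to dummy pages instead of NULL. A minimal compilable sketch of that
    fallback, with hypothetical lookup_shadow()/lookup_origin() helpers standing in
    for the real metadata walk:

        #include <stdbool.h>
        #include <stddef.h>

        /*
         * Dummy metadata pages: stores to untracked memory land in a scratch
         * page, and loads from untracked memory read an always-zero page, so
         * every load looks initialized and no metadata pointer is ever NULL.
         */
        static char dummy_store_page[4096];
        static char dummy_load_page[4096];      /* intentionally left all-zero */

        struct shadow_origin_ptr {
            void *shadow, *origin;              /* mirrors the pair in kmsan.h */
        };

        /* Hypothetical lookups: return NULL when @addr has no metadata. */
        static void *lookup_shadow(void *addr) { (void)addr; return NULL; }
        static void *lookup_origin(void *addr) { (void)addr; return NULL; }

        static struct shadow_origin_ptr
        get_shadow_origin(void *addr, bool is_store)
        {
            struct shadow_origin_ptr ret;
            void *shadow = lookup_shadow(addr);

            if (shadow) {
                ret.shadow = shadow;
                ret.origin = lookup_origin(addr);
                return ret;
            }
            /* Untracked address: route metadata to the dummy pages. */
            ret.shadow = is_store ? dummy_store_page : dummy_load_page;
            ret.origin = is_store ? dummy_store_page : dummy_load_page;
            return ret;
        }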
|
init.c
        99  struct page *shadow, *origin;  (member)
       118  struct page *shadow, *origin;  in kmsan_memblock_free_pages() (local)
       120  if (!held_back[order].shadow) {  in kmsan_memblock_free_pages()
       121  held_back[order].shadow = page;  in kmsan_memblock_free_pages()
       128  shadow = held_back[order].shadow;  in kmsan_memblock_free_pages()
       130  kmsan_setup_meta(page, shadow, origin, order);  in kmsan_memblock_free_pages()
       132  held_back[order].shadow = NULL;  in kmsan_memblock_free_pages()
       170  struct page *page, *shadow, *origin;  in do_collection() (local)
       174  shadow = smallstack_pop(&collect);  in do_collection()
       218  if (held_back[i].shadow)  in kmsan_memblock_discard()
       [all …]
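
    The held_back[] hits outline how KMSAN bootstraps metadata before the page
    allocator exists: of every three same-order blocks that memblock frees, the
    first two are withheld to become the shadow and origin pages of the third. A
    sketch of that rotation, assuming the middle "hold as origin" branch that the
    excerpt elides, with setup_meta() as a stand-in for kmsan_setup_meta():

        #include <stdbool.h>
        #include <stddef.h>

        #define MAX_ORDER 11

        struct page;    /* opaque in this sketch */

        /* One pair of held-back metadata pages per allocation order. */
        static struct {
            struct page *shadow, *origin;
        } held_back[MAX_ORDER];

        /* Stub: wire the pair up as @page's shadow and origin metadata. */
        static void setup_meta(struct page *page, struct page *shadow,
                               struct page *origin, int order)
        {
            (void)page; (void)shadow; (void)origin; (void)order;
        }

        /*
         * Returns true if @page was captured as metadata and must be
         * withheld from the allocator; every third block is released
         * with the two held blocks attached as its metadata.
         */
        static bool hold_or_release(struct page *page, int order)
        {
            if (!held_back[order].shadow) {
                held_back[order].shadow = page;
                return true;
            }
            if (!held_back[order].origin) {
                held_back[order].origin = page;
                return true;
            }
            setup_meta(page, held_back[order].shadow,
                       held_back[order].origin, order);
            held_back[order].shadow = NULL;
            held_back[order].origin = NULL;
            return false;   /* @page may now go to the page allocator */
        }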
|
instrumentation.c
       144  *shadow = *(u64 *)(ctx->cstate.param_tls);  in get_param0_metadata()
       152  *(u64 *)(ctx->cstate.retval_tls) = shadow;  in set_retval_metadata()
       162  u64 shadow;  in __msan_memmove() (local)
       164  get_param0_metadata(&shadow, &origin);  in __msan_memmove()
       176  set_retval_metadata(shadow, origin);  in __msan_memmove()
       187  u64 shadow;  in __msan_memcpy() (local)
       189  get_param0_metadata(&shadow, &origin);  in __msan_memcpy()
       203  set_retval_metadata(shadow, origin);  in __msan_memcpy()
       214  u64 shadow;  in __msan_memset() (local)
       216  get_param0_metadata(&shadow, &origin);  in __msan_memset()
       [all …]
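
    __msan_memmove(), __msan_memcpy(), and __msan_memset() are interceptors the
    compiler substitutes for the plain string functions: they copy shadow alongside
    data and pass argument/return-value metadata through per-task slots rather
    than registers. A userspace sketch of the __msan_memcpy() shape; the slot
    variables and copy_shadow() are illustrative stand-ins:

        #include <stdint.h>
        #include <string.h>

        /*
         * Per-task slots standing in for the param_tls/retval_tls context
         * above: instrumented callers stage argument metadata here, and
         * callees publish return-value metadata the same way.
         */
        static uint64_t param_tls, retval_tls;
        static uint32_t param_origin_tls, retval_origin_tls;

        /* Hypothetical helper: copy the shadow bytes mirroring [src, src+n). */
        static void copy_shadow(void *dst, const void *src, size_t n)
        {
            (void)dst; (void)src; (void)n;
        }

        void *msan_memcpy_sketch(void *dst, const void *src, size_t n)
        {
            /* Metadata for parameter 0 (dst), staged by the caller. */
            uint64_t shadow = param_tls;
            uint32_t origin = param_origin_tls;

            memcpy(dst, src, n);
            copy_shadow(dst, src, n);   /* initializedness travels with data */

            /* The return value is dst, so it inherits dst's metadata. */
            retval_tls = shadow;
            retval_origin_tls = origin;
            return dst;
        }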
|
hooks.c
       152  struct page *shadow, *origin;  in kmsan_ioremap_page_range() (local)
       162  shadow = alloc_pages(gfp_mask, 1);  in kmsan_ioremap_page_range()
       164  if (!shadow || !origin) {  in kmsan_ioremap_page_range()
       170  vmalloc_shadow(start + off + PAGE_SIZE), prot, &shadow,  in kmsan_ioremap_page_range()
       176  shadow = NULL;  in kmsan_ioremap_page_range()
       200  if (shadow)  in kmsan_ioremap_page_range()
       201  __free_pages(shadow, 1);  in kmsan_ioremap_page_range()
       220  struct page *shadow, *origin;  in kmsan_iounmap_page_range() (local)
       232  shadow = kmsan_vmalloc_to_page_or_null((void *)v_shadow);  in kmsan_iounmap_page_range()
       236  if (shadow)  in kmsan_iounmap_page_range()
       [all …]
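
    kmsan_ioremap_page_range() shows the metadata obligation of every new mapping:
    each mapped page needs freshly allocated shadow and origin pages wired into the
    vmalloc metadata region, and both must be freed on any failure, as the
    alloc_pages()/__free_pages() pairs above indicate. A sketch of that
    allocate-map-or-unwind loop; alloc_meta_block(), free_meta_block(), and
    map_meta() are hypothetical stand-ins:

        #include <stddef.h>

        struct page;    /* opaque in this sketch */

        /* Stubs for page allocation and for mapping a metadata page at
         * the shadow/origin alias of @addr. */
        static struct page *alloc_meta_block(void) { return NULL; }
        static void free_meta_block(struct page *p) { (void)p; }
        static int map_meta(unsigned long addr, struct page *shadow,
                            struct page *origin)
        {
            (void)addr; (void)shadow; (void)origin;
            return 0;
        }

        /* Provide metadata for every page in [start, end); unwind on error. */
        static int ioremap_meta_range(unsigned long start, unsigned long end)
        {
            for (unsigned long addr = start; addr < end; addr += 4096) {
                struct page *shadow = alloc_meta_block();
                struct page *origin = alloc_meta_block();

                if (!shadow || !origin || map_meta(addr, shadow, origin)) {
                    if (shadow)
                        free_meta_block(shadow);
                    if (origin)
                        free_meta_block(origin);
                    return -1;  /* caller tears down the partial mapping */
                }
            }
            return 0;
        }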
|
core.c
       258  unsigned char *shadow = NULL;  in kmsan_internal_check_memory() (local)
       269  shadow = kmsan_get_metadata((void *)(addr64 + pos),  in kmsan_internal_check_memory()
       271  if (!shadow) {  in kmsan_internal_check_memory()
       287  if (!shadow[i]) {  in kmsan_internal_check_memory()
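
    kmsan_internal_check_memory() is the reporting side: walk the range chunk by
    chunk, fetch each chunk's shadow, and flag every byte whose shadow byte is
    nonzero. A simplified version of that walk, with get_shadow() as a
    hypothetical stand-in for kmsan_get_metadata():

        #include <stddef.h>
        #include <stdint.h>
        #include <stdio.h>

        /*
         * Stand-in for kmsan_get_metadata(): shadow bytes for one contiguous
         * chunk starting at @addr; may shrink *chunk to the span the returned
         * pointer covers, or return NULL for untracked memory.
         */
        static uint8_t *get_shadow(uintptr_t addr, size_t *chunk)
        {
            (void)addr; (void)chunk;
            return NULL;    /* everything untracked in this sketch */
        }

        /* Flag every byte whose shadow is nonzero, i.e. still uninitialized. */
        static void check_memory(uintptr_t addr, size_t size)
        {
            size_t pos = 0;

            while (pos < size) {
                size_t chunk = size - pos;
                uint8_t *shadow = get_shadow(addr + pos, &chunk);

                if (shadow) {
                    for (size_t i = 0; i < chunk; i++)
                        if (shadow[i])
                            printf("uninit byte at %#lx\n",
                                   (unsigned long)(addr + pos + i));
                }
                pos += chunk;   /* untracked chunks are simply skipped */
            }
        }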
|
kmsan.h
        42  void *shadow, *origin;  (member)
       169  void kmsan_setup_meta(struct page *page, struct page *shadow,
|
Makefile
         6  obj-y := core.o instrumentation.o init.o hooks.o report.o shadow.o
|
/mm/
workingset.c
       213  unsigned long entry = xa_to_value(shadow);  in unpack_shadow()
       264  static bool lru_gen_test_recent(void *shadow, struct lruvec **lruvec,  in lru_gen_test_recent() (argument)
       272  unpack_shadow(shadow, &memcg_id, &pgdat, token, workingset);  in lru_gen_test_recent()
       283  static void lru_gen_refault(struct folio *folio, void *shadow)  in lru_gen_refault() (argument)
       296  recent = lru_gen_test_recent(shadow, &lruvec, &token, &workingset);  in lru_gen_refault()
       333  static bool lru_gen_test_recent(void *shadow, struct lruvec **lruvec,  in lru_gen_test_recent() (argument)
       339  static void lru_gen_refault(struct folio *folio, void *shadow)  in lru_gen_refault() (argument)
       440  unpack_shadow(shadow, &memcgid, &pgdat, &eviction, workingset);  in workingset_test_recent()
       534  void workingset_refault(struct folio *folio, void *shadow)  in workingset_refault() (argument)
       546  lru_gen_refault(folio, shadow);  in workingset_refault()
       [all …]
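
    Here "shadow" means something different from the sanitizer metadata above:
    workingset.c packs a memcg id, node id, eviction counter, and workingset flag
    into one tagged xarray value entry that is left behind when a folio is
    evicted. A round-trip sketch of that packing; the field widths are assumptions
    for illustration (the kernel derives them from MEM_CGROUP_ID_SHIFT,
    NODES_SHIFT, and BITS_PER_LONG):

        #include <stdbool.h>

        /* Assumed widths for illustration only. */
        #define WORKINGSET_SHIFT 1
        #define NODES_SHIFT      6
        #define MEMCG_SHIFT      16

        static unsigned long pack_shadow(int memcgid, int nid,
                                         unsigned long eviction, bool workingset)
        {
            unsigned long entry = eviction;

            entry = (entry << MEMCG_SHIFT) | (unsigned int)memcgid;
            entry = (entry << NODES_SHIFT) | (unsigned int)nid;
            entry = (entry << WORKINGSET_SHIFT) | workingset;
            /* The kernel returns xa_mk_value(entry): entry << 1, low bit set. */
            return (entry << 1) | 1;
        }

        static void unpack_shadow(unsigned long shadow, int *memcgid, int *nid,
                                  unsigned long *eviction, bool *workingset)
        {
            unsigned long entry = shadow >> 1;  /* drop the value-entry tag bit */

            *workingset = entry & ((1UL << WORKINGSET_SHIFT) - 1);
            entry >>= WORKINGSET_SHIFT;
            *nid = entry & ((1UL << NODES_SHIFT) - 1);
            entry >>= NODES_SHIFT;
            *memcgid = entry & ((1UL << MEMCG_SHIFT) - 1);
            *eviction = entry >> MEMCG_SHIFT;
        }

    The eviction counter acts as a clock: on refault, the distance between the
    current counter and the stored one tells how far outside the working set the
    page had fallen, which is what workingset_test_recent() and
    lru_gen_test_recent() judge.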
|
swap_state.c
        76  void *shadow;  in get_shadow_from_swap_cache() (local)
        78  shadow = xa_load(&address_space->i_pages, idx);  in get_shadow_from_swap_cache()
        79  if (xa_is_value(shadow))  in get_shadow_from_swap_cache()
        80  return shadow;  in get_shadow_from_swap_cache()
       142  swp_entry_t entry, void *shadow)  in __delete_from_swap_cache() (argument)
       157  void *entry = xas_store(&xas, shadow);  in __delete_from_swap_cache()
       372  void *shadow = NULL;  in __read_swap_cache_async() (local)
       445  if (add_to_swap_cache(new_folio, entry, gfp_mask & GFP_RECLAIM_MASK, &shadow))  in __read_swap_cache_async()
       450  if (shadow)  in __read_swap_cache_async()
       451  workingset_refault(new_folio, shadow);  in __read_swap_cache_async()
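
    get_shadow_from_swap_cache() relies on the xarray convention that a slot holds
    either a folio pointer or a "value entry" whose low bit is set, and
    xa_is_value() checks exactly that bit; only value entries are shadows. A
    minimal sketch, with swap_cache_load() standing in for xa_load() on the
    i_pages xarray:

        #include <stdbool.h>
        #include <stdint.h>

        /* A value entry is an odd pointer: no properly aligned folio
         * pointer can ever have bit 0 set, so the two never collide. */
        static inline bool is_value_entry(void *entry)
        {
            return (uintptr_t)entry & 1;
        }

        /* Hypothetical stand-in for xa_load() on the swap cache. */
        static void *swap_cache_load(unsigned long idx)
        {
            (void)idx;
            return NULL;
        }

        /* Return the shadow left at @idx, or NULL if the slot holds a folio. */
        static void *get_swap_shadow(unsigned long idx)
        {
            void *entry = swap_cache_load(idx);

            return is_value_entry(entry) ? entry : NULL;
        }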
|
swap.h
        60  swp_entry_t entry, void *shadow);
       200  swp_entry_t entry, void *shadow)  in __delete_from_swap_cache() (argument)
|
filemap.c
       129  struct folio *folio, void *shadow)  in page_cache_delete() (argument)
       141  xas_store(&xas, shadow);  in page_cache_delete()
       218  void __filemap_remove_folio(struct folio *folio, void *shadow)  in __filemap_remove_folio() (argument)
       224  page_cache_delete(mapping, folio, shadow);  in __filemap_remove_folio()
       961  void *shadow = NULL;  in filemap_add_folio() (local)
       969  ret = __filemap_add_folio(mapping, folio, index, gfp, &shadow);  in filemap_add_folio()
       983  if (!(gfp & __GFP_WRITE) && shadow)  in filemap_add_folio()
       984  workingset_refault(folio, shadow);  in filemap_add_folio()
      4470  void *shadow = (void *)folio;  in filemap_cachestat() (local)
      4494  shadow = get_shadow_from_swap_cache(swp);  in filemap_cachestat()
       [all …]
|
vmscan.c
       739  void *shadow = NULL;  in __remove_mapping() (local)
       785  shadow = workingset_eviction(folio, target_memcg);  in __remove_mapping()
       786  __delete_from_swap_cache(folio, swap, shadow);  in __remove_mapping()
       812  shadow = workingset_eviction(folio, target_memcg);  in __remove_mapping()
       813  __filemap_remove_folio(folio, shadow);  in __remove_mapping()
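
    The vmscan.c hits close the loop: at eviction, __remove_mapping() obtains a
    packed shadow from workingset_eviction() and hands it to the removal path,
    which stores it into the slot the folio vacated (the xas_store() calls in
    filemap.c and swap_state.c above); the refault paths in memory.c, shmem.c, and
    swap_state.c later read it back through workingset_refault(). A skeletal
    sketch of the eviction half, with both helpers stubbed:

        #include <stdbool.h>
        #include <stddef.h>

        struct folio;   /* opaque here */

        /* Stubs for workingset_eviction() and for the removal paths that
         * store @shadow into the folio's old xarray slot. */
        static void *workingset_pack(struct folio *folio)
        {
            (void)folio;
            return (void *)1;   /* a value entry, per the sketch above */
        }
        static void remove_folio(struct folio *folio, void *shadow)
        {
            (void)folio; (void)shadow;
        }

        /* Compute a shadow for the departing folio and leave it behind. */
        static void evict_folio(struct folio *folio, bool track)
        {
            void *shadow = NULL;

            if (track)  /* only reclaim-driven removal leaves a shadow */
                shadow = workingset_pack(folio);
            remove_folio(folio, shadow);
        }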
|
shmem.c
      2023  void *shadow;  in shmem_swap_alloc_folio() (local)
      2085  shadow = get_shadow_from_swap_cache(entry);  in shmem_swap_alloc_folio()
      2086  if (shadow)  in shmem_swap_alloc_folio()
      2087  workingset_refault(new, shadow);  in shmem_swap_alloc_folio()
|
memory.c
      4467  void *shadow = NULL;  in do_swap_page() (local)
      4574  shadow = get_shadow_from_swap_cache(entry);  in do_swap_page()
      4575  if (shadow)  in do_swap_page()
      4576  workingset_refault(folio, shadow);  in do_swap_page()
|
Kconfig
      1367  The architecture has hardware support for userspace shadow call
|
/mm/kasan/
report_sw_tags.c
        51  u8 *shadow;  in kasan_get_alloc_size() (local)
        62  shadow = (u8 *)kasan_mem_to_shadow(object);  in kasan_get_alloc_size()
        64  if (*shadow != KASAN_TAG_INVALID)  in kasan_get_alloc_size()
        68  shadow++;  in kasan_get_alloc_size()
        81  u8 *shadow = (u8 *)kasan_mem_to_shadow(addr);  in kasan_print_tags() (local)
        83  pr_err("Pointer tag: [%02x], memory tag: [%02x]\n", addr_tag, *shadow);  in kasan_print_tags()
|
report_generic.c
        49  u8 *shadow;  in kasan_get_alloc_size() (local)
        60  shadow = (u8 *)kasan_mem_to_shadow(object);  in kasan_get_alloc_size()
        62  if (*shadow == 0)  in kasan_get_alloc_size()
        64  else if (*shadow >= 1 && *shadow <= KASAN_GRANULE_SIZE - 1)  in kasan_get_alloc_size()
        65  return size + *shadow;  in kasan_get_alloc_size()
        68  shadow++;  in kasan_get_alloc_size()
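
    Generic KASAN encodes each 8-byte granule in one shadow byte: 0 means the
    whole granule is addressable, 1 through 7 mean only that many leading bytes
    are, and any other value is a poison code. The kasan_get_alloc_size() walk
    above reads that encoding back; a compilable rendering of the same loop:

        #include <stddef.h>
        #include <stdint.h>

        #define GRANULE_SIZE 8  /* KASAN_GRANULE_SIZE under generic KASAN */

        /*
         * Recover an object's usable size from its shadow: @shadow points at
         * the shadow byte of the object's first granule, @max_size bounds the
         * walk (the kernel uses the slab cache's object size).
         */
        static size_t get_alloc_size(const uint8_t *shadow, size_t max_size)
        {
            size_t size = 0;

            while (size < max_size) {
                if (*shadow == 0)
                    size += GRANULE_SIZE;   /* whole granule addressable */
                else if (*shadow >= 1 && *shadow <= GRANULE_SIZE - 1)
                    return size + *shadow;  /* partial final granule */
                else
                    return size;            /* poison code: object ends */
                shadow++;
            }
            return size;
        }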
|
sw_tags.c
        78  u8 *shadow_first, *shadow_last, *shadow;  in kasan_check_range() (local)
       114  for (shadow = shadow_first; shadow <= shadow_last; shadow++) {  in kasan_check_range()
       115  if (*shadow != tag) {  in kasan_check_range()
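
    Software tag-based KASAN replaces the 0..7 size encoding with tag matching:
    the shadow byte of each granule (16 bytes under sw-tags, an assumption stated
    here) holds a memory tag, and an access is valid only if it equals the tag in
    the pointer's unused top byte. A toy version of the kasan_check_range() loop:

        #include <stdbool.h>
        #include <stddef.h>
        #include <stdint.h>

        #define TAG_SHIFT    56     /* tags ride in the pointer's top byte */
        #define GRANULE_SIZE 16     /* sw-tags granule; assumed here */

        static uint8_t shadow_mem[1 << 20];     /* toy shadow region */

        /* One shadow byte (memory tag) per 16-byte granule. */
        static uint8_t *mem_to_shadow(uintptr_t addr)
        {
            return &shadow_mem[(addr / GRANULE_SIZE) % sizeof(shadow_mem)];
        }

        /* Valid only if the pointer tag matches every granule's memory tag. */
        static bool check_range(uintptr_t addr, size_t size)
        {
            uint8_t tag = addr >> TAG_SHIFT;

            /* Strip the tag before translating the address to shadow. */
            addr &= ((uintptr_t)1 << TAG_SHIFT) - 1;

            uint8_t *shadow_first = mem_to_shadow(addr);
            uint8_t *shadow_last = mem_to_shadow(addr + size - 1);

            for (uint8_t *shadow = shadow_first; shadow <= shadow_last; shadow++)
                if (*shadow != tag)
                    return false;   /* mismatch: the real code reports here */
            return true;
        }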
|
Makefile
        52  obj-$(CONFIG_KASAN_GENERIC) += init.o generic.o report_generic.o shadow.o quarantine.o
        54  obj-$(CONFIG_KASAN_SW_TAGS) += init.o report_sw_tags.o shadow.o sw_tags.o tags.o report_tags.o
|
shadow.c
       157  u8 *shadow = (u8 *)kasan_mem_to_shadow(addr + size);  in kasan_poison_last_granule() (local)
       158  *shadow = size & KASAN_GRANULE_MASK;  in kasan_poison_last_granule()
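
    kasan_poison_last_granule() is the write side of the encoding that
    report_generic.c reads back: when a size is not granule-aligned, the shadow
    byte of the final granule records size % 8. A sketch that includes generic
    KASAN's linear address-to-shadow translation (the offset value is x86_64's
    KASAN_SHADOW_OFFSET, used here as an assumed placeholder):

        #include <stddef.h>
        #include <stdint.h>

        #define GRANULE_SIZE  8
        #define GRANULE_MASK  (GRANULE_SIZE - 1)
        #define SHADOW_OFFSET 0xdffffc0000000000UL  /* assumed; arch-specific */

        /* One shadow byte per 8 bytes of memory, at a fixed linear offset. */
        static inline uint8_t *mem_to_shadow(uintptr_t addr)
        {
            return (uint8_t *)((addr >> 3) + SHADOW_OFFSET);
        }

        /*
         * @addr is granule-aligned, so addr + size lands inside the object's
         * last granule; its shadow byte gets the count of valid leading
         * bytes, exactly what the size-recovery walk above adds back.
         */
        static void poison_last_granule(uintptr_t addr, size_t size)
        {
            if (size & GRANULE_MASK)
                *mem_to_shadow(addr + size) = size & GRANULE_MASK;
        }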
|