/mm/
maccess.c
      22  __get_kernel_nofault(dst, src, type, err_label);  \
      23  kmsan_check_memory(src, sizeof(type));  \
      25  src += sizeof(type);  \
      34  align = (unsigned long)dst | (unsigned long)src;     in copy_from_kernel_nofault()
      36  if (!copy_from_kernel_nofault_allowed(src, size))    in copy_from_kernel_nofault()
      60  src += sizeof(type);  \
      88  const void *src = unsafe_addr;                       in strncpy_from_kernel_nofault() [local]
      97  __get_kernel_nofault(dst, src, u8, Efault);          in strncpy_from_kernel_nofault()
      99  src++;                                               in strncpy_from_kernel_nofault()
     104  return src - unsafe_addr;                            in strncpy_from_kernel_nofault()
    [all …]
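As an illustrative aside (not part of the file above): copy_from_kernel_nofault() reads from a kernel address that may be unmapped, returning 0 on success and -EFAULT if the access would fault. A minimal caller sketch, with a hypothetical pointer addr:

    unsigned long val;

    /* Probe a possibly-unmapped kernel address without risking an oops. */
    if (copy_from_kernel_nofault(&val, addr, sizeof(val)))
            pr_info("%px is not safely readable\n", addr);
    else
            pr_info("read %#lx from %px\n", val, addr);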
migrate.c
     220  VM_WARN_ON_ONCE_PAGE(!page_has_movable_ops(src), src);          in migrate_movable_ops_page()
     221  VM_WARN_ON_ONCE_PAGE(!PageMovableOpsIsolated(src), src);        in migrate_movable_ops_page()
     222  rc = page_movable_ops(src)->migrate_page(dst, src, mode);       in migrate_movable_ops_page()
     436  .folio = src,                                                   in remove_migration_ptes()
     714  folio_ref_unfreeze(src, expected_count - folio_nr_pages(src));  in migrate_huge_page_move_mapping()
    1060  VM_BUG_ON_FOLIO(!folio_test_locked(src), src);                  in move_to_new_folio()
    1131  remove_migration_ptes(src, src, 0);                             in migrate_folio_undo_src()
    1159  folio_is_file_lru(src), -folio_nr_pages(src));                  in migrate_folio_done()
    1261  if (folio_test_anon(src) && !folio_test_ksm(src))               in migrate_folio_unmap()
    1301  !folio_test_ksm(src) && !anon_vma, src);                        in migrate_folio_unmap()
    [all …]
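The migrate_movable_ops_page() hits show the core checking that the page has movable ops and is isolated, then deferring to the driver's ->migrate_page(dst, src, mode) callback. A sketch of what such a driver callback can look like (the name and body are hypothetical; a real implementation also moves its private state):

    static int demo_migrate_page(struct page *dst, struct page *src,
                                 enum migrate_mode mode)
    {
            /* Copy the driver-private payload into the new page. */
            memcpy(page_address(dst), page_address(src), PAGE_SIZE);
            /* Tell the migration core the move succeeded. */
            return MIGRATEPAGE_SUCCESS;
    }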
vma_init.c
      43  dest->vm_mm = src->vm_mm;            in vm_area_init_from()
      44  dest->vm_ops = src->vm_ops;          in vm_area_init_from()
      45  dest->vm_start = src->vm_start;      in vm_area_init_from()
      46  dest->vm_end = src->vm_end;          in vm_area_init_from()
      47  dest->anon_vma = src->anon_vma;      in vm_area_init_from()
      48  dest->vm_pgoff = src->vm_pgoff;      in vm_area_init_from()
      49  dest->vm_file = src->vm_file;        in vm_area_init_from()
      51  vm_flags_init(dest, src->vm_flags);  in vm_area_init_from()
      62  dest->anon_name = src->anon_name;    in vm_area_init_from()
      69  dest->vm_region = src->vm_region;    in vm_area_init_from()
    [all …]
migrate_device.c
     552  if (!args->src || !args->dst)                         in migrate_vma_setup()
     559  memset(args->src, 0, sizeof(*args->src) * nr_pages);  in migrate_vma_setup()
     589  unsigned long *src)                                   in migrate_vma_insert_page() [argument]
     693  *src = MIGRATE_PFN_MIGRATE;                           in migrate_vma_insert_page()
     699  *src &= ~MIGRATE_PFN_MIGRATE;                         in migrate_vma_insert_page()
     847  src = page_folio(page);                               in __migrate_device_finalize()
     855  dst = src;                                            in __migrate_device_finalize()
     861  if (fault_folio != src)                               in __migrate_device_finalize()
     862  folio_unlock(src);                                    in __migrate_device_finalize()
     863  folio_put(src);                                       in __migrate_device_finalize()
    [all …]
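migrate_vma_setup() requires caller-owned src/dst pfn arrays (one slot per page in the range) and zeroes src before filling it with MIGRATE_PFN_* state. A sketch of the surrounding driver-side flow, assuming the standard three-phase API and hypothetical names (NPAGES, vma, start):

    unsigned long src_pfns[NPAGES], dst_pfns[NPAGES];
    struct migrate_vma args = {
            .vma   = vma,
            .start = start,
            .end   = start + NPAGES * PAGE_SIZE,
            .src   = src_pfns,
            .dst   = dst_pfns,
            .flags = MIGRATE_VMA_SELECT_SYSTEM,
    };

    if (migrate_vma_setup(&args))
            return -EINVAL;
    /* ... allocate destination pages, populate dst_pfns[], copy data ... */
    migrate_vma_pages(&args);
    migrate_vma_finalize(&args);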
util.c
     140  memcpy(p, src, len);                                    in kmemdup_noprof()
     172  void *kvmemdup(const void *src, size_t len, gfp_t gfp)  in kvmemdup() [argument]
     178  memcpy(p, src, len);                                    in kvmemdup()
     217  void *memdup_user(const void __user *src, size_t len)   in memdup_user() [argument]
     225  if (copy_from_user(p, src, len)) {                      in memdup_user()
     243  void *vmemdup_user(const void __user *src, size_t len)  in vmemdup_user() [argument]
     251  if (copy_from_user(p, src, len)) {                      in vmemdup_user()
     307  if (copy_from_user(p, src, len)) {                      in memdup_user_nul()
     720  void folio_copy(struct folio *dst, struct folio *src)   in folio_copy() [argument]
     723  long nr = folio_nr_pages(src);                          in folio_copy()
    [all …]
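memdup_user() and vmemdup_user() bundle the allocate-then-copy_from_user() pattern shown above and return an ERR_PTR() on failure, never NULL. Typical ioctl-style usage (uarg and len are hypothetical):

    void *buf = memdup_user(uarg, len);

    if (IS_ERR(buf))
            return PTR_ERR(buf);
    /* ... consume buf ... */
    kfree(buf);     /* use kvfree() for vmemdup_user() */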
list_lru.c
     444  struct list_lru_one *src,                  in memcg_reparent_list_lru_one() [argument]
     450  spin_lock_irq(&src->lock);                 in memcg_reparent_list_lru_one()
     454  list_splice_init(&src->list, &dst->list);  in memcg_reparent_list_lru_one()
     455  if (src->nr_items) {                       in memcg_reparent_list_lru_one()
     456  WARN_ON(src->nr_items < 0);                in memcg_reparent_list_lru_one()
     457  dst->nr_items += src->nr_items;            in memcg_reparent_list_lru_one()
     461  src->nr_items = LONG_MIN;                  in memcg_reparent_list_lru_one()
     464  spin_unlock_irq(&src->lock);               in memcg_reparent_list_lru_one()
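Reparenting splices every item of the dying memcg's list onto the destination in O(1) and folds the item count across under the source lock, then sets src->nr_items to LONG_MIN, evidently as a marker for the now-dead list. The underlying splice-under-lock pattern, as a generic sketch:

    /* Move all entries from src_list to dst_list in O(1);
     * src_list is left reinitialized and empty. */
    spin_lock_irq(&src_lock);
    list_splice_init(&src_list, &dst_list);
    dst_items += src_items;
    src_items = 0;
    spin_unlock_irq(&src_lock);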
early_ioremap.c
     251  int __init copy_from_early_mem(void *dest, phys_addr_t src, unsigned long size)  in copy_from_early_mem() [argument]
     257  slop = offset_in_page(src);                        in copy_from_early_mem()
     261  p = early_memremap(src & PAGE_MASK, clen + slop);  in copy_from_early_mem()
     267  src += clen;                                       in copy_from_early_mem()
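copy_from_early_mem() copies from a physical address during early boot, mapping one bounded chunk at a time through early_memremap() and compensating for a non-page-aligned source (the slop). A caller sketch treating a nonzero return as failure (buf, phys_addr, and size are hypothetical):

    /* Pull a blob out of physical memory before normal
     * ioremap()/memremap() is available. */
    if (copy_from_early_mem(buf, phys_addr, size))
            panic("cannot copy early memory");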
khugepaged.c
     704  struct folio *src, *tmp;       in __collapse_huge_page_copy_succeeded() [local]
     727  src = page_folio(src_page);    in __collapse_huge_page_copy_succeeded()
     729  if (folio_test_large(src)) {   in __collapse_huge_page_copy_succeeded()
     734  release_pte_folio(src);        in __collapse_huge_page_copy_succeeded()
     746  free_swap_cache(src);          in __collapse_huge_page_copy_succeeded()
     747  folio_put_refs(src, nr_ptes);  in __collapse_huge_page_copy_succeeded()
     752  list_del(&src->lru);           in __collapse_huge_page_copy_succeeded()
     754  folio_is_file_lru(src));       in __collapse_huge_page_copy_succeeded()
     755  folio_unlock(src);             in __collapse_huge_page_copy_succeeded()
     756  free_swap_cache(src);          in __collapse_huge_page_copy_succeeded()
    [all …]
numa_memblks.c
     181  struct numa_meminfo *src)                  in numa_move_tail_memblk() [argument]
     183  dst->blk[dst->nr_blks++] = src->blk[idx];  in numa_move_tail_memblk()
     184  numa_remove_memblk_from(idx, src);         in numa_move_tail_memblk()
hugetlb.c
    4065  if (!src->demote_order) {                           in demote_pool_huge_page()
    4079  remove_hugetlb_folio(src, folio, false);            in demote_pool_huge_page()
    4088  rc = demote_free_hugetlb_folios(src, dst, &list);   in demote_pool_huge_page()
    4094  add_hugetlb_folio(src, folio, false);               in demote_pool_huge_page()
    4107  src->max_huge_pages -= nr_demoted;                  in demote_pool_huge_page()
    5572  raw_write_seqcount_begin(&src->write_protect_seq);  in copy_hugetlb_page_range()
    5611  src_ptl = huge_pte_lockptr(h, src, src_pte);        in copy_hugetlb_page_range()
    5638  set_huge_pte_at(src, addr, src_pte, entry, sz);     in copy_hugetlb_page_range()
    5690  src_ptl = huge_pte_lockptr(h, src, src_pte);        in copy_hugetlb_page_range()
    5715  huge_ptep_set_wrprotect(src, addr, src_pte);        in copy_hugetlb_page_range()
    [all …]
mempolicy.c
    1359  static struct folio *alloc_migration_target_by_mpol(struct folio *src,      in alloc_migration_target_by_mpol() [argument]
    1369  order = folio_order(src);        in alloc_migration_target_by_mpol()
    1370  ilx += src->index >> order;      in alloc_migration_target_by_mpol()
    1372  if (folio_test_hugetlb(src)) {   in alloc_migration_target_by_mpol()
    1376  h = folio_hstate(src);           in alloc_migration_target_by_mpol()
    1383  if (folio_test_large(src))       in alloc_migration_target_by_mpol()
    1404  static struct folio *alloc_migration_target_by_mpol(struct folio *src,      in alloc_migration_target_by_mpol() [argument]
    2728  int vma_dup_policy(struct vm_area_struct *src, struct vm_area_struct *dst)  in vma_dup_policy() [argument]
    2730  struct mempolicy *pol = mpol_dup(src->vm_policy);  in vma_dup_policy()
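vma_dup_policy() gives dst its own reference-counted copy of src's NUMA policy via mpol_dup(), as needed when a VMA is split or duplicated. A caller sketch (VMA names hypothetical; the only failure mode should be allocation failure inside mpol_dup()):

    int err = vma_dup_policy(old_vma, new_vma);

    if (err)
            return err;     /* -ENOMEM from mpol_dup() */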
vma.c
     112  struct vm_area_struct *src = vmg->middle; /* exisitng merge case. */  in is_mergeable_anon_vma() [local]
     121  VM_WARN_ON(src && src_anon != src->anon_vma);              in is_mergeable_anon_vma()
     125  return !vma_had_uncowed_parents(src);                      in is_mergeable_anon_vma()
     612  struct vm_area_struct *src, struct vm_area_struct **dup)   in dup_anon_vma() [argument]
     628  if (src->anon_vma && !dst->anon_vma) {                     in dup_anon_vma()
     632  dst->anon_vma = src->anon_vma;                             in dup_anon_vma()
     633  ret = anon_vma_clone(dst, src);                            in dup_anon_vma()
zswap.c
    1010  u8 *src, *obj;                            in zswap_decompress() [local]
    1021  src = obj;                                in zswap_decompress()
    1025  src = acomp_ctx->buffer;                  in zswap_decompress()
    1028  sg_init_one(&input, src, entry->length);  in zswap_decompress()
memory.c
    3232  static inline int __wp_page_copy_user(struct page *dst, struct page *src,  in __wp_page_copy_user() [argument]
    3242  if (likely(src)) {                                         in __wp_page_copy_user()
    3243  if (copy_mc_user_highpage(dst, src, addr, vma))            in __wp_page_copy_user()
    7060  static int copy_user_gigantic_page(struct folio *dst, struct folio *src,   in copy_user_gigantic_page() [argument]
    7072  src_page = folio_page(src, i);                             in copy_user_gigantic_page()
    7084  struct folio *src;                                         [member]
    7092  struct page *src = folio_page(copy_arg->src, idx);         in copy_subpage() [local]
    7094  if (copy_mc_user_highpage(dst, src, addr, copy_arg->vma))  in copy_subpage()
    7099  int copy_user_large_folio(struct folio *dst, struct folio *src,            in copy_user_large_folio() [argument]
    7105  .src = src,                                                in copy_user_large_folio()
    [all …]
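The hits at 3243 and 7094 both use copy_mc_user_highpage(), the machine-check-safe page copy: it returns nonzero when the source page hits a hardware memory error, so the caller can fail the operation instead of taking a fatal fault. The pattern, as a sketch:

    /* Copy one page, surviving a poisoned source. */
    if (copy_mc_user_highpage(dst_page, src_page, addr, vma))
            return -EHWPOISON;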
secretmem.c
     148  struct folio *dst, struct folio *src, enum migrate_mode mode)  in secretmem_migrate_folio() [argument]
vmscan.c
    1008  static struct folio *alloc_demote_folio(struct folio *src,  in alloc_demote_folio() [argument]
    1029  dst = alloc_migration_target(src, (unsigned long)mtc);      in alloc_demote_folio()
    1036  return alloc_migration_target(src, (unsigned long)mtc);     in alloc_demote_folio()
    1742  struct list_head *src = &lruvec->lists[lru];                in isolate_lru_folios() [local]
    1751  while (scan < nr_to_scan && !list_empty(src)) {              in isolate_lru_folios()
    1752  struct list_head *move_to = src;                             in isolate_lru_folios()
    1755  folio = lru_to_folio(src);                                   in isolate_lru_folios()
    1756  prefetchw_prev_lru_folio(folio, src, flags);                 in isolate_lru_folios()
    1815  list_splice(&folios_skipped, src);                           in isolate_lru_folios()
    5586  static ssize_t lru_gen_seq_write(struct file *file, const char __user *src,  in lru_gen_seq_write() [argument]
    [all …]
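alloc_demote_folio() calls alloc_migration_target() twice (lines 1029 and 1036): first with a migration_target_control aimed at the preferred demotion node, then with relaxed constraints as a fallback. The try-then-fallback shape, as a sketch (the two control structs are hypothetical stand-ins for the strict and relaxed configurations):

    /* Prefer the designated demotion node; fall back to any allowed node. */
    dst = alloc_migration_target(src, (unsigned long)&strict_mtc);
    if (!dst)
            dst = alloc_migration_target(src, (unsigned long)&relaxed_mtc);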
/mm/damon/
core.c
     765  dst->nid = src->nid;          in damos_commit_quota_goal_union()
     775  dst->metric = src->metric;    in damos_commit_quota_goal()
     825  dst->ms = src->ms;            in damos_commit_quota()
     826  dst->sz = src->sz;            in damos_commit_quota()
     872  dst->type = src->type;        in damos_commit_filter()
    1024  dst->pattern = src->pattern;  in damos_commit()
    1025  dst->action = src->action;    in damos_commit()
    1032  dst->wmarks = src->wmarks;    in damos_commit()
    1133  get_pid(src->pid);            in damon_commit_target()
    1134  dst->pid = src->pid;          in damon_commit_target()
    [all …]
/mm/kasan/
shadow.c
      61  void *memmove(void *dest, const void *src, size_t len)        in memmove() [argument]
      63  if (!kasan_check_range(src, len, false, _RET_IP_) ||          in memmove()
      67  return __memmove(dest, src, len);                             in memmove()
      72  void *memcpy(void *dest, const void *src, size_t len)         in memcpy() [argument]
      74  if (!kasan_check_range(src, len, false, _RET_IP_) ||          in memcpy()
      78  return __memcpy(dest, src, len);                              in memcpy()
      94  if (!kasan_check_range(src, len, false, _RET_IP_) ||          in __asan_memmove()
      98  return __memmove(dest, src, len);                             in __asan_memmove()
     103  void *__asan_memcpy(void *dest, const void *src, ssize_t len)  in __asan_memcpy() [argument]
     105  if (!kasan_check_range(src, len, false, _RET_IP_) ||          in __asan_memcpy()
    [all …]
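These are KASAN's string-op interceptors: each wrapper validates the source range as a read (the false argument) and the destination as a write before delegating to the uninstrumented __memcpy()/__memmove(). An access that trips the check, for illustration:

    char dst[16];
    char *p = kmalloc(8, GFP_KERNEL);

    /* The 16-byte read overruns the 8-byte object; the memcpy()
     * interceptor reports it before __memcpy() runs. */
    memcpy(dst, p, 16);
    kfree(p);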
kasan.h
     636  void *__asan_memmove(void *dest, const void *src, ssize_t len);
     637  void *__asan_memcpy(void *dest, const void *src, ssize_t len);
     655  void *__hwasan_memmove(void *dest, const void *src, ssize_t len);
     656  void *__hwasan_memcpy(void *dest, const void *src, ssize_t len);
kasan_test_c.c
    1570  char *src;                                                   in kasan_strings() [local]
    1582  src = kmalloc(KASAN_GRANULE_SIZE, GFP_KERNEL | __GFP_ZERO);  in kasan_strings()
    1583  strscpy(src, "f0cacc1a0000000", KASAN_GRANULE_SIZE);         in kasan_strings()
    1594  strscpy(ptr, src + 1, KASAN_GRANULE_SIZE));                  in kasan_strings()
    1597  KUNIT_EXPECT_KASAN_FAIL(test, strscpy(ptr, src + KASAN_GRANULE_SIZE,  in kasan_strings()
    1600  kfree(src);                                                  in kasan_strings()
/mm/kmsan/
instrumentation.c
     157  void *__msan_memmove(void *dst, const void *src, uintptr_t n);
     158  void *__msan_memmove(void *dst, const void *src, uintptr_t n)  in __msan_memmove() [argument]
     165  result = __memmove(dst, src, n);                       in __msan_memmove()
     173  kmsan_internal_memmove_metadata(dst, (void *)src, n);  in __msan_memmove()
     182  void *__msan_memcpy(void *dst, const void *src, uintptr_t n);
     183  void *__msan_memcpy(void *dst, const void *src, uintptr_t n)   in __msan_memcpy() [argument]
     190  result = __memcpy(dst, src, n);                        in __msan_memcpy()
     200  kmsan_internal_memmove_metadata(dst, (void *)src, n);  in __msan_memcpy()
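Unlike KASAN's interceptors, KMSAN's __msan_memcpy()/__msan_memmove() perform the copy and then move the shadow (initializedness) and origin metadata alongside it, so "uninitialized" status travels with the bytes and is reported only when a value is actually used. A sketch of the observable behavior:

    int a;          /* never initialized */
    int b;

    memcpy(&b, &a, sizeof(a));      /* metadata is copied with the data */
    if (b)                          /* KMSAN reports the use here, not above */
            pr_info("b is set\n");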
kmsan_test.c
     423  const volatile void *src, size_t size)                in memcpy_noinline() [argument]
     425  return memcpy((void *)dst, (const void *)src, size);  in memcpy_noinline()
     432  volatile long long src;                               in test_init_memcpy() [local]
     435  src = 1;                                              in test_init_memcpy()
     439  memcpy_noinline((void *)&dst, (void *)&src, sizeof(src));  in test_init_memcpy()
     646  char buf[4], src[4];                                  in test_copy_from_kernel_nofault() [local]
     654  ret = copy_from_kernel_nofault((char *)&buf[0], (char *)&src[0], size);  in test_copy_from_kernel_nofault()
core.c
      83  void kmsan_internal_memmove_metadata(void *dst, void *src, size_t n)  in kmsan_internal_memmove_metadata() [argument]
     100  shadow_src = kmsan_get_metadata(src, KMSAN_META_SHADOW);  in kmsan_internal_memmove_metadata()
     106  KMSAN_WARN_ON(!kmsan_metadata_is_contiguous(src, n));     in kmsan_internal_memmove_metadata()
     109  origin_src = kmsan_get_metadata(src, KMSAN_META_ORIGIN);  in kmsan_internal_memmove_metadata()
     112  backwards = dst > src;                                    in kmsan_internal_memmove_metadata()
     115  src_off = (u64)src % KMSAN_ORIGIN_SIZE;                   in kmsan_internal_memmove_metadata()
shadow.c
     151  void kmsan_copy_page_meta(struct page *dst, struct page *src)   in kmsan_copy_page_meta() [argument]
     157  if (!src || !page_has_metadata(src)) {                          in kmsan_copy_page_meta()
     164  __memcpy(shadow_ptr_for(dst), shadow_ptr_for(src), PAGE_SIZE);  in kmsan_copy_page_meta()
     165  __memcpy(origin_ptr_for(dst), origin_ptr_for(src), PAGE_SIZE);  in kmsan_copy_page_meta()
kmsan.h
     154  void kmsan_internal_memmove_metadata(void *dst, void *src, size_t n);