Searched refs:addr (Results 1 – 25 of 92) sorted by relevance

/mm/kasan/
init.c
118 if (IS_ALIGNED(addr, PMD_SIZE) && end - addr >= PMD_SIZE) { in zero_pmd_populate()
139 } while (pmd++, addr = next, addr != end); in zero_pmd_populate()
152 if (IS_ALIGNED(addr, PUD_SIZE) && end - addr >= PUD_SIZE) { in zero_pud_populate()
177 } while (pud++, addr = next, addr != end); in zero_pud_populate()
219 } while (p4d++, addr = next, addr != end); in zero_p4d_populate()
281 } while (pgd++, addr = next, addr != end); in kasan_populate_early_shadow()
352 for (; addr < end; addr = next, pte++) { in kasan_remove_pte_table()
373 for (; addr < end; addr = next, pmd++) { in kasan_remove_pmd_table()
399 for (; addr < end; addr = next, pud++) { in kasan_remove_pud_table()
426 for (; addr < end; addr = next, p4d++) { in kasan_remove_p4d_table()
[all …]
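
The init.c hits above all share one idiom: walk a region in chunks clamped to the next level boundary, then advance with addr = next until the range is exhausted. A minimal userspace sketch of that boundary-stepping loop follows; STEP and range_end() are illustrative stand-ins for PMD_SIZE and pmd_addr_end(), not the kernel's names.

    /*
     * Boundary-stepping sketch modeled on zero_pmd_populate() above.
     * Plain C; STEP stands in for PMD_SIZE, range_end() for pmd_addr_end().
     */
    #include <stdio.h>

    #define STEP 0x200000UL /* 2 MiB, like a PMD span */

    static unsigned long range_end(unsigned long addr, unsigned long end)
    {
        unsigned long next = (addr + STEP) & ~(STEP - 1);

        return next < end ? next : end;
    }

    int main(void)
    {
        unsigned long addr = 0x1ff000, end = 0x600000, next;

        do {
            next = range_end(addr, end);
            printf("chunk [%#lx, %#lx)\n", addr, next);
        } while (addr = next, addr != end);

        return 0;
    }
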
generic.c
133 const void *last_byte = addr + size - 1; in memory_is_poisoned_n()
149 return memory_is_poisoned_1(addr); in memory_is_poisoned()
155 return memory_is_poisoned_16(addr); in memory_is_poisoned()
161 return memory_is_poisoned_n(addr, size); in memory_is_poisoned()
174 if (unlikely(addr + size < addr)) in check_region_inline()
177 if (unlikely(!addr_has_metadata(addr))) in check_region_inline()
242 void __asan_load##size(void *addr) \
250 void __asan_store##size(void *addr) \
531 void kasan_record_aux_stack(void *addr) in kasan_record_aux_stack() argument
538 if (is_kfence_address(addr) || !slab) in kasan_record_aux_stack()
[all …]
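
The check at generic.c:174 guards against unsigned wraparound before any shadow lookup: if addr + size overflows, the "range" wraps past the top of the address space and must be rejected. A small standalone rendering of that guard:

    /* Wraparound guard, as at generic.c:174. Userspace illustration. */
    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    static bool range_ok(uintptr_t addr, size_t size)
    {
        if (addr + size < addr) /* unsigned wraparound */
            return false;
        return true;
    }

    int main(void)
    {
        printf("%d\n", range_ok(UINTPTR_MAX - 4, 16)); /* 0: wraps */
        printf("%d\n", range_ok(0x1000, 16));          /* 1: fine */
        return 0;
    }
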
kasan.h
319 addr < (void *)KASAN_SHADOW_END; in addr_in_shadow()
333 return (kasan_reset_tag(addr) >= in addr_has_metadata()
353 return (is_vmalloc_addr(addr) || virt_addr_valid(addr)); in addr_has_metadata()
416 return addr; in arch_kasan_set_tag()
420 #define arch_kasan_get_tag(addr) 0 argument
423 #define set_tag(addr, tag) ((void *)arch_kasan_set_tag((addr), (tag))) argument
424 #define get_tag(addr) arch_kasan_get_tag(addr) argument
435 #define hw_get_mem_tag(addr) arch_get_mem_tag(addr) argument
483 u8 tag = get_tag(addr); in kasan_unpoison()
494 u8 ptr_tag = get_tag(addr); in kasan_byte_accessible()
[all …]
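
The set_tag()/get_tag() macros in kasan.h pack an 8-bit tag into the top byte of a 64-bit pointer (software tag-based KASAN). A hedged userspace model of that packing, assuming a 64-bit target and never dereferencing the tagged value:

    /* Top-byte tag packing, modeled on set_tag()/get_tag() above. */
    #include <stdint.h>
    #include <stdio.h>

    static void *set_tag(void *addr, uint8_t tag)
    {
        uint64_t p = (uint64_t)(uintptr_t)addr;

        p = (p & ~(0xffULL << 56)) | ((uint64_t)tag << 56);
        return (void *)(uintptr_t)p;
    }

    static uint8_t get_tag(const void *addr)
    {
        return (uint64_t)(uintptr_t)addr >> 56;
    }

    int main(void)
    {
        void *p = set_tag((void *)0x1000, 0xab);

        printf("tag=%#x ptr=%p\n", get_tag(p), p);
        return 0;
    }
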
report.c
221 if (addr) in end_report()
288 if (virt_addr_valid(addr)) in addr_to_page()
382 describe_object(addr, info); in print_address_description()
386 if (kernel_or_module_addr(addr) && !init_task_stack_addr(addr)) { in print_address_description()
388 pr_err(" %pS\n", addr); in print_address_description()
401 if (is_vmalloc_addr(addr)) { in print_address_description()
403 if (!vmalloc_dump_obj(addr)) in print_address_description()
417 return (row <= addr) && (addr < row + META_MEM_BYTES_PER_ROW); in meta_row_is_guilty()
477 if (addr_has_metadata(addr)) in print_report()
498 info->first_bad_addr = addr; in complete_report_info()
[all …]
sw_tags.c
84 if (unlikely(addr + size < addr)) in kasan_check_range()
85 return !kasan_report(addr, size, write, ret_ip); in kasan_check_range()
87 tag = get_tag((const void *)addr); in kasan_check_range()
111 return !kasan_report(addr, size, write, ret_ip); in kasan_check_range()
116 return !kasan_report(addr, size, write, ret_ip); in kasan_check_range()
123 bool kasan_byte_accessible(const void *addr) in kasan_byte_accessible() argument
125 u8 tag = get_tag(addr); in kasan_byte_accessible()
126 void *untagged_addr = kasan_reset_tag(addr); in kasan_byte_accessible()
156 kasan_check_range(addr, size, false, _RET_IP_); in __hwasan_loadN_noabort()
162 kasan_check_range(addr, size, true, _RET_IP_); in __hwasan_storeN_noabort()
[all …]
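
kasan_check_range() in sw_tags.c (lines 84-116 above) rejects wrapped ranges, then compares the pointer's tag against the tag recorded in shadow memory for each granule the access touches. A simplified sketch; shadow[] and GRANULE are stand-ins for the real shadow mapping and KASAN_GRANULE_SIZE:

    /* Per-granule tag comparison, modeled on kasan_check_range(). */
    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define GRANULE 16
    static uint8_t shadow[64]; /* one tag per 16-byte granule */

    static bool check_range(uintptr_t addr, size_t size, uint8_t ptr_tag)
    {
        if (addr + size < addr) /* wraparound, as at sw_tags.c:84 */
            return false;
        for (uintptr_t a = addr; a < addr + size; a += GRANULE)
            if (shadow[a / GRANULE] != ptr_tag)
                return false;
        return true;
    }

    int main(void)
    {
        shadow[0] = shadow[1] = 0xab;
        printf("%d\n", check_range(0, 32, 0xab)); /* 1: tags match */
        printf("%d\n", check_range(0, 48, 0xab)); /* 0: granule 2 untagged */
        return 0;
    }
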
shadow.c
56 return __memset(addr, c, len); in memset()
87 return __memset(addr, c, len); in __asan_memset()
136 addr = kasan_reset_tag(addr); in kasan_poison()
165 u8 tag = get_tag(addr); in kasan_unpoison()
172 addr = kasan_reset_tag(addr); in kasan_unpoison()
188 pgd_t *pgd = pgd_offset_k(addr); in shadow_mapped()
196 p4d = p4d_offset(pgd, addr); in shadow_mapped()
199 pud = pud_offset(p4d, addr); in shadow_mapped()
204 pmd = pmd_offset(pud, addr); in shadow_mapped()
209 pte = pte_offset_kernel(pmd, addr); in shadow_mapped()
[all …]
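
shadow_mapped() (lines 188-209 above) descends pgd -> p4d -> pud -> pmd -> pte and gives up at the first absent level. A compact model of that descent over a hypothetical five-level array-of-pointers radix tree; LEVELS and BITS_PER_LEVEL are illustrative, not kernel constants:

    /* Five-level "is this address mapped" descent, not kernel code. */
    #include <stdbool.h>
    #include <stddef.h>

    #define LEVELS 5
    #define BITS_PER_LEVEL 9
    #define SLOTS (1 << BITS_PER_LEVEL)

    struct node { void *slot[SLOTS]; }; /* NULL child == not mapped */

    static size_t idx_at(unsigned long addr, int lvl)
    {
        return (addr >> (12 + lvl * BITS_PER_LEVEL)) & (SLOTS - 1);
    }

    static bool mapped(struct node *root, unsigned long addr)
    {
        void *cur = root;

        for (int lvl = LEVELS - 1; lvl >= 0; lvl--) {
            cur = ((struct node *)cur)->slot[idx_at(addr, lvl)];
            if (!cur)
                return false; /* like pgd_none()/pud_none()/... */
        }
        return true; /* reached a present leaf, the "pte" */
    }

    int main(void)
    {
        static struct node n[LEVELS];
        unsigned long addr = 0x1234000;

        /* Link one path for 'addr'; every other slot stays NULL. */
        for (int lvl = LEVELS - 1; lvl > 0; lvl--)
            n[lvl].slot[idx_at(addr, lvl)] = &n[lvl - 1];
        n[0].slot[idx_at(addr, 0)] = (void *)1; /* present leaf marker */

        return mapped(&n[LEVELS - 1], addr) ? 0 : 1;
    }
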
/mm/kfence/
core.c
337 if (likely(*addr == KFENCE_CANARY_PATTERN_U8(addr))) in check_canary_byte()
359 for (; addr < meta->addr; addr += sizeof(u64)) in set_canary()
362 addr = ALIGN_DOWN(meta->addr + meta->size, sizeof(u64)); in set_canary()
363 for (; addr - pageaddr < PAGE_SIZE; addr += sizeof(u64)) in set_canary()
383 for (; meta->addr - addr >= sizeof(u64); addr += sizeof(u64)) { in check_canary()
393 for (; addr < meta->addr; addr++) { in check_canary()
399 for (addr = meta->addr + meta->size; addr % sizeof(u64) != 0; addr++) { in check_canary()
406 for (; addr - pageaddr < PAGE_SIZE; addr++) { in check_canary()
421 void *addr; in kfence_guarded_alloc() local
472 meta->addr = ALIGN_DOWN(meta->addr, cache->align); in kfence_guarded_alloc()
[all …]
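
The core.c hits show KFENCE's canary scheme: bytes around the object are filled with a pattern derived from their own address, so a single corrupted byte pinpoints the overwrite. A sketch under that assumption; PATTERN() mimics what KFENCE_CANARY_PATTERN_U8() appears to compute but is not the kernel macro:

    /* Address-derived canary fill and check, modeled on set_canary()
     * and check_canary() above. */
    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define PATTERN(addr) ((uint8_t)0xaa ^ (uint8_t)((uintptr_t)(addr) & 0xf))

    static void set_canary(uint8_t *buf, size_t len)
    {
        for (size_t i = 0; i < len; i++)
            buf[i] = PATTERN(&buf[i]);
    }

    static bool check_canary(const uint8_t *buf, size_t len)
    {
        for (size_t i = 0; i < len; i++)
            if (buf[i] != PATTERN(&buf[i]))
                return false;
        return true;
    }

    int main(void)
    {
        uint8_t guard[32];

        set_canary(guard, sizeof(guard));
        guard[7] = 0; /* simulate an out-of-bounds write */
        printf("intact=%d\n", check_canary(guard, sizeof(guard)));
        return 0;
    }
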
kfence_test.c
33 #define arch_kfence_test_address(addr) (addr) argument
98 unsigned long addr = (unsigned long)r->addr; in report_matches() local
148 addr = arch_kfence_test_address(addr); in report_matches()
152 addr = arch_kfence_test_address(addr); in report_matches()
159 addr = arch_kfence_test_address(addr); in report_matches()
341 expect.addr = buf - 1; in test_out_of_bounds_read()
365 expect.addr = buf - 1; in test_out_of_bounds_write()
390 char *addr; in test_use_after_free_read_nofault() local
396 test_free(addr); in test_use_after_free_read_nofault()
521 WRITE_ONCE(*expect.addr, READ_ONCE(*expect.addr) + 1); in test_kmalloc_aligned_oob_write()
[all …]
/mm/
vmalloc.c
185 } while (pmd++, phys_addr += (next - addr), addr = next, addr != end); in vmap_pmd_range()
236 } while (pud++, phys_addr += (next - addr), addr = next, addr != end); in vmap_pud_range()
287 } while (p4d++, phys_addr += (next - addr), addr = next, addr != end); in vmap_p4d_range()
312 } while (pgd++, phys_addr += (next - addr), addr = next, addr != end); in vmap_range_noflush()
347 addr, end, (long)area->addr, in ioremap_page_range()
369 addr = ALIGN_DOWN(addr, size); in vunmap_pte_range()
3105 if (tmp->addr >= vm->addr) { in vm_area_add_early()
3141 vm->addr = (void *)addr; in vm_area_register_early()
3514 addr = (unsigned long)area->addr; in vmap()
4413 addr = kasan_reset_tag(addr); in vread_iter()
[all …]
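
The vmap_*_range() loops advance a physical cursor in lockstep with the virtual one, phys_addr += (next - addr), so each chunk maps contiguous physical memory. A standalone model reusing the boundary-stepping idiom from the earlier sketch; addresses and STEP are illustrative:

    /* Virtual/physical lockstep advance, as in vmap_pmd_range(). */
    #include <stdio.h>

    #define STEP 0x200000UL

    int main(void)
    {
        unsigned long addr = 0x100000, end = 0x500000, next;
        unsigned long phys_addr = 0x80000000;

        do {
            next = (addr + STEP) & ~(STEP - 1);
            if (next > end)
                next = end;
            printf("map virt %#lx -> phys %#lx (%lu bytes)\n",
                   addr, phys_addr, next - addr);
        } while (phys_addr += (next - addr), addr = next, addr != end);

        return 0;
    }
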
sparse-vmemmap.c
275 vmemmap_verify(pte, node, addr, addr + PAGE_SIZE); in vmemmap_populate_address()
286 unsigned long addr = start; in vmemmap_populate_range() local
289 for (; addr < end; addr += PAGE_SIZE) { in vmemmap_populate_range()
392 for (maddr = addr; maddr < addr + headsize; maddr += PAGE_SIZE) { in vmemmap_populate_hvo()
419 unsigned long addr; in vmemmap_populate_hugepages() local
426 for (addr = start; addr < end; addr = next) { in vmemmap_populate_hugepages()
493 addr -= PAGE_SIZE; in compound_section_tail_page()
499 pte = pte_offset_kernel(pmd_off_k(addr), addr); in compound_section_tail_page()
511 unsigned long size, addr; in vmemmap_populate_compound_pages() local
530 for (addr = start; addr < end; addr += size) { in vmemmap_populate_compound_pages()
[all …]
mmap.c
365 addr = round_hint_to_min(addr); in do_mmap()
411 if (find_vma_intersection(mm, addr, addr + len)) in do_mmap()
703 if (addr) { in generic_get_unmapped_area()
704 addr = PAGE_ALIGN(addr); in generic_get_unmapped_area()
706 if (mmap_end - len >= addr && addr >= mmap_min_addr && in generic_get_unmapped_area()
754 if (addr) { in generic_get_unmapped_area_topdown()
755 addr = PAGE_ALIGN(addr); in generic_get_unmapped_area_topdown()
843 addr = get_area(file, addr, len, pgoff, flags); in __get_unmapped_area()
848 addr = thp_get_unmapped_area_vmflags(file, addr, len, in __get_unmapped_area()
1079 addr = untagged_addr(addr); in SYSCALL_DEFINE2()
[all …]
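
generic_get_unmapped_area() (mmap.c:703-706 above) page-aligns the caller's hint and accepts it only if [addr, addr + len) fits between mmap_min_addr and the top of the mmap range; the real code additionally rejects hints that intersect an existing VMA, which this sketch omits. The constants here are illustrative:

    /* Hint alignment and bounds check, modeled on mmap.c:703-706. */
    #include <stdio.h>

    #define PAGE_SIZE     4096UL
    #define PAGE_ALIGN(a) (((a) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

    static const unsigned long mmap_min_addr = 0x10000;   /* illustrative */
    static const unsigned long mmap_end      = 1UL << 47; /* illustrative */

    static int hint_usable(unsigned long addr, unsigned long len)
    {
        addr = PAGE_ALIGN(addr);
        /* mmap_end - len >= addr avoids overflow of addr + len */
        return mmap_end - len >= addr && addr >= mmap_min_addr;
    }

    int main(void)
    {
        printf("%d\n", hint_usable(0x7f0000001234, 8192)); /* 1 */
        printf("%d\n", hint_usable(0x1000, 8192));         /* 0: below min */
        return 0;
    }
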
pagewalk.c
40 err = ops->install_pte(addr, addr + PAGE_SIZE, &new_pte, in walk_pte_range_inner()
50 err = ops->pte_entry(pte, addr, addr + PAGE_SIZE, walk); in walk_pte_range_inner()
54 if (addr >= end - PAGE_SIZE) in walk_pte_range_inner()
56 addr += PAGE_SIZE; in walk_pte_range_inner()
108 pmd = pmd_offset(pud, addr); in walk_pmd_range()
162 } while (pmd++, addr = next, addr != end); in walk_pmd_range()
227 } while (pud++, addr = next, addr != end); in walk_pud_range()
265 } while (p4d++, addr = next, addr != end); in walk_p4d_range()
306 } while (pgd++, addr = next, addr != end); in walk_pgd_range()
341 } while (addr = next, addr != end); in walk_hugetlb_range()
[all …]
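
pagewalk.c drives per-level callbacks (install_pte, pte_entry) from one generic loop over the range. A minimal model of that ops-table pattern, walking a pretend address range one page at a time; names here are stand-ins, not the mm_walk API:

    /* Callback-table range walk, modeled on walk_pte_range_inner(). */
    #include <stdio.h>

    #define PAGE_SIZE 4096UL

    struct walk_ops {
        /* Called once per page; nonzero return stops the walk. */
        int (*pte_entry)(unsigned long addr, unsigned long end, void *priv);
    };

    static int walk_range(unsigned long addr, unsigned long end,
                          const struct walk_ops *ops, void *priv)
    {
        for (; addr < end; addr += PAGE_SIZE) {
            int err = ops->pte_entry(addr, addr + PAGE_SIZE, priv);

            if (err)
                return err;
        }
        return 0;
    }

    static int count_pages(unsigned long addr, unsigned long end, void *priv)
    {
        (void)addr; (void)end;
        (*(int *)priv)++;
        return 0;
    }

    int main(void)
    {
        static const struct walk_ops ops = { .pte_entry = count_pages };
        int n = 0;

        walk_range(0x1000, 0x5000, &ops, &n);
        printf("visited %d pages\n", n); /* 4 */
        return 0;
    }
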
memory.c
212 } while (pmd++, addr = next, addr != end); in free_pmd_range()
246 } while (pud++, addr = next, addr != end); in free_pud_range()
280 } while (p4d++, addr = next, addr != end); in free_p4d_range()
369 } while (pgd++, addr = next, addr != end); in free_pgd_range()
1850 } while (pud++, addr = next, addr != end); in zap_pud_range()
1869 } while (p4d++, addr = next, addr != end); in zap_p4d_range()
1890 } while (pgd++, addr = next, addr != end); in unmap_page_range()
2727 } while (pmd++, addr = next, addr != end); in remap_pmd_range()
5800 for (addr = start; addr != end; start_ptep++, addr += PAGE_SIZE) { in numa_rebuild_large_mapping()
6696 addr = untagged_addr_remote(mm, addr); in __access_remote_vm()
[all …]
mprotect.c
186 addr += idx * PAGE_SIZE; in prot_commit_flush_ptes()
441 } while (pte += nr_ptes, addr += nr_ptes * PAGE_SIZE, addr != end); in change_pte_range()
520 pmd = pmd_offset(pud, addr); in change_pmd_range()
574 } while (pmd++, addr = next, addr != end); in change_pmd_range()
609 vma->vm_mm, addr, end); in change_pud_range()
632 } while (pudp++, addr = next, addr != end); in change_pud_range()
648 p4d = p4d_offset(pgd, addr); in change_p4d_range()
658 } while (p4d++, addr = next, addr != end); in change_p4d_range()
672 BUG_ON(addr >= end); in change_protection_range()
673 pgd = pgd_offset(mm, addr); in change_protection_range()
[all …]
hmm.c
55 for (; addr < end; addr += PAGE_SIZE, i++) { in hmm_pfns_fill()
81 hmm_vma_walk->last = addr; in hmm_vma_fault()
89 for (; addr < end; addr += PAGE_SIZE) in hmm_vma_fault()
215 for (i = 0; addr < end; addr += PAGE_SIZE, i++, pfn++) { in hmm_vma_handle_pmd()
288 hmm_vma_walk->last = addr; in hmm_vma_handle_pte()
339 unsigned long addr = start; in hmm_vma_walk_pmd() local
350 hmm_vma_walk->last = addr; in hmm_vma_walk_pmd()
395 for (; addr < end; addr += PAGE_SIZE, ptep++, hmm_pfns++) { in hmm_vma_walk_pmd()
424 unsigned long addr = start; in hmm_vma_walk_pud() local
520 for (; addr < end; addr += PAGE_SIZE, i++, pfn++) { in hmm_vma_walk_hugetlb_entry()
[all …]
gup_test.c
93 index_to_dump, gup->addr); in dump_pages_test()
104 unsigned long i, nr_pages, addr, next; in __gup_test_ioctl() local
127 for (addr = gup->addr; addr < gup->addr + gup->size; addr = next) { in __gup_test_ioctl()
131 next = addr + nr * PAGE_SIZE; in __gup_test_ioctl()
132 if (next > gup->addr + gup->size) { in __gup_test_ioctl()
133 next = gup->addr + gup->size; in __gup_test_ioctl()
134 nr = (next - addr) / PAGE_SIZE; in __gup_test_ioctl()
153 nr = pin_user_pages(addr, nr, in __gup_test_ioctl()
180 gup->size = addr - gup->addr; in __gup_test_ioctl()
266 addr = args.addr + pin_longterm_test_nr_pages * PAGE_SIZE; in pin_longterm_test_start()
[all …]
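
__gup_test_ioctl() (lines 127-134 above) processes the buffer in fixed-size batches and clamps the final batch to the end of the range. The clamping arithmetic on its own, with illustrative sizes:

    /* Clamped batch iteration, as in __gup_test_ioctl(). */
    #include <stdio.h>

    #define PAGE_SIZE 4096UL

    int main(void)
    {
        unsigned long start = 0, size = 10 * PAGE_SIZE;
        unsigned long batch = 4; /* pages per batch, like 'nr' above */

        for (unsigned long addr = start, next; addr < start + size;
             addr = next) {
            unsigned long nr = batch;

            next = addr + nr * PAGE_SIZE;
            if (next > start + size) {
                next = start + size;
                nr = (next - addr) / PAGE_SIZE;
            }
            printf("batch at %#lx: %lu pages\n", addr, nr); /* 4, 4, 2 */
        }
        return 0;
    }
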
nommu.c
106 void vfree(const void *addr) in vfree() argument
108 kfree(addr); in vfree()
167 return virt_to_page(addr); in vmalloc_to_page()
677 if (vma->vm_start != addr) in find_vma_exact()
690 unsigned long addr, in validate_mmap_request() argument
1012 unsigned long addr, in do_mmap() argument
1038 addr = 0; in do_mmap()
1147 addr = file->f_op->get_unmapped_area(file, addr, len, in do_mmap()
1150 ret = addr; in do_mmap()
1279 unsigned long addr; member
[all …]
mincore.c
40 for (; addr != end; vec++, addr += PAGE_SIZE) in mincore_hugetlb()
79 unsigned long nr = (end - addr) >> PAGE_SHIFT; in __mincore_unmapped_range()
85 pgoff = linear_page_index(vma, addr); in __mincore_unmapped_range()
99 walk->private += __mincore_unmapped_range(addr, end, in mincore_unmapped_range()
111 int nr = (end - addr) >> PAGE_SHIFT; in mincore_pte_range()
121 ptep = pte_offset_map_lock(walk->mm, pmd, addr, &ptl); in mincore_pte_range()
126 for (; addr != end; ptep += step, addr += step * PAGE_SIZE) { in mincore_pte_range()
132 __mincore_unmapped_range(addr, addr + PAGE_SIZE, in mincore_pte_range()
138 unsigned int max_nr = (end - addr) >> PAGE_SHIFT; in mincore_pte_range()
208 vma = vma_lookup(current->mm, addr); in do_mincore()
[all …]
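
These mincore.c hits are the kernel side of mincore(2). For orientation, the matching userspace call on Linux: one status byte per page of the probed range, with bit 0 set if the page is resident:

    /* Userspace mincore(2) usage against an anonymous mapping. */
    #define _DEFAULT_SOURCE
    #include <stdio.h>
    #include <sys/mman.h>
    #include <unistd.h>

    int main(void)
    {
        long page = sysconf(_SC_PAGESIZE);
        size_t len = 4 * page;
        unsigned char vec[4]; /* one byte per page of 'len' */
        char *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
                         MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

        if (buf == MAP_FAILED)
            return 1;
        buf[0] = 1; /* fault in the first page only */
        if (mincore(buf, len, vec) == 0)
            for (size_t i = 0; i < len / page; i++)
                printf("page %zu: %s\n", i,
                       (vec[i] & 1) ? "resident" : "not resident");
        munmap(buf, len);
        return 0;
    }
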
ptdump.c
18 unsigned long addr) in note_kasan_page_table() argument
46 st->note_page_pgd(st, addr, val); in ptdump_pgd_entry()
69 st->note_page_p4d(st, addr, val); in ptdump_p4d_entry()
92 st->note_page_pud(st, addr, val); in ptdump_pud_entry()
113 st->note_page_pmd(st, addr, val); in ptdump_pmd_entry()
129 st->note_page_pte(st, addr, val); in ptdump_pte_entry()
146 st->note_page_pte(st, addr, pte_zero); in ptdump_hole()
149 st->note_page_pmd(st, addr, pmd_zero); in ptdump_hole()
152 st->note_page_pud(st, addr, pud_zero); in ptdump_hole()
155 st->note_page_p4d(st, addr, p4d_zero); in ptdump_hole()
[all …]
mremap.c
81 pgd = pgd_offset(mm, addr); in get_old_pud()
1076 unsigned long addr = vrm->addr; in unmap_source_vma() local
1154 if (vm_start < addr) { in unmap_source_vma()
1524 return vrm->addr; in expand_vma()
1557 return vrm->addr; in mremap_at()
1570 return vrm->addr; in mremap_at()
1651 unsigned long addr = vrm->addr; in check_prep_vma() local
1669 vrm->new_addr = addr; in check_prep_vma()
1751 unsigned long addr = vrm->addr; in check_mremap_params() local
1864 vrm->addr = addr; in remap_move()
[all …]
migrate_device.c
25 unsigned long addr; in migrate_vma_collect_skip() local
27 for (addr = start; addr < end; addr += PAGE_SIZE) { in migrate_vma_collect_skip()
41 unsigned long addr; in migrate_vma_collect_hole() local
47 for (addr = start; addr < end; addr += PAGE_SIZE) { in migrate_vma_collect_hole()
87 split_huge_pmd(vma, pmdp, addr); in migrate_vma_collect_pmd()
115 for (; addr < end; addr += PAGE_SIZE, ptep++) { in migrate_vma_collect_pmd()
587 unsigned long addr, in migrate_vma_insert_page() argument
608 pgdp = pgd_offset(mm, addr); in migrate_vma_insert_page()
609 p4dp = p4d_alloc(mm, pgdp, addr); in migrate_vma_insert_page()
612 pudp = pud_alloc(mm, p4dp, addr); in migrate_vma_insert_page()
[all …]
execmem.c
146 ret = set_memory_nx(addr, nr); in execmem_force_rw()
150 return set_memory_rw(addr, nr); in execmem_force_rw()
190 MA_STATE(mas, free_areas, addr - 1, addr + 1); in execmem_cache_add_locked()
194 lower = addr; in execmem_cache_add_locked()
195 upper = addr + size - 1; in execmem_cache_add_locked()
221 if (addr >= range->start && addr + size < range->end) in within_range()
225 addr >= range->fallback_start && addr + size < range->fallback_end) in within_range()
253 addr = mas_free.index; in __execmem_cache_alloc()
257 mas_set_range(&mas_busy, addr, addr + size - 1); in __execmem_cache_alloc()
277 ptr = (void *)addr; in __execmem_cache_alloc()
[all …]
/mm/kmsan/
instrumentation.c
25 (u64)addr < TASK_SIZE) in is_bad_asm_addr()
27 if (!kmsan_get_metadata(addr, KMSAN_META_SHADOW)) in is_bad_asm_addr()
33 get_shadow_origin_ptr(void *addr, u64 size, bool store) in get_shadow_origin_ptr() argument
38 ret = kmsan_get_shadow_origin_ptr(addr, size, store); in get_shadow_origin_ptr()
55 return get_shadow_origin_ptr(addr, size, /*store*/ false); in __msan_metadata_ptr_for_load_n()
65 return get_shadow_origin_ptr(addr, size, /*store*/ true); in __msan_metadata_ptr_for_store_n()
75 void *addr); \
77 void *addr) \
79 return get_shadow_origin_ptr(addr, size, /*store*/ false); \
106 void __msan_instrument_asm_store(void *addr, uintptr_t size) in __msan_instrument_asm_store() argument
[all …]
/mm/damon/
paddr.c
152 addr = r->ar.start; in damon_pa_pageout()
153 while (addr < r->ar.end) { in damon_pa_pageout()
156 addr += PAGE_SIZE; in damon_pa_pageout()
192 addr = r->ar.start; in damon_pa_mark_accessed_or_deactivate()
193 while (addr < r->ar.end) { in damon_pa_mark_accessed_or_deactivate()
196 addr += PAGE_SIZE; in damon_pa_mark_accessed_or_deactivate()
239 addr = r->ar.start; in damon_pa_migrate()
243 addr += PAGE_SIZE; in damon_pa_migrate()
277 unsigned long addr; in damon_pa_stat() local
283 addr = r->ar.start; in damon_pa_stat()
[all …]
vaddr.c
323 damon_pmdp_mkold(pmd, walk->vma, addr); in damon_mkold_pmd_entry()
337 damon_ptep_mkold(pte, walk->vma, addr); in damon_mkold_pmd_entry()
348 pte_t entry = huge_ptep_get(mm, addr, pte); in damon_hugetlb_mkold()
360 if (mmu_notifier_clear_young(mm, addr, in damon_hugetlb_mkold()
403 walk_page_range(mm, addr, addr + 1, &damon_mkold_ops, NULL); in damon_va_mkold()
471 addr)) in damon_young_pmd_entry()
495 mmu_notifier_test_young(walk->mm, addr)) in damon_young_pmd_entry()
553 walk_page_range(mm, addr, addr + 1, &damon_young_ops, &arg); in damon_va_young()
629 damon_ptep_mkold(ptep, vma, addr); in damos_va_filter_young_match()
631 damon_pmdp_mkold(pmdp, vma, addr); in damos_va_filter_young_match()
[all …]
