/mm/
nommu.c
    679  if (vma->vm_end != end)  in find_vma_exact()
    997  region->vm_end = vma->vm_end = 0;  in do_mmap_private()
    1129  vma->vm_end = 0;  in do_mmap()
    1164  vma->vm_end = region->vm_end = addr + len;  in do_mmap()
    1337  region->vm_top = region->vm_end = new->vm_end = addr;  in split_vma()
    1359  vma->vm_region->vm_end = vma->vm_end = addr;  in split_vma()
    1394  vma->vm_end = from;  in vmi_shrink_vma()
    1456  if (end == vma->vm_end)  in do_munmap()
    1654  len = vma->vm_end - addr;  in __access_remote_vm()
    1739  if (addr_end > vma->vm_end)  in __copy_remote_vm_str()
    [all …]
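All of these hits lean on the same convention: a VMA covers the half-open range [vm_start, vm_end), so vm_end is the first byte past the mapping, a length is vm_end minus some start address, and containment tests use `addr < vm_end`. A minimal userspace model of that convention follows; `struct vma_range` and the helpers are illustrative stand-ins, not the kernel definitions.

```c
#include <assert.h>
#include <stdbool.h>

/* Illustrative stand-in for the kernel's vm_area_struct range fields. */
struct vma_range {
	unsigned long vm_start;	/* first byte of the mapping */
	unsigned long vm_end;	/* one past the last byte (exclusive) */
};

/* An address belongs to the VMA iff vm_start <= addr < vm_end. */
static bool vma_contains(const struct vma_range *vma, unsigned long addr)
{
	return addr >= vma->vm_start && addr < vma->vm_end;
}

/* Bytes remaining in the VMA from addr, as in nommu.c's
 * "len = vma->vm_end - addr". */
static unsigned long vma_bytes_from(const struct vma_range *vma,
				    unsigned long addr)
{
	return vma->vm_end - addr;
}

int main(void)
{
	struct vma_range vma = { .vm_start = 0x1000, .vm_end = 0x3000 };

	assert(vma_contains(&vma, 0x1000));
	assert(!vma_contains(&vma, 0x3000));	/* vm_end is exclusive */
	assert(vma_bytes_from(&vma, 0x2000) == 0x1000);
	return 0;
}
```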
|
vma.c
    389  WARN_ON_ONCE(vp->vma->vm_end < vp->remove->vm_end);  in vma_complete()
    484  unmap_vmas(&tlb, mas, vma, vma->vm_start, vma->vm_end, vma->vm_end,  in unmap_region()
    486  mas_set(mas, vma->vm_end);  in unmap_region()
    520  new->vm_end = addr;  in __split_vma()
    564  vma->vm_end = addr;  in __split_vma()
    918  vmg->end = next->vm_end;  in vma_merge_existing_range()
    960  vmg->end = next->vm_end;  in vma_merge_existing_range()
    1066  vmg->end = next->vm_end;  in vma_merge_new_range()
    1630  if (vma->vm_end > end) {  in vma_modify()
    1708  VMG_VMA_STATE(vmg, vmi, vma, vma, vma->vm_end, vma->vm_end + delta);  in vma_merge_extend()
    [all …]
|
vma_exec.c
    34  unsigned long old_end = vma->vm_end;  in relocate_vma_down()
    138  vma->vm_end = STACK_TOP_MAX;  in create_init_stack_vma()
    139  vma->vm_start = vma->vm_end - PAGE_SIZE;  in create_init_stack_vma()
    150  *top_mem_p = vma->vm_end - sizeof(void *);  in create_init_stack_vma()
|
mremap.c
    1044  if (!err && vma->vm_end != old_addr + old_len)  in prep_move_vma()
    1082  unsigned long vm_end;  in unmap_source_vma() local
    1114  vm_end = vma->vm_end;  in unmap_source_vma()
    1160  if (vm_end > end) {  in unmap_source_vma()
    1246  unsigned long old_end = vrm->vma->vm_end;  in dontunmap_complete()
    1415  unsigned long end = vma->vm_end + delta;  in vma_expandable()
    1417  if (end < vma->vm_end) /* overflow */  in vma_expandable()
    1454  VMA_ITERATOR(vmi, mm, vma->vm_end);  in expand_vma_in_place()
    1716  if (old_len > vma->vm_end - addr)  in check_prep_vma()
    1835  unsigned long len = min(end, vma->vm_end) - addr;  in remap_move()
    [all …]
|
msync.c
    90  fend = fstart + (min(end, vma->vm_end) - start) - 1;  in SYSCALL_DEFINE3()
    91  start = vma->vm_end;  in SYSCALL_DEFINE3()
    107  vma = find_vma(mm, vma->vm_end);  in SYSCALL_DEFINE3()
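The msync() hits show the standard loop for walking every VMA that overlaps a user range [start, end): handle up to min(end, vma->vm_end), advance start to vma->vm_end, and look up the next VMA from there. A hedged userspace sketch of the same loop over a sorted table of ranges; find_next_range() is a stand-in for find_vma(), and the hole-skipping policy is illustrative rather than the kernel's.

```c
#include <stdio.h>

struct range {
	unsigned long start, end;	/* half-open [start, end) */
};

/* Stand-in for find_vma(): first range whose end lies above addr,
 * assuming the table is sorted and non-overlapping. */
static const struct range *find_next_range(const struct range *tbl, int n,
					    unsigned long addr)
{
	for (int i = 0; i < n; i++)
		if (addr < tbl[i].end)
			return &tbl[i];
	return NULL;
}

/* Visit every mapped piece of [start, end), one range at a time. */
static void walk(const struct range *tbl, int n,
		 unsigned long start, unsigned long end)
{
	while (start < end) {
		const struct range *r = find_next_range(tbl, n, start);

		if (!r || r->start >= end)
			break;	/* nothing else mapped inside the window */

		unsigned long lo = start > r->start ? start : r->start;
		unsigned long hi = end < r->end ? end : r->end;	/* min(end, vm_end) */

		printf("piece [%#lx, %#lx)\n", lo, hi);
		start = r->end;	/* "start = vma->vm_end" */
	}
}

int main(void)
{
	const struct range tbl[] = {
		{ 0x1000, 0x3000 }, { 0x5000, 0x8000 },
	};

	walk(tbl, 2, 0x2000, 0x6000);	/* prints two pieces */
	return 0;
}
```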
|
vma.h
    216  __mas_set_range(&vmi->mas, vma->vm_start, vma->vm_end - 1);  in vma_iter_store_gfp()
    237  desc->end = vma->vm_end;  in vma_to_desc()
    486  vma->vm_end, vmi->mas.index, vmi->mas.last);  in vma_iter_store_overwrite()
    491  vmi->mas.last, vma->vm_start, vma->vm_start, vma->vm_end,  in vma_iter_store_overwrite()
    500  __mas_set_range(&vmi->mas, vma->vm_start, vma->vm_end - 1);  in vma_iter_store_overwrite()
|
userfaultfd.c
    27  if (dst_end > dst_vma->vm_end)  in validate_dst_vma()
    907  VM_WARN_ONCE(start < dst_vma->vm_start || start + len > dst_vma->vm_end,  in uffd_wp_range()
    977  _end = min(dst_vma->vm_end, end);  in mwriteprotect_range()
    1543  if (src_start >= vma->vm_start && src_start < vma->vm_end)  in find_vmas_mm_locked()
    1573  if (src_start >= vma->vm_start && src_start < vma->vm_end) {  in uffd_move_lock()
    1766  if (src_start + len > src_vma->vm_end)  in move_pages()
    1771  if (dst_start + len > dst_vma->vm_end)  in move_pages()
    1943  if (start == vma->vm_start && end == vma->vm_end)  in userfaultfd_clear_vma()
    1998  vma_end = min(end, vma->vm_end);  in userfaultfd_register_range()
    2020  start = vma->vm_end;  in userfaultfd_register_range()
    [all …]
|
mmap_lock.c
    167  if (unlikely(address < vma->vm_start || address >= vma->vm_end))  in lock_vma_under_rcu()
    239  if (unlikely(from_addr >= vma->vm_end))  in lock_next_vma()
    266  vma_iter_set(vmi, IS_ERR_OR_NULL(vma) ? from_addr : vma->vm_end);  in lock_next_vma()
|
pagewalk.c
    491  next = min(end, vma->vm_end);  in walk_page_range_mm()
    492  vma = find_vma(mm, vma->vm_end);  in walk_page_range_mm()
    698  if (start < vma->vm_start || end > vma->vm_end)  in walk_page_range_vma()
    725  return __walk_page_range(vma->vm_start, vma->vm_end, &walk);  in walk_page_vma()
    793  err = walk_page_test(vma->vm_start, vma->vm_end, &walk);  in walk_page_mapping()
    880  if (WARN_ON_ONCE(addr < vma->vm_start || addr >= vma->vm_end))  in folio_walk_start()
|
mseal.c
    49  prev_end = vma->vm_end;  in range_contains_unmapped()
    69  unsigned long curr_end = MIN(vma->vm_end, end);  in mseal_apply()
|
mmap.c
    975  populate_vma_page_range(prev, addr, prev->vm_end, NULL);  in find_extend_vma_locked()
    1169  if (start + size > vma->vm_end) {  in SYSCALL_DEFINE5()
    1170  VMA_ITERATOR(vmi, mm, vma->vm_end);  in SYSCALL_DEFINE5()
    1175  if (next->vm_start != prev->vm_end)  in SYSCALL_DEFINE5()
    1184  if (start + size <= next->vm_end)  in SYSCALL_DEFINE5()
    1290  vma_iter_set(&vmi, vma->vm_end);  in exit_mmap()
    1300  vma_iter_set(&vmi, vma->vm_end);  in exit_mmap()
    1756  mpnt->vm_end, GFP_KERNEL);  in dup_mmap()
    1859  mas_set_range(&vmi.mas, mpnt->vm_start, mpnt->vm_end - 1);  in dup_mmap()
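The SYSCALL_DEFINE5() hits in mmap.c (by the line numbers this looks like the remap_file_pages() path, though treat that attribution as a guess) check that a request spanning several VMAs is still backed without holes: walking forward, each next->vm_start must equal the previous vm_end. A small standalone sketch of that contiguity test; `struct range` and `span_is_contiguous()` are illustrative names, not kernel code.

```c
#include <assert.h>
#include <stdbool.h>

struct range {
	unsigned long start, end;	/* half-open [start, end) */
};

/*
 * True iff [span_start, span_start + size) is covered by the sorted ranges
 * tbl[0..n) with no holes: each range must begin exactly where the previous
 * one ended, mirroring the "next->vm_start != prev->vm_end" rejection.
 */
static bool span_is_contiguous(const struct range *tbl, int n,
			       unsigned long span_start, unsigned long size)
{
	unsigned long pos = span_start;
	unsigned long span_end = span_start + size;

	for (int i = 0; i < n && pos < span_end; i++) {
		if (tbl[i].end <= pos)
			continue;	/* entirely before the span */
		if (tbl[i].start > pos)
			return false;	/* hole at pos */
		pos = tbl[i].end;
	}
	return pos >= span_end;
}

int main(void)
{
	const struct range ok[]   = { { 0x1000, 0x3000 }, { 0x3000, 0x6000 } };
	const struct range hole[] = { { 0x1000, 0x3000 }, { 0x4000, 0x6000 } };

	assert(span_is_contiguous(ok, 2, 0x1000, 0x5000));
	assert(!span_is_contiguous(hole, 2, 0x1000, 0x5000));
	return 0;
}
```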
|
internal.h
    996  if (end > vma->vm_end)  in folio_within_range()
    997  end = vma->vm_end;  in folio_within_range()
    1013  return folio_within_range(folio, vma, vma->vm_start, vma->vm_end);  in folio_within_vma()
    1083  if (address < vma->vm_start || address >= vma->vm_end)  in vma_address()
    1111  if (address < vma->vm_start || address > vma->vm_end)  in vma_address_end()
    1112  address = vma->vm_end;  in vma_address_end()
    1529  vma->vm_end = end;  in vma_set_range()
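vma_address() and vma_address_end() in internal.h translate a file page offset back into a user virtual address and then bound it against [vm_start, vm_end). A rough userspace model of that translation; vm_pgoff here mirrors the kernel field of the same name (the VMA's starting page offset into its file), and returning 0 for an out-of-range page is this sketch's simplification, not the kernel's behaviour.

```c
#include <assert.h>

#define PAGE_SHIFT	12

struct vma_model {
	unsigned long vm_start, vm_end;	/* half-open virtual range */
	unsigned long vm_pgoff;		/* file page offset mapped at vm_start */
};

/*
 * Virtual address where file page 'pgoff' is mapped, or 0 if that page
 * falls outside the VMA -- the same bounds test as the vma_address() hit:
 * address < vm_start || address >= vm_end.
 */
static unsigned long model_vma_address(const struct vma_model *vma,
				       unsigned long pgoff)
{
	unsigned long address = vma->vm_start +
				((pgoff - vma->vm_pgoff) << PAGE_SHIFT);

	if (address < vma->vm_start || address >= vma->vm_end)
		return 0;
	return address;
}

int main(void)
{
	struct vma_model vma = {
		.vm_start = 0x40000000, .vm_end = 0x40004000, .vm_pgoff = 8,
	};

	assert(model_vma_address(&vma, 9) == 0x40001000);
	assert(model_vma_address(&vma, 12) == 0);	/* would land at vm_end */
	return 0;
}
```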
|
vma_init.c
    46  dest->vm_end = src->vm_end;  in vm_area_init_from()
|
madvise.c
    820  if (range.start >= vma->vm_end)  in madvise_free_single_vma()
    822  range.end = min(vma->vm_end, end_addr);  in madvise_free_single_vma()
    933  if (range->end > vma->vm_end) {  in madvise_dontneed_free()
    946  range->end = vma->vm_end;  in madvise_dontneed_free()
    1595  if (madv_behavior->range.end > vma->vm_end || current->mm != mm ||  in try_vma_read_lock()
    1664  range->end = min(vma->vm_end, last_end);  in madvise_walk_vmas()
    1682  if (vma && range->end < vma->vm_end)  in madvise_walk_vmas()
    1683  range->end = vma->vm_end;  in madvise_walk_vmas()
    1687  vma = find_vma(mm, vma ? vma->vm_end : range->end);  in madvise_walk_vmas()
|
mlock.c
    548  tmp = vma->vm_end;  in apply_vma_lock_flags()
    589  if (end < vma->vm_end) {  in count_mm_mlocked_page_nr()
    593  count += vma->vm_end - vma->vm_start;  in count_mm_mlocked_page_nr()
    734  error = mlock_fixup(&vmi, vma, &prev, vma->vm_start, vma->vm_end,  in apply_mlockall_flags()
|
memory.c
    418  free_pgd_range(tlb, addr, vma->vm_end,  in free_pgtables()
    1366  unsigned long end = src_vma->vm_end;  in copy_page_range()
    1903  if (start >= vma->vm_end)  in unmap_single_vma()
    1905  end = min(vma->vm_end, end_addr);  in unmap_single_vma()
    2347  if (addr < vma->vm_start || addr >= vma->vm_end)  in vm_insert_page()
    2540  if (addr < vma->vm_start || addr >= vma->vm_end)  in vmf_insert_pfn_prot()
    2602  if (addr < vma->vm_start || addr >= vma->vm_end)  in __vm_insert_mixed()
    2806  if (addr != vma->vm_start || end != vma->vm_end)  in remap_pfn_range_internal()
    2978  vm_len = vma->vm_end - vma->vm_start;  in vm_iomap_memory()
    5796  vma->vm_end);  in numa_rebuild_large_mapping()
    [all …]
|
gup.c
    1312  vma->vm_start, vma->vm_end, addr);  in gup_vma_lookup()
    1397  if (!vma || start >= vma->vm_end) {  in __get_user_pages()
    1845  VM_WARN_ON_ONCE_VMA(end > vma->vm_end, vma);  in populate_vma_page_range()
    1964  } else if (nstart >= vma->vm_end)  in __mm_populate()
    1965  vma = find_vma_intersection(mm, vma->vm_end, end);  in __mm_populate()
    1973  nend = min(end, vma->vm_end);  in __mm_populate()
|
mincore.c
    211  end = min(vma->vm_end, addr + (pages << PAGE_SHIFT));  in do_mincore()
|
migrate_device.c
    548  args->start >= args->vma->vm_end)  in migrate_vma_setup()
    550  if (args->end <= args->vma->vm_start || args->end > args->vma->vm_end)  in migrate_vma_setup()
|
huge_memory.c
    152  addr = vma->vm_end - (PAGE_SIZE << order);  in __thp_vma_allowable_orders()
    1467  if (addr < vma->vm_start || addr >= vma->vm_end)  in vmf_insert_pfn_pmd()
    1503  if (addr < vma->vm_start || addr >= vma->vm_end)  in vmf_insert_folio_pmd()
    1604  if (addr < vma->vm_start || addr >= vma->vm_end)  in vmf_insert_pfn_pud()
    1638  if (addr < vma->vm_start || addr >= vma->vm_end)  in vmf_insert_folio_pud()
    2747  VM_BUG_ON_VMA(vma->vm_end < haddr + HPAGE_PUD_SIZE, vma);  in __split_huge_pud_locked()
    2853  VM_BUG_ON_VMA(vma->vm_end < haddr + HPAGE_PMD_SIZE, vma);  in __split_huge_pmd_locked()
    4350  addr = vma->vm_end;  in split_huge_pages_pid()
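In __thp_vma_allowable_orders(), vm_end bounds the highest address at which a huge page of a given order could still start: vma->vm_end - (PAGE_SIZE << order). A simplified sketch of that fit test; huge_order_fits() and the exact alignment policy are illustrative, not the kernel's.

```c
#include <assert.h>
#include <stdbool.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)

/* Round addr up to the natural alignment of a huge page of 'order'. */
static unsigned long align_up(unsigned long addr, unsigned int order)
{
	unsigned long size = PAGE_SIZE << order;

	return (addr + size - 1) & ~(size - 1);
}

/*
 * Can a naturally aligned huge page of 'order' fit inside [vm_start, vm_end)?
 * The highest usable start address is vm_end - (PAGE_SIZE << order).
 */
static bool huge_order_fits(unsigned long vm_start, unsigned long vm_end,
			    unsigned int order)
{
	unsigned long size = PAGE_SIZE << order;

	if (vm_end - vm_start < size)
		return false;
	return align_up(vm_start, order) <= vm_end - size;
}

int main(void)
{
	/* 2 MiB VMA, 2 MiB-aligned: a PMD-sized page (order 9) fits. */
	assert(huge_order_fits(0x200000, 0x400000, 9));
	/* Same size but misaligned start: no aligned 2 MiB slot remains. */
	assert(!huge_order_fits(0x201000, 0x401000, 9));
	return 0;
}
```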
|
ksm.c
    1201  vma->vm_start, vma->vm_end, false);  in unmerge_and_remove_all_rmap_items()
    2545  ksm_scan.address = vma->vm_end;  in scan_get_next_rmap_item()
    2547  while (ksm_scan.address < vma->vm_end) {  in scan_get_next_rmap_item()
    2725  err = unmerge_ksm_pages(vma, vma->vm_start, vma->vm_end, true);  in __ksm_del_vma()
    3057  if (addr < vma->vm_start || addr >= vma->vm_end)  in rmap_walk_ksm()
|
debug.c
    164  vma, (void *)vma->vm_start, (void *)vma->vm_end, vma->vm_mm,  in dump_vma()
|
mprotect.c
    920  end = vma->vm_end;  in do_mprotect_pkey()
    980  tmp = vma->vm_end;  in do_mprotect_pkey()
|
/mm/damon/tests/
vaddr-kunit.h
    28  mas_set_range(&mas, vmas[i].vm_start, vmas[i].vm_end - 1);  in __link_vmas()
    72  (struct vm_area_struct) {.vm_start = 10, .vm_end = 20},  in damon_test_three_regions_in_vmas()
    73  (struct vm_area_struct) {.vm_start = 20, .vm_end = 25},  in damon_test_three_regions_in_vmas()
    74  (struct vm_area_struct) {.vm_start = 200, .vm_end = 210},  in damon_test_three_regions_in_vmas()
    75  (struct vm_area_struct) {.vm_start = 210, .vm_end = 220},  in damon_test_three_regions_in_vmas()
    76  (struct vm_area_struct) {.vm_start = 300, .vm_end = 305},  in damon_test_three_regions_in_vmas()
    77  (struct vm_area_struct) {.vm_start = 307, .vm_end = 330},  in damon_test_three_regions_in_vmas()
|
/mm/damon/
vaddr.c
    141  gap = vma->vm_start - prev->vm_end;  in __damon_va_three_regions()
    145  first_gap.start = prev->vm_end;  in __damon_va_three_regions()
    148  second_gap.start = prev->vm_end;  in __damon_va_three_regions()
    169  regions[2].end = ALIGN(prev->vm_end, DAMON_MIN_REGION);  in __damon_va_three_regions()
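__damon_va_three_regions() only consults vm_end to measure the gaps between adjacent VMAs (vma->vm_start - prev->vm_end) and remember the two largest, which then split the address space into three regions. A standalone sketch of that gap-tracking pass over sorted ranges, using the same numbers as the vaddr-kunit.h fixtures above; the struct and function names are illustrative.

```c
#include <stdio.h>

struct range {
	unsigned long start, end;	/* half-open, sorted, non-overlapping */
};

struct gap {
	unsigned long start, end;
};

/* Track the two widest gaps between consecutive ranges, widest first. */
static void two_biggest_gaps(const struct range *tbl, int n,
			     struct gap *first, struct gap *second)
{
	first->start = first->end = second->start = second->end = 0;

	for (int i = 1; i < n; i++) {
		unsigned long gap = tbl[i].start - tbl[i - 1].end;

		if (gap > first->end - first->start) {
			*second = *first;
			first->start = tbl[i - 1].end;
			first->end = tbl[i].start;
		} else if (gap > second->end - second->start) {
			second->start = tbl[i - 1].end;
			second->end = tbl[i].start;
		}
	}
}

int main(void)
{
	/* Same shape as the vaddr-kunit.h fixtures: three clusters of VMAs. */
	const struct range vmas[] = {
		{ 10, 20 }, { 20, 25 }, { 200, 210 },
		{ 210, 220 }, { 300, 305 }, { 307, 330 },
	};
	struct gap first, second;

	two_biggest_gaps(vmas, 6, &first, &second);
	printf("gaps: [%lu, %lu) and [%lu, %lu)\n",
	       first.start, first.end, second.start, second.end);
	return 0;
}
```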
|