/mm/

nommu.c
    482   if (region->vm_start < pregion->vm_start)  in add_nommu_region()
    484   else if (region->vm_start > pregion->vm_start)  in add_nommu_region()
    970   vma->vm_start = region->vm_start;  in do_mmap_private()
    996   region->vm_start = vma->vm_start = 0;  in do_mmap_private()
   1128   vma->vm_start = 0;  in do_mmap()
   1163   vma->vm_start = region->vm_start = addr;  in do_mmap()
   1190   result = vma->vm_start;  in do_mmap()
   1339   region->vm_start = new->vm_start = addr;  in split_vma()
   1356   vma->vm_region->vm_start = vma->vm_start = addr;  in split_vma()
   1398   vma->vm_start = to;  in vmi_shrink_vma()
   [all …]

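add_nommu_region() keeps regions ordered by vm_start while descending its rbtree (the comparisons at lines 482/484). A minimal userspace sketch of that ordering rule, assuming a plain sorted array in place of the kernel's rbtree; struct region here is a simplified stand-in, not the kernel definition:

```c
#include <stdio.h>
#include <string.h>

/* simplified stand-in for the kernel's struct vm_region */
struct region {
    unsigned long vm_start;
    unsigned long vm_end;
};

/* Insert @new into @regs (kept sorted by vm_start), mirroring the
 * ordering decisions add_nommu_region() makes while descending its
 * rbtree: go left when the new start is smaller, right when larger. */
static void insert_sorted(struct region *regs, int *n, struct region new)
{
    int i = 0;

    while (i < *n && regs[i].vm_start < new.vm_start)
        i++;
    memmove(&regs[i + 1], &regs[i], (*n - i) * sizeof(*regs));
    regs[i] = new;
    (*n)++;
}

int main(void)
{
    struct region regs[4];
    int n = 0;

    insert_sorted(regs, &n, (struct region){ 0x3000, 0x4000 });
    insert_sorted(regs, &n, (struct region){ 0x1000, 0x2000 });
    insert_sorted(regs, &n, (struct region){ 0x8000, 0x9000 });

    for (int i = 0; i < n; i++)
        printf("region [0x%lx, 0x%lx)\n", regs[i].vm_start, regs[i].vm_end);
    return 0;
}
```
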
vma.c
    506   WARN_ON(vma->vm_start >= addr);  in __split_vma()
    522   new->vm_start = addr;  in __split_vma()
    561   vma->vm_start = addr;  in __split_vma()
    917   vmg->start = prev->vm_start;  in vma_merge_existing_range()
    937   vmg->start = prev->vm_start;  in vma_merge_existing_range()
   1072   vmg->start = prev->vm_start;  in vma_merge_new_range()
   1192   if (vma->vm_start < start)  in vma_shrink()
   1622   if (vma->vm_start < start) {  in vma_modify()
   1921   b->vm_pgoff == a->vm_pgoff + ((b->vm_start - a->vm_start) >> PAGE_SHIFT);  in anon_vma_compatible()
   2931   high_limit = tmp->vm_start;  in unmapped_area_topdown()
   [all …]

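The condition at vma.c:1921 is the pgoff half of anon_vma_compatible() (the real function checks more): two mappings are a contiguous view of the same backing object only when the second one's page offset equals the first one's advanced by the page distance between their starts. A small userspace sketch of that predicate, assuming 4 KiB pages and a simplified stand-in struct:

```c
#include <stdbool.h>
#include <stdio.h>

#define PAGE_SHIFT 12UL   /* assumes 4 KiB pages */

/* simplified stand-in for struct vm_area_struct */
struct vma {
    unsigned long vm_start, vm_end;
    unsigned long vm_pgoff;   /* page offset of vm_start in the file */
};

/* The predicate from vma.c:1921: b's page offset must equal a's page
 * offset advanced by the distance (in pages) between the two starts.
 * When it holds, the two mappings line up and anon_vma state could be
 * shared across them. */
static bool pgoff_compatible(const struct vma *a, const struct vma *b)
{
    return b->vm_pgoff ==
           a->vm_pgoff + ((b->vm_start - a->vm_start) >> PAGE_SHIFT);
}

int main(void)
{
    struct vma a = { 0x10000, 0x12000, 4 };   /* pages 4..5 of the file */
    struct vma b = { 0x12000, 0x14000, 6 };   /* pages 6..7: contiguous */
    struct vma c = { 0x12000, 0x14000, 9 };   /* hole in the file view  */

    printf("a/b compatible: %d\n", pgoff_compatible(&a, &b)); /* 1 */
    printf("a/c compatible: %d\n", pgoff_compatible(&a, &c)); /* 0 */
    return 0;
}
```
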
vma.h
    164   return vma->vm_pgoff + PHYS_PFN(addr - vma->vm_start);  in vma_pgoff_offset()
    213   ((vmi->mas.index > vma->vm_start) || (vmi->mas.last < vma->vm_start)))  in vma_iter_store_gfp()
    216   __mas_set_range(&vmi->mas, vma->vm_start, vma->vm_end - 1);  in vma_iter_store_gfp()
    236   desc->start = vma->vm_start;  in vma_to_desc()
    483   vmi->mas.index > vma->vm_start)) {  in vma_iter_store_overwrite()
    485   vmi->mas.index, vma->vm_start, vma->vm_start,  in vma_iter_store_overwrite()
    489   vmi->mas.last < vma->vm_start)) {  in vma_iter_store_overwrite()
    491   vmi->mas.last, vma->vm_start, vma->vm_start, vma->vm_end,  in vma_iter_store_overwrite()
    497   ((vmi->mas.index > vma->vm_start) || (vmi->mas.last < vma->vm_start)))  in vma_iter_store_overwrite()
    500   __mas_set_range(&vmi->mas, vma->vm_start, vma->vm_end - 1);  in vma_iter_store_overwrite()

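vma_pgoff_offset() at vma.h:164 is the canonical address-to-file-page translation: the page distance of addr from vm_start, plus the mapping's vm_pgoff. A standalone sketch, with PAGE_SHIFT and PHYS_PFN reimplemented for userspace and a simplified struct:

```c
#include <stdio.h>

#define PAGE_SHIFT 12UL           /* assumes 4 KiB pages */
#define PHYS_PFN(x) ((x) >> PAGE_SHIFT)

struct vma {
    unsigned long vm_start;
    unsigned long vm_pgoff;       /* page offset of vm_start in the file */
};

/* Mirror of vma.h:164 (vma_pgoff_offset): translate a user virtual
 * address inside the VMA into a page offset within the backing file. */
static unsigned long vma_pgoff_offset(const struct vma *vma,
                                      unsigned long addr)
{
    return vma->vm_pgoff + PHYS_PFN(addr - vma->vm_start);
}

int main(void)
{
    struct vma vma = { .vm_start = 0x7f0000000000UL, .vm_pgoff = 16 };

    /* three pages past vm_start -> file page 16 + 3 = 19 */
    printf("%lu\n", vma_pgoff_offset(&vma, 0x7f0000003000UL));
    return 0;
}
```
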
mremap.c
    646   if (pmc->for_stack && addr_masked >= vma->vm_start)  in can_align_down()
   1042   if (vma->vm_start != old_addr)  in prep_move_vma()
   1081   unsigned long vm_start;  in unmap_source_vma() (local)
   1113   vm_start = vma->vm_start;  in unmap_source_vma()
   1154   if (vm_start < addr) {  in unmap_source_vma()
   1245   unsigned long old_start = vrm->vma->vm_start;  in dontunmap_complete()
   1421   if (get_unmapped_area(NULL, vma->vm_start, end - vma->vm_start,  in vma_expandable()
   1727   pgoff = (addr - vma->vm_start) >> PAGE_SHIFT;  in check_prep_vma()
   1834   unsigned long addr = max(vma->vm_start, start);  in remap_move()
   1842   if (!seen_vma && start < vma->vm_start)  in remap_move()
   [all …]

vma_exec.c
     33   unsigned long old_start = vma->vm_start;  in relocate_vma_down()
     76   next ? next->vm_start : USER_PGTABLES_CEILING);  in relocate_vma_down()
     85   next ? next->vm_start : USER_PGTABLES_CEILING);  in relocate_vma_down()
    139   vma->vm_start = vma->vm_end - PAGE_SIZE;  in create_init_stack_vma()

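relocate_vma_down() (old_start captured at line 33) shifts the temporary exec-time stack to its final, lower address. A loose model of just the range arithmetic, assuming the move preserves the mapping's length; the real function also moves the page tables and frees the vacated range:

```c
#include <assert.h>
#include <stdio.h>

struct vma {
    unsigned long vm_start, vm_end;
};

/* Rough model of the arithmetic relocate_vma_down() starts from: the
 * stack VMA moves down by @shift bytes, so both endpoints drop by the
 * same amount and the length is preserved. */
static void relocate_down(struct vma *vma, unsigned long shift)
{
    unsigned long old_start = vma->vm_start;   /* as at vma_exec.c:33 */
    unsigned long old_len = vma->vm_end - old_start;

    vma->vm_start = old_start - shift;
    vma->vm_end = vma->vm_start + old_len;
}

int main(void)
{
    struct vma stack = { 0x7ffffffde000UL, 0x7ffffffff000UL };

    relocate_down(&stack, 0x2000);
    assert(stack.vm_end - stack.vm_start == 0x21000UL);  /* length kept */
    printf("stack now [0x%lx, 0x%lx)\n", stack.vm_start, stack.vm_end);
    return 0;
}
```
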
mlock.c
    533   if (start > vma->vm_start)  in apply_vma_lock_flags()
    537   tmp = vma->vm_start;  in apply_vma_lock_flags()
    542   if (vma->vm_start != tmp)  in apply_vma_lock_flags()
    587   if (start > vma->vm_start)  in count_mm_mlocked_page_nr()
    588   count -= (start - vma->vm_start);  in count_mm_mlocked_page_nr()
    590   count += end - vma->vm_start;  in count_mm_mlocked_page_nr()
    593   count += vma->vm_end - vma->vm_start;  in count_mm_mlocked_page_nr()
    734   error = mlock_fixup(&vmi, vma, &prev, vma->vm_start, vma->vm_end,  in apply_mlockall_flags()

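count_mm_mlocked_page_nr() (lines 587-593) measures how much of a requested range is already covered by locked mappings, clipping partial overlaps at both ends. A sketch of that clipping over a sorted array of simplified VMAs, assuming every entry intersects the range:

```c
#include <stdio.h>

struct vma {
    unsigned long vm_start, vm_end;
};

/* Sketch of the trimming in count_mm_mlocked_page_nr() (mlock.c:587-593):
 * sum how much of [start, end) each VMA covers, clipping partial
 * overlaps at both edges. */
static unsigned long count_overlap(const struct vma *vmas, int n,
                                   unsigned long start, unsigned long end)
{
    unsigned long count = 0;

    for (int i = 0; i < n; i++) {
        const struct vma *vma = &vmas[i];

        if (end <= vma->vm_end) {        /* range ends inside this VMA */
            count += end - vma->vm_start;
            if (start > vma->vm_start)   /* ...and started inside too  */
                count -= start - vma->vm_start;
            break;
        }
        count += vma->vm_end - vma->vm_start;
        if (start > vma->vm_start)       /* clip the leading edge */
            count -= start - vma->vm_start;
    }
    return count;
}

int main(void)
{
    struct vma vmas[] = { { 0x1000, 0x4000 }, { 0x5000, 0x8000 } };

    /* overlap: [0x2000,0x4000) + [0x5000,0x6000) = 0x3000 bytes */
    printf("0x%lx\n", count_overlap(vmas, 2, 0x2000, 0x6000));
    return 0;
}
```
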
msync.c
     73   if (start < vma->vm_start) {  in SYSCALL_DEFINE3()
     76   start = vma->vm_start;  in SYSCALL_DEFINE3()
     88   fstart = (start - vma->vm_start) +  in SYSCALL_DEFINE3()

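Line 88 converts the user address being synced into a byte offset in the backing file (vm_pgoff counts pages, so it is shifted up by PAGE_SHIFT before being added); madvise.c:1035 does the same arithmetic for madvise_remove(). A minimal sketch, assuming 4 KiB pages:

```c
#include <stdio.h>

#define PAGE_SHIFT 12   /* assumes 4 KiB pages */

struct vma {
    unsigned long long vm_start;
    unsigned long long vm_pgoff;  /* page offset of vm_start in the file */
};

/* The computation behind msync.c:88: turn a user address inside a
 * file-backed VMA into a byte offset in the file, so the range can be
 * handed on to the filesystem sync path. */
static unsigned long long file_offset(const struct vma *vma,
                                      unsigned long long start)
{
    return (start - vma->vm_start) +
           ((unsigned long long)vma->vm_pgoff << PAGE_SHIFT);
}

int main(void)
{
    /* mapping starts at file page 2, i.e. file byte 0x2000 */
    struct vma vma = { .vm_start = 0x7f0000000000ULL, .vm_pgoff = 2 };

    /* 0x500 bytes into the mapping -> file byte 0x2500 */
    printf("0x%llx\n", file_offset(&vma, vma.vm_start + 0x500));
    return 0;
}
```
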
mmap_lock.c
    167   if (unlikely(address < vma->vm_start || address >= vma->vm_end))  in lock_vma_under_rcu()
    248   if (from_addr < vma->vm_start) {  in lock_next_vma()
    341   if (likely(vma && (vma->vm_start <= addr)))  in lock_mm_and_find_vma()
    369   if (vma->vm_start <= addr)  in lock_mm_and_find_vma()

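The check at line 167 is the half-open containment test: a VMA spans [vm_start, vm_end), end exclusive. lock_vma_under_rcu() repeats it after taking the per-VMA lock because the lockless lookup may have raced with the VMA moving or shrinking. A trivial sketch of the predicate:

```c
#include <stdbool.h>
#include <stdio.h>

struct vma {
    unsigned long vm_start, vm_end;   /* vm_end is exclusive */
};

/* The check from mmap_lock.c:167: an address belongs to a VMA iff
 * vm_start <= addr < vm_end.  In lock_vma_under_rcu() this is re-run
 * *after* the per-VMA lock is taken, to catch concurrent changes. */
static bool vma_contains(const struct vma *vma, unsigned long addr)
{
    return !(addr < vma->vm_start || addr >= vma->vm_end);
}

int main(void)
{
    struct vma vma = { 0x1000, 0x3000 };

    printf("%d %d %d\n",
           vma_contains(&vma, 0x1000),    /* 1: first byte       */
           vma_contains(&vma, 0x2fff),    /* 1: last byte        */
           vma_contains(&vma, 0x3000));   /* 0: vm_end exclusive */
    return 0;
}
```
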
pagewalk.c
    483   } else if (start < vma->vm_start) { /* outside vma */  in walk_page_range_mm()
    485   next = min(end, vma->vm_start);  in walk_page_range_mm()
    698   if (start < vma->vm_start || end > vma->vm_end)  in walk_page_range_vma()
    725   return __walk_page_range(vma->vm_start, vma->vm_end, &walk);  in walk_page_vma()
    785   start_addr = ((cba - vba) << PAGE_SHIFT) + vma->vm_start;  in walk_page_mapping()
    786   end_addr = ((cea - vba) << PAGE_SHIFT) + vma->vm_start;  in walk_page_mapping()
    793   err = walk_page_test(vma->vm_start, vma->vm_end, &walk);  in walk_page_mapping()
    880   if (WARN_ON_ONCE(addr < vma->vm_start || addr >= vma->vm_end))  in folio_walk_start()

internal.h
    993   if (start < vma->vm_start)  in folio_within_range()
    994   start = vma->vm_start;  in folio_within_range()
   1005   addr = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);  in folio_within_range()
   1013   return folio_within_range(folio, vma, vma->vm_start, vma->vm_end);  in folio_within_vma()
   1080   address = vma->vm_start +  in vma_address()
   1083   if (address < vma->vm_start || address >= vma->vm_end)  in vma_address()
   1087   address = vma->vm_start;  in vma_address()
   1109   address = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);  in vma_address_end()
   1111   if (address < vma->vm_start || address > vma->vm_end)  in vma_address_end()
   1528   vma->vm_start = start;  in vma_set_range()

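vma_address() (lines 1080-1087) runs the vma_pgoff_offset() translation in reverse: from a page's offset in the backing object to the virtual address where this VMA maps it, rejecting offsets outside the mapped window. A simplified single-page sketch (the kernel version also accounts for folio size and returns -EFAULT on a miss; this one returns 0):

```c
#include <stdio.h>

#define PAGE_SHIFT 12UL   /* assumes 4 KiB pages */

struct vma {
    unsigned long vm_start, vm_end;
    unsigned long vm_pgoff;
};

/* Sketch of vma_address() (internal.h:1080-1087): given a page's
 * offset in the backing object, compute where that page would sit in
 * this VMA; reject offsets that fall outside the mapped window.
 * (Underflow of pgoff - vm_pgoff wraps to a huge address, which the
 * bounds check also rejects.) */
static unsigned long vma_address(const struct vma *vma, unsigned long pgoff)
{
    unsigned long address;

    address = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
    if (address < vma->vm_start || address >= vma->vm_end)
        return 0;   /* page not mapped by this VMA */
    return address;
}

int main(void)
{
    /* maps file pages 10..13 at 0x40000 */
    struct vma vma = { 0x40000, 0x44000, 10 };

    printf("pgoff 12 -> 0x%lx\n", vma_address(&vma, 12));  /* 0x42000 */
    printf("pgoff 20 -> 0x%lx\n", vma_address(&vma, 20));  /* 0: outside */
    return 0;
}
```
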
mseal.c
     46   if (vma->vm_start > prev_end)  in range_contains_unmapped()
     65   if (start > vma->vm_start)  in mseal_apply()

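range_contains_unmapped() (line 46) refuses to seal a range with holes: walking the intersecting VMAs in order, any VMA that starts past where the previous one ended reveals an unmapped gap. A compact sketch of that walk over simplified VMAs:

```c
#include <stdbool.h>
#include <stdio.h>

struct vma {
    unsigned long vm_start, vm_end;
};

/* Sketch of the hole test in range_contains_unmapped() (mseal.c:46):
 * walk the VMAs intersecting [start, end) in order; a VMA beginning
 * past where the previous one ended means an unmapped gap, so sealing
 * must be refused for the whole request. */
static bool contains_unmapped(const struct vma *vmas, int n,
                              unsigned long start, unsigned long end)
{
    unsigned long prev_end = start;

    for (int i = 0; i < n; i++) {
        if (vmas[i].vm_start > prev_end)
            return true;              /* gap before this VMA */
        prev_end = vmas[i].vm_end;
    }
    return prev_end < end;            /* gap after the last VMA */
}

int main(void)
{
    struct vma vmas[] = { { 0x1000, 0x3000 }, { 0x4000, 0x6000 } };

    printf("%d\n", contains_unmapped(vmas, 2, 0x1000, 0x6000)); /* 1 */
    printf("%d\n", contains_unmapped(vmas, 1, 0x1000, 0x3000)); /* 0 */
    return 0;
}
```
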
mmap.c
    165   if (!brkvma || brkvma->vm_start >= oldbrk)  in SYSCALL_DEFINE1()
    968   if (vma && (vma->vm_start <= addr))  in find_extend_vma_locked()
    993   if (vma->vm_start <= addr)  in find_extend_vma_locked()
    995   start = vma->vm_start;  in find_extend_vma_locked()
   1036   if (vma && vma->vm_start <= addr)  in expand_stack()
   1175   if (next->vm_start != prev->vm_end)  in SYSCALL_DEFINE5()
   1701   if (!write || addr >= new_vma->vm_start) {  in mmap_read_lock_maybe_expand()
   1755   retval = vma_iter_clear_gfp(&vmi, mpnt->vm_start,  in dup_mmap()
   1859   mas_set_range(&vmi.mas, mpnt->vm_start, mpnt->vm_end - 1);  in dup_mmap()

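find_extend_vma_locked() (lines 968-995) first checks whether the VMA found at or above addr already covers it; if addr instead falls below vm_start, only a grows-down stack mapping may be extended to cover it. A heavily simplified sketch: the VM_GROWSDOWN value, the page mask, and the omitted guard-gap/rlimit checks are all illustrative, not the kernel's:

```c
#include <stdio.h>

#define VM_GROWSDOWN 0x1UL   /* illustrative flag value */

struct vma {
    unsigned long vm_start, vm_end;
    unsigned long vm_flags;
};

/* Sketch of the decision in find_extend_vma_locked(): use the VMA
 * as-is when it already covers @addr; otherwise @addr sits in the gap
 * below vm_start, and only a grows-down (stack) mapping may be grown
 * downward to cover the faulting address. */
static struct vma *find_extend(struct vma *vma, unsigned long addr)
{
    if (!vma)
        return NULL;
    if (vma->vm_start <= addr)
        return vma;                    /* already covered */
    if (!(vma->vm_flags & VM_GROWSDOWN))
        return NULL;                   /* a hole, not a stack */
    vma->vm_start = addr & ~0xfffUL;   /* grow down, page aligned */
    return vma;
}

int main(void)
{
    struct vma stack = { 0x7fff0000, 0x80000000, VM_GROWSDOWN };

    if (find_extend(&stack, 0x7ffe1234))
        printf("stack now starts at 0x%lx\n", stack.vm_start);
    return 0;
}
```
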
mprotect.c
    910   if (vma->vm_start >= end)  in do_mprotect_pkey()
    912   start = vma->vm_start;  in do_mprotect_pkey()
    917   if (vma->vm_start > start)  in do_mprotect_pkey()
    928   if (start > vma->vm_start)  in do_mprotect_pkey()
    933   tmp = vma->vm_start;  in do_mprotect_pkey()
    939   if (vma->vm_start != tmp) {  in do_mprotect_pkey()

vma_init.c
     45   dest->vm_start = src->vm_start;  in vm_area_init_from()

userfaultfd.c
    907   VM_WARN_ONCE(start < dst_vma->vm_start || start + len > dst_vma->vm_end,  in uffd_wp_range()
    976   _start = max(dst_vma->vm_start, start);  in mwriteprotect_range()
   1543   if (src_start >= vma->vm_start && src_start < vma->vm_end)  in find_vmas_mm_locked()
   1573   if (src_start >= vma->vm_start && src_start < vma->vm_end) {  in uffd_move_lock()
   1943   if (start == vma->vm_start && end == vma->vm_end)  in userfaultfd_clear_vma()
   1977   if (vma->vm_start < start)  in userfaultfd_register_range()
   1996   if (vma->vm_start > start)  in userfaultfd_register_range()
   1997   start = vma->vm_start;  in userfaultfd_register_range()
   2070   vma->vm_start, vma->vm_end);  in userfaultfd_release_all()

madvise.c
    257   addr = vma->vm_start +  in shmem_swapin_range()
    322   offset = (loff_t)(start - vma->vm_start)  in madvise_willneed()
    819   range.start = max(vma->vm_start, start_addr);  in madvise_free_single_vma()
    823   if (range.end <= vma->vm_start)  in madvise_free_single_vma()
   1035   offset = (loff_t)(start - vma->vm_start)  in madvise_remove()
   1641   if (vma && range->start > vma->vm_start)  in madvise_walk_vmas()
   1650   if (range->start < vma->vm_start) {  in madvise_walk_vmas()
   1658   range->start = vma->vm_start;  in madvise_walk_vmas()

memory.c
    381   unsigned long addr = vma->vm_start;  in free_pgtables()
    419   floor, next ? next->vm_start : ceiling);  in free_pgtables()
    628   off = (addr - vma->vm_start) >> PAGE_SHIFT;  in vm_normal_page()
    681   off = (addr - vma->vm_start) >> PAGE_SHIFT;  in vm_normal_page_pmd()
   1365   unsigned long addr = src_vma->vm_start;  in copy_page_range()
   1906   if (end <= vma->vm_start)  in unmap_single_vma()
   2375   unsigned long uaddr = vma->vm_start;  in __vm_map_pages()
   2978   vm_len = vma->vm_end - vma->vm_start;  in vm_iomap_memory()
   6942   ip -= vma->vm_start;  in print_vma_addr()
   6945   vma->vm_start,  in print_vma_addr()
   [all …]

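The off computation at memory.c:628 (and 681 for the PMD variant) feeds vm_normal_page()'s linearity test for CoW VM_PFNMAP mappings: there vm_pgoff holds the first pfn of the mapping, so the pfn expected at addr is vm_pgoff plus the page distance from vm_start, and a matching pte is a raw device page with no struct page behind it. A sketch of the test, assuming 4 KiB pages and a simplified struct:

```c
#include <stdbool.h>
#include <stdio.h>

#define PAGE_SHIFT 12UL   /* assumes 4 KiB pages */

struct vma {
    unsigned long vm_start;
    unsigned long vm_pgoff;   /* for VM_PFNMAP: first pfn of the mapping */
};

/* The test behind memory.c:628: in a linear VM_PFNMAP mapping the pfn
 * expected at @addr is vm_pgoff plus the page distance from vm_start.
 * A match means the original raw mapping; a mismatch in a CoW mapping
 * means an anonymous page was installed over it. */
static bool pfn_is_linear(const struct vma *vma, unsigned long addr,
                          unsigned long pfn)
{
    unsigned long off = (addr - vma->vm_start) >> PAGE_SHIFT;

    return pfn == vma->vm_pgoff + off;
}

int main(void)
{
    struct vma vma = { .vm_start = 0x10000000, .vm_pgoff = 0x80000 };

    /* second page of the mapping should be pfn 0x80001 */
    printf("%d\n", pfn_is_linear(&vma, 0x10001000, 0x80001)); /* 1 */
    printf("%d\n", pfn_is_linear(&vma, 0x10001000, 0x12345)); /* 0 */
    return 0;
}
```
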
gup.c
   1294   if (!vma || (addr >= vma->vm_start))  in gup_vma_lookup()
   1300   if (vma->vm_start - addr > 65536)  in gup_vma_lookup()
   1312   vma->vm_start, vma->vm_end, addr);  in gup_vma_lookup()
   1844   VM_WARN_ON_ONCE_VMA(start < vma->vm_start, vma);  in populate_vma_page_range()
   1976   if (nstart < vma->vm_start)  in __mm_populate()
   1977   nstart = vma->vm_start;  in __mm_populate()

mempolicy.c
    848   (qp->start < vma->vm_start))  in queue_pages_test_walk()
    855   (!next || vma->vm_end < next->vm_start)))  in queue_pages_test_walk()
    971   if (start > vma->vm_start) {  in mbind_range()
    975   vmstart = vma->vm_start;  in mbind_range()
   1239   nr_failed = queue_pages_range(mm, vma->vm_start, mm->task_size, &nmask,  in migrate_to_node()
   1978   *ilx += (addr - vma->vm_start) >> (PAGE_SHIFT + order);  in get_vma_policy()
   1991   pol = vma->vm_ops->get_policy(vma, vma->vm_start, &ilx);  in vma_policy_mof()

migrate_device.c
    547   if (args->start < args->vma->vm_start ||  in migrate_vma_setup()
    550   if (args->end <= args->vma->vm_start || args->end > args->vma->vm_end)  in migrate_vma_setup()

mapping_dirty_helpers.c
     97   pgoff_t pgoff = ((addr - walk->vma->vm_start) >> PAGE_SHIFT) +  in clean_record_pte()

ksm.c
   1201   vma->vm_start, vma->vm_end, false);  in unmerge_and_remove_all_rmap_items()
   2542   if (ksm_scan.address < vma->vm_start)  in scan_get_next_rmap_item()
   2543   ksm_scan.address = vma->vm_start;  in scan_get_next_rmap_item()
   2725   err = unmerge_ksm_pages(vma, vma->vm_start, vma->vm_end, true);  in __ksm_del_vma()
   3057   if (addr < vma->vm_start || addr >= vma->vm_end)  in rmap_walk_ksm()

debug.c
    164   vma, (void *)vma->vm_start, (void *)vma->vm_end, vma->vm_mm,  in dump_vma()

/mm/damon/tests/

vaddr-kunit.h
     28   mas_set_range(&mas, vmas[i].vm_start, vmas[i].vm_end - 1);  in __link_vmas()
     72   (struct vm_area_struct) {.vm_start = 10, .vm_end = 20},  in damon_test_three_regions_in_vmas()
     73   (struct vm_area_struct) {.vm_start = 20, .vm_end = 25},  in damon_test_three_regions_in_vmas()
     74   (struct vm_area_struct) {.vm_start = 200, .vm_end = 210},  in damon_test_three_regions_in_vmas()
     75   (struct vm_area_struct) {.vm_start = 210, .vm_end = 220},  in damon_test_three_regions_in_vmas()
     76   (struct vm_area_struct) {.vm_start = 300, .vm_end = 305},  in damon_test_three_regions_in_vmas()
     77   (struct vm_area_struct) {.vm_start = 307, .vm_end = 330},  in damon_test_three_regions_in_vmas()

/mm/damon/

vaddr.c
    138   start = vma->vm_start;  in __damon_va_three_regions()
    141   gap = vma->vm_start - prev->vm_end;  in __damon_va_three_regions()
    146   first_gap.end = vma->vm_start;  in __damon_va_three_regions()
    149   second_gap.end = vma->vm_start;  in __damon_va_three_regions()
    698   ilx += (addr - vma->vm_start) >> (PAGE_SHIFT + order);  in damos_va_migrate_dests_add()

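__damon_va_three_regions() (lines 138-149) sizes each gap between adjacent VMAs as vma->vm_start - prev->vm_end and keeps the two widest, which become the separators of DAMON's three monitoring regions. A sketch of that scan over the same VMA shape the kunit test above uses:

```c
#include <stdio.h>

struct vma {
    unsigned long vm_start, vm_end;
};

struct gap {
    unsigned long start, end;
};

/* Sketch of the scan in __damon_va_three_regions() (vaddr.c:138-149):
 * walk the sorted VMA list, measure each gap between a VMA and its
 * predecessor, and remember the two widest. */
static void two_biggest_gaps(const struct vma *vmas, int n,
                             struct gap *first, struct gap *second)
{
    *first = *second = (struct gap){ 0, 0 };

    for (int i = 1; i < n; i++) {
        unsigned long gap = vmas[i].vm_start - vmas[i - 1].vm_end;

        if (gap > first->end - first->start) {
            *second = *first;
            *first = (struct gap){ vmas[i - 1].vm_end, vmas[i].vm_start };
        } else if (gap > second->end - second->start) {
            *second = (struct gap){ vmas[i - 1].vm_end, vmas[i].vm_start };
        }
    }
}

int main(void)
{
    /* the shape used by damon_test_three_regions_in_vmas() above */
    struct vma vmas[] = {
        { 10, 20 }, { 20, 25 }, { 200, 210 },
        { 210, 220 }, { 300, 305 }, { 307, 330 },
    };
    struct gap first, second;

    two_biggest_gaps(vmas, 6, &first, &second);
    printf("gaps: [%lu,%lu) [%lu,%lu)\n",
           first.start, first.end, second.start, second.end);
    return 0;
}
```
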