/mm/
numa_memblks.c
  152  mi->blk[mi->nr_blks].start = start;  in numa_add_memblk_to()
  247  bi->start, bi->end - bi->start)) {  in numa_cleanup_meminfo()
  253  bi->start = max(bi->start, low);  in numa_cleanup_meminfo()
  280  if (bi->end > bj->start && bi->start < bj->end) {  in numa_cleanup_meminfo()
  299  start = min(bi->start, bj->start);  in numa_cleanup_meminfo()
  306  if (start < bk->end && end > bk->start)  in numa_cleanup_meminfo()
  314  bi->start = start;  in numa_cleanup_meminfo()
  489  return (ma->start > mb->start) - (ma->start < mb->start);  in cmp_memblk()
  522  if (memblock_addrs_overlap(start, end - start, bi->start,  in numa_fill_memblks()
  535  blk[0]->start = min(blk[0]->start, start);  in numa_fill_memblks()
  [all …]
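The numa_cleanup_meminfo() hits above all revolve around one idiom: testing whether two half-open [start, end) blocks overlap and, if so, merging them by taking the min of the starts and the max of the ends. Below is a minimal standalone userspace sketch of that idiom; the struct and helper names are illustrative, not the kernel's.

```c
/*
 * Half-open [start, end) overlap and merge, as in
 * "bi->end > bj->start && bi->start < bj->end" above.
 * Illustrative userspace sketch, not kernel code.
 */
#include <stdbool.h>
#include <stdio.h>

struct blk { unsigned long long start, end; };	/* [start, end) */

static bool blk_overlaps(const struct blk *a, const struct blk *b)
{
	return a->end > b->start && a->start < b->end;
}

static struct blk blk_merge(const struct blk *a, const struct blk *b)
{
	struct blk m = {
		.start = a->start < b->start ? a->start : b->start,
		.end   = a->end   > b->end   ? a->end   : b->end,
	};
	return m;
}

int main(void)
{
	struct blk a = { 0x1000, 0x3000 }, b = { 0x2000, 0x5000 };

	if (blk_overlaps(&a, &b)) {
		struct blk m = blk_merge(&a, &b);
		printf("merged: [%#llx, %#llx)\n", m.start, m.end);
	}
	return 0;
}
```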
|
madvise.c
  62    unsigned long start;  member
  288   unsigned long start = madv_behavior->range.start;  in madvise_willneed()  local
  973   unsigned long start = madv_behavior->range.start;  in madvise_populate()  local
  1018  unsigned long start = madv_behavior->range.start;  in madvise_remove()  local
  1445  unsigned long start = madv_behavior->range.start;  in madvise_inject_error()  local
  1451  for (; start < end; start += size) {  in madvise_inject_error()
  1808  if (start + len < start)  in is_valid_madvise()
  1832  if (start + PAGE_ALIGN(len_in) == start) {  in madvise_should_skip()
  1873  range->start = start;  in madvise_do_behavior()
  1878  range->start = get_untagged_addr(madv_behavior->mm, start);  in madvise_do_behavior()
  [all …]
|
mseal.c
  42   unsigned long prev_end = start;  in range_contains_unmapped()
  43   VMA_ITERATOR(vmi, current->mm, start);  in range_contains_unmapped()
  59   unsigned long curr_start = start;  in mseal_apply()
  60   VMA_ITERATOR(vmi, mm, start);  in mseal_apply()
  65   if (start > vma->vm_start)  in mseal_apply()
  150  start = untagged_addr(start);  in do_mseal()
  151  if (!PAGE_ALIGNED(start))  in do_mseal()
  159  end = start + len;  in do_mseal()
  160  if (end < start)  in do_mseal()
  163  if (end == start)  in do_mseal()
  [all …]
|
msync.c
  40  start = untagged_addr(start);  in SYSCALL_DEFINE3()
  44  if (offset_in_page(start))  in SYSCALL_DEFINE3()
  50  end = start + len;  in SYSCALL_DEFINE3()
  51  if (end < start)  in SYSCALL_DEFINE3()
  54  if (end == start)  in SYSCALL_DEFINE3()
  63  vma = find_vma(mm, start);  in SYSCALL_DEFINE3()
  76  start = vma->vm_start;  in SYSCALL_DEFINE3()
  77  if (start >= end)  in SYSCALL_DEFINE3()
  91  start = vma->vm_end;  in SYSCALL_DEFINE3()
  98  if (error || start >= end)  in SYSCALL_DEFINE3()
  [all …]
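The do_mseal() and msync() hits above (and the mlock()/mprotect() ones further down) share the same entry checks on a user-supplied (start, len) pair: untag the address, reject an unaligned start, compute end = start + len, reject wrap-around, and treat an empty range as success. A simplified standalone sketch of that pattern follows; PAGE_SIZE, the helper name, and the return conventions are stand-ins, not the kernel's API.

```c
/*
 * Sketch of the (start, len) validation pattern seen in the
 * msync()/mseal()/mlock() snippets above. Userspace illustration only.
 */
#include <errno.h>
#include <stddef.h>
#include <stdio.h>

#define PAGE_SIZE 4096UL
#define PAGE_MASK (~(PAGE_SIZE - 1))

static int validate_range(unsigned long start, size_t len, unsigned long *endp)
{
	unsigned long end;

	if (start & ~PAGE_MASK)			/* offset_in_page(start) */
		return -EINVAL;

	len = (len + PAGE_SIZE - 1) & PAGE_MASK;	/* round len up to pages */
	end = start + len;
	if (end < start)			/* wrap-around */
		return -ENOMEM;
	if (end == start)			/* empty range: nothing to do */
		return 0;

	*endp = end;
	return 1;				/* caller walks [start, end) */
}

int main(void)
{
	unsigned long end = 0;
	int ret = validate_range(0x10000, 3 * PAGE_SIZE, &end);

	printf("ret=%d end=%#lx\n", ret, end);
	return 0;
}
```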
|
mlock.c
  523  end = start + len;  in apply_vma_lock_flags()
  524  if (end < start)  in apply_vma_lock_flags()
  526  if (end == start)  in apply_vma_lock_flags()
  533  if (start > vma->vm_start)  in apply_vma_lock_flags()
  536  nstart = start;  in apply_vma_lock_flags()
  583  end = start + len;  in count_mm_mlocked_page_nr()
  618  start = untagged_addr(start);  in do_mlock()
  624  start &= PAGE_MASK;  in do_mlock()
  642  start, len);  in do_mlock()
  681  start = untagged_addr(start);  in SYSCALL_DEFINE2()
  [all …]
|
memory_hotplug.c
  305   start, start + size);  in register_memory_resource()
  1424  for (cur_start = start; cur_start < start + size;  in remove_memory_blocks_and_altmaps()
  1453  for (cur_start = start; cur_start < start + size;  in create_altmaps_and_memory_blocks()
  1492  remove_memory_blocks_and_altmaps(start, cur_start - start);  in create_altmaps_and_memory_blocks()
  1507  u64 start, size;  in add_memory_resource()  local
  1511  start = res->start;  in add_memory_resource()
  1582  firmware_map_add_hotplug(start, start + size, "System RAM");  in add_memory_resource()
  1701  .start = 0UL,  in arch_get_mappable_range()
  1732  if (start < end && start >= mhp_range.start && (end - 1) <= mhp_range.end)  in mhp_range_allowed()
  1736  start, end, mhp_range.start, mhp_range.end);  in mhp_range_allowed()
  [all …]
|
numa_emulation.c
  62   eb->start = pb->start;  in emu_setup_memblk()
  69   pb->start += size;  in emu_setup_memblk()
  76   nid, eb->start, eb->end - 1, (eb->end - eb->start) >> 20);  in emu_setup_memblk()
  132  u64 start, limit, end;  in split_nodes_interleave()  local
  140  start = pi->blk[phys_blk].start;  in split_nodes_interleave()
  142  end = start + size;  in split_nodes_interleave()
  151  while (end - start - mem_hole_size(start, end) < size) {  in split_nodes_interleave()
  192  u64 end = start + size;  in find_end_of_node()
  194  while (end - start - mem_hole_size(start, end) < size) {  in find_end_of_node()
  288  start = pi->blk[phys_blk].start;  in split_nodes_size_interleave_uniform()
  [all …]
|
gup.c
  1384  start = untagged_addr_remote(mm, start);  in __get_user_pages()
  1670  unsigned long start,  in __get_user_pages_locked()  argument
  1791  start += PAGE_SIZE;  in __get_user_pages_locked()
  1953  end = start + len;  in __mm_populate()
  2046  start = (start + PAGE_SIZE) & PAGE_MASK;  in __get_user_pages_locked()
  2082  if (size > cur - start)  in fault_in_writeable()
  2153  if (size > cur - start)  in fault_in_safe_writeable()
  2185  if (size > cur - start)  in fault_in_readable()
  2478  unsigned long start,  in __gup_longterm_locked()  argument
  3218  start = untagged_addr(start) & PAGE_MASK;  in gup_fast_fallback()
  [all …]
|
memremap.c
  51   unsigned long pfn = PHYS_PFN(range->start);  in pfn_first()
  65   if (pfn >= PHYS_PFN(range->start) &&  in pgmap_pfn_valid()
  77   return (range->start + range_len(range)) >> PAGE_SHIFT;  in pfn_end()
  99   __remove_pages(PHYS_PFN(range->start),  in pageunmap_range()
  102  arch_remove_memory(range->start, range_len(range),  in pageunmap_range()
  108  pfnmap_untrack(PHYS_PFN(range->start), range_len(range));  in pageunmap_range()
  176  range->start, range->end);  in pagemap_range()
  212  error = add_pages(nid, PHYS_PFN(range->start),  in pagemap_range()
  229  move_pfn_range_to_zone(zone, PHYS_PFN(range->start),  in pagemap_range()
  243  PHYS_PFN(range->start),  in pagemap_range()
  [all …]
|
memblock.c
  318   start = max_t(phys_addr_t, start, PAGE_SIZE);  in memblock_find_in_range_node()
  2096  r->base = start;  in memblock_trim_memory()
  2223  start = min(start, ALIGN(prev_end, PAGES_PER_SECTION));  in free_unused_memmap()
  2230  start = pageblock_start_pfn(start);  in free_unused_memmap()
  2267  if (start)  in __free_pages_memory()
  2275  memblock_free_pages(pfn_to_page(start), start, order);  in __free_pages_memory()
  2426  map->start = start;  in reserved_mem_add()
  2465  *start = map->start;  in reserve_mem_find_by_name()
  2484  void *start, *end;  in reserve_mem_release_by_name()  local
  2491  start = phys_to_virt(map->start);  in reserve_mem_release_by_name()
  [all …]
|
hugetlb_vmemmap.c
  56   unsigned long addr = start;  in vmemmap_split_pmd()
  88   flush_tlb_kernel_range(start, start + PMD_SIZE);  in vmemmap_split_pmd()
  166  VM_BUG_ON(!PAGE_ALIGNED(start | end));  in vmemmap_remap_range()
  176  flush_tlb_kernel_range(start, end);  in vmemmap_remap_range()
  298  BUG_ON(start - reuse != PAGE_SIZE);  in vmemmap_remap_split()
  362  BUG_ON(start - reuse != PAGE_SIZE);  in vmemmap_remap_free()
  435  BUG_ON(start - reuse != PAGE_SIZE);  in vmemmap_remap_alloc()
  792  unsigned long start, end;  in hugetlb_vmemmap_init_early()  local
  818  start = (unsigned long)map;  in hugetlb_vmemmap_init_early()
  873  start = (unsigned long)map;  in hugetlb_vmemmap_init_late()
  [all …]
|
mapping_dirty_helpers.c
  69   pgoff_t start;  member
  110  cwalk->start = min(cwalk->start, pgoff);  in clean_record_pte()
  169  static int wp_clean_pre_vma(unsigned long start, unsigned long end,  in wp_clean_pre_vma()  argument
  175  wpwalk->tlbflush_end = start;  in wp_clean_pre_vma()
  178  walk->mm, start, end);  in wp_clean_pre_vma()
  180  flush_cache_range(walk->vma, start, end);  in wp_clean_pre_vma()
  203  flush_tlb_range(walk->vma, wpwalk->range.start,  in wp_clean_post_vma()
  317  pgoff_t *start,  in clean_record_shared_mapping_range()  argument
  320  bool none_set = (*start >= *end);  in clean_record_shared_mapping_range()
  325  .start = none_set ? nr : *start,  in clean_record_shared_mapping_range()
  [all …]
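The clean_record_pte() and clean_record_shared_mapping_range() hits above show the bookkeeping side of the walk: start in a "none set" state (start >= end) and, as each page is touched, shrink start with min() and grow end, so the caller gets back a single [start, end) span to act on. A tiny standalone sketch of that accumulation follows; names are illustrative.

```c
/*
 * Accumulate the smallest/largest dirtied offset into one [start, end)
 * span, as the clean_record hits above do. Userspace illustration only.
 */
#include <stdio.h>

struct clean_walk {
	unsigned long start;	/* first dirtied offset (inclusive) */
	unsigned long end;	/* one past the last dirtied offset */
};

static void clean_record(struct clean_walk *cw, unsigned long pgoff)
{
	cw->start = pgoff < cw->start ? pgoff : cw->start;
	cw->end = pgoff + 1 > cw->end ? pgoff + 1 : cw->end;
}

int main(void)
{
	/* "none set" state: start >= end, as in the kernel helper */
	struct clean_walk cw = { .start = ~0UL, .end = 0 };
	unsigned long dirty[] = { 12, 7, 30 };

	for (unsigned int i = 0; i < 3; i++)
		clean_record(&cw, dirty[i]);
	printf("record span: [%lu, %lu)\n", cw.start, cw.end);
	return 0;
}
```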
|
hmm.c
  166  i = (addr - range->start) >> PAGE_SHIFT;  in hmm_vma_walk_hole()
  330  unsigned long start,  in hmm_vma_walk_pmd()  argument
  337  &range->hmm_pfns[(start - range->start) >> PAGE_SHIFT];  in hmm_vma_walk_pmd()
  339  unsigned long addr = start;  in hmm_vma_walk_pmd()
  424  unsigned long addr = start;  in hmm_vma_walk_pud()
  446  i = (addr - range->start) >> PAGE_SHIFT;  in hmm_vma_walk_pud()
  482  unsigned long addr = start, i, pfn;  in hmm_vma_walk_hugetlb_entry()
  495  i = (start - range->start) >> PAGE_SHIFT;  in hmm_vma_walk_hugetlb_entry()
  556  ((start - range->start) >> PAGE_SHIFT),  in hmm_vma_walk_test()
  557  (end - start) >> PAGE_SHIFT, 0))  in hmm_vma_walk_test()
  [all …]
|
pagewalk.c
  369  return ops->test_walk(start, end, walk);  in walk_page_test()
  401  err = ops->pre_vma(start, end, walk);  in __walk_page_range()
  410  err = walk_pgd_range(start, end, walk);  in __walk_page_range()
  468  if (start >= end)  in walk_page_range_mm()
  476  vma = find_vma(walk.mm, start);  in walk_page_range_mm()
  510  } while (start = next, start < end);  in walk_page_range_mm()
  618  if (start >= end)  in walk_kernel_page_table_range()
  633  return walk_pgd_range(start, end, &walk);  in walk_kernel_page_table_range()
  667  if (start >= end || !walk.mm)  in walk_page_range_debug()
  682  return walk_pgd_range(start, end, &walk);  in walk_page_range_debug()
  [all …]
|
sparse-vmemmap.c
  144  unsigned long start, unsigned long end)  in vmemmap_verify()  argument
  151  start, end - 1);  in vmemmap_verify()
  286  unsigned long addr = start;  in vmemmap_populate_range()
  426  for (addr = start; addr < end; addr = next) {  in vmemmap_populate_hugepages()
  484  PHYS_PFN(pgmap->ranges[pgmap->nr_range].start);  in reuse_compound_section()
  507  unsigned long start,  in vmemmap_populate_compound_pages()
  516  pte = compound_section_tail_page(start);  in vmemmap_populate_compound_pages()
  524  return vmemmap_populate_range(start, end, node, NULL,  in vmemmap_populate_compound_pages()
  530  for (addr = start; addr < end; addr += size) {  in vmemmap_populate_compound_pages()
  565  unsigned long start = (unsigned long) pfn_to_page(pfn);  in __populate_section_memmap()  local
  [all …]
|
memtest.c
  42   u64 *p, *start, *end;  in memtest()  local
  48   start = __va(start_phys_aligned);  in memtest()
  49   end = start + (size - (start_phys_aligned - start_phys)) / incr;  in memtest()
  53   for (p = start; p < end; p++)  in memtest()
  56   for (p = start; p < end; p++, start_phys_aligned += incr) {  in memtest()
  73   static void __init do_one_pass(u64 pattern, phys_addr_t start, phys_addr_t end)  in do_one_pass()  argument
  80   this_start = clamp(this_start, start, end);  in do_one_pass()
  81   this_end = clamp(this_end, start, end);  in do_one_pass()
  107  void __init early_memtest(phys_addr_t start, phys_addr_t end)  in early_memtest()
  118  do_one_pass(patterns[idx], start, end);  in early_memtest()
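The memtest.c hits above outline the whole test: for each pattern, fill a word range, then re-read it and report any word that differs. The kernel operates on physical ranges mapped through __va(); the sketch below substitutes a heap buffer and keeps only the write/verify loop structure, so it is an illustration of the idea rather than the kernel routine.

```c
/*
 * Pattern write/verify pass over a buffer, mirroring the two loops in
 * memtest() above. Userspace stand-in: a malloc'd buffer replaces the
 * physical range.
 */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

static void do_one_pass(uint64_t pattern, uint64_t *start, uint64_t *end)
{
	uint64_t *p;

	for (p = start; p < end; p++)		/* fill */
		*p = pattern;

	for (p = start; p < end; p++) {		/* verify */
		if (*p != pattern)
			printf("bad word at %p: %#llx != %#llx\n",
			       (void *)p, (unsigned long long)*p,
			       (unsigned long long)pattern);
	}
}

int main(void)
{
	static const uint64_t patterns[] = {
		0x0000000000000000ULL, 0xffffffffffffffffULL,
		0x5555555555555555ULL, 0xaaaaaaaaaaaaaaaaULL,
	};
	size_t words = 1 << 16;
	uint64_t *buf = malloc(words * sizeof(*buf));

	if (!buf)
		return 1;
	for (size_t i = 0; i < sizeof(patterns) / sizeof(patterns[0]); i++)
		do_one_pass(patterns[i], buf, buf + words);
	free(buf);
	return 0;
}
```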
|
mprotect.c
  590  range.start = 0;  in change_pud_range()
  606  if (!range.start) {  in change_pud_range()
  634  if (range.start)  in change_pud_range()
  873  start = untagged_addr(start);  in do_mprotect_pkey()
  879  if (start & ~PAGE_MASK)  in do_mprotect_pkey()
  884  end = start + len;  in do_mprotect_pkey()
  885  if (end <= start)  in do_mprotect_pkey()
  912  start = vma->vm_start;  in do_mprotect_pkey()
  917  if (vma->vm_start > start)  in do_mprotect_pkey()
  928  if (start > vma->vm_start)  in do_mprotect_pkey()
  [all …]
|
vma.c
  57    .start = (map_)->addr, \
  814   unsigned long start = vmg->start;  in vma_merge_existing_range()  local
  966   vmg->end = start;  in vma_merge_existing_range()
  1192  if (vma->vm_start < start)  in vma_shrink()
  1486  vms->start = start;  in init_vma_munmap()
  1489  vms->start = vms->end = 0;  in init_vma_munmap()
  1569  if ((offset_in_page(start)) || start > TASK_SIZE || len > TASK_SIZE-start)  in do_vmi_munmap()
  1573  if (end == start)  in do_vmi_munmap()
  1603  unsigned long start = vmg->start;  in vma_modify()  local
  1622  if (vma->vm_start < start) {  in vma_modify()
  [all …]
|
truncate.c
  198  if (pos < start)  in truncate_inode_partial_folio()
  199  offset = start - pos;  in truncate_inode_partial_folio()
  344  pgoff_t start; /* inclusive */  in truncate_inode_pages_range()  local
  374  index = start;  in truncate_inode_pages_range()
  412  index = start;  in truncate_inode_pages_range()
  418  if (index == start)  in truncate_inode_pages_range()
  421  index = start;  in truncate_inode_pages_range()
  514  pgoff_t index = start;  in mapping_try_invalidate()
  575  pgoff_t start, pgoff_t end)  in invalidate_mapping_pages()  argument
  662  index = start;  in invalidate_inode_pages2_range()
  [all …]
|
/mm/damon/tests/
vaddr-kunit.h
  86   KUNIT_EXPECT_EQ(test, 10ul, regions[0].start);  in damon_test_three_regions_in_vmas()
  88   KUNIT_EXPECT_EQ(test, 200ul, regions[1].start);  in damon_test_three_regions_in_vmas()
  90   KUNIT_EXPECT_EQ(test, 300ul, regions[2].start);  in damon_test_three_regions_in_vmas()
  168  (struct damon_addr_range){.start = 5, .end = 27},  in damon_test_apply_three_regions1()
  169  (struct damon_addr_range){.start = 45, .end = 55},  in damon_test_apply_three_regions1()
  190  (struct damon_addr_range){.start = 5, .end = 27},  in damon_test_apply_three_regions2()
  214  (struct damon_addr_range){.start = 5, .end = 27},  in damon_test_apply_three_regions3()
  239  (struct damon_addr_range){.start = 5, .end = 7},  in damon_test_apply_three_regions4()
  261  KUNIT_EXPECT_EQ(test, r->ar.start, start);  in damon_test_split_evenly_fail()
  284  r->ar.start, start + i * expected_width);  in damon_test_split_evenly_succ()
  [all …]
|
/mm/kmsan/
hooks.c
  159  nr = (end - start) / PAGE_SIZE;  in kmsan_ioremap_page_range()
  169  vmalloc_shadow(start + off),  in kmsan_ioremap_page_range()
  178  vmalloc_origin(start + off),  in kmsan_ioremap_page_range()
  183  vmalloc_shadow(start + off),  in kmsan_ioremap_page_range()
  184  vmalloc_shadow(start + off + PAGE_SIZE));  in kmsan_ioremap_page_range()
  205  vmalloc_shadow(start),  in kmsan_ioremap_page_range()
  206  vmalloc_shadow(start + clean * PAGE_SIZE));  in kmsan_ioremap_page_range()
  208  vmalloc_origin(start),  in kmsan_ioremap_page_range()
  209  vmalloc_origin(start + clean * PAGE_SIZE));  in kmsan_ioremap_page_range()
  226  nr = (end - start) / PAGE_SIZE;  in kmsan_iounmap_page_range()
  [all …]
|
/mm/kasan/
hw_tags.c
  298  for (addr = start; addr < start + size; addr += PAGE_SIZE) {  in init_vmalloc_pages()
  313  init_vmalloc_pages(start, size);  in __kasan_unpoison_vmalloc()
  314  return (void *)start;  in __kasan_unpoison_vmalloc()
  338  return (void *)start;  in __kasan_unpoison_vmalloc()
  347  return (void *)start;  in __kasan_unpoison_vmalloc()
  351  start = set_tag(start, tag);  in __kasan_unpoison_vmalloc()
  354  kasan_unpoison(start, size, flags & KASAN_VMALLOC_INIT);  in __kasan_unpoison_vmalloc()
  361  redzone_start = round_up((unsigned long)start + size,  in __kasan_unpoison_vmalloc()
  371  unpoison_vmalloc_pages(start, tag);  in __kasan_unpoison_vmalloc()
  373  return (void *)start;  in __kasan_unpoison_vmalloc()
  [all …]
|
shadow.c
  296  unsigned long start;  member
  311  index = PFN_DOWN(addr - data->start);  in kasan_populate_vmalloc_pte()
  372  data.start = start;  in __kasan_populate_vmalloc()
  379  start += nr_pages * PAGE_SIZE;  in __kasan_populate_vmalloc()
  571  if (start != region_start &&  in kasan_release_vmalloc()
  615  return (void *)start;  in __kasan_unpoison_vmalloc()
  618  return (void *)start;  in __kasan_unpoison_vmalloc()
  626  return (void *)start;  in __kasan_unpoison_vmalloc()
  628  start = set_tag(start, kasan_random_tag());  in __kasan_unpoison_vmalloc()
  629  kasan_unpoison(start, size, false);  in __kasan_unpoison_vmalloc()
  [all …]
|
generic.c
  87   if (unlikely(*start))  in bytes_is_nonzero()
  88   return (unsigned long)start;  in bytes_is_nonzero()
  89   start++;  in bytes_is_nonzero()
  103  if (end - start <= 16)  in memory_is_nonzero()
  104  return bytes_is_nonzero(start, end - start);  in memory_is_nonzero()
  111  start += prefix;  in memory_is_nonzero()
  114  words = (end - start) / 8;  in memory_is_nonzero()
  116  if (unlikely(*(u64 *)start))  in memory_is_nonzero()
  117  return bytes_is_nonzero(start, 8);  in memory_is_nonzero()
  118  start += 8;  in memory_is_nonzero()
  [all …]
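The generic.c hits above sketch the shadow-scanning strategy: short ranges are checked byte by byte, longer ones scan an unaligned prefix bytewise, then whole 8-byte words, and drop back to bytes to pinpoint the first nonzero byte. A standalone userspace version of that idea follows, assuming the same word-at-a-time shortcut; it illustrates the structure, not KASAN's exact code.

```c
/*
 * Find the first nonzero byte in [begin, end): bytes for small ranges,
 * otherwise prefix bytes, 8-byte words, then the tail, as in the
 * memory_is_nonzero() hits above. Returns its address, or 0 if all zero.
 */
#include <stdint.h>
#include <stdio.h>

static uintptr_t bytes_is_nonzero(const uint8_t *start, size_t size)
{
	while (size--) {
		if (*start)
			return (uintptr_t)start;
		start++;
	}
	return 0;
}

static uintptr_t memory_is_nonzero(const void *begin, const void *end)
{
	const uint8_t *start = begin;
	const uint8_t *stop = end;
	size_t prefix = (uintptr_t)start % 8;
	uintptr_t ret;

	if ((size_t)(stop - start) <= 16)
		return bytes_is_nonzero(start, stop - start);

	if (prefix) {				/* unaligned head, bytewise */
		prefix = 8 - prefix;
		ret = bytes_is_nonzero(start, prefix);
		if (ret)
			return ret;
		start += prefix;
	}

	while ((size_t)(stop - start) >= 8) {	/* aligned words */
		if (*(const uint64_t *)start)
			return bytes_is_nonzero(start, 8);
		start += 8;
	}

	return bytes_is_nonzero(start, stop - start);	/* tail */
}

int main(void)
{
	uint8_t buf[64] = { 0 };
	uintptr_t hit;

	buf[37] = 1;
	hit = memory_is_nonzero(buf, buf + sizeof(buf));
	printf("first nonzero at offset %zu\n", (size_t)(hit - (uintptr_t)buf));
	return 0;
}
```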
|
/mm/damon/
vaddr.c
  70   unsigned long start;  in damon_va_evenly_split_region()  local
  86   r->ar.end = r->ar.start + sz_piece;  in damon_va_evenly_split_region()
  88   for (start = r->ar.end, i = 1; i < nr_pieces; start += sz_piece, i++) {  in damon_va_evenly_split_region()
  89   n = damon_new_region(start, start + sz_piece);  in damon_va_evenly_split_region()
  104  return r->end - r->start;  in sz_range()
  126  unsigned long start;  in __damon_va_three_regions()  local
  138  start = vma->vm_start;  in __damon_va_three_regions()
  145  first_gap.start = prev->vm_end;  in __damon_va_three_regions()
  160  if (first_gap.start > second_gap.start)  in __damon_va_three_regions()
  164  regions[0].start = ALIGN(start, DAMON_MIN_REGION);  in __damon_va_three_regions()
  [all …]
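The damon_va_evenly_split_region() hits above carve one region into nr_pieces regions of equal, minimum-granularity-aligned width, with the final piece absorbing any remainder. Below is a standalone sketch of just that arithmetic; the range struct, the alignment constant, and the helper name are stand-ins for DAMON's own types.

```c
/*
 * Split [start, end) into nr_pieces aligned pieces; the last piece keeps
 * the slack. Mirrors the loop starting at line 88 above in spirit only.
 */
#include <stdio.h>

#define MIN_REGION 4096UL	/* stand-in for DAMON_MIN_REGION */

struct range { unsigned long start, end; };

static int evenly_split(const struct range *r, unsigned int nr_pieces,
			struct range *out)
{
	unsigned long sz_piece, start;
	unsigned int i;

	if (!nr_pieces || r->end <= r->start)
		return -1;

	/* piece size, rounded down to the minimum region granularity */
	sz_piece = ((r->end - r->start) / nr_pieces) & ~(MIN_REGION - 1);
	if (!sz_piece)
		return -1;

	for (i = 0, start = r->start; i < nr_pieces; i++, start += sz_piece) {
		out[i].start = start;
		out[i].end = start + sz_piece;
	}
	out[nr_pieces - 1].end = r->end;	/* last piece takes the slack */
	return 0;
}

int main(void)
{
	struct range r = { 0x100000, 0x175000 }, out[3];

	if (!evenly_split(&r, 3, out)) {
		for (int i = 0; i < 3; i++)
			printf("[%#lx, %#lx)\n", out[i].start, out[i].end);
	}
	return 0;
}
```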
|