
Searched refs:end (Results 1 – 25 of 86) sorted by relevance


/mm/
numa_memblks.c
137 if (start == end) in numa_add_memblk_to()
143 nid, start, end - 1); in numa_add_memblk_to()
153 mi->blk[mi->nr_blks].end = end; in numa_add_memblk_to()
259 bi->end = high; in numa_cleanup_meminfo()
273 u64 start, end; in numa_cleanup_meminfo() local
280 if (bi->end > bj->start && bi->start < bj->end) { in numa_cleanup_meminfo()
300 end = max(bi->end, bj->end); in numa_cleanup_meminfo()
306 if (start < bk->end && end > bk->start) in numa_cleanup_meminfo()
313 bj->end - 1, start, end - 1); in numa_cleanup_meminfo()
315 bi->end = end; in numa_cleanup_meminfo()
[all …]
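
The numa_cleanup_meminfo() hits above all turn on one idiom: two half-open [start, end) blocks overlap when each one starts before the other ends, and merging them takes the minimum start and maximum end. A minimal standalone sketch of that idiom (hypothetical helper names, not kernel code):

    #include <stdbool.h>

    struct blk { unsigned long long start, end; };	/* half-open [start, end) */

    /* hypothetical helpers mirroring the overlap/merge checks quoted above */
    static bool blk_overlaps(const struct blk *a, const struct blk *b)
    {
        return a->start < b->end && b->start < a->end;
    }

    static struct blk blk_merge(const struct blk *a, const struct blk *b)
    {
        struct blk m = {
            .start = a->start < b->start ? a->start : b->start,
            .end   = a->end   > b->end   ? a->end   : b->end,
        };
        return m;
    }
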
numa_emulation.c
76 nid, eb->start, eb->end - 1, (eb->end - eb->start) >> 20); in emu_setup_memblk()
151 while (end - start - mem_hole_size(start, end) < size) { in split_nodes_interleave()
154 end = limit; in split_nodes_interleave()
164 if (end < dma32_end && dma32_end - end - in split_nodes_interleave()
173 if (limit - end - mem_hole_size(end, limit) < size) in split_nodes_interleave()
174 end = limit; in split_nodes_interleave()
194 while (end - start - mem_hole_size(start, end) < size) { in find_end_of_node()
201 return end; in find_end_of_node()
300 if (end < dma32_end && dma32_end - end - in split_nodes_size_interleave_uniform()
309 if ((limit - end - mem_hole_size(end, limit) < size) in split_nodes_size_interleave_uniform()
[all …]
pagewalk.c
54 if (addr >= end - PAGE_SIZE) in walk_pte_range_inner()
111 next = pmd_addr_end(addr, end); in walk_pmd_range()
181 next = pud_addr_end(addr, end); in walk_pud_range()
245 next = p4d_addr_end(addr, end); in walk_p4d_range()
313 unsigned long end) in hugetlb_entry_end() argument
316 return boundary < end ? boundary : end; in hugetlb_entry_end()
468 if (start >= end) in walk_page_range_mm()
480 next = end; in walk_page_range_mm()
618 if (start >= end) in walk_kernel_page_table_range()
667 if (start >= end || !walk.mm) in walk_page_range_debug()
[all …]
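
The pagewalk.c hits (and the later mprotect.c and gup.c ones) step through an address range with pmd_addr_end()/pud_addr_end()-style helpers, which return the next table-entry boundary clamped to the overall end. A rough user-space sketch of that stepping pattern, using an illustrative 2 MiB step instead of the real PMD_SIZE:

    #define STEP_SIZE (1UL << 21)	/* illustrative stand-in for PMD_SIZE */
    #define STEP_MASK (~(STEP_SIZE - 1))

    /* next boundary after addr, clamped to end (the pXd_addr_end() idiom) */
    static unsigned long step_addr_end(unsigned long addr, unsigned long end)
    {
        unsigned long boundary = (addr + STEP_SIZE) & STEP_MASK;

        return boundary - 1 < end - 1 ? boundary : end;
    }

    static void walk_range(unsigned long addr, unsigned long end)
    {
        unsigned long next;

        for (; addr < end; addr = next) {
            next = step_addr_end(addr, end);
            /* handle [addr, next), which never crosses a STEP_SIZE boundary */
        }
    }
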
hmm.c
55 for (; addr < end; addr += PAGE_SIZE, i++) { in hmm_pfns_fill()
89 for (; addr < end; addr += PAGE_SIZE) in hmm_vma_fault()
167 npages = (end - addr) >> PAGE_SHIFT; in hmm_vma_walk_hole()
178 return hmm_pfns_fill(addr, end, range, 0); in hmm_vma_walk_hole()
207 npages = (end - addr) >> PAGE_SHIFT; in hmm_vma_handle_pmd()
331 unsigned long end, in hmm_vma_walk_pmd() argument
346 return hmm_vma_walk_hole(start, end, -1, walk); in hmm_vma_walk_pmd()
354 return hmm_pfns_fill(start, end, range, 0); in hmm_vma_walk_pmd()
437 return hmm_vma_walk_hole(start, end, -1, walk); in hmm_vma_walk_pud()
447 npages = (end - addr) >> PAGE_SHIFT; in hmm_vma_walk_pud()
[all …]
madvise.c
63 unsigned long end; member
289 unsigned long end = madv_behavior->range.end; in madvise_willneed() local
902 range->end = ALIGN_DOWN(range->end, huge_page_size(hstate_vma(vma))); in madvise_dontneed_free_valid_vma()
974 unsigned long end = madv_behavior->range.end; in madvise_populate() local
1019 unsigned long end = madv_behavior->range.end; in madvise_remove() local
1446 unsigned long end = madv_behavior->range.end; in madvise_inject_error() local
2136 unsigned long end; in madvise_set_anon_name() local
2153 end = start + len; in madvise_set_anon_name()
2154 if (end < start) in madvise_set_anon_name()
2157 if (end == start) in madvise_set_anon_name()
[all …]
memblock.c
319 end = max(start, end); in memblock_find_in_range_node()
656 if (rbase >= end) in memblock_add_range()
680 base = min(rend, end); in memblock_add_range()
684 if (base < end) { in memblock_add_range()
832 if (rbase >= end) in memblock_isolate_range()
853 rgn->base = end; in memblock_isolate_range()
2095 if (start < end) { in memblock_trim_memory()
2259 while (start < end) { in __free_pages_memory()
2282 phys_addr_t end) in __free_memory_core() argument
2484 void *start, *end; in reserve_mem_release_by_name() local
[all …]
mapping_dirty_helpers.c
70 pgoff_t end; member
90 unsigned long end, struct mm_walk *walk) in clean_record_pte() argument
111 cwalk->end = max(cwalk->end, pgoff + 1); in clean_record_pte()
174 wpwalk->tlbflush_start = end; in wp_clean_pre_vma()
178 walk->mm, start, end); in wp_clean_pre_vma()
180 flush_cache_range(walk->vma, start, end); in wp_clean_pre_vma()
204 wpwalk->range.end); in wp_clean_post_vma()
318 pgoff_t *end) in clean_record_shared_mapping_range() argument
320 bool none_set = (*start >= *end); in clean_record_shared_mapping_range()
326 .end = none_set ? 0 : *end, in clean_record_shared_mapping_range()
[all …]
sparse-vmemmap.c
144 unsigned long start, unsigned long end) in vmemmap_verify() argument
151 start, end - 1); in vmemmap_verify()
281 unsigned long end, int node, in vmemmap_populate_range() argument
289 for (; addr < end; addr += PAGE_SIZE) { in vmemmap_populate_range()
353 flush_tlb_kernel_range(addr, end); in vmemmap_undo_hvo()
355 return vmemmap_populate(addr, end, node, NULL); in vmemmap_undo_hvo()
426 for (addr = start; addr < end; addr = next) { in vmemmap_populate_hugepages()
427 next = pmd_addr_end(addr, end); in vmemmap_populate_hugepages()
508 unsigned long end, int node, in vmemmap_populate_compound_pages() argument
530 for (addr = start; addr < end; addr += size) { in vmemmap_populate_compound_pages()
[all …]
mseal.c
39 unsigned long start, unsigned long end) in range_contains_unmapped() argument
45 for_each_vma_range(vmi, vma, end) { in range_contains_unmapped()
52 return prev_end < end; in range_contains_unmapped()
56 unsigned long start, unsigned long end) in mseal_apply() argument
68 for_each_vma_range(vmi, vma, end) { in mseal_apply()
69 unsigned long curr_end = MIN(vma->vm_end, end); in mseal_apply()
143 unsigned long end; in do_mseal() local
159 end = start + len; in do_mseal()
160 if (end < start) in do_mseal()
163 if (end == start) in do_mseal()
[all …]
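
mseal.c, like the madvise.c, mlock.c and msync.c hits elsewhere in this listing, validates its user-supplied range with the same three checks: compute end = start + len, reject wrap-around, and treat an empty range as a no-op. A hedged sketch of that validation (helper name and error handling are illustrative):

    #include <errno.h>
    #include <stdbool.h>

    /* illustrative check mirroring the start/len validation quoted above */
    static int validate_range(unsigned long start, unsigned long len,
                              unsigned long *endp, bool *empty)
    {
        unsigned long end = start + len;

        if (end < start)		/* start + len wrapped around: reject */
            return -EINVAL;

        *empty = (end == start);	/* an empty range is a silent no-op */
        *endp = end;
        return 0;
    }
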
mprotect.c
525 next = pmd_addr_end(addr, end); in change_pmd_range()
609 vma->vm_mm, addr, end); in change_pud_range()
672 BUG_ON(addr >= end); in change_protection_range()
884 end = start + len; in do_mprotect_pkey()
885 if (end <= start) in do_mprotect_pkey()
904 vma = vma_find(&vmi, end); in do_mprotect_pkey()
910 if (vma->vm_start >= end) in do_mprotect_pkey()
920 end = vma->vm_end; in do_mprotect_pkey()
981 if (tmp > end) in do_mprotect_pkey()
982 tmp = end; in do_mprotect_pkey()
[all …]
mincore.c
27 unsigned long end, struct mm_walk *walk) in mincore_hugetlb() argument
40 for (; addr != end; vec++, addr += PAGE_SIZE) in mincore_hugetlb()
79 unsigned long nr = (end - addr) >> PAGE_SHIFT; in __mincore_unmapped_range()
99 walk->private += __mincore_unmapped_range(addr, end, in mincore_unmapped_range()
111 int nr = (end - addr) >> PAGE_SHIFT; in mincore_pte_range()
126 for (; addr != end; ptep += step, addr += step * PAGE_SIZE) { in mincore_pte_range()
138 unsigned int max_nr = (end - addr) >> PAGE_SHIFT; in mincore_pte_range()
205 unsigned long end; in do_mincore() local
211 end = min(vma->vm_end, addr + (pages << PAGE_SHIFT)); in do_mincore()
213 unsigned long pages = DIV_ROUND_UP(end - addr, PAGE_SIZE); in do_mincore()
[all …]
mlock.c
517 unsigned long nstart, end, tmp; in apply_vma_lock_flags() local
523 end = start + len; in apply_vma_lock_flags()
524 if (end < start) in apply_vma_lock_flags()
526 if (end == start) in apply_vma_lock_flags()
549 if (tmp > end) in apply_vma_lock_flags()
550 tmp = end; in apply_vma_lock_flags()
558 if (tmp < end) in apply_vma_lock_flags()
576 unsigned long end; in count_mm_mlocked_page_nr() local
581 end = ULONG_MAX; in count_mm_mlocked_page_nr()
583 end = start + len; in count_mm_mlocked_page_nr()
[all …]
vmalloc.c
302 BUG_ON(addr >= end); in vmap_range_noflush()
374 size = end - addr; in vunmap_pte_range()
476 BUG_ON(addr >= end); in __vunmap_range_noflush()
621 BUG_ON(addr >= end); in vmap_small_pages_range_noflush()
707 unsigned long end) in check_sparse_vm_area() argument
750 unsigned long end) in vm_area_unmap_pages() argument
755 vunmap_range(start, end); in vm_area_unmap_pages()
2254 kasan_release_vmalloc(start, end, start, end, KASAN_VMALLOC_TLB_FLUSH); in kasan_release_vmalloc_node()
2328 end = max(end, list_last_entry(&vn->purge_list, in __purge_vmap_area_lazy()
2937 end = max(e, end); in _vm_unmap_aliases()
[all …]
vma.c
15 unsigned long end; member
58 .end = (map_)->end, \
815 unsigned long end = vmg->end; in vma_merge_existing_range() local
966 vmg->end = start; in vma_merge_existing_range()
1046 unsigned long end = vmg->end; in vma_merge_new_range() local
1082 vmg->end = end; in vma_merge_new_range()
1487 vms->end = end; in init_vma_munmap()
1566 unsigned long end; in do_vmi_munmap() local
1573 if (end == start) in do_vmi_munmap()
1604 unsigned long end = vmg->end; in vma_modify() local
[all …]
truncate.c
202 if (pos + size <= (u64)end) in truncate_inode_partial_folio()
205 length = end + 1 - pos - offset; in truncate_inode_partial_folio()
345 pgoff_t end; /* exclusive */ in truncate_inode_pages_range() local
369 end = -1; in truncate_inode_pages_range()
371 end = (lend + 1) >> PAGE_SHIFT; in truncate_inode_pages_range()
375 while (index < end && find_lock_entries(mapping, &index, end - 1, in truncate_inode_pages_range()
394 end = folio->index; in truncate_inode_pages_range()
406 end = folio->index; in truncate_inode_pages_range()
413 while (index < end) { in truncate_inode_pages_range()
575 pgoff_t start, pgoff_t end) in invalidate_mapping_pages() argument
[all …]
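
The truncate_inode_pages_range() hits convert the inclusive byte offset lend into an exclusive page index: -1 keeps truncating to end of file, otherwise end becomes (lend + 1) >> PAGE_SHIFT. A small sketch of that conversion, assuming 4 KiB pages:

    #define PAGE_SHIFT_ASSUMED 12	/* 4 KiB pages, purely for illustration */

    /* map an inclusive byte offset to an exclusive page index, as quoted above */
    static unsigned long lend_to_end_index(long long lend)
    {
        if (lend == -1)			/* convention: truncate to end of file */
            return (unsigned long)-1;	/* no upper page-index bound */
        return (unsigned long)(lend + 1) >> PAGE_SHIFT_ASSUMED;
    }
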
hugetlb_vmemmap.c
166 VM_BUG_ON(!PAGE_ALIGNED(start | end)); in vmemmap_remap_range()
176 flush_tlb_kernel_range(start, end); in vmemmap_remap_range()
300 return vmemmap_remap_range(reuse, end, &walk); in vmemmap_remap_split()
364 ret = vmemmap_remap_range(reuse, end, &walk); in vmemmap_remap_free()
366 end = reuse + walk.nr_walked * PAGE_SIZE; in vmemmap_remap_free()
380 vmemmap_remap_range(reuse, end, &walk); in vmemmap_remap_free()
440 return vmemmap_remap_range(reuse, end, &walk); in vmemmap_remap_alloc()
792 unsigned long start, end; in hugetlb_vmemmap_init_early() local
821 if (vmemmap_populate_hvo(start, end, nid, in hugetlb_vmemmap_init_early()
844 unsigned long phys, nr_pages, start, end; in hugetlb_vmemmap_init_late() local
[all …]
memtest.c
42 u64 *p, *start, *end; in memtest() local
49 end = start + (size - (start_phys_aligned - start_phys)) / incr; in memtest()
53 for (p = start; p < end; p++) in memtest()
56 for (p = start; p < end; p++, start_phys_aligned += incr) { in memtest()
73 static void __init do_one_pass(u64 pattern, phys_addr_t start, phys_addr_t end) in do_one_pass() argument
80 this_start = clamp(this_start, start, end); in do_one_pass()
81 this_end = clamp(this_end, start, end); in do_one_pass()
107 void __init early_memtest(phys_addr_t start, phys_addr_t end) in early_memtest() argument
118 do_one_pass(patterns[idx], start, end); in early_memtest()
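
The memtest() hits show the classic fill-then-verify scan: write one 64-bit pattern across a region, then read every word back and flag mismatches. A self-contained user-space sketch of one such pass (printing bad words instead of reserving them as the kernel does):

    #include <stdint.h>
    #include <stdio.h>

    /* one pass over [start, end): fill with a pattern, then verify every word */
    static void memtest_one_pass(uint64_t pattern, uint64_t *start, uint64_t *end)
    {
        uint64_t *p;

        for (p = start; p < end; p++)
            *p = pattern;

        for (p = start; p < end; p++)
            if (*p != pattern)
                printf("bad word at %p: 0x%llx != 0x%llx\n", (void *)p,
                       (unsigned long long)*p, (unsigned long long)pattern);
    }
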
msync.c
34 unsigned long end; in SYSCALL_DEFINE3() local
50 end = start + len; in SYSCALL_DEFINE3()
51 if (end < start) in SYSCALL_DEFINE3()
54 if (end == start) in SYSCALL_DEFINE3()
77 if (start >= end) in SYSCALL_DEFINE3()
90 fend = fstart + (min(end, vma->vm_end) - start) - 1; in SYSCALL_DEFINE3()
98 if (error || start >= end) in SYSCALL_DEFINE3()
103 if (start >= end) { in SYSCALL_DEFINE3()
gup.c
1843 VM_WARN_ON_ONCE(!PAGE_ALIGNED(end)); in populate_vma_page_range()
1948 unsigned long end, nstart, nend; in __mm_populate() local
1953 end = start + len; in __mm_populate()
1973 nend = min(end, vma->vm_end); in __mm_populate()
3045 next = pmd_addr_end(addr, end); in gup_fast_pmd_range()
3077 next = pud_addr_end(addr, end); in gup_fast_pud_range()
3103 next = p4d_addr_end(addr, end); in gup_fast_p4d_range()
3125 next = pgd_addr_end(addr, end); in gup_fast_pgd_range()
3201 unsigned long len, end; in gup_fast_fallback() local
3222 if (end > TASK_SIZE_MAX) in gup_fast_fallback()
[all …]
migrate_device.c
21 unsigned long end, in migrate_vma_collect_skip() argument
27 for (addr = start; addr < end; addr += PAGE_SIZE) { in migrate_vma_collect_skip()
36 unsigned long end, in migrate_vma_collect_hole() argument
59 unsigned long end, in migrate_vma_collect_pmd() argument
95 return migrate_vma_collect_skip(start, end, in migrate_vma_collect_pmd()
98 return migrate_vma_collect_skip(start, end, in migrate_vma_collect_pmd()
105 return migrate_vma_collect_skip(start, end, in migrate_vma_collect_pmd()
115 for (; addr < end; addr += PAGE_SIZE, ptep++) { in migrate_vma_collect_pmd()
288 flush_tlb_range(walk->vma, start, end); in migrate_vma_collect_pmd()
541 args->end &= PAGE_MASK; in migrate_vma_setup()
[all …]
page_poison.c
52 unsigned char *end; in check_poison_mem() local
58 for (end = mem + bytes - 1; end > start; end--) { in check_poison_mem()
59 if (*end != PAGE_POISON) in check_poison_mem()
65 else if (start == end && single_bit_flip(*start, PAGE_POISON)) in check_poison_mem()
71 end - start + 1, 1); in check_poison_mem()
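
check_poison_mem() locates the extent of corruption in a poisoned buffer: find the first byte that no longer holds the poison value, scan backwards for the last such byte, and report the span between them. A sketch of roughly that logic (poison value and helper name are illustrative):

    #include <stddef.h>

    #define POISON 0xaa	/* illustrative poison byte, not necessarily PAGE_POISON */

    /* return how many bytes lie between the first and last corrupted bytes */
    static size_t poison_damage_extent(const unsigned char *mem, size_t bytes)
    {
        const unsigned char *start = mem;
        const unsigned char *end;

        if (!bytes)
            return 0;

        end = mem + bytes - 1;
        while (start <= end && *start == POISON)
            start++;
        if (start > end)			/* every byte still poisoned */
            return 0;
        while (*end == POISON)			/* *start is known corrupted */
            end--;
        return (size_t)(end - start + 1);
    }
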
internal.h
453 unsigned long addr, unsigned long end,
477 loff_t end);
963 unsigned long end, bool write, int *locked);
984 unsigned long start, unsigned long end) in folio_within_range() argument
990 if (start > end) in folio_within_range()
996 if (end > vma->vm_end) in folio_within_range()
997 end = vma->vm_end; in folio_within_range()
1378 unsigned long end, pgprot_t prot,
1525 unsigned long start, unsigned long end, in vma_set_range() argument
1529 vma->vm_end = end; in vma_set_range()
[all …]
/mm/damon/tests/
vaddr-kunit.h
87 KUNIT_EXPECT_EQ(test, 25ul, regions[0].end); in damon_test_three_regions_in_vmas()
89 KUNIT_EXPECT_EQ(test, 220ul, regions[1].end); in damon_test_three_regions_in_vmas()
91 KUNIT_EXPECT_EQ(test, 330ul, regions[2].end); in damon_test_three_regions_in_vmas()
168 (struct damon_addr_range){.start = 5, .end = 27}, in damon_test_apply_three_regions1()
169 (struct damon_addr_range){.start = 45, .end = 55}, in damon_test_apply_three_regions1()
190 (struct damon_addr_range){.start = 5, .end = 27}, in damon_test_apply_three_regions2()
191 (struct damon_addr_range){.start = 56, .end = 57}, in damon_test_apply_three_regions2()
214 (struct damon_addr_range){.start = 5, .end = 27}, in damon_test_apply_three_regions3()
239 (struct damon_addr_range){.start = 5, .end = 7}, in damon_test_apply_three_regions4()
262 KUNIT_EXPECT_EQ(test, r->ar.end, end); in damon_test_split_evenly_fail()
[all …]
/mm/kasan/
init.c
93 unsigned long end) in zero_pte_populate() argument
110 unsigned long end) in zero_pmd_populate() argument
116 next = pmd_addr_end(addr, end); in zero_pmd_populate()
145 unsigned long end) in zero_pud_populate() argument
183 unsigned long end) in zero_p4d_populate() argument
347 unsigned long end) in kasan_remove_pte_table() argument
354 if (next > end) in kasan_remove_pte_table()
355 next = end; in kasan_remove_pte_table()
369 unsigned long end) in kasan_remove_pmd_table() argument
395 unsigned long end) in kasan_remove_pud_table() argument
[all …]
/mm/kmsan/
hooks.c
136 __vunmap_range_noflush(vmalloc_shadow(start), vmalloc_shadow(end)); in kmsan_vunmap_range_noflush()
138 flush_cache_vmap(vmalloc_shadow(start), vmalloc_shadow(end)); in kmsan_vunmap_range_noflush()
139 flush_cache_vmap(vmalloc_origin(start), vmalloc_origin(end)); in kmsan_vunmap_range_noflush()
159 nr = (end - start) / PAGE_SIZE; in kmsan_ioremap_page_range()
211 flush_cache_vmap(vmalloc_shadow(start), vmalloc_shadow(end)); in kmsan_ioremap_page_range()
212 flush_cache_vmap(vmalloc_origin(start), vmalloc_origin(end)); in kmsan_ioremap_page_range()
226 nr = (end - start) / PAGE_SIZE; in kmsan_iounmap_page_range()
234 __vunmap_range_noflush(v_shadow, vmalloc_shadow(end)); in kmsan_iounmap_page_range()
235 __vunmap_range_noflush(v_origin, vmalloc_origin(end)); in kmsan_iounmap_page_range()
241 flush_cache_vmap(vmalloc_shadow(start), vmalloc_shadow(end)); in kmsan_iounmap_page_range()
[all …]

