Lines matching refs:end (cross-reference hits for the identifier "end"; all matches below are from mm/vmalloc.c)
94 static int vmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end, in vmap_pte_range() argument
120 size = arch_vmap_pte_range_map_size(addr, end, pfn, max_page_shift); in vmap_pte_range()
132 } while (pte += PFN_DOWN(size), addr += size, addr != end); in vmap_pte_range()
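The hits at 94-132 above are the leaf level of the mapping walk. A simplified sketch of that loop, collapsing the variable step from arch_vmap_pte_range_map_size() (lines 120/132) down to the plain one-page case; approximate, not verbatim source:

static int vmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
			phys_addr_t phys_addr, pgprot_t prot,
			unsigned int max_page_shift, pgtbl_mod_mask *mask)
{
	pte_t *pte;
	u64 pfn = phys_addr >> PAGE_SHIFT;

	pte = pte_alloc_kernel_track(pmd, addr, mask);
	if (!pte)
		return -ENOMEM;
	do {
		/* install one PTE, then advance one page; the real loop can
		 * step by several pages at once when the arch supports
		 * contiguous PTE mappings (the "size" at lines 120/132) */
		BUG_ON(!pte_none(ptep_get(pte)));
		set_pte_at(&init_mm, addr, pte, pfn_pte(pfn, prot));
		pfn++;
	} while (pte++, addr += PAGE_SIZE, addr != end);
	*mask |= PGTBL_PTE_MODIFIED;
	return 0;
}
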
139 static int vmap_try_huge_pmd(pmd_t *pmd, unsigned long addr, unsigned long end, in vmap_try_huge_pmd() argument
149 if ((end - addr) != PMD_SIZE) in vmap_try_huge_pmd()
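vmap_try_huge_pmd() (lines 139-149) never walks anything: it is a chain of eligibility tests ending in pmd_set_huge(). A sketch of those tests, from memory of the function's shape, so treat anything not shown in the hits above as an assumption:

	if (max_page_shift < PMD_SHIFT)		/* caller capped the mapping size */
		return 0;
	if (!arch_vmap_pmd_supported(prot))	/* arch cannot do PMD leaves for prot */
		return 0;
	if ((end - addr) != PMD_SIZE)		/* line 149: exactly one PMD's worth */
		return 0;
	if (!IS_ALIGNED(addr, PMD_SIZE))	/* virtual alignment */
		return 0;
	if (!IS_ALIGNED(phys_addr, PMD_SIZE))	/* physical alignment */
		return 0;
	if (pmd_present(*pmd) && !pmd_free_pte_page(pmd, addr))
		return 0;			/* stale PTE table we cannot free */
	return pmd_set_huge(pmd, phys_addr, prot);
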
164 static int vmap_pmd_range(pud_t *pud, unsigned long addr, unsigned long end, in vmap_pmd_range() argument
175 next = pmd_addr_end(addr, end); in vmap_pmd_range()
185 } while (pmd++, phys_addr += (next - addr), addr = next, addr != end); in vmap_pmd_range()
189 static int vmap_try_huge_pud(pud_t *pud, unsigned long addr, unsigned long end, in vmap_try_huge_pud() argument
199 if ((end - addr) != PUD_SIZE) in vmap_try_huge_pud()
214 static int vmap_pud_range(p4d_t *p4d, unsigned long addr, unsigned long end, in vmap_pud_range() argument
225 next = pud_addr_end(addr, end); in vmap_pud_range()
236 } while (pud++, phys_addr += (next - addr), addr = next, addr != end); in vmap_pud_range()
240 static int vmap_try_huge_p4d(p4d_t *p4d, unsigned long addr, unsigned long end, in vmap_try_huge_p4d() argument
250 if ((end - addr) != P4D_SIZE) in vmap_try_huge_p4d()
265 static int vmap_p4d_range(pgd_t *pgd, unsigned long addr, unsigned long end, in vmap_p4d_range() argument
276 next = p4d_addr_end(addr, end); in vmap_p4d_range()
287 } while (p4d++, phys_addr += (next - addr), addr = next, addr != end); in vmap_p4d_range()
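Lines 164-287 show that the pmd, pud, and p4d walkers repeat one pattern: try a huge leaf covering [addr, next), otherwise descend one level. Sketched at PMD level (the pud and p4d variants differ only in types and helper names); approximate:

	do {
		next = pmd_addr_end(addr, end);
		if (vmap_try_huge_pmd(pmd, addr, next, phys_addr, prot,
				      max_page_shift)) {
			*mask |= PGTBL_PMD_MODIFIED;	/* huge leaf installed, done */
			continue;
		}
		if (vmap_pte_range(pmd, addr, next, phys_addr, prot,
				   max_page_shift, mask))
			return -ENOMEM;			/* small-page fallback failed */
	} while (pmd++, phys_addr += (next - addr), addr = next, addr != end);
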
291 static int vmap_range_noflush(unsigned long addr, unsigned long end, in vmap_range_noflush() argument
302 BUG_ON(addr >= end); in vmap_range_noflush()
307 next = pgd_addr_end(addr, end); in vmap_range_noflush()
312 } while (pgd++, phys_addr += (next - addr), addr = next, addr != end); in vmap_range_noflush()
315 arch_sync_kernel_mappings(start, end); in vmap_range_noflush()
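vmap_range_noflush() (lines 291-315) is the top of the stack: a pgd walk that accumulates a pgtbl_mod_mask and only calls arch_sync_kernel_mappings() when some level was actually modified. A sketch consistent with those hits:

	pgtbl_mod_mask mask = 0;
	unsigned long start = addr, next;
	pgd_t *pgd;
	int err = 0;

	BUG_ON(addr >= end);		/* line 302: empty or inverted ranges are a bug */
	pgd = pgd_offset_k(addr);
	do {
		next = pgd_addr_end(addr, end);
		err = vmap_p4d_range(pgd, addr, next, phys_addr, prot,
				     max_page_shift, &mask);
		if (err)
			break;
	} while (pgd++, phys_addr += (next - addr), addr = next, addr != end);

	if (mask & ARCH_PAGE_TABLE_SYNC_MASK)
		arch_sync_kernel_mappings(start, end);	/* line 315 */
	return err;
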
320 int vmap_page_range(unsigned long addr, unsigned long end, in vmap_page_range() argument
325 err = vmap_range_noflush(addr, end, phys_addr, pgprot_nx(prot), in vmap_page_range()
327 flush_cache_vmap(addr, end); in vmap_page_range()
329 err = kmsan_ioremap_page_range(addr, end, phys_addr, prot, in vmap_page_range()
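The hits at lines 320-329 show the ordering vmap_page_range() imposes: map with execute permission stripped, flush the virtual cache unconditionally, then let KMSAN shadow the range only on success. Reassembled below, assuming the file-local ioremap_max_page_shift limit; comments mine:

	int err;

	/* executable ioremaps are never wanted: strip exec via pgprot_nx() */
	err = vmap_range_noflush(addr, end, phys_addr, pgprot_nx(prot),
				 ioremap_max_page_shift);
	/* flush even on failure: part of the range may have been mapped */
	flush_cache_vmap(addr, end);
	if (!err)
		err = kmsan_ioremap_page_range(addr, end, phys_addr, prot,
					       ioremap_max_page_shift);
	return err;
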
334 int ioremap_page_range(unsigned long addr, unsigned long end, in ioremap_page_range() argument
345 (void *)end != area->addr + get_vm_area_size(area)) { in ioremap_page_range()
347 addr, end, (long)area->addr, in ioremap_page_range()
351 return vmap_page_range(addr, end, phys_addr, prot); in ioremap_page_range()
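ioremap_page_range() (lines 334-351) refuses ranges that do not exactly cover a VM_IOREMAP area. A sketch of that validation, assuming the usual find_vm_area()/get_vm_area_size() helpers and omitting the WARN_ONCE diagnostics visible at lines 345-347:

	struct vm_struct *area;

	area = find_vm_area((void *)addr);
	if (!area || !(area->flags & VM_IOREMAP))
		return -EINVAL;		/* not an ioremap area at all */
	/* line 345: [addr, end) must match the area exactly, guard page excluded */
	if (addr != (unsigned long)area->addr ||
	    (void *)end != area->addr + get_vm_area_size(area))
		return -ERANGE;
	return vmap_page_range(addr, end, phys_addr, prot);
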
354 static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end, in vunmap_pte_range() argument
373 if (WARN_ON(end - addr < size)) in vunmap_pte_range()
374 size = end - addr; in vunmap_pte_range()
379 } while (pte += (size >> PAGE_SHIFT), addr += size, addr != end); in vunmap_pte_range()
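On teardown (lines 354-379) the PTE loop clears entries with ptep_get_and_clear() and may step by more than one page when the arch mapped a contiguous block; line 373 clamps a block that would overshoot end. Simplified here to the single-page case:

	pte_t *pte = pte_offset_kernel(pmd, addr);

	do {
		pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);

		/* a swap-type or otherwise bogus entry in vmalloc space is a bug */
		WARN_ON(!pte_none(ptent) && !pte_present(ptent));
	} while (pte++, addr += PAGE_SIZE, addr != end);
	*mask |= PGTBL_PTE_MODIFIED;
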
385 static void vunmap_pmd_range(pud_t *pud, unsigned long addr, unsigned long end, in vunmap_pmd_range() argument
394 next = pmd_addr_end(addr, end); in vunmap_pmd_range()
409 } while (pmd++, addr = next, addr != end); in vunmap_pmd_range()
412 static void vunmap_pud_range(p4d_t *p4d, unsigned long addr, unsigned long end, in vunmap_pud_range() argument
421 next = pud_addr_end(addr, end); in vunmap_pud_range()
434 } while (pud++, addr = next, addr != end); in vunmap_pud_range()
437 static void vunmap_p4d_range(pgd_t *pgd, unsigned long addr, unsigned long end, in vunmap_p4d_range() argument
445 next = p4d_addr_end(addr, end); in vunmap_p4d_range()
454 } while (p4d++, addr = next, addr != end); in vunmap_p4d_range()
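The vunmap_{pmd,pud,p4d}_range() walkers (lines 385-454) mirror the mapping side: clear a huge leaf in one shot, skip empty entries, otherwise recurse. PMD-level sketch, again approximate (the real code also records PGTBL_PMD_MODIFIED in *mask when it clears a leaf):

	do {
		next = pmd_addr_end(addr, end);
		if (pmd_clear_huge(pmd))	/* huge leaf: gone in one call */
			continue;
		if (pmd_none_or_clear_bad(pmd))	/* nothing mapped at this entry */
			continue;
		vunmap_pte_range(pmd, addr, next, mask);
		cond_resched();		/* teardown can be long; stay preemptible */
	} while (pmd++, addr = next, addr != end);
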
469 void __vunmap_range_noflush(unsigned long start, unsigned long end) in __vunmap_range_noflush() argument
476 BUG_ON(addr >= end); in __vunmap_range_noflush()
479 next = pgd_addr_end(addr, end); in __vunmap_range_noflush()
485 } while (pgd++, addr = next, addr != end); in __vunmap_range_noflush()
488 arch_sync_kernel_mappings(start, end); in __vunmap_range_noflush()
491 void vunmap_range_noflush(unsigned long start, unsigned long end) in vunmap_range_noflush() argument
493 kmsan_vunmap_range_noflush(start, end); in vunmap_range_noflush()
494 __vunmap_range_noflush(start, end); in vunmap_range_noflush()
506 void vunmap_range(unsigned long addr, unsigned long end) in vunmap_range() argument
508 flush_cache_vunmap(addr, end); in vunmap_range()
509 vunmap_range_noflush(addr, end); in vunmap_range()
510 flush_tlb_kernel_range(addr, end); in vunmap_range()
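Lines 506-510 give the full unmap contract in three calls. The same lines, annotated:

	void vunmap_range(unsigned long addr, unsigned long end)
	{
		flush_cache_vunmap(addr, end);	   /* writeback for virtually-tagged caches */
		vunmap_range_noflush(addr, end);   /* KMSAN hook + page-table teardown */
		flush_tlb_kernel_range(addr, end); /* no stale translations survive */
	}
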
514 unsigned long end, pgprot_t prot, struct page **pages, int *nr, in vmap_pages_pte_range() argument
549 } while (pte++, addr += PAGE_SIZE, addr != end); in vmap_pages_pte_range()
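The vmap_pages_* family (lines 514-549 onward) maps an array of struct page rather than a physical range. The PTE-level hit at line 549 closes a loop of roughly this shape, with *nr indexing into pages[] across the whole walk:

	pte = pte_alloc_kernel_track(pmd, addr, mask);
	if (!pte)
		return -ENOMEM;
	do {
		struct page *page = pages[*nr];

		if (WARN_ON(!pte_none(ptep_get(pte))))
			return -EBUSY;		/* refuse to overwrite a live PTE */
		if (WARN_ON(!page))
			return -ENOMEM;		/* hole in the page array */
		set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
		(*nr)++;
	} while (pte++, addr += PAGE_SIZE, addr != end);
	*mask |= PGTBL_PTE_MODIFIED;
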
558 unsigned long end, pgprot_t prot, struct page **pages, int *nr, in vmap_pages_pmd_range() argument
568 next = pmd_addr_end(addr, end); in vmap_pages_pmd_range()
571 } while (pmd++, addr = next, addr != end); in vmap_pages_pmd_range()
576 unsigned long end, pgprot_t prot, struct page **pages, int *nr, in vmap_pages_pud_range() argument
586 next = pud_addr_end(addr, end); in vmap_pages_pud_range()
589 } while (pud++, addr = next, addr != end); in vmap_pages_pud_range()
594 unsigned long end, pgprot_t prot, struct page **pages, int *nr, in vmap_pages_p4d_range() argument
604 next = p4d_addr_end(addr, end); in vmap_pages_p4d_range()
607 } while (p4d++, addr = next, addr != end); in vmap_pages_p4d_range()
611 static int vmap_small_pages_range_noflush(unsigned long addr, unsigned long end, in vmap_small_pages_range_noflush() argument
621 BUG_ON(addr >= end); in vmap_small_pages_range_noflush()
624 next = pgd_addr_end(addr, end); in vmap_small_pages_range_noflush()
630 } while (pgd++, addr = next, addr != end); in vmap_small_pages_range_noflush()
633 arch_sync_kernel_mappings(start, end); in vmap_small_pages_range_noflush()
647 int __vmap_pages_range_noflush(unsigned long addr, unsigned long end, in __vmap_pages_range_noflush() argument
650 unsigned int i, nr = (end - addr) >> PAGE_SHIFT; in __vmap_pages_range_noflush()
656 return vmap_small_pages_range_noflush(addr, end, prot, pages); in __vmap_pages_range_noflush()
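Line 656 is the small-page fallback of a dispatch on page_shift: order-0 mappings take the four-level walk above, while huge pages are handed to vmap_range_noflush() one block at a time. Sketch:

	unsigned int i, nr = (end - addr) >> PAGE_SHIFT;	/* line 650 */

	if (!IS_ENABLED(CONFIG_HAVE_ARCH_HUGE_VMALLOC) ||
	    page_shift == PAGE_SHIFT)
		return vmap_small_pages_range_noflush(addr, end, prot, pages);

	for (i = 0; i < nr; i += 1U << (page_shift - PAGE_SHIFT)) {
		int err = vmap_range_noflush(addr, addr + (1UL << page_shift),
					     page_to_phys(pages[i]), prot,
					     page_shift);
		if (err)
			return err;
		addr += 1UL << page_shift;
	}
	return 0;
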
673 int vmap_pages_range_noflush(unsigned long addr, unsigned long end, in vmap_pages_range_noflush() argument
676 int ret = kmsan_vmap_pages_range_noflush(addr, end, prot, pages, in vmap_pages_range_noflush()
681 return __vmap_pages_range_noflush(addr, end, prot, pages, page_shift); in vmap_pages_range_noflush()
696 int vmap_pages_range(unsigned long addr, unsigned long end, in vmap_pages_range() argument
701 err = vmap_pages_range_noflush(addr, end, prot, pages, page_shift); in vmap_pages_range()
702 flush_cache_vmap(addr, end); in vmap_pages_range()
707 unsigned long end) in check_sparse_vm_area() argument
716 if ((end - start) >> PAGE_SHIFT > totalram_pages()) in check_sparse_vm_area()
719 (void *)end > area->addr + get_vm_area_size(area)) in check_sparse_vm_area()
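check_sparse_vm_area() (lines 707-719) sanity-checks a sub-range of a VM_SPARSE area before it is mapped or unmapped piecemeal. The two hits shown are the size and bounds tests; a sketch of the whole check, with the flag test reconstructed from memory:

	might_sleep();
	if (WARN_ON_ONCE(!(area->flags & VM_SPARSE)))
		return -EINVAL;		/* only sparse areas may be mapped piecemeal */
	/* line 716: more pages than exist in RAM cannot be a valid request */
	if ((end - start) >> PAGE_SHIFT > totalram_pages())
		return -E2BIG;
	/* line 719: the sub-range must sit inside the area */
	if (start < (unsigned long)area->addr ||
	    (void *)end > area->addr + get_vm_area_size(area))
		return -ERANGE;
	return 0;
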
732 unsigned long end, struct page **pages) in vm_area_map_pages() argument
736 err = check_sparse_vm_area(area, start, end); in vm_area_map_pages()
740 return vmap_pages_range(start, end, PAGE_KERNEL, pages, PAGE_SHIFT); in vm_area_map_pages()
750 unsigned long end) in vm_area_unmap_pages() argument
752 if (check_sparse_vm_area(area, start, end)) in vm_area_unmap_pages()
755 vunmap_range(start, end); in vm_area_unmap_pages()
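vm_area_map_pages() and vm_area_unmap_pages() (lines 732-755) are the exported pair built on that check. A hypothetical usage sketch; "area", "pages", and "nr" are assumptions, with area created elsewhere as VM_SPARSE and pages holding nr order-0 pages:

	unsigned long start = (unsigned long)area->addr;
	unsigned long end = start + nr * PAGE_SIZE;
	int err;

	/* populate part of the sparse area with real pages */
	err = vm_area_map_pages(area, start, end, pages);
	if (err)
		return err;
	/* ... the mapping is live here ... */
	vm_area_unmap_pages(area, start, end);	/* teardown, including TLB flush */
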
2242 unsigned long start, end; in kasan_release_vmalloc_node() local
2245 end = list_last_entry(&vn->purge_list, struct vmap_area, list)->va_end; in kasan_release_vmalloc_node()
2254 kasan_release_vmalloc(start, end, start, end, KASAN_VMALLOC_TLB_FLUSH); in kasan_release_vmalloc_node()
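In kasan_release_vmalloc_node() (lines 2242-2254) the start/end locals bracket the node's entire purge_list: start comes from the first entry's va_start, end from the last entry's va_end (line 2245), so the final call at line 2254 can flush the TLB for the whole span at once. Sketch of that bracketing, per-area shadow release elided:

	start = list_first_entry(&vn->purge_list, struct vmap_area, list)->va_start;
	end   = list_last_entry(&vn->purge_list, struct vmap_area, list)->va_end;
	/* ... per-area kasan_release_vmalloc() calls elided ... */
	kasan_release_vmalloc(start, end, start, end, KASAN_VMALLOC_TLB_FLUSH);
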
2295 static bool __purge_vmap_area_lazy(unsigned long start, unsigned long end, in __purge_vmap_area_lazy() argument
2328 end = max(end, list_last_entry(&vn->purge_list, in __purge_vmap_area_lazy()
2336 flush_tlb_kernel_range(start, end); in __purge_vmap_area_lazy()
2371 trace_purge_vmap_area_lazy(start, end, nr_purged_areas); in __purge_vmap_area_lazy()
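__purge_vmap_area_lazy() (lines 2295-2371) widens [start, end) over every vmap_area queued on the per-node purge lists (line 2328 shows the end side) so that one flush_tlb_kernel_range() call (line 2336) covers the whole batch. The accumulation, heavily simplified; the vmap_nodes/nr_vmap_nodes names are file-local and locking, decay, and freeing are elided:

	for (i = 0; i < nr_vmap_nodes; i++) {
		struct vmap_node *vn = &vmap_nodes[i];

		if (list_empty(&vn->purge_list))
			continue;
		start = min(start, list_first_entry(&vn->purge_list,
						    struct vmap_area, list)->va_start);
		end = max(end, list_last_entry(&vn->purge_list,
					       struct vmap_area, list)->va_end);
	}
	flush_tlb_kernel_range(start, end);	/* line 2336: one flush for the batch */
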
2904 static void _vm_unmap_aliases(unsigned long start, unsigned long end, int flush) in _vm_unmap_aliases() argument
2937 end = max(e, end); in _vm_unmap_aliases()
2951 if (!__purge_vmap_area_lazy(start, end, false) && flush) in _vm_unmap_aliases()
2952 flush_tlb_kernel_range(start, end); in _vm_unmap_aliases()
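_vm_unmap_aliases() (lines 2904-2952) keeps growing end over each dirty vmap-block alias (line 2937) and then flushes at most once: __purge_vmap_area_lazy() reports whether it already flushed, and only when it did not, and a flush was still requested, does line 2952 run. Annotated:

	/* the purge path returns true when it flushed the TLB itself */
	if (!__purge_vmap_area_lazy(start, end, false) && flush)
		flush_tlb_kernel_range(start, end);
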
3160 unsigned long start, unsigned long end, int node, in __get_vm_area_node() argument
3187 va = alloc_vmap_area(size, align, start, end, node, gfp_mask, 0, area); in __get_vm_area_node()
3209 unsigned long start, unsigned long end, in __get_vm_area_caller() argument
3212 return __get_vm_area_node(size, 1, PAGE_SHIFT, flags, start, end, in __get_vm_area_caller()
3315 unsigned long start = ULONG_MAX, end = 0; in vm_reset_perms() local
3332 end = max(addr + page_size, end); in vm_reset_perms()
3343 _vm_unmap_aliases(start, end, flush_dmap); in vm_reset_perms()
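vm_reset_perms() (lines 3315-3343) computes the tightest [start, end) covering every page backing the area (note the ULONG_MAX/0 initializers at line 3315) before resetting direct-map permissions. A sketch of the accumulation, assuming order-0 pages and folding the flush_dmap bookkeeping into a constant:

	unsigned long start = ULONG_MAX, end = 0;
	unsigned int i;

	for (i = 0; i < area->nr_pages; i++) {
		unsigned long addr = (unsigned long)page_address(area->pages[i]);

		if (addr) {	/* highmem pages have no direct-map address */
			start = min(addr, start);
			end = max(addr + PAGE_SIZE, end);
		}
	}
	_vm_unmap_aliases(start, end, 1 /* flush_dmap */);
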
3816 unsigned long start, unsigned long end, gfp_t gfp_mask, in __vmalloc_node_range_noprof() argument
3854 VM_UNINITIALIZED | vm_flags, start, end, node, in __vmalloc_node_range_noprof()
4701 unsigned long base, start, size, end, last_end, orig_start, orig_end; in pcpu_get_vm_areas() local
4708 end = start + sizes[area]; in pcpu_get_vm_areas()
4722 BUG_ON(start2 < end && start < end2); in pcpu_get_vm_areas()
4749 end = start + sizes[area]; in pcpu_get_vm_areas()
4752 base = pvm_determine_end_from_reverse(&va, align) - end; in pcpu_get_vm_areas()
4772 if (base + end > va->va_end) { in pcpu_get_vm_areas()
4773 base = pvm_determine_end_from_reverse(&va, align) - end; in pcpu_get_vm_areas()
4783 base = pvm_determine_end_from_reverse(&va, align) - end; in pcpu_get_vm_areas()
4797 end = start + sizes[area]; in pcpu_get_vm_areas()
4798 va = pvm_find_va_enclose_addr(base + end); in pcpu_get_vm_areas()
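pcpu_get_vm_areas() (lines 4701-4798) addresses every requested area at a fixed offset below a moving base: start = offsets[area] and end = start + sizes[area] (lines 4708/4749/4797), while candidate bases are derived from the top of a free block as pvm_determine_end_from_reverse(&va, align) - end. Two of the key checks, annotated; approximate:

	/* line 4722: half-open ranges [start, end) and [start2, end2) overlap
	 * iff each begins before the other ends; requests must be disjoint */
	BUG_ON(start2 < end && start < end2);

	/* lines 4772-4773: if the current free block cannot hold this area,
	 * retreat to a lower block and recompute the candidate base */
	if (base + end > va->va_end)
		base = pvm_determine_end_from_reverse(&va, align) - end;
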