Lines Matching refs:addr
(Each hit below gives the source line number, the matched code, and the enclosing function; the matches all appear to come from the kernel's vmalloc implementation, mm/vmalloc.c.)

81 unsigned long addr = (unsigned long)kasan_reset_tag(x); in is_vmalloc_addr() local
83 return addr >= VMALLOC_START && addr < VMALLOC_END; in is_vmalloc_addr()
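The two is_vmalloc_addr() hits above (lines 81 and 83) are essentially the whole function; a minimal reconstruction, assuming the usual definition where kasan_reset_tag() strips any KASAN pointer tag before the range compare:

	bool is_vmalloc_addr(const void *x)
	{
		unsigned long addr = (unsigned long)kasan_reset_tag(x);

		/* True iff the (untagged) address falls in [VMALLOC_START, VMALLOC_END). */
		return addr >= VMALLOC_START && addr < VMALLOC_END;
	}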
94 static int vmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end, in vmap_pte_range() argument
104 pte = pte_alloc_kernel_track(pmd, addr, mask); in vmap_pte_range()
120 size = arch_vmap_pte_range_map_size(addr, end, pfn, max_page_shift); in vmap_pte_range()
125 set_huge_pte_at(&init_mm, addr, pte, entry, size); in vmap_pte_range()
130 set_pte_at(&init_mm, addr, pte, pfn_pte(pfn, prot)); in vmap_pte_range()
132 } while (pte += PFN_DOWN(size), addr += size, addr != end); in vmap_pte_range()
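The vmap_pte_range() hits (lines 94-132) outline the leaf-level mapping loop: allocate the PTE page once, then per iteration ask the architecture how much can be mapped at this address, install either a contiguous huge PTE or a normal PTE, and advance pte, pfn and addr by that size. A rough, non-verbatim sketch of the loop under those assumptions (arch_make_huge_pte() adjustments and error handling omitted):

	pte = pte_alloc_kernel_track(pmd, addr, mask);
	if (!pte)
		return -ENOMEM;
	do {
		/* May be PAGE_SIZE or a larger, architecture-chosen contiguous size. */
		size = arch_vmap_pte_range_map_size(addr, end, pfn, max_page_shift);
		if (size != PAGE_SIZE)
			set_huge_pte_at(&init_mm, addr, pte, pfn_pte(pfn, prot), size);
		else
			set_pte_at(&init_mm, addr, pte, pfn_pte(pfn, prot));
		pfn += PFN_DOWN(size);
	} while (pte += PFN_DOWN(size), addr += size, addr != end);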
139 static int vmap_try_huge_pmd(pmd_t *pmd, unsigned long addr, unsigned long end, in vmap_try_huge_pmd() argument
149 if ((end - addr) != PMD_SIZE) in vmap_try_huge_pmd()
152 if (!IS_ALIGNED(addr, PMD_SIZE)) in vmap_try_huge_pmd()
158 if (pmd_present(*pmd) && !pmd_free_pte_page(pmd, addr)) in vmap_try_huge_pmd()
164 static int vmap_pmd_range(pud_t *pud, unsigned long addr, unsigned long end, in vmap_pmd_range() argument
171 pmd = pmd_alloc_track(&init_mm, pud, addr, mask); in vmap_pmd_range()
175 next = pmd_addr_end(addr, end); in vmap_pmd_range()
177 if (vmap_try_huge_pmd(pmd, addr, next, phys_addr, prot, in vmap_pmd_range()
183 if (vmap_pte_range(pmd, addr, next, phys_addr, prot, max_page_shift, mask)) in vmap_pmd_range()
185 } while (pmd++, phys_addr += (next - addr), addr = next, addr != end); in vmap_pmd_range()
189 static int vmap_try_huge_pud(pud_t *pud, unsigned long addr, unsigned long end, in vmap_try_huge_pud() argument
199 if ((end - addr) != PUD_SIZE) in vmap_try_huge_pud()
202 if (!IS_ALIGNED(addr, PUD_SIZE)) in vmap_try_huge_pud()
208 if (pud_present(*pud) && !pud_free_pmd_page(pud, addr)) in vmap_try_huge_pud()
214 static int vmap_pud_range(p4d_t *p4d, unsigned long addr, unsigned long end, in vmap_pud_range() argument
221 pud = pud_alloc_track(&init_mm, p4d, addr, mask); in vmap_pud_range()
225 next = pud_addr_end(addr, end); in vmap_pud_range()
227 if (vmap_try_huge_pud(pud, addr, next, phys_addr, prot, in vmap_pud_range()
233 if (vmap_pmd_range(pud, addr, next, phys_addr, prot, in vmap_pud_range()
236 } while (pud++, phys_addr += (next - addr), addr = next, addr != end); in vmap_pud_range()
240 static int vmap_try_huge_p4d(p4d_t *p4d, unsigned long addr, unsigned long end, in vmap_try_huge_p4d() argument
250 if ((end - addr) != P4D_SIZE) in vmap_try_huge_p4d()
253 if (!IS_ALIGNED(addr, P4D_SIZE)) in vmap_try_huge_p4d()
259 if (p4d_present(*p4d) && !p4d_free_pud_page(p4d, addr)) in vmap_try_huge_p4d()
265 static int vmap_p4d_range(pgd_t *pgd, unsigned long addr, unsigned long end, in vmap_p4d_range() argument
272 p4d = p4d_alloc_track(&init_mm, pgd, addr, mask); in vmap_p4d_range()
276 next = p4d_addr_end(addr, end); in vmap_p4d_range()
278 if (vmap_try_huge_p4d(p4d, addr, next, phys_addr, prot, in vmap_p4d_range()
284 if (vmap_pud_range(p4d, addr, next, phys_addr, prot, in vmap_p4d_range()
287 } while (p4d++, phys_addr += (next - addr), addr = next, addr != end); in vmap_p4d_range()
291 static int vmap_range_noflush(unsigned long addr, unsigned long end, in vmap_range_noflush() argument
302 BUG_ON(addr >= end); in vmap_range_noflush()
304 start = addr; in vmap_range_noflush()
305 pgd = pgd_offset_k(addr); in vmap_range_noflush()
307 next = pgd_addr_end(addr, end); in vmap_range_noflush()
308 err = vmap_p4d_range(pgd, addr, next, phys_addr, prot, in vmap_range_noflush()
312 } while (pgd++, phys_addr += (next - addr), addr = next, addr != end); in vmap_range_noflush()
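Lines 139-312 are the classic top-down population walk: vmap_range_noflush() iterates PGD entries; each lower level allocates the next table with *_alloc_track(), tries a huge mapping via vmap_try_huge_*() when the range is exactly one aligned PMD/PUD/P4D, and otherwise recurses, with phys_addr and addr advanced in lock-step at every level. A compressed sketch of the top level, reconstructed from the hits (not the verbatim kernel body):

	static int vmap_range_noflush(unsigned long addr, unsigned long end,
			phys_addr_t phys_addr, pgprot_t prot,
			unsigned int max_page_shift)
	{
		pgtbl_mod_mask mask = 0;
		unsigned long start = addr;
		unsigned long next;
		pgd_t *pgd;
		int err;

		BUG_ON(addr >= end);
		pgd = pgd_offset_k(addr);
		do {
			next = pgd_addr_end(addr, end);
			err = vmap_p4d_range(pgd, addr, next, phys_addr, prot,
					     max_page_shift, &mask);
			if (err)
				break;
		} while (pgd++, phys_addr += (next - addr), addr = next, addr != end);

		if (mask & ARCH_PAGE_TABLE_SYNC_MASK)
			arch_sync_kernel_mappings(start, end);

		return err;
	}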
320 int vmap_page_range(unsigned long addr, unsigned long end, in vmap_page_range() argument
325 err = vmap_range_noflush(addr, end, phys_addr, pgprot_nx(prot), in vmap_page_range()
327 flush_cache_vmap(addr, end); in vmap_page_range()
329 err = kmsan_ioremap_page_range(addr, end, phys_addr, prot, in vmap_page_range()
334 int ioremap_page_range(unsigned long addr, unsigned long end, in ioremap_page_range() argument
339 area = find_vm_area((void *)addr); in ioremap_page_range()
341 WARN_ONCE(1, "vm_area at addr %lx is not marked as VM_IOREMAP\n", addr); in ioremap_page_range()
344 if (addr != (unsigned long)area->addr || in ioremap_page_range()
345 (void *)end != area->addr + get_vm_area_size(area)) { in ioremap_page_range()
347 addr, end, (long)area->addr, in ioremap_page_range()
348 (long)area->addr + get_vm_area_size(area)); in ioremap_page_range()
351 return vmap_page_range(addr, end, phys_addr, prot); in ioremap_page_range()
354 static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end, in vunmap_pte_range() argument
361 pte = pte_offset_kernel(pmd, addr); in vunmap_pte_range()
366 size = arch_vmap_pte_range_unmap_size(addr, pte); in vunmap_pte_range()
368 if (WARN_ON(!IS_ALIGNED(addr, size))) { in vunmap_pte_range()
369 addr = ALIGN_DOWN(addr, size); in vunmap_pte_range()
372 ptent = huge_ptep_get_and_clear(&init_mm, addr, pte, size); in vunmap_pte_range()
373 if (WARN_ON(end - addr < size)) in vunmap_pte_range()
374 size = end - addr; in vunmap_pte_range()
377 ptent = ptep_get_and_clear(&init_mm, addr, pte); in vunmap_pte_range()
379 } while (pte += (size >> PAGE_SHIFT), addr += size, addr != end); in vunmap_pte_range()
385 static void vunmap_pmd_range(pud_t *pud, unsigned long addr, unsigned long end, in vunmap_pmd_range() argument
392 pmd = pmd_offset(pud, addr); in vunmap_pmd_range()
394 next = pmd_addr_end(addr, end); in vunmap_pmd_range()
401 WARN_ON(next - addr < PMD_SIZE); in vunmap_pmd_range()
406 vunmap_pte_range(pmd, addr, next, mask); in vunmap_pmd_range()
409 } while (pmd++, addr = next, addr != end); in vunmap_pmd_range()
412 static void vunmap_pud_range(p4d_t *p4d, unsigned long addr, unsigned long end, in vunmap_pud_range() argument
419 pud = pud_offset(p4d, addr); in vunmap_pud_range()
421 next = pud_addr_end(addr, end); in vunmap_pud_range()
428 WARN_ON(next - addr < PUD_SIZE); in vunmap_pud_range()
433 vunmap_pmd_range(pud, addr, next, mask); in vunmap_pud_range()
434 } while (pud++, addr = next, addr != end); in vunmap_pud_range()
437 static void vunmap_p4d_range(pgd_t *pgd, unsigned long addr, unsigned long end, in vunmap_p4d_range() argument
443 p4d = p4d_offset(pgd, addr); in vunmap_p4d_range()
445 next = p4d_addr_end(addr, end); in vunmap_p4d_range()
453 vunmap_pud_range(p4d, addr, next, mask); in vunmap_p4d_range()
454 } while (p4d++, addr = next, addr != end); in vunmap_p4d_range()
473 unsigned long addr = start; in __vunmap_range_noflush() local
476 BUG_ON(addr >= end); in __vunmap_range_noflush()
477 pgd = pgd_offset_k(addr); in __vunmap_range_noflush()
479 next = pgd_addr_end(addr, end); in __vunmap_range_noflush()
484 vunmap_p4d_range(pgd, addr, next, &mask); in __vunmap_range_noflush()
485 } while (pgd++, addr = next, addr != end); in __vunmap_range_noflush()
506 void vunmap_range(unsigned long addr, unsigned long end) in vunmap_range() argument
508 flush_cache_vunmap(addr, end); in vunmap_range()
509 vunmap_range_noflush(addr, end); in vunmap_range()
510 flush_tlb_kernel_range(addr, end); in vunmap_range()
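vunmap_range() (lines 506-510) is just the three hits above run in order; reconstructed:

	void vunmap_range(unsigned long addr, unsigned long end)
	{
		flush_cache_vunmap(addr, end);		/* write back/discard cache for the VA range */
		vunmap_range_noflush(addr, end);	/* clear the page-table entries */
		flush_tlb_kernel_range(addr, end);	/* then shoot down stale TLB entries */
	}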
513 static int vmap_pages_pte_range(pmd_t *pmd, unsigned long addr, in vmap_pages_pte_range() argument
525 pte = pte_alloc_kernel_track(pmd, addr, mask); in vmap_pages_pte_range()
547 set_pte_at(&init_mm, addr, pte, mk_pte(page, prot)); in vmap_pages_pte_range()
549 } while (pte++, addr += PAGE_SIZE, addr != end); in vmap_pages_pte_range()
557 static int vmap_pages_pmd_range(pud_t *pud, unsigned long addr, in vmap_pages_pmd_range() argument
564 pmd = pmd_alloc_track(&init_mm, pud, addr, mask); in vmap_pages_pmd_range()
568 next = pmd_addr_end(addr, end); in vmap_pages_pmd_range()
569 if (vmap_pages_pte_range(pmd, addr, next, prot, pages, nr, mask)) in vmap_pages_pmd_range()
571 } while (pmd++, addr = next, addr != end); in vmap_pages_pmd_range()
575 static int vmap_pages_pud_range(p4d_t *p4d, unsigned long addr, in vmap_pages_pud_range() argument
582 pud = pud_alloc_track(&init_mm, p4d, addr, mask); in vmap_pages_pud_range()
586 next = pud_addr_end(addr, end); in vmap_pages_pud_range()
587 if (vmap_pages_pmd_range(pud, addr, next, prot, pages, nr, mask)) in vmap_pages_pud_range()
589 } while (pud++, addr = next, addr != end); in vmap_pages_pud_range()
593 static int vmap_pages_p4d_range(pgd_t *pgd, unsigned long addr, in vmap_pages_p4d_range() argument
600 p4d = p4d_alloc_track(&init_mm, pgd, addr, mask); in vmap_pages_p4d_range()
604 next = p4d_addr_end(addr, end); in vmap_pages_p4d_range()
605 if (vmap_pages_pud_range(p4d, addr, next, prot, pages, nr, mask)) in vmap_pages_p4d_range()
607 } while (p4d++, addr = next, addr != end); in vmap_pages_p4d_range()
611 static int vmap_small_pages_range_noflush(unsigned long addr, unsigned long end, in vmap_small_pages_range_noflush() argument
614 unsigned long start = addr; in vmap_small_pages_range_noflush()
621 BUG_ON(addr >= end); in vmap_small_pages_range_noflush()
622 pgd = pgd_offset_k(addr); in vmap_small_pages_range_noflush()
624 next = pgd_addr_end(addr, end); in vmap_small_pages_range_noflush()
627 err = vmap_pages_p4d_range(pgd, addr, next, prot, pages, &nr, &mask); in vmap_small_pages_range_noflush()
630 } while (pgd++, addr = next, addr != end); in vmap_small_pages_range_noflush()
647 int __vmap_pages_range_noflush(unsigned long addr, unsigned long end, in __vmap_pages_range_noflush() argument
650 unsigned int i, nr = (end - addr) >> PAGE_SHIFT; in __vmap_pages_range_noflush()
656 return vmap_small_pages_range_noflush(addr, end, prot, pages); in __vmap_pages_range_noflush()
661 err = vmap_range_noflush(addr, addr + (1UL << page_shift), in __vmap_pages_range_noflush()
667 addr += 1UL << page_shift; in __vmap_pages_range_noflush()
673 int vmap_pages_range_noflush(unsigned long addr, unsigned long end, in vmap_pages_range_noflush() argument
676 int ret = kmsan_vmap_pages_range_noflush(addr, end, prot, pages, in vmap_pages_range_noflush()
681 return __vmap_pages_range_noflush(addr, end, prot, pages, page_shift); in vmap_pages_range_noflush()
696 int vmap_pages_range(unsigned long addr, unsigned long end, in vmap_pages_range() argument
701 err = vmap_pages_range_noflush(addr, end, prot, pages, page_shift); in vmap_pages_range()
702 flush_cache_vmap(addr, end); in vmap_pages_range()
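vmap_pages_range() (lines 696-702) is the flush-aware wrapper over the noflush variant above; a reconstruction from the matches (note the cache flush happens even if mapping partially failed):

	int vmap_pages_range(unsigned long addr, unsigned long end,
			pgprot_t prot, struct page **pages, unsigned int page_shift)
	{
		int err;

		err = vmap_pages_range_noflush(addr, end, prot, pages, page_shift);
		flush_cache_vmap(addr, end);
		return err;
	}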
718 if (start < (unsigned long)area->addr || in check_sparse_vm_area()
719 (void *)end > area->addr + get_vm_area_size(area)) in check_sparse_vm_area()
766 unsigned long addr = (unsigned long)kasan_reset_tag(x); in is_vmalloc_or_module_addr() local
767 if (addr >= MODULES_VADDR && addr < MODULES_END) in is_vmalloc_or_module_addr()
781 unsigned long addr = (unsigned long) vmalloc_addr; in vmalloc_to_page() local
783 pgd_t *pgd = pgd_offset_k(addr); in vmalloc_to_page()
802 p4d = p4d_offset(pgd, addr); in vmalloc_to_page()
806 return p4d_page(*p4d) + ((addr & ~P4D_MASK) >> PAGE_SHIFT); in vmalloc_to_page()
810 pud = pud_offset(p4d, addr); in vmalloc_to_page()
814 return pud_page(*pud) + ((addr & ~PUD_MASK) >> PAGE_SHIFT); in vmalloc_to_page()
818 pmd = pmd_offset(pud, addr); in vmalloc_to_page()
822 return pmd_page(*pmd) + ((addr & ~PMD_MASK) >> PAGE_SHIFT); in vmalloc_to_page()
826 ptep = pte_offset_kernel(pmd, addr); in vmalloc_to_page()
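vmalloc_to_page() (lines 781-826) walks the kernel page tables by hand: at the p4d/pud/pmd levels a leaf (huge) entry short-circuits to the leaf's base page plus ((addr & ~LEVEL_MASK) >> PAGE_SHIFT), as the three return statements above show; otherwise it descends. The final PTE step, sketched under that reading:

	ptep = pte_offset_kernel(pmd, addr);
	pte = ptep_get(ptep);
	if (pte_present(pte))
		page = pte_page(pte);	/* struct page backing the mapping */

	return page;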
949 addr_to_node_id(unsigned long addr) in addr_to_node_id() argument
951 return (addr / vmap_zone_size) % nr_vmap_nodes; in addr_to_node_id()
955 addr_to_node(unsigned long addr) in addr_to_node() argument
957 return &vmap_nodes[addr_to_node_id(addr)]; in addr_to_node()
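addr_to_node_id()/addr_to_node() (lines 949-957) stripe the address space across the per-node vmap structures by plain arithmetic, with vmap_zone_size as the stripe width and nr_vmap_nodes as the number of buckets; roughly:

	static unsigned int addr_to_node_id(unsigned long addr)
	{
		return (addr / vmap_zone_size) % nr_vmap_nodes;
	}

	static struct vmap_node *addr_to_node(unsigned long addr)
	{
		return &vmap_nodes[addr_to_node_id(addr)];
	}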
1059 static struct vmap_area *__find_vmap_area(unsigned long addr, struct rb_root *root) in __find_vmap_area() argument
1063 addr = (unsigned long)kasan_reset_tag((void *)addr); in __find_vmap_area()
1069 if (addr < va->va_start) in __find_vmap_area()
1071 else if (addr >= va->va_end) in __find_vmap_area()
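__find_vmap_area() (lines 1059-1071) is an ordinary rbtree binary search keyed on the [va_start, va_end) interval: left while addr < va_start, right while addr >= va_end, otherwise the node covers addr. A sketch under that assumption:

	static struct vmap_area *__find_vmap_area(unsigned long addr, struct rb_root *root)
	{
		struct rb_node *n = root->rb_node;

		addr = (unsigned long)kasan_reset_tag((void *)addr);

		while (n) {
			struct vmap_area *va = rb_entry(n, struct vmap_area, rb_node);

			if (addr < va->va_start)
				n = n->rb_left;
			else if (addr >= va->va_end)
				n = n->rb_right;
			else
				return va;	/* va_start <= addr < va_end */
		}

		return NULL;
	}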
1082 __find_vmap_area_exceed_addr(unsigned long addr, struct rb_root *root) in __find_vmap_area_exceed_addr() argument
1087 addr = (unsigned long)kasan_reset_tag((void *)addr); in __find_vmap_area_exceed_addr()
1093 if (tmp->va_end > addr) { in __find_vmap_area_exceed_addr()
1095 if (tmp->va_start <= addr) in __find_vmap_area_exceed_addr()
1114 find_vmap_area_exceed_addr_lock(unsigned long addr, struct vmap_area **va) in find_vmap_area_exceed_addr_lock() argument
1124 *va = __find_vmap_area_exceed_addr(addr, &vn->busy.root); in find_vmap_area_exceed_addr_lock()
1970 unsigned long *addr, unsigned int *vn_id) in node_alloc() argument
1975 *addr = -EINVAL; in node_alloc()
1990 *addr = va->va_start; in node_alloc()
1999 vm->addr = (void *)va->va_start; in setup_vmalloc_vm()
2018 unsigned long addr; in alloc_vmap_area() local
2039 va = node_alloc(size, align, vstart, vend, &addr, &vn_id); in alloc_vmap_area()
2055 if (IS_ERR_VALUE(addr)) { in alloc_vmap_area()
2057 addr = __alloc_vmap_area(&free_vmap_area_root, &free_vmap_area_list, in alloc_vmap_area()
2062 trace_alloc_vmap_area(addr, size, align, vstart, vend, IS_ERR_VALUE(addr)); in alloc_vmap_area()
2068 if (IS_ERR_VALUE(addr)) in alloc_vmap_area()
2071 va->va_start = addr; in alloc_vmap_area()
2072 va->va_end = addr + size; in alloc_vmap_area()
2077 vm->addr = (void *)va->va_start; in alloc_vmap_area()
2092 ret = kasan_populate_vmalloc(addr, size); in alloc_vmap_area()
2444 struct vmap_area *find_vmap_area(unsigned long addr) in find_vmap_area() argument
2466 i = j = addr_to_node_id(addr); in find_vmap_area()
2471 va = __find_vmap_area(addr, &vn->busy.root); in find_vmap_area()
2481 static struct vmap_area *find_unlink_vmap_area(unsigned long addr) in find_unlink_vmap_area() argument
2490 i = j = addr_to_node_id(addr); in find_unlink_vmap_area()
2495 va = __find_vmap_area(addr, &vn->busy.root); in find_unlink_vmap_area()
2611 addr_to_vb_xa(unsigned long addr) in addr_to_vb_xa() argument
2613 int index = (addr / VMAP_BLOCK_SIZE) % nr_cpu_ids; in addr_to_vb_xa()
2633 static unsigned long addr_to_vb_idx(unsigned long addr) in addr_to_vb_idx() argument
2635 addr -= VMALLOC_START & ~(VMAP_BLOCK_SIZE-1); in addr_to_vb_idx()
2636 addr /= VMAP_BLOCK_SIZE; in addr_to_vb_idx()
2637 return addr; in addr_to_vb_idx()
2642 unsigned long addr; in vmap_block_vaddr() local
2644 addr = va_start + (pages_off << PAGE_SHIFT); in vmap_block_vaddr()
2645 BUG_ON(addr_to_vb_idx(addr) != addr_to_vb_idx(va_start)); in vmap_block_vaddr()
2646 return (void *)addr; in vmap_block_vaddr()
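Lines 2611-2646 pin down the vmap-block addressing scheme: addr_to_vb_xa() selects a per-CPU xarray by (addr / VMAP_BLOCK_SIZE) % nr_cpu_ids, addr_to_vb_idx() rebases the address to the start of the block area and divides by the block size, and vmap_block_vaddr() goes the other way from a block start plus a page offset. A condensed reconstruction of the latter two:

	static unsigned long addr_to_vb_idx(unsigned long addr)
	{
		addr -= VMALLOC_START & ~(VMAP_BLOCK_SIZE - 1);
		return addr / VMAP_BLOCK_SIZE;
	}

	static void *vmap_block_vaddr(unsigned long va_start, unsigned long pages_off)
	{
		unsigned long addr = va_start + (pages_off << PAGE_SHIFT);

		/* The offset must stay inside the block that va_start belongs to. */
		BUG_ON(addr_to_vb_idx(addr) != addr_to_vb_idx(va_start));
		return (void *)addr;
	}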
2862 static void vb_free(unsigned long addr, unsigned long size) in vb_free() argument
2872 flush_cache_vunmap(addr, addr + size); in vb_free()
2875 offset = (addr & (VMAP_BLOCK_SIZE - 1)) >> PAGE_SHIFT; in vb_free()
2877 xa = addr_to_vb_xa(addr); in vb_free()
2878 vb = xa_load(xa, addr_to_vb_idx(addr)); in vb_free()
2884 vunmap_range_noflush(addr, addr + size); in vb_free()
2887 flush_tlb_kernel_range(addr, addr + size); in vb_free()
2983 unsigned long addr = (unsigned long)kasan_reset_tag(mem); in vm_unmap_ram() local
2987 BUG_ON(!addr); in vm_unmap_ram()
2988 BUG_ON(addr < VMALLOC_START); in vm_unmap_ram()
2989 BUG_ON(addr > VMALLOC_END); in vm_unmap_ram()
2990 BUG_ON(!PAGE_ALIGNED(addr)); in vm_unmap_ram()
2996 vb_free(addr, size); in vm_unmap_ram()
3000 va = find_unlink_vmap_area(addr); in vm_unmap_ram()
3026 unsigned long addr; in vm_map_ram() local
3033 addr = (unsigned long)mem; in vm_map_ram()
3043 addr = va->va_start; in vm_map_ram()
3044 mem = (void *)addr; in vm_map_ram()
3047 if (vmap_pages_range(addr, addr + size, PAGE_KERNEL, in vm_map_ram()
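For context on the vm_map_ram()/vm_unmap_ram() internals matched above, a hedged usage sketch of the exported pair (vm_map_ram(pages, count, node) and vm_unmap_ram(mem, count)); the caller-side names here are illustrative:

	/* Map 'count' physically scattered pages into a contiguous kernel VA range. */
	void *mem = vm_map_ram(pages, count, NUMA_NO_NODE);

	if (!mem)
		return -ENOMEM;

	memset(mem, 0, count * PAGE_SIZE);	/* use the mapping */

	vm_unmap_ram(mem, count);		/* count must match the vm_map_ram() call */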
3105 if (tmp->addr >= vm->addr) { in vm_area_add_early()
3106 BUG_ON(tmp->addr < vm->addr + vm->size); in vm_area_add_early()
3109 BUG_ON(tmp->addr + tmp->size > vm->addr); in vm_area_add_early()
3129 unsigned long addr = ALIGN(VMALLOC_START, align); in vm_area_register_early() local
3135 if ((unsigned long)cur->addr - addr >= vm->size) in vm_area_register_early()
3137 addr = ALIGN((unsigned long)cur->addr + cur->size, align); in vm_area_register_early()
3140 BUG_ON(addr > VMALLOC_END - vm->size); in vm_area_register_early()
3141 vm->addr = (void *)addr; in vm_area_register_early()
3144 kasan_populate_early_vm_area_shadow(vm->addr, vm->size); in vm_area_register_early()
3202 area->addr = kasan_unpoison_vmalloc(area->addr, requested_size, in __get_vm_area_node()
3253 struct vm_struct *find_vm_area(const void *addr) in find_vm_area() argument
3257 va = find_vmap_area((unsigned long)addr); in find_vm_area()
3274 struct vm_struct *remove_vm_area(const void *addr) in remove_vm_area() argument
3281 if (WARN(!PAGE_ALIGNED(addr), "Trying to vfree() bad address (%p)\n", in remove_vm_area()
3282 addr)) in remove_vm_area()
3285 va = find_unlink_vmap_area((unsigned long)addr); in remove_vm_area()
3290 debug_check_no_locks_freed(vm->addr, get_vm_area_size(vm)); in remove_vm_area()
3291 debug_check_no_obj_freed(vm->addr, get_vm_area_size(vm)); in remove_vm_area()
3293 kasan_poison_vmalloc(vm->addr, get_vm_area_size(vm)); in remove_vm_area()
3325 unsigned long addr = (unsigned long)page_address(area->pages[i]); in vm_reset_perms() local
3327 if (addr) { in vm_reset_perms()
3331 start = min(addr, start); in vm_reset_perms()
3332 end = max(addr + page_size, end); in vm_reset_perms()
3363 void vfree_atomic(const void *addr) in vfree_atomic() argument
3368 kmemleak_free(addr); in vfree_atomic()
3376 if (addr && llist_add((struct llist_node *)addr, &p->list)) in vfree_atomic()
3397 void vfree(const void *addr) in vfree() argument
3403 vfree_atomic(addr); in vfree()
3408 kmemleak_free(addr); in vfree()
3411 if (!addr) in vfree()
3414 vm = remove_vm_area(addr); in vfree()
3417 addr); in vfree()
3453 void vunmap(const void *addr) in vunmap() argument
3460 if (!addr) in vunmap()
3462 vm = remove_vm_area(addr); in vunmap()
3465 addr); in vunmap()
3491 unsigned long addr; in vmap() local
3514 addr = (unsigned long)area->addr; in vmap()
3515 if (vmap_pages_range(addr, addr + size, pgprot_nx(prot), in vmap()
3517 vunmap(area->addr); in vmap()
3525 return area->addr; in vmap()
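And a similar hedged usage sketch for vmap() itself (exported as vmap(pages, count, flags, prot)), whose internals are matched above; VM_MAP plus PAGE_KERNEL is the usual combination for a plain kernel mapping:

	/* Make 'count' scattered pages appear virtually contiguous. */
	void *buf = vmap(pages, count, VM_MAP, PAGE_KERNEL);

	if (!buf)
		return -ENOMEM;
	/* ... access buf[0 .. count * PAGE_SIZE) ... */
	vunmap(buf);	/* drops the mapping; the pages themselves are untouched */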
3536 static int vmap_pfn_apply(pte_t *pte, unsigned long addr, void *private) in vmap_pfn_apply() argument
3546 set_pte_at(&init_mm, addr, pte, ptent); in vmap_pfn_apply()
3570 if (apply_to_page_range(&init_mm, (unsigned long)area->addr, in vmap_pfn()
3576 flush_cache_vmap((unsigned long)area->addr, in vmap_pfn()
3577 (unsigned long)area->addr + count * PAGE_SIZE); in vmap_pfn()
3579 return area->addr; in vmap_pfn()
3680 unsigned long addr = (unsigned long)area->addr; in __vmalloc_area_node() local
3762 ret = vmap_pages_range(addr, addr + size, prot, area->pages, in __vmalloc_area_node()
3780 return area->addr; in __vmalloc_area_node()
3783 vfree(area->addr); in __vmalloc_area_node()
3910 area->addr = kasan_unpoison_vmalloc(area->addr, size, kasan_flags); in __vmalloc_node_range_noprof()
3922 return area->addr; in __vmalloc_node_range_noprof()
4257 const char *addr, size_t count) in aligned_vread_iter() argument
4266 offset = offset_in_page(addr); in aligned_vread_iter()
4270 page = vmalloc_to_page(addr); in aligned_vread_iter()
4285 addr += copied; in aligned_vread_iter()
4300 static size_t vmap_ram_vread_iter(struct iov_iter *iter, const char *addr, in vmap_ram_vread_iter() argument
4316 return aligned_vread_iter(iter, addr, count); in vmap_ram_vread_iter()
4324 xa = addr_to_vb_xa((unsigned long) addr); in vmap_ram_vread_iter()
4325 vb = xa_load(xa, addr_to_vb_idx((unsigned long)addr)); in vmap_ram_vread_iter()
4343 if (addr < start) { in vmap_ram_vread_iter()
4344 size_t to_zero = min_t(size_t, start - addr, remains); in vmap_ram_vread_iter()
4347 addr += zeroed; in vmap_ram_vread_iter()
4355 offset = offset_in_page(addr); in vmap_ram_vread_iter()
4362 addr += copied; in vmap_ram_vread_iter()
4404 long vread_iter(struct iov_iter *iter, const char *addr, size_t count) in vread_iter() argument
4413 addr = kasan_reset_tag(addr); in vread_iter()
4416 if ((unsigned long) addr + count < count) in vread_iter()
4417 count = -(unsigned long) addr; in vread_iter()
4421 vn = find_vmap_area_exceed_addr_lock((unsigned long) addr, &va); in vread_iter()
4426 if ((unsigned long)addr + remains <= va->va_start) in vread_iter()
4455 if (addr >= vaddr + size) in vread_iter()
4458 if (addr < vaddr) { in vread_iter()
4459 size_t to_zero = min_t(size_t, vaddr - addr, remains); in vread_iter()
4462 addr += zeroed; in vread_iter()
4469 n = vaddr + size - addr; in vread_iter()
4474 copied = vmap_ram_vread_iter(iter, addr, n, flags); in vread_iter()
4476 copied = aligned_vread_iter(iter, addr, n); in vread_iter()
4480 addr += copied; in vread_iter()
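One terse idiom in the vread_iter() hits deserves a note: the guard at lines 4416-4417 clamps count so that addr + count cannot wrap past the top of the address space. In isolation, with a comment:

	/* If (addr + count) would overflow, read only up to the end of the
	 * address space: -(unsigned long)addr is the number of bytes left. */
	if ((unsigned long)addr + count < count)
		count = -(unsigned long)addr;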
4582 int remap_vmalloc_range(struct vm_area_struct *vma, void *addr, in remap_vmalloc_range() argument
4586 addr, pgoff, in remap_vmalloc_range()
4594 ret = remove_vm_area(area->addr); in free_vm_area()
4616 pvm_find_va_enclose_addr(unsigned long addr) in pvm_find_va_enclose_addr() argument
4626 if (tmp->va_start <= addr) { in pvm_find_va_enclose_addr()
4628 if (tmp->va_end >= addr) in pvm_find_va_enclose_addr()
4654 unsigned long addr; in pvm_determine_end_from_reverse() local
4659 addr = min((*va)->va_end & ~(align - 1), vmalloc_end); in pvm_determine_end_from_reverse()
4660 if ((*va)->va_start < addr) in pvm_determine_end_from_reverse()
4661 return addr; in pvm_determine_end_from_reverse()
4851 vms[area]->addr = kasan_unpoison_vmalloc(vms[area]->addr, in pcpu_get_vm_areas()
4957 unsigned long addr; in vmalloc_dump_obj() local
4960 addr = PAGE_ALIGN((unsigned long) object); in vmalloc_dump_obj()
4961 vn = addr_to_node(addr); in vmalloc_dump_obj()
4966 va = __find_vmap_area(addr, &vn->busy.root); in vmalloc_dump_obj()
4973 addr = (unsigned long) vm->addr; in vmalloc_dump_obj()
4979 nr_pages, addr, caller); in vmalloc_dump_obj()
5057 v->addr, v->addr + v->size, v->size); in vmalloc_info_show()
5129 if ((unsigned long) busy->addr - vmap_start > 0) { in vmap_init_free_space()
5133 free->va_end = (unsigned long) busy->addr; in vmap_init_free_space()
5141 vmap_start = (unsigned long) busy->addr + busy->size; in vmap_init_free_space()
5273 va->va_start = (unsigned long)tmp->addr; in vmalloc_init()