Lines matching references to the identifier 'area' (kernel mm/vmalloc.c cross-reference):

332 struct vm_struct *area; in ioremap_page_range() local
334 area = find_vm_area((void *)addr); in ioremap_page_range()
335 if (!area || !(area->flags & VM_IOREMAP)) { in ioremap_page_range()
339 if (addr != (unsigned long)area->addr || in ioremap_page_range()
340 (void *)end != area->addr + get_vm_area_size(area)) { in ioremap_page_range()
342 addr, end, (long)area->addr, in ioremap_page_range()
343 (long)area->addr + get_vm_area_size(area)); in ioremap_page_range()
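
The checks above reject an ioremap_page_range() call unless the target range exactly covers a vm_struct that was reserved with VM_IOREMAP. A minimal sketch of the usual calling pattern, loosely modeled on the generic ioremap helpers (my_ioremap is a hypothetical name, and offset handling for unaligned physical addresses is omitted):

    #include <linux/io.h>
    #include <linux/vmalloc.h>

    /* Hypothetical wrapper: reserve a VM_IOREMAP area and map MMIO into it. */
    static void __iomem *my_ioremap(phys_addr_t phys_addr, size_t size)
    {
        struct vm_struct *area;
        unsigned long vaddr;

        size = PAGE_ALIGN(size);
        area = get_vm_area_caller(size, VM_IOREMAP, __builtin_return_address(0));
        if (!area)
            return NULL;
        vaddr = (unsigned long)area->addr;

        /* The range must match the area exactly, as the checks above require. */
        if (ioremap_page_range(vaddr, vaddr + size, phys_addr,
                               pgprot_noncached(PAGE_KERNEL))) {
            free_vm_area(area);
            return NULL;
        }
        return (void __iomem *)vaddr;
    }
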
666 static int check_sparse_vm_area(struct vm_struct *area, unsigned long start, in check_sparse_vm_area() argument
670 if (WARN_ON_ONCE(area->flags & VM_FLUSH_RESET_PERMS)) in check_sparse_vm_area()
672 if (WARN_ON_ONCE(area->flags & VM_NO_GUARD)) in check_sparse_vm_area()
674 if (WARN_ON_ONCE(!(area->flags & VM_SPARSE))) in check_sparse_vm_area()
678 if (start < (unsigned long)area->addr || in check_sparse_vm_area()
679 (void *)end > area->addr + get_vm_area_size(area)) in check_sparse_vm_area()
691 int vm_area_map_pages(struct vm_struct *area, unsigned long start, in vm_area_map_pages() argument
696 err = check_sparse_vm_area(area, start, end); in vm_area_map_pages()
709 void vm_area_unmap_pages(struct vm_struct *area, unsigned long start, in vm_area_unmap_pages() argument
712 if (check_sparse_vm_area(area, start, end)) in vm_area_unmap_pages()
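
check_sparse_vm_area() gates the two helpers above: vm_area_map_pages() and vm_area_unmap_pages() let a caller populate and depopulate page ranges on demand inside an address-space-only VM_SPARSE area. A minimal sketch of that pattern (the area size, offset, and single-page mapping are illustrative, and cleanup of the area itself is omitted):

    #include <linux/gfp.h>
    #include <linux/sizes.h>
    #include <linux/vmalloc.h>

    /* Reserve a sparse area up front, then map one page into the middle of it. */
    static int sparse_map_one_page(void)
    {
        struct vm_struct *area;
        struct page *page;
        unsigned long start;
        int err;

        area = get_vm_area(SZ_4M, VM_SPARSE);   /* address space only, no pages yet */
        if (!area)
            return -ENOMEM;

        page = alloc_page(GFP_KERNEL | __GFP_ZERO);
        if (!page)
            return -ENOMEM;

        start = (unsigned long)area->addr + SZ_1M;
        err = vm_area_map_pages(area, start, start + PAGE_SIZE, &page);
        if (err)
            return err;

        /* ... use the mapping at 'start' ... */

        vm_area_unmap_pages(area, start, start + PAGE_SIZE);
        __free_page(page);
        return 0;
    }
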
3094 struct vm_struct *area; in __get_vm_area_node() local
3106 area = kzalloc_node(sizeof(*area), gfp_mask & GFP_RECLAIM_MASK, node); in __get_vm_area_node()
3107 if (unlikely(!area)) in __get_vm_area_node()
3113 area->flags = flags; in __get_vm_area_node()
3114 area->caller = caller; in __get_vm_area_node()
3116 va = alloc_vmap_area(size, align, start, end, node, gfp_mask, 0, area); in __get_vm_area_node()
3118 kfree(area); in __get_vm_area_node()
3131 area->addr = kasan_unpoison_vmalloc(area->addr, requested_size, in __get_vm_area_node()
3134 return area; in __get_vm_area_node()
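
__get_vm_area_node() is the internal allocator behind the public get_vm_area*() helpers: it allocates the vm_struct, records flags and caller, and carves a vmap_area out of the requested address range. A minimal sketch of reserving and releasing an area through the public wrapper (what happens in between is illustrative):

    #include <linux/printk.h>
    #include <linux/vmalloc.h>

    static void reserve_and_release(void)
    {
        struct vm_struct *area;

        /* Ends up in __get_vm_area_node() with the default vmalloc range. */
        area = get_vm_area(16 * PAGE_SIZE, VM_MAP);
        if (!area)
            return;

        pr_info("reserved %lu bytes at %p\n", get_vm_area_size(area), area->addr);

        /* Releases the address range and frees the vm_struct. */
        free_vm_area(area);
    }
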
3228 static inline void set_area_direct_map(const struct vm_struct *area, in set_area_direct_map() argument
3234 for (i = 0; i < area->nr_pages; i++) in set_area_direct_map()
3235 if (page_address(area->pages[i])) in set_area_direct_map()
3236 set_direct_map(area->pages[i]); in set_area_direct_map()
3242 static void vm_reset_perms(struct vm_struct *area) in vm_reset_perms() argument
3245 unsigned int page_order = vm_area_page_order(area); in vm_reset_perms()
3253 for (i = 0; i < area->nr_pages; i += 1U << page_order) { in vm_reset_perms()
3254 unsigned long addr = (unsigned long)page_address(area->pages[i]); in vm_reset_perms()
3271 set_area_direct_map(area, set_direct_map_invalid_noflush); in vm_reset_perms()
3273 set_area_direct_map(area, set_direct_map_default_noflush); in vm_reset_perms()
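
vm_reset_perms() runs when an area allocated with VM_FLUSH_RESET_PERMS is freed: it flushes the vmalloc mapping and walks the backing pages via set_area_direct_map() to restore their direct-map permissions. A minimal sketch of an allocation that opts into this behavior (the wrapper name and protection choice are illustrative; real users such as executable-text allocators add arch-specific setup):

    #include <linux/gfp.h>
    #include <linux/numa.h>
    #include <linux/vmalloc.h>

    /* Memory whose direct-map permissions will be restored when vfree() runs. */
    static void *alloc_resettable(unsigned long size)
    {
        return __vmalloc_node_range(size, PAGE_SIZE, VMALLOC_START, VMALLOC_END,
                                    GFP_KERNEL, PAGE_KERNEL, VM_FLUSH_RESET_PERMS,
                                    NUMA_NO_NODE, __builtin_return_address(0));
    }
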
3416 struct vm_struct *area; in vmap() local
3436 area = get_vm_area_caller(size, flags, __builtin_return_address(0)); in vmap()
3437 if (!area) in vmap()
3440 addr = (unsigned long)area->addr; in vmap()
3443 vunmap(area->addr); in vmap()
3448 area->pages = pages; in vmap()
3449 area->nr_pages = count; in vmap()
3451 return area->addr; in vmap()
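
vmap() takes an existing page array, reserves a VM_MAP area through get_vm_area_caller(), and installs the pages into it; vunmap() tears the mapping down without freeing the pages. A minimal usage sketch (the page count and the memset are illustrative):

    #include <linux/gfp.h>
    #include <linux/string.h>
    #include <linux/vmalloc.h>

    #define NR_DEMO_PAGES 4

    static void vmap_demo(void)
    {
        struct page *pages[NR_DEMO_PAGES] = {};
        void *vaddr;
        int i;

        for (i = 0; i < NR_DEMO_PAGES; i++) {
            pages[i] = alloc_page(GFP_KERNEL);
            if (!pages[i])
                goto out;
        }

        /* Map the scattered pages at one contiguous kernel virtual address. */
        vaddr = vmap(pages, NR_DEMO_PAGES, VM_MAP, PAGE_KERNEL);
        if (vaddr) {
            memset(vaddr, 0, NR_DEMO_PAGES * PAGE_SIZE);
            vunmap(vaddr);      /* drops the mapping, not the pages */
        }
    out:
        for (i = 0; i < NR_DEMO_PAGES; i++)
            if (pages[i])
                __free_page(pages[i]);
    }
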
3490 struct vm_struct *area; in vmap_pfn() local
3492 area = get_vm_area_caller(count * PAGE_SIZE, VM_IOREMAP, in vmap_pfn()
3494 if (!area) in vmap_pfn()
3496 if (apply_to_page_range(&init_mm, (unsigned long)area->addr, in vmap_pfn()
3498 free_vm_area(area); in vmap_pfn()
3502 flush_cache_vmap((unsigned long)area->addr, in vmap_pfn()
3503 (unsigned long)area->addr + count * PAGE_SIZE); in vmap_pfn()
3505 return area->addr; in vmap_pfn()
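
vmap_pfn() is the variant for memory without struct page backing: it reserves a VM_IOREMAP area and installs PTEs for the given PFN array with apply_to_page_range(). A minimal sketch (the PFN source and caching attribute are illustrative):

    #include <linux/slab.h>
    #include <linux/vmalloc.h>

    /* Map 'count' contiguous PFNs that have no struct page backing. */
    static void *map_device_pfns(unsigned long base_pfn, unsigned int count)
    {
        unsigned long *pfns;
        void *vaddr;
        unsigned int i;

        pfns = kcalloc(count, sizeof(*pfns), GFP_KERNEL);
        if (!pfns)
            return NULL;
        for (i = 0; i < count; i++)
            pfns[i] = base_pfn + i;

        vaddr = vmap_pfn(pfns, count, pgprot_writecombine(PAGE_KERNEL));
        kfree(pfns);    /* the PTEs are installed during the call */
        return vaddr;   /* unmap later with vunmap() */
    }
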
3600 static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask, in __vmalloc_area_node() argument
3606 unsigned long addr = (unsigned long)area->addr; in __vmalloc_area_node()
3607 unsigned long size = get_vm_area_size(area); in __vmalloc_area_node()
3621 area->pages = __vmalloc_node_noprof(array_size, 1, nested_gfp, node, in __vmalloc_area_node()
3622 area->caller); in __vmalloc_area_node()
3624 area->pages = kmalloc_node_noprof(array_size, nested_gfp, node); in __vmalloc_area_node()
3627 if (!area->pages) { in __vmalloc_area_node()
3631 free_vm_area(area); in __vmalloc_area_node()
3635 set_vm_area_page_order(area, page_shift - PAGE_SHIFT); in __vmalloc_area_node()
3636 page_order = vm_area_page_order(area); in __vmalloc_area_node()
3646 area->nr_pages = vm_area_alloc_pages((page_order ? in __vmalloc_area_node()
3648 node, page_order, nr_small_pages, area->pages); in __vmalloc_area_node()
3650 atomic_long_add(area->nr_pages, &nr_vmalloc_pages); in __vmalloc_area_node()
3654 for (i = 0; i < area->nr_pages; i++) in __vmalloc_area_node()
3655 mod_memcg_page_state(area->pages[i], MEMCG_VMALLOC, 1); in __vmalloc_area_node()
3662 if (area->nr_pages != nr_small_pages) { in __vmalloc_area_node()
3676 area->nr_pages * PAGE_SIZE); in __vmalloc_area_node()
3690 ret = vmap_pages_range(addr, addr + size, prot, area->pages, in __vmalloc_area_node()
3704 area->nr_pages * PAGE_SIZE); in __vmalloc_area_node()
3708 return area->addr; in __vmalloc_area_node()
3711 vfree(area->addr); in __vmalloc_area_node()
3748 struct vm_struct *area; in __vmalloc_node_range_noprof() local
3788 area = __get_vm_area_node(real_size, align, shift, VM_ALLOC | in __vmalloc_node_range_noprof()
3791 if (!area) { in __vmalloc_node_range_noprof()
3828 ret = __vmalloc_area_node(area, gfp_mask, prot, shift, node); in __vmalloc_node_range_noprof()
3845 area->addr = kasan_unpoison_vmalloc(area->addr, real_size, kasan_flags); in __vmalloc_node_range_noprof()
3852 clear_vm_uninitialized_flag(area); in __vmalloc_node_range_noprof()
3856 kmemleak_vmalloc(area, size, gfp_mask); in __vmalloc_node_range_noprof()
3858 return area->addr; in __vmalloc_node_range_noprof()
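
__vmalloc_node_range_noprof() ties the pieces together: it reserves the area with __get_vm_area_node(), populates it in __vmalloc_area_node(), and returns area->addr, which is what every vmalloc() variant ultimately hands back. A minimal sketch of the public entry point:

    #include <linux/vmalloc.h>

    static int use_vmalloc(void)
    {
        /* Virtually contiguous, physically scattered; released with vfree(). */
        unsigned char *buf = vmalloc(1 << 20);

        if (!buf)
            return -ENOMEM;
        buf[0] = 0xaa;
        vfree(buf);
        return 0;
    }
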
4442 struct vm_struct *area; in remap_vmalloc_range_partial() local
4454 area = find_vm_area(kaddr); in remap_vmalloc_range_partial()
4455 if (!area) in remap_vmalloc_range_partial()
4458 if (!(area->flags & (VM_USERMAP | VM_DMA_COHERENT))) in remap_vmalloc_range_partial()
4462 end_index > get_vm_area_size(area)) in remap_vmalloc_range_partial()
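
remap_vmalloc_range_partial() refuses to map an area into userspace unless it carries VM_USERMAP (or VM_DMA_COHERENT) and the requested window fits inside it. A minimal sketch of the usual pairing: allocate with vmalloc_user(), which sets VM_USERMAP, and expose the buffer from a driver mmap handler via the remap_vmalloc_range() wrapper (my_mmap and shared_buf are illustrative names):

    #include <linux/fs.h>
    #include <linux/mm.h>
    #include <linux/vmalloc.h>

    static void *shared_buf;    /* allocated elsewhere with vmalloc_user() */

    static int my_mmap(struct file *file, struct vm_area_struct *vma)
    {
        /* Maps shared_buf, starting at vma->vm_pgoff, into the user VMA. */
        return remap_vmalloc_range(vma, shared_buf, vma->vm_pgoff);
    }
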
4507 void free_vm_area(struct vm_struct *area) in free_vm_area() argument
4510 ret = remove_vm_area(area->addr); in free_vm_area()
4511 BUG_ON(ret != area); in free_vm_area()
4512 kfree(area); in free_vm_area()
4616 int area, area2, last_area, term_area; in pcpu_get_vm_areas() local
4622 for (last_area = 0, area = 0; area < nr_vms; area++) { in pcpu_get_vm_areas()
4623 start = offsets[area]; in pcpu_get_vm_areas()
4624 end = start + sizes[area]; in pcpu_get_vm_areas()
4627 BUG_ON(!IS_ALIGNED(offsets[area], align)); in pcpu_get_vm_areas()
4628 BUG_ON(!IS_ALIGNED(sizes[area], align)); in pcpu_get_vm_areas()
4632 last_area = area; in pcpu_get_vm_areas()
4634 for (area2 = area + 1; area2 < nr_vms; area2++) { in pcpu_get_vm_areas()
4653 for (area = 0; area < nr_vms; area++) { in pcpu_get_vm_areas()
4654 vas[area] = kmem_cache_zalloc(vmap_area_cachep, GFP_KERNEL); in pcpu_get_vm_areas()
4655 vms[area] = kzalloc(sizeof(struct vm_struct), GFP_KERNEL); in pcpu_get_vm_areas()
4656 if (!vas[area] || !vms[area]) in pcpu_get_vm_areas()
4663 area = term_area = last_area; in pcpu_get_vm_areas()
4664 start = offsets[area]; in pcpu_get_vm_areas()
4665 end = start + sizes[area]; in pcpu_get_vm_areas()
4690 term_area = area; in pcpu_get_vm_areas()
4700 term_area = area; in pcpu_get_vm_areas()
4708 area = (area + nr_vms - 1) % nr_vms; in pcpu_get_vm_areas()
4709 if (area == term_area) in pcpu_get_vm_areas()
4712 start = offsets[area]; in pcpu_get_vm_areas()
4713 end = start + sizes[area]; in pcpu_get_vm_areas()
4718 for (area = 0; area < nr_vms; area++) { in pcpu_get_vm_areas()
4721 start = base + offsets[area]; in pcpu_get_vm_areas()
4722 size = sizes[area]; in pcpu_get_vm_areas()
4736 va = vas[area]; in pcpu_get_vm_areas()
4744 for (area = 0; area < nr_vms; area++) { in pcpu_get_vm_areas()
4745 if (kasan_populate_vmalloc(vas[area]->va_start, sizes[area])) in pcpu_get_vm_areas()
4750 for (area = 0; area < nr_vms; area++) { in pcpu_get_vm_areas()
4751 struct vmap_node *vn = addr_to_node(vas[area]->va_start); in pcpu_get_vm_areas()
4754 insert_vmap_area(vas[area], &vn->busy.root, &vn->busy.head); in pcpu_get_vm_areas()
4755 setup_vmalloc_vm(vms[area], vas[area], VM_ALLOC, in pcpu_get_vm_areas()
4766 for (area = 0; area < nr_vms; area++) in pcpu_get_vm_areas()
4767 vms[area]->addr = kasan_unpoison_vmalloc(vms[area]->addr, in pcpu_get_vm_areas()
4768 vms[area]->size, KASAN_VMALLOC_PROT_NORMAL); in pcpu_get_vm_areas()
4780 while (area--) { in pcpu_get_vm_areas()
4781 orig_start = vas[area]->va_start; in pcpu_get_vm_areas()
4782 orig_end = vas[area]->va_end; in pcpu_get_vm_areas()
4783 va = merge_or_add_vmap_area_augment(vas[area], &free_vmap_area_root, in pcpu_get_vm_areas()
4788 vas[area] = NULL; in pcpu_get_vm_areas()
4798 for (area = 0; area < nr_vms; area++) { in pcpu_get_vm_areas()
4799 if (vas[area]) in pcpu_get_vm_areas()
4802 vas[area] = kmem_cache_zalloc( in pcpu_get_vm_areas()
4804 if (!vas[area]) in pcpu_get_vm_areas()
4812 for (area = 0; area < nr_vms; area++) { in pcpu_get_vm_areas()
4813 if (vas[area]) in pcpu_get_vm_areas()
4814 kmem_cache_free(vmap_area_cachep, vas[area]); in pcpu_get_vm_areas()
4816 kfree(vms[area]); in pcpu_get_vm_areas()
4830 for (area = 0; area < nr_vms; area++) { in pcpu_get_vm_areas()
4831 orig_start = vas[area]->va_start; in pcpu_get_vm_areas()
4832 orig_end = vas[area]->va_end; in pcpu_get_vm_areas()
4833 va = merge_or_add_vmap_area_augment(vas[area], &free_vmap_area_root, in pcpu_get_vm_areas()
4838 vas[area] = NULL; in pcpu_get_vm_areas()
4839 kfree(vms[area]); in pcpu_get_vm_areas()
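
pcpu_get_vm_areas() is the special-purpose allocator behind the percpu first chunk: the local 'area' index above walks the requested offset/size pairs, finds one base address from which they all fit, and returns one vm_struct per group. A minimal sketch of the call shape, assuming two 2 MiB groups (the real and only caller is mm/percpu.c, so this is purely illustrative):

    #include <linux/kernel.h>
    #include <linux/sizes.h>
    #include <linux/vmalloc.h>

    static int grab_two_grouped_areas(void)
    {
        /* Two groups of percpu units at fixed offsets from a common base. */
        unsigned long offsets[] = { 0, SZ_8M };
        unsigned long sizes[]   = { SZ_2M, SZ_2M };
        struct vm_struct **vms;

        vms = pcpu_get_vm_areas(offsets, sizes, ARRAY_SIZE(offsets), SZ_2M);
        if (!vms)
            return -ENOMEM;

        /* vms[0]->addr == base + offsets[0], vms[1]->addr == base + offsets[1] */

        pcpu_free_vm_areas(vms, ARRAY_SIZE(offsets));
        return 0;
    }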