Lines matching refs: area (in mm/vmalloc.c)

2476 struct vm_struct *area; in __get_vm_area_node() local
2488 area = kzalloc_node(sizeof(*area), gfp_mask & GFP_RECLAIM_MASK, node); in __get_vm_area_node()
2489 if (unlikely(!area)) in __get_vm_area_node()
2497 kfree(area); in __get_vm_area_node()
2501 setup_vmalloc_vm(area, va, flags, caller); in __get_vm_area_node()
2512 area->addr = kasan_unpoison_vmalloc(area->addr, requested_size, in __get_vm_area_node()
2515 return area; in __get_vm_area_node()
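
These __get_vm_area_node() matches trace the whole lifecycle of a vm_struct descriptor: zero-allocated from slab, freed again if no vmap_area can be reserved, initialized through setup_vmalloc_vm(), and KASAN-unpoisoned before being returned. A condensed sketch, assuming the elided lines only do size/alignment fixups and the vmap_area reservation (parameter lists and local declarations abbreviated):

    static struct vm_struct *__get_vm_area_node(unsigned long size, ...)
    {
            struct vmap_area *va;
            struct vm_struct *area;

            /* 2488: the descriptor itself is a small slab object */
            area = kzalloc_node(sizeof(*area), gfp_mask & GFP_RECLAIM_MASK, node);
            if (unlikely(!area))
                    return NULL;

            va = alloc_vmap_area(size, align, start, end, node, gfp_mask);
            if (IS_ERR(va)) {
                    kfree(area);                            /* 2497 */
                    return NULL;
            }

            setup_vmalloc_vm(area, va, flags, caller);      /* 2501 */

            /*
             * 2512: non-VM_ALLOC mappings are unpoisoned here;
             * VM_ALLOC ones wait until __vmalloc_node_range().
             */
            if (!(flags & VM_ALLOC))
                    area->addr = kasan_unpoison_vmalloc(area->addr,
                                    requested_size, KASAN_VMALLOC_PROT_NORMAL);
            return area;                                    /* 2515 */
    }
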
2609 static inline void set_area_direct_map(const struct vm_struct *area, in set_area_direct_map() argument
2615 for (i = 0; i < area->nr_pages; i++) in set_area_direct_map()
2616 if (page_address(area->pages[i])) in set_area_direct_map()
2617 set_direct_map(area->pages[i]); in set_area_direct_map()
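
The set_area_direct_map() matches are essentially the whole helper; only its second parameter is missing, and that is visible at lines 2652/2654 where function pointers are passed in. Reconstructed:

    static inline void set_area_direct_map(const struct vm_struct *area,
                                int (*set_direct_map)(struct page *page))
    {
            int i;

            /* HUGE_VMALLOC passes small pages to set_direct_map() */
            for (i = 0; i < area->nr_pages; i++)
                    if (page_address(area->pages[i]))
                            set_direct_map(area->pages[i]);
    }
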
2623 static void vm_reset_perms(struct vm_struct *area) in vm_reset_perms() argument
2626 unsigned int page_order = vm_area_page_order(area); in vm_reset_perms()
2634 for (i = 0; i < area->nr_pages; i += 1U << page_order) { in vm_reset_perms()
2635 unsigned long addr = (unsigned long)page_address(area->pages[i]); in vm_reset_perms()
2652 set_area_direct_map(area, set_direct_map_invalid_noflush); in vm_reset_perms()
2654 set_area_direct_map(area, set_direct_map_default_noflush); in vm_reset_perms()
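
vm_reset_perms() walks the backing pages in 1 << page_order strides to compute the direct-map span that must be flushed, then flips those direct-map entries to invalid, flushes, and restores the default permissions. A sketch of the control flow, under the assumption that the elided lines only accumulate the start/end bounds:

    static void vm_reset_perms(struct vm_struct *area)
    {
            unsigned long start = ULONG_MAX, end = 0;
            unsigned int page_order = vm_area_page_order(area);
            int flush_dmap = 0;
            int i;

            /* 2634-2635: find the direct-map span covering all pages */
            for (i = 0; i < area->nr_pages; i += 1U << page_order) {
                    unsigned long addr =
                            (unsigned long)page_address(area->pages[i]);

                    if (addr) {
                            start = min(addr, start);
                            end = max(addr + (PAGE_SIZE << page_order), end);
                            flush_dmap = 1;
                    }
            }

            /*
             * 2652-2654: invalidate first so nothing can be re-cached
             * after the TLB flush, then restore default protections.
             */
            set_area_direct_map(area, set_direct_map_invalid_noflush);
            _vm_unmap_aliases(start, end, flush_dmap);
            set_area_direct_map(area, set_direct_map_default_noflush);
    }
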
2797 struct vm_struct *area; in vmap() local
2817 area = get_vm_area_caller(size, flags, __builtin_return_address(0)); in vmap()
2818 if (!area) in vmap()
2821 addr = (unsigned long)area->addr; in vmap()
2824 vunmap(area->addr); in vmap()
2829 area->pages = pages; in vmap()
2830 area->nr_pages = count; in vmap()
2832 return area->addr; in vmap()
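
In vmap() the descriptor comes from get_vm_area_caller(); on mapping failure the area is torn down again with vunmap(), and on success the caller's page array is recorded in area->pages/area->nr_pages (the real code only does this when VM_MAP_PUT_PAGES is set, so a later vfree() can release the pages). A minimal usage sketch from the caller's side; map_my_pages() is an illustrative name, not kernel API:

    #include <linux/vmalloc.h>

    /* Map count pages, allocated elsewhere, virtually contiguously;
     * the caller keeps ownership of the pages. */
    static void *map_my_pages(struct page **pages, unsigned int count)
    {
            void *addr = vmap(pages, count, VM_MAP, PAGE_KERNEL);

            if (!addr)
                    return NULL;    /* vmap() returns NULL on failure */
            /* ... use addr[0 .. count * PAGE_SIZE) ... */
            return addr;            /* release later with vunmap(addr) */
    }
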
2865 struct vm_struct *area; in vmap_pfn() local
2867 area = get_vm_area_caller(count * PAGE_SIZE, VM_IOREMAP, in vmap_pfn()
2869 if (!area) in vmap_pfn()
2871 if (apply_to_page_range(&init_mm, (unsigned long)area->addr, in vmap_pfn()
2873 free_vm_area(area); in vmap_pfn()
2876 return area->addr; in vmap_pfn()
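
vmap_pfn() reuses the same machinery for memory that has no struct page: it reserves a VM_IOREMAP area, installs PTEs for the raw PFNs via apply_to_page_range(), and releases the area with free_vm_area() if that fails. A hedged usage fragment; NPFNS and the pfns array are illustrative:

    unsigned long pfns[NPFNS];      /* assumed filled with device PFNs */
    void *addr;

    addr = vmap_pfn(pfns, NPFNS, pgprot_noncached(PAGE_KERNEL));
    if (!addr)
            return -ENOMEM;         /* vmap_pfn() returns NULL on failure */
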
2973 static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask, in __vmalloc_area_node() argument
2979 unsigned long addr = (unsigned long)area->addr; in __vmalloc_area_node()
2980 unsigned long size = get_vm_area_size(area); in __vmalloc_area_node()
2994 area->pages = __vmalloc_node(array_size, 1, nested_gfp, node, in __vmalloc_area_node()
2995 area->caller); in __vmalloc_area_node()
2997 area->pages = kmalloc_node(array_size, nested_gfp, node); in __vmalloc_area_node()
3000 if (!area->pages) { in __vmalloc_area_node()
3004 free_vm_area(area); in __vmalloc_area_node()
3008 set_vm_area_page_order(area, page_shift - PAGE_SHIFT); in __vmalloc_area_node()
3009 page_order = vm_area_page_order(area); in __vmalloc_area_node()
3011 area->nr_pages = vm_area_alloc_pages(gfp_mask | __GFP_NOWARN, in __vmalloc_area_node()
3012 node, page_order, nr_small_pages, area->pages); in __vmalloc_area_node()
3014 atomic_long_add(area->nr_pages, &nr_vmalloc_pages); in __vmalloc_area_node()
3018 for (i = 0; i < area->nr_pages; i++) in __vmalloc_area_node()
3019 mod_memcg_page_state(area->pages[i], MEMCG_VMALLOC, 1); in __vmalloc_area_node()
3026 if (area->nr_pages != nr_small_pages) { in __vmalloc_area_node()
3029 area->nr_pages * PAGE_SIZE, page_order); in __vmalloc_area_node()
3043 ret = vmap_pages_range(addr, addr + size, prot, area->pages, in __vmalloc_area_node()
3057 area->nr_pages * PAGE_SIZE); in __vmalloc_area_node()
3061 return area->addr; in __vmalloc_area_node()
3064 vfree(area->addr); in __vmalloc_area_node()
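
__vmalloc_area_node() is the page-backed half of vmalloc. The matches show the shape: allocate the area->pages array (recursing through __vmalloc_node() when the array itself is larger than a page), record the page order, bulk-allocate the backing pages, account them, and wire them up with vmap_pages_range(). Condensed, with local declarations, warn_alloc() reporting, memcg accounting, and the NOFS/NOIO handling elided:

    /* 2994-2997: the pages array may itself need vmalloc */
    if (array_size > PAGE_SIZE)
            area->pages = __vmalloc_node(array_size, 1, nested_gfp,
                                         node, area->caller);
    else
            area->pages = kmalloc_node(array_size, nested_gfp, node);
    if (!area->pages) {
            free_vm_area(area);                             /* 3004 */
            return NULL;
    }

    set_vm_area_page_order(area, page_shift - PAGE_SHIFT);
    page_order = vm_area_page_order(area);

    area->nr_pages = vm_area_alloc_pages(gfp_mask | __GFP_NOWARN,
                            node, page_order, nr_small_pages, area->pages);
    atomic_long_add(area->nr_pages, &nr_vmalloc_pages);

    /* 3026: a short allocation is a failure, not a partial success */
    if (area->nr_pages != nr_small_pages)
            goto fail;

    /* 3043: map the pages into the reserved virtual range */
    if (vmap_pages_range(addr, addr + size, prot, area->pages,
                         page_shift) < 0)
            goto fail;

    return area->addr;                                      /* 3061 */
fail:
    vfree(area->addr);                                      /* 3064 */
    return NULL;
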
3101 struct vm_struct *area; in __vmalloc_node_range() local
3141 area = __get_vm_area_node(real_size, align, shift, VM_ALLOC | in __vmalloc_node_range()
3144 if (!area) { in __vmalloc_node_range()
3181 ret = __vmalloc_area_node(area, gfp_mask, prot, shift, node); in __vmalloc_node_range()
3198 area->addr = kasan_unpoison_vmalloc(area->addr, real_size, kasan_flags); in __vmalloc_node_range()
3205 clear_vm_uninitialized_flag(area); in __vmalloc_node_range()
3209 kmemleak_vmalloc(area, size, gfp_mask); in __vmalloc_node_range()
3211 return area->addr; in __vmalloc_node_range()
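
__vmalloc_node_range() glues the two halves together: reserve the area with __get_vm_area_node(), populate it with __vmalloc_area_node(), then unpoison for KASAN, drop the VM_UNINITIALIZED flag, and register the object with kmemleak. Reduced to the matched calls:

    area = __get_vm_area_node(real_size, align, shift,
                              VM_ALLOC | VM_UNINITIALIZED | vm_flags,
                              start, end, node, gfp_mask, caller);
    if (!area)
            goto fail;

    ret = __vmalloc_area_node(area, gfp_mask, prot, shift, node);
    if (!ret)
            goto fail;

    /* 3198: VM_ALLOC areas are unpoisoned here, not in __get_vm_area_node() */
    area->addr = kasan_unpoison_vmalloc(area->addr, real_size, kasan_flags);

    /*
     * 3205: VM_UNINITIALIZED hid the area from /proc/vmallocinfo while
     * it was half set up; clear it only once setup is complete.
     */
    clear_vm_uninitialized_flag(area);
    kmemleak_vmalloc(area, size, gfp_mask);

    return area->addr;
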
3656 struct vm_struct *area; in remap_vmalloc_range_partial() local
3668 area = find_vm_area(kaddr); in remap_vmalloc_range_partial()
3669 if (!area) in remap_vmalloc_range_partial()
3672 if (!(area->flags & (VM_USERMAP | VM_DMA_COHERENT))) in remap_vmalloc_range_partial()
3676 end_index > get_vm_area_size(area)) in remap_vmalloc_range_partial()
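
remap_vmalloc_range_partial() refuses to map anything into userspace unless the area was explicitly marked VM_USERMAP or VM_DMA_COHERENT, and it bounds-checks the requested window against get_vm_area_size(). Most drivers go through the remap_vmalloc_range() wrapper; a hedged mmap-handler sketch, where my_mmap and buf are illustrative and buf is assumed to come from vmalloc_user() (which sets VM_USERMAP):

    static int my_mmap(struct file *file, struct vm_area_struct *vma)
    {
            /* maps the whole vma; fails with -EINVAL if buf is too small
             * or was not allocated with a VM_USERMAP-setting helper */
            return remap_vmalloc_range(vma, buf, vma->vm_pgoff);
    }
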
3721 void free_vm_area(struct vm_struct *area) in free_vm_area() argument
3724 ret = remove_vm_area(area->addr); in free_vm_area()
3725 BUG_ON(ret != area); in free_vm_area()
3726 kfree(area); in free_vm_area()
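
free_vm_area() is the teardown pair of get_vm_area(): remove_vm_area() unmaps the range and returns the descriptor, a BUG_ON() asserts it is the same one the caller passed in, and the descriptor is kfree()d. Hedged usage sketch:

    struct vm_struct *area;

    area = get_vm_area(PAGE_SIZE, VM_IOREMAP);
    if (!area)
            return -ENOMEM;
    /* ... install custom mappings into area->addr ... */
    free_vm_area(area);     /* unmaps the range and frees the descriptor */
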
3830 int area, area2, last_area, term_area; in pcpu_get_vm_areas() local
3836 for (last_area = 0, area = 0; area < nr_vms; area++) { in pcpu_get_vm_areas()
3837 start = offsets[area]; in pcpu_get_vm_areas()
3838 end = start + sizes[area]; in pcpu_get_vm_areas()
3841 BUG_ON(!IS_ALIGNED(offsets[area], align)); in pcpu_get_vm_areas()
3842 BUG_ON(!IS_ALIGNED(sizes[area], align)); in pcpu_get_vm_areas()
3846 last_area = area; in pcpu_get_vm_areas()
3848 for (area2 = area + 1; area2 < nr_vms; area2++) { in pcpu_get_vm_areas()
3867 for (area = 0; area < nr_vms; area++) { in pcpu_get_vm_areas()
3868 vas[area] = kmem_cache_zalloc(vmap_area_cachep, GFP_KERNEL); in pcpu_get_vm_areas()
3869 vms[area] = kzalloc(sizeof(struct vm_struct), GFP_KERNEL); in pcpu_get_vm_areas()
3870 if (!vas[area] || !vms[area]) in pcpu_get_vm_areas()
3877 area = term_area = last_area; in pcpu_get_vm_areas()
3878 start = offsets[area]; in pcpu_get_vm_areas()
3879 end = start + sizes[area]; in pcpu_get_vm_areas()
3904 term_area = area; in pcpu_get_vm_areas()
3914 term_area = area; in pcpu_get_vm_areas()
3922 area = (area + nr_vms - 1) % nr_vms; in pcpu_get_vm_areas()
3923 if (area == term_area) in pcpu_get_vm_areas()
3926 start = offsets[area]; in pcpu_get_vm_areas()
3927 end = start + sizes[area]; in pcpu_get_vm_areas()
3932 for (area = 0; area < nr_vms; area++) { in pcpu_get_vm_areas()
3935 start = base + offsets[area]; in pcpu_get_vm_areas()
3936 size = sizes[area]; in pcpu_get_vm_areas()
3951 va = vas[area]; in pcpu_get_vm_areas()
3959 for (area = 0; area < nr_vms; area++) { in pcpu_get_vm_areas()
3960 if (kasan_populate_vmalloc(vas[area]->va_start, sizes[area])) in pcpu_get_vm_areas()
3966 for (area = 0; area < nr_vms; area++) { in pcpu_get_vm_areas()
3967 insert_vmap_area(vas[area], &vmap_area_root, &vmap_area_list); in pcpu_get_vm_areas()
3969 setup_vmalloc_vm_locked(vms[area], vas[area], VM_ALLOC, in pcpu_get_vm_areas()
3980 for (area = 0; area < nr_vms; area++) in pcpu_get_vm_areas()
3981 vms[area]->addr = kasan_unpoison_vmalloc(vms[area]->addr, in pcpu_get_vm_areas()
3982 vms[area]->size, KASAN_VMALLOC_PROT_NORMAL); in pcpu_get_vm_areas()
3994 while (area--) { in pcpu_get_vm_areas()
3995 orig_start = vas[area]->va_start; in pcpu_get_vm_areas()
3996 orig_end = vas[area]->va_end; in pcpu_get_vm_areas()
3997 va = merge_or_add_vmap_area_augment(vas[area], &free_vmap_area_root, in pcpu_get_vm_areas()
4002 vas[area] = NULL; in pcpu_get_vm_areas()
4012 for (area = 0; area < nr_vms; area++) { in pcpu_get_vm_areas()
4013 if (vas[area]) in pcpu_get_vm_areas()
4016 vas[area] = kmem_cache_zalloc( in pcpu_get_vm_areas()
4018 if (!vas[area]) in pcpu_get_vm_areas()
4026 for (area = 0; area < nr_vms; area++) { in pcpu_get_vm_areas()
4027 if (vas[area]) in pcpu_get_vm_areas()
4028 kmem_cache_free(vmap_area_cachep, vas[area]); in pcpu_get_vm_areas()
4030 kfree(vms[area]); in pcpu_get_vm_areas()
4044 for (area = 0; area < nr_vms; area++) { in pcpu_get_vm_areas()
4045 orig_start = vas[area]->va_start; in pcpu_get_vm_areas()
4046 orig_end = vas[area]->va_end; in pcpu_get_vm_areas()
4047 va = merge_or_add_vmap_area_augment(vas[area], &free_vmap_area_root, in pcpu_get_vm_areas()
4052 vas[area] = NULL; in pcpu_get_vm_areas()
4053 kfree(vms[area]); in pcpu_get_vm_areas()
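
pcpu_get_vm_areas() serves the percpu allocator, which needs nr_vms areas that are congruent: every area must land at one common base plus its offsets[] entry. After the overlap sanity checks (3836-3848) and descriptor allocation (3867-3870), the search walks free space top-down starting from the last area, pulling back whenever some area's slot is not free; term_area remembers the last area that imposed a constraint, and the backwards walk at 3922-3923 terminates once a full cycle adds no new one. Both recovery paths (3994-4002, 4044-4053) hand the carved-out ranges back with merge_or_add_vmap_area_augment() before freeing the descriptors. A hedged sketch of the caller's side; variable names are illustrative:

    /* one entry per percpu group: offset from the common base and size,
     * both aligned to atom_size */
    struct vm_struct **vms;

    vms = pcpu_get_vm_areas(offsets, sizes, nr_groups, atom_size);
    if (!vms)
            return -ENOMEM;
    /* now vms[g]->addr == common_base + offsets[g] for every group g */
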