Lines matching refs: region  (mm/nommu.c)

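These references are all to the !MMU memory-mapping code, where struct
vm_region describes the real backing allocation shared by one or more VMAs.
A sketch of the structure, reconstructed from the fields used below; the
authoritative definition lives in include/linux/mm_types.h and varies
slightly between kernel versions:

    struct vm_region {
        struct rb_node vm_rb;       /* link in the global region tree */
        vm_flags_t vm_flags;        /* VMA vm_flags */
        unsigned long vm_start;     /* start address of region */
        unsigned long vm_end;       /* region initialised to here */
        unsigned long vm_top;       /* region allocated to here */
        unsigned long vm_pgoff;     /* offset in vm_file of vm_start */
        struct file *vm_file;       /* the backing file, or NULL */
        int vm_usage;               /* usage count, guarded by nommu_region_sem */
        bool vm_icache_flushed : 1; /* icache flushed for this region? */
    };

The distinction between vm_end (end of the requested mapping) and vm_top
(end of the pages actually allocated) recurs throughout the lines below.
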
424 struct vm_region *region, *last; in validate_nommu_regions() local
436 region = rb_entry(p, struct vm_region, vm_rb); in validate_nommu_regions()
439 BUG_ON(region->vm_end <= region->vm_start); in validate_nommu_regions()
440 BUG_ON(region->vm_top < region->vm_end); in validate_nommu_regions()
441 BUG_ON(region->vm_start < last->vm_top); in validate_nommu_regions()
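
validate_nommu_regions() walks the region tree in ascending address order
and asserts that every region is sane. A minimal reconstruction of the loop
around lines 436-441, assuming the usual rbtree iteration (the rb_node
locals p and lastp, like region and last from line 424, are elided here):

    lastp = rb_first(&nommu_region_tree);
    if (!lastp)
        return;

    while ((p = rb_next(lastp))) {
        region = rb_entry(p, struct vm_region, vm_rb);
        last = rb_entry(lastp, struct vm_region, vm_rb);

        BUG_ON(region->vm_end <= region->vm_start); /* region is non-empty */
        BUG_ON(region->vm_top < region->vm_end);    /* allocation covers the mapping */
        BUG_ON(region->vm_start < last->vm_top);    /* no overlap with predecessor */

        lastp = p;
    }
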
455 static void add_nommu_region(struct vm_region *region) in add_nommu_region() argument
467 if (region->vm_start < pregion->vm_start) in add_nommu_region()
469 else if (region->vm_start > pregion->vm_start) in add_nommu_region()
471 else if (pregion == region) in add_nommu_region()
477 rb_link_node(&region->vm_rb, parent, p); in add_nommu_region()
478 rb_insert_color(&region->vm_rb, &nommu_region_tree); in add_nommu_region()
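
add_nommu_region() is a textbook rbtree insertion keyed on vm_start: descend
left or right until an empty link is found, then link and recolour. The
pregion == region branch makes re-adding a region already in the tree a
no-op; two distinct regions starting at the same address would be a bug.
Sketch of the descent, assuming the caller holds nommu_region_sem for
writing (local declarations elided):

    parent = NULL;
    p = &nommu_region_tree.rb_node;
    while (*p) {
        parent = *p;
        pregion = rb_entry(parent, struct vm_region, vm_rb);
        if (region->vm_start < pregion->vm_start)
            p = &(*p)->rb_left;
        else if (region->vm_start > pregion->vm_start)
            p = &(*p)->rb_right;
        else if (pregion == region)
            return;                 /* already in the tree */
        else
            BUG();                  /* two regions at one address */
    }

    rb_link_node(&region->vm_rb, parent, p);
    rb_insert_color(&region->vm_rb, &nommu_region_tree);
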
486 static void delete_nommu_region(struct vm_region *region) in delete_nommu_region() argument
491 rb_erase(&region->vm_rb, &nommu_region_tree); in delete_nommu_region()
514 static void __put_nommu_region(struct vm_region *region) in __put_nommu_region() argument
519 if (--region->vm_usage == 0) { in __put_nommu_region()
520 if (region->vm_top > region->vm_start) in __put_nommu_region()
521 delete_nommu_region(region); in __put_nommu_region()
524 if (region->vm_file) in __put_nommu_region()
525 fput(region->vm_file); in __put_nommu_region()
529 if (region->vm_flags & VM_MAPPED_COPY) in __put_nommu_region()
530 free_page_series(region->vm_start, region->vm_top); in __put_nommu_region()
531 kmem_cache_free(vm_region_jar, region); in __put_nommu_region()
540 static void put_nommu_region(struct vm_region *region) in put_nommu_region() argument
543 __put_nommu_region(region); in put_nommu_region()
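
delete_nommu_region() is essentially an rb_erase() of region->vm_rb, and
put_nommu_region() is the locked wrapper that takes nommu_region_sem before
calling __put_nommu_region(). The interesting part is the final put: the
region is unlinked (only if it was ever added, i.e. vm_top > vm_start), the
lock is dropped, and then the file reference and any privately copied pages
are released. A reconstruction, assuming __put_nommu_region() is entered
with nommu_region_sem held for writing and releases it on all paths:

    if (--region->vm_usage == 0) {
        if (region->vm_top > region->vm_start)
            delete_nommu_region(region);
        up_write(&nommu_region_sem);

        if (region->vm_file)
            fput(region->vm_file);

        /* pages mapped directly out of the pagecache (ramfs/tmpfs) or IO
         * memory must not be released here; only private copies are ours */
        if (region->vm_flags & VM_MAPPED_COPY)
            free_page_series(region->vm_start, region->vm_top);
        kmem_cache_free(vm_region_jar, region);
    } else {
        up_write(&nommu_region_sem);
    }
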
962 struct vm_region *region, in do_mmap_private() argument
1009 region->vm_flags = vma->vm_flags |= VM_MAPPED_COPY; in do_mmap_private()
1010 region->vm_start = (unsigned long) base; in do_mmap_private()
1011 region->vm_end = region->vm_start + len; in do_mmap_private()
1012 region->vm_top = region->vm_start + (total << PAGE_SHIFT); in do_mmap_private()
1014 vma->vm_start = region->vm_start; in do_mmap_private()
1015 vma->vm_end = region->vm_start + len; in do_mmap_private()
1039 free_page_series(region->vm_start, region->vm_top); in do_mmap_private()
1040 region->vm_start = vma->vm_start = 0; in do_mmap_private()
1041 region->vm_end = vma->vm_end = 0; in do_mmap_private()
1042 region->vm_top = 0; in do_mmap_private()
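
do_mmap_private() backs a private mapping with freshly allocated pages,
which is where the vm_end/vm_top split comes from: vm_end covers the len
that was asked for, vm_top the total pages actually allocated, which may be
more because the allocator works in power-of-two orders. On failure, lines
1040-1042 zero all three bounds so that the eventual __put_nommu_region()
finds nothing to free. A sketch of the success path, with the page-trimming
details elided:

    order = get_order(len);
    total = 1 << order;         /* pages allocated, possibly more than len needs */
    /* ... optionally trim 'total' back toward len >> PAGE_SHIFT ... */

    base = alloc_pages_exact(total << PAGE_SHIFT, GFP_KERNEL);
    if (!base)
        goto enomem;

    region->vm_flags = vma->vm_flags |= VM_MAPPED_COPY;
    region->vm_start = (unsigned long) base;
    region->vm_end = region->vm_start + len;                    /* requested */
    region->vm_top = region->vm_start + (total << PAGE_SHIFT);  /* allocated */
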
1065 struct vm_region *region; in do_mmap() local
1089 region = kmem_cache_zalloc(vm_region_jar, GFP_KERNEL); in do_mmap()
1090 if (!region) in do_mmap()
1097 region->vm_usage = 1; in do_mmap()
1098 region->vm_flags = vm_flags; in do_mmap()
1099 region->vm_pgoff = pgoff; in do_mmap()
1105 region->vm_file = get_file(file); in do_mmap()
1177 fput(region->vm_file); in do_mmap()
1178 kmem_cache_free(vm_region_jar, region); in do_mmap()
1179 region = pregion; in do_mmap()
1205 vma->vm_start = region->vm_start = addr; in do_mmap()
1206 vma->vm_end = region->vm_end = addr + len; in do_mmap()
1211 vma->vm_region = region; in do_mmap()
1219 ret = do_mmap_private(vma, region, len, capabilities); in do_mmap()
1222 add_nommu_region(region); in do_mmap()
1228 memset((void *)region->vm_start, 0, in do_mmap()
1229 region->vm_end - region->vm_start); in do_mmap()
1241 if (vma->vm_flags & VM_EXEC && !region->vm_icache_flushed) { in do_mmap()
1242 flush_icache_user_range(region->vm_start, region->vm_end); in do_mmap()
1243 region->vm_icache_flushed = true; in do_mmap()
1253 if (region->vm_file) in do_mmap()
1254 fput(region->vm_file); in do_mmap()
1255 kmem_cache_free(vm_region_jar, region); in do_mmap()
1268 kmem_cache_free(vm_region_jar, region); in do_mmap()
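
do_mmap() allocates a zeroed region from the vm_region_jar slab with an
initial vm_usage of 1 (lines 1089-1099) and pins the backing file (line
1105). For a sharable file mapping it first searches the region tree for an
existing region covering the same file object; on a hit, the freshly
allocated region is discarded in favour of the shared one (lines
1177-1179). A reconstruction of that reuse path; the exact offset
arithmetic varies between kernel versions:

    /* we've found a region we can share */
    pregion->vm_usage++;
    vma->vm_region = pregion;

    start = pregion->vm_start;
    start += (pgoff - pregion->vm_pgoff) << PAGE_SHIFT;
    vma->vm_start = start;
    vma->vm_end = start + len;

    fput(region->vm_file);                  /* drop the unused region... */
    kmem_cache_free(vm_region_jar, region);
    region = pregion;                       /* ...and adopt the shared one */

Failing that, the new region's bounds are fixed (lines 1205-1206), the
mapping is set up (directly for real file mappings, or via
do_mmap_private()), and the region enters the tree (line 1222); anonymous
memory is then zeroed (lines 1228-1229) and executable mappings get a
one-time icache flush per region (lines 1241-1243). The error paths at
lines 1253-1255 and 1268 undo exactly what was set up: drop the file
reference if one was taken, then return the region to the slab.
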
1342 struct vm_region *region; in split_vma() local
1353 region = kmem_cache_alloc(vm_region_jar, GFP_KERNEL); in split_vma()
1354 if (!region) in split_vma()
1359 kmem_cache_free(vm_region_jar, region); in split_vma()
1364 *region = *vma->vm_region; in split_vma()
1365 new->vm_region = region; in split_vma()
1370 region->vm_top = region->vm_end = new->vm_end = addr; in split_vma()
1372 region->vm_start = new->vm_start = addr; in split_vma()
1373 region->vm_pgoff = new->vm_pgoff += npages; in split_vma()
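
split_vma() gives the new VMA its own region: the old region is copied
wholesale into a second slab object (line 1364) and the two copies are then
cut at addr. Which copy keeps which half depends on the function's
new_below flag, and vm_pgoff must be advanced for whichever half no longer
starts at the original vm_start. Sketch around lines 1364-1373:

    *region = *vma->vm_region;      /* start from a copy of the old region */
    new->vm_region = region;

    npages = (addr - vma->vm_start) >> PAGE_SHIFT;

    if (new_below) {
        /* new VMA takes the lower half: cap it at addr */
        region->vm_top = region->vm_end = new->vm_end = addr;
    } else {
        /* new VMA takes the upper half: start at addr, shift the pgoff */
        region->vm_start = new->vm_start = addr;
        region->vm_pgoff = new->vm_pgoff += npages;
    }

The old region is trimmed to the complementary half, and both regions are
re-inserted into the tree under nommu_region_sem.
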
1405 struct vm_region *region; in shrink_vma() local
1417 region = vma->vm_region; in shrink_vma()
1418 BUG_ON(region->vm_usage != 1); in shrink_vma()
1421 delete_nommu_region(region); in shrink_vma()
1422 if (from > region->vm_start) { in shrink_vma()
1423 to = region->vm_top; in shrink_vma()
1424 region->vm_top = region->vm_end = from; in shrink_vma()
1426 region->vm_start = to; in shrink_vma()
1428 add_nommu_region(region); in shrink_vma()
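
shrink_vma() refuses to operate on shared regions (the BUG_ON at line 1418
insists on a single user) and then cuts the region down at one end or the
other, re-inserting it so the tree stays ordered. Reconstructed from lines
1417-1428, with from/to being the boundaries of the span being dropped:

    down_write(&nommu_region_sem);
    delete_nommu_region(region);
    if (from > region->vm_start) {
        /* trimming the top: keep [vm_start, from) */
        to = region->vm_top;
        region->vm_top = region->vm_end = from;
    } else {
        /* trimming the bottom: keep [to, vm_top) */
        region->vm_start = to;
    }
    add_nommu_region(region);
    up_write(&nommu_region_sem);

    free_page_series(from, to);     /* hand back the pages that were cut off */
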
1744 struct vm_region *region; in nommu_shrink_inode_mappings() local
1775 region = vma->vm_region; in nommu_shrink_inode_mappings()
1776 r_size = region->vm_top - region->vm_start; in nommu_shrink_inode_mappings()
1777 r_top = (region->vm_pgoff << PAGE_SHIFT) + r_size; in nommu_shrink_inode_mappings()
1780 region->vm_top -= r_top - newsize; in nommu_shrink_inode_mappings()
1781 if (region->vm_end > region->vm_top) in nommu_shrink_inode_mappings()
1782 region->vm_end = region->vm_top; in nommu_shrink_inode_mappings()
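
nommu_shrink_inode_mappings() runs on file truncation: for every shared
mapping of the inode it works out how far the region's allocation reaches
into the file (r_top, as a file offset) and, where that passes the new EOF,
pulls vm_top back and clamps vm_end underneath it, preserving the
vm_end <= vm_top invariant that validate_nommu_regions() checks. A sketch
of the per-region trim from lines 1776-1782; the iteration over the inode's
i_mmap tree and the freeing of the excess pages are elided:

    region = vma->vm_region;
    r_size = region->vm_top - region->vm_start;        /* bytes allocated */
    r_top = (region->vm_pgoff << PAGE_SHIFT) + r_size; /* file offset of top */

    if (r_top > newsize) {
        region->vm_top -= r_top - newsize;  /* pull allocation back to new EOF */
        if (region->vm_end > region->vm_top)
            region->vm_end = region->vm_top;
    }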