Lines matching refs: old_len (mm/mremap.c)

572 		unsigned long old_addr, unsigned long old_len,  in move_vma()  argument
577 long to_account = new_len - old_len; in move_vma()
603 if (!err && vma->vm_end != old_addr + old_len) in move_vma()
604 err = vma->vm_ops->may_split(vma, old_addr + old_len); in move_vma()
616 err = ksm_madvise(vma, old_addr, old_addr + old_len, in move_vma()
635 moved_len = move_page_tables(vma, old_addr, new_vma, new_addr, old_len, in move_vma()
637 if (moved_len < old_len) { in move_vma()
652 old_len = new_len; in move_vma()
668 if (vma->vm_end > old_addr + old_len) in move_vma()
697 vma->vm_end == (old_addr + old_len)) in move_vma()
705 if (do_vmi_munmap(&vmi, mm, old_addr, old_len, uf_unmap, false) < 0) { in move_vma()
708 vm_acct_memory(old_len >> PAGE_SHIFT); in move_vma()
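
The tail of move_vma() is where old_len finally takes effect: move_page_tables() shifts old_len worth of page tables to new_addr (rolling back if moved_len < old_len), and the do_vmi_munmap() at line 705 then drops the old range. A hedged user-space sketch of that effect; the PROT_NONE guard page is only an assumption used here to rule out in-place expansion and force a move:

/* Illustration only: after a moving mremap() the old range (old_len bytes)
 * is unmapped, matching the do_vmi_munmap() at the end of move_vma(). */
#define _GNU_SOURCE
#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	long pg = sysconf(_SC_PAGESIZE);
	char *p = mmap(NULL, 2 * pg, PROT_NONE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (p == MAP_FAILED)
		return 1;
	/* First page usable, second page left PROT_NONE so the mapping
	 * cannot grow in place and has to be moved. */
	mprotect(p, pg, PROT_READ | PROT_WRITE);
	p[0] = 'x';

	char *q = mremap(p, pg, 4 * pg, MREMAP_MAYMOVE);
	if (q == MAP_FAILED)
		return 1;

	unsigned char vec[1];
	/* The old page is no longer mapped, so mincore() reports ENOMEM. */
	if (mincore(p, pg, vec) < 0)
		printf("old range gone: %s\n", strerror(errno));
	printf("moved copy at %p still holds '%c'\n", (void *)q, q[0]);
	return 0;
}
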
734 unsigned long old_len, unsigned long new_len, unsigned long flags) in vma_to_resize() argument
752 if (!old_len && !(vma->vm_flags & (VM_SHARED | VM_MAYSHARE))) { in vma_to_resize()
762 if (old_len > vma->vm_end - addr) in vma_to_resize()
765 if (new_len == old_len) in vma_to_resize()
777 if (mlock_future_check(mm, vma->vm_flags, new_len - old_len)) in vma_to_resize()
781 (new_len - old_len) >> PAGE_SHIFT)) in vma_to_resize()
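
vma_to_resize() is where the errno values behind these checks come from. As a hedged illustration (not from the kernel tree), the sketch below passes an old_len that runs one page past the end of the underlying VMA; by the old_len > vma->vm_end - addr check at line 762 the call is expected to fail with EFAULT. The PROT_NONE page is only there to keep the anonymous mapping from merging with a neighbouring VMA.

/* Illustration only: an old_len larger than the VMA it starts in fails
 * the old_len > vma->vm_end - addr check in vma_to_resize() (EFAULT). */
#define _GNU_SOURCE
#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	long pg = sysconf(_SC_PAGESIZE);
	/* One read-write page followed by a PROT_NONE page, so the two
	 * stay separate VMAs. */
	char *p = mmap(NULL, 2 * pg, PROT_NONE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (p == MAP_FAILED)
		return 1;
	mprotect(p, pg, PROT_READ | PROT_WRITE);

	/* old_len (2 pages) overruns the 1-page VMA at p. */
	void *q = mremap(p, 2 * pg, 4 * pg, 0);
	if (q == MAP_FAILED)
		printf("mremap rejected: %s\n", strerror(errno));
	return 0;
}
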
787 static unsigned long mremap_to(unsigned long addr, unsigned long old_len, in mremap_to() argument
805 if (addr + old_len > new_addr && new_addr + new_len > addr) in mremap_to()
831 if (old_len > new_len) { in mremap_to()
832 ret = do_munmap(mm, addr+new_len, old_len - new_len, uf_unmap); in mremap_to()
835 old_len = new_len; in mremap_to()
838 vma = vma_to_resize(addr, old_len, new_len, flags); in mremap_to()
846 !may_expand_vm(mm, vma->vm_flags, old_len >> PAGE_SHIFT)) { in mremap_to()
867 ret = move_vma(vma, addr, old_len, new_len, new_addr, locked, flags, uf, in mremap_to()
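
mremap_to() is the MREMAP_FIXED branch: it rejects a new range that overlaps the old one (line 805), trims the source first when old_len > new_len, then hands the clamped old_len on to vma_to_resize() and move_vma(). A hedged sketch of driving that path from user space; the PROT_NONE reservation is just one way to pick a free destination, since MREMAP_FIXED replaces whatever is mapped there:

/* Illustration only: MREMAP_FIXED | MREMAP_MAYMOVE takes the mremap_to()
 * path and moves old_len bytes to an explicitly chosen address. */
#define _GNU_SOURCE
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	long pg = sysconf(_SC_PAGESIZE);
	char *old = mmap(NULL, 4 * pg, PROT_READ | PROT_WRITE,
			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (old == MAP_FAILED)
		return 1;
	old[0] = 'x';

	/* Reserve a non-overlapping destination; MREMAP_FIXED will unmap and
	 * reuse it, so no explicit munmap() is needed. */
	char *target = mmap(NULL, 4 * pg, PROT_NONE,
			    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (target == MAP_FAILED)
		return 1;

	/* old_len == new_len, so mremap_to() skips the shrink step and goes
	 * straight to vma_to_resize() and move_vma(). */
	char *moved = mremap(old, 4 * pg, 4 * pg,
			     MREMAP_MAYMOVE | MREMAP_FIXED, target);
	if (moved == MAP_FAILED)
		return 1;
	printf("moved to %p, data intact: '%c'\n", (void *)moved, moved[0]);
	return 0;
}
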
895 SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len, in SYSCALL_DEFINE5() argument
931 (!(flags & MREMAP_MAYMOVE) || old_len != new_len)) in SYSCALL_DEFINE5()
938 old_len = PAGE_ALIGN(old_len); in SYSCALL_DEFINE5()
960 old_len = ALIGN(old_len, huge_page_size(h)); in SYSCALL_DEFINE5()
973 if (new_len > old_len) in SYSCALL_DEFINE5()
978 ret = mremap_to(addr, old_len, new_addr, new_len, in SYSCALL_DEFINE5()
990 if (old_len >= new_len) { in SYSCALL_DEFINE5()
995 old_len - new_len, &uf_unmap, true); in SYSCALL_DEFINE5()
999 } else if (retval < 0 && old_len != new_len) { in SYSCALL_DEFINE5()
1011 vma = vma_to_resize(addr, old_len, new_len, flags); in SYSCALL_DEFINE5()
1019 if (old_len == vma->vm_end - addr) { in SYSCALL_DEFINE5()
1021 if (vma_expandable(vma, new_len - old_len)) { in SYSCALL_DEFINE5()
1022 long pages = (new_len - old_len) >> PAGE_SHIFT; in SYSCALL_DEFINE5()
1023 unsigned long extension_start = addr + old_len; in SYSCALL_DEFINE5()
1096 ret = move_vma(vma, addr, old_len, new_len, new_addr, in SYSCALL_DEFINE5()
1106 if (locked && new_len > old_len) in SYSCALL_DEFINE5()
1107 mm_populate(new_addr + old_len, new_len - old_len); in SYSCALL_DEFINE5()
1109 mremap_userfaultfd_complete(&uf, addr, ret, old_len); in SYSCALL_DEFINE5()
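
The syscall body handles the remaining in-place cases: both lengths are page-aligned first (line 938), a pure shrink (old_len >= new_len, lines 990-995) is just an unmap of the tail, and growth tries vma_expandable() before falling back to a MREMAP_MAYMOVE move. A hedged sketch of those two cases, assuming nothing else in the process has claimed the pages freed by the shrink:

/* Illustration only: the shrink path (old_len >= new_len) and the
 * opportunistic in-place expansion path of the mremap() syscall. */
#define _GNU_SOURCE
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	long pg = sysconf(_SC_PAGESIZE);
	char *p = mmap(NULL, 4 * pg, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (p == MAP_FAILED)
		return 1;

	/* Shrink: old_len (4 pages) > new_len (1 page), so the kernel simply
	 * unmaps the trailing old_len - new_len bytes and returns addr. */
	char *q = mremap(p, 4 * pg, pg, 0);
	printf("shrunk: %p -> %p\n", (void *)p, (void *)q);

	/* Grow without MREMAP_MAYMOVE: succeeds only if the VMA can expand in
	 * place (the vma_expandable() test above); otherwise errno is ENOMEM. */
	q = mremap(q, pg, 2 * pg, 0);
	if (q == MAP_FAILED)
		perror("in-place growth not possible here");
	else
		printf("grew in place at %p\n", (void *)q);
	return 0;
}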