Searched refs: src_vma (Results 1 – 9 of 9), sorted by relevance

/linux/mm/
userfaultfd.c
    1026  struct vm_area_struct *src_vma,  in move_present_pte() argument
    1096  struct vm_area_struct *src_vma,  in move_zeropage_pte() argument
    1128  struct vm_area_struct *src_vma,  in move_pages_pte() argument
    1378  if (!(src_vma->vm_flags & VM_WRITE))  in validate_move_areas()
    1486  vma_end_read(src_vma);  in uffd_move_unlock()
    1487  if (src_vma != dst_vma)  in uffd_move_unlock()
    1511  mmap_assert_locked(src_vma->vm_mm);  in uffd_move_unlock()
    1628  if (src_vma->vm_flags & VM_SHARED)  in move_pages()
    1710  dst_pmdval, dst_vma, src_vma,  in move_pages()
    1731  dst_vma, src_vma,  in move_pages()
    [all …]
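The hits above trace the UFFDIO_MOVE ioctl path, in which userfaultfd remaps already-present pages from src_vma into dst_vma rather than copying their contents. Two of the visible checks are self-contained enough to model: validate_move_areas() requires VM_WRITE on the source, and move_pages() rejects VM_SHARED mappings. Below is a minimal userspace sketch of just that gate; struct toy_vma and move_src_ok() are made-up stand-ins, and while the two flag values match the kernel's generic definitions, nothing here is kernel code.

    #include <stdbool.h>
    #include <stdio.h>

    #define VM_WRITE  0x2UL   /* matches the kernel's generic value, hard-coded here */
    #define VM_SHARED 0x8UL

    struct toy_vma { unsigned long vm_flags; };

    /* Model of the two source-VMA checks visible in the hits. */
    static bool move_src_ok(const struct toy_vma *src_vma)
    {
        if (!(src_vma->vm_flags & VM_WRITE))   /* cf. validate_move_areas() */
            return false;
        if (src_vma->vm_flags & VM_SHARED)     /* cf. move_pages() */
            return false;
        return true;
    }

    int main(void)
    {
        struct toy_vma anon_rw = { VM_WRITE };
        struct toy_vma shared  = { VM_WRITE | VM_SHARED };

        printf("anon rw: %s\n", move_src_ok(&anon_rw) ? "movable" : "rejected");
        printf("shared:  %s\n", move_src_ok(&shared)  ? "movable" : "rejected");
        return 0;
    }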
memory.c
     962  if (src_vma->vm_flags & VM_SHARED)  in __copy_present_ptes()
    1002  if (src_vma->vm_flags & VM_SHARED)  in copy_present_ptes()
    1012  nr, src_vma))) {  in copy_present_ptes()
    1023  pte = pte_mkwrite(pte, src_vma);  in copy_present_ptes()
    1145  dst_vma, src_vma,  in copy_pte_range()
    1246  addr, dst_vma, src_vma);  in copy_pmd_range()
    1343  if (src_vma->anon_vma)  in vma_needs_copy()
    1361  unsigned long end = src_vma->vm_end;  in copy_page_range()
    1371  if (is_vm_hugetlb_page(src_vma))  in copy_page_range()
    1379  ret = track_pfn_copy(src_vma);  in copy_page_range()
    [all …]
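These memory.c hits are the fork()-time page-table copy: copy_page_range() descends through copy_pmd_range() and copy_pte_range() to the per-PTE helpers. The vma_needs_copy() hit points at the key shortcut: a VMA with no anon_vma (and no other special cases) needs no eager copy, because the child can simply re-fault its pages from the page cache. The sketch below models only the two conditions visible in the hits, with invented toy_* types; the real function also considers uffd-wp and pfn mappings.

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdio.h>

    struct toy_anon_vma { int degree; };

    struct toy_vma {
        struct toy_anon_vma *anon_vma;
        bool is_hugetlb;
    };

    /* cf. vma_needs_copy(): copy page tables eagerly only when they
     * cannot be reconstructed by faulting in the child later. */
    static bool toy_vma_needs_copy(const struct toy_vma *src_vma)
    {
        if (src_vma->is_hugetlb)      /* cf. is_vm_hugetlb_page(src_vma) */
            return true;
        if (src_vma->anon_vma)        /* anonymous pages already exist */
            return true;
        /* Plain file-backed VMA: the child can re-fault from the page cache. */
        return false;
    }

    int main(void)
    {
        struct toy_anon_vma av = { 0 };
        struct toy_vma file_map = { NULL, false };
        struct toy_vma anon_map = { &av, false };

        printf("file-backed: %s\n", toy_vma_needs_copy(&file_map) ? "copy now" : "lazy");
        printf("anonymous:   %s\n", toy_vma_needs_copy(&anon_map) ? "copy now" : "lazy");
        return 0;
    }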
hugetlb.c
    5208  struct vm_area_struct *src_vma)  in copy_hugetlb_page_range() argument
    5213  bool cow = is_cow_mapping(src_vma->vm_flags);  in copy_hugetlb_page_range()
    5214  struct hstate *h = hstate_vma(src_vma);  in copy_hugetlb_page_range()
    5223  src_vma->vm_start,  in copy_hugetlb_page_range()
    5224  src_vma->vm_end);  in copy_hugetlb_page_range()
    5226  vma_assert_write_locked(src_vma);  in copy_hugetlb_page_range()
    5235  hugetlb_vma_lock_read(src_vma);  in copy_hugetlb_page_range()
    5239  for (addr = src_vma->vm_start; addr < src_vma->vm_end; addr += sz) {  in copy_hugetlb_page_range()
    5241  src_pte = hugetlb_walk(src_vma, addr, sz);  in copy_hugetlb_page_range()
    5291  if (userfaultfd_wp(src_vma) && uffd_wp)  in copy_hugetlb_page_range()
    [all …]
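copy_hugetlb_page_range() duplicates a hugetlb mapping at fork() by walking src_vma from vm_start to vm_end in huge-page strides (sz comes from the hstate) and copying one page-table entry per step, as the line 5239 hit shows. The runnable sketch below reproduces only that loop shape; TOY_HPAGE_SIZE is a hard-coded 2 MiB stand-in for huge_page_size(h), and the locking, the hugetlb_walk() lookup, and the actual copy are all elided.

    #include <stdio.h>

    #define TOY_HPAGE_SIZE (2UL * 1024 * 1024)  /* one common hugetlb size */

    struct toy_vma { unsigned long vm_start, vm_end; };

    /* cf. the loop at hugetlb.c:5239, stripped to its stride pattern. */
    static void toy_copy_hugetlb_range(const struct toy_vma *src_vma)
    {
        unsigned long sz = TOY_HPAGE_SIZE;   /* kernel: huge_page_size(h) */

        for (unsigned long addr = src_vma->vm_start; addr < src_vma->vm_end; addr += sz)
            printf("would copy huge PTE at %#lx\n", addr);
    }

    int main(void)
    {
        struct toy_vma vma = { 0x40000000UL, 0x40000000UL + 4 * TOY_HPAGE_SIZE };
        toy_copy_hugetlb_range(&vma);
        return 0;
    }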
huge_memory.c
    1568  struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma)  in copy_huge_pmd() argument
    1591  VM_WARN_ON_ONCE(is_cow_mapping(src_vma->vm_flags) && pmd_write(pmd));  in copy_huge_pmd()
    1660  if (unlikely(folio_try_dup_anon_rmap_pmd(src_folio, src_page, src_vma))) {  in copy_huge_pmd()
    1666  __split_huge_pmd(src_vma, src_pmd, addr, false, NULL);  in copy_huge_pmd()
    2378  struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,  in move_pages_huge_pmd() argument
    2394  vma_assert_locked(src_vma);  in move_pages_huge_pmd()
    2428  flush_cache_range(src_vma, src_addr, src_addr + HPAGE_PMD_SIZE);  in move_pages_huge_pmd()
    2470  src_pmdval = pmdp_huge_clear_flush(src_vma, src_addr, src_pmd);  in move_pages_huge_pmd()
    2485  src_pmdval = pmdp_huge_clear_flush(src_vma, src_addr, src_pmd);  in move_pages_huge_pmd()
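move_pages_huge_pmd() is the PMD-sized arm of the same UFFDIO_MOVE path: it flushes caches for the source range, withdraws the source entry with pmdp_huge_clear_flush(), and installs the saved value at the destination, so a 2 MiB mapping moves without its data being copied. The toy below sketches that clear-then-install shape over plain integers; toy_pmd_t and toy_clear_flush() are invented stand-ins, and the kernel performs these steps under page-table locks with real TLB flushes.

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Stand-in for a page-table slot; a real pmd_t carries a PFN plus flags. */
    typedef uint64_t toy_pmd_t;

    /* cf. pmdp_huge_clear_flush(): read and zero the source slot as one step. */
    static toy_pmd_t toy_clear_flush(toy_pmd_t *slot)
    {
        toy_pmd_t old = *slot;
        *slot = 0;              /* the kernel also flushes the TLB here */
        return old;
    }

    int main(void)
    {
        toy_pmd_t src_pmd = 0xabc000ULL | 1;  /* pretend mapping, present bit set */
        toy_pmd_t dst_pmd = 0;

        toy_pmd_t src_pmdval = toy_clear_flush(&src_pmd);
        dst_pmd = src_pmdval;   /* install at the destination slot */

        assert(src_pmd == 0 && dst_pmd != 0);
        printf("moved pmd value %#llx\n", (unsigned long long)dst_pmd);
        return 0;
    }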
/linux/include/linux/
rmap.h
     392  struct page *page, int nr_pages, struct vm_area_struct *src_vma,  in __folio_try_dup_anon_rmap() argument
     410  unlikely(folio_needs_cow_for_dma(src_vma, folio));  in __folio_try_dup_anon_rmap()
     476  struct page *page, int nr_pages, struct vm_area_struct *src_vma)  in folio_try_dup_anon_rmap_ptes() argument
     478  return __folio_try_dup_anon_rmap(folio, page, nr_pages, src_vma,  in folio_try_dup_anon_rmap_ptes()
     483  struct page *page, struct vm_area_struct *src_vma)  in folio_try_dup_anon_rmap_pte() argument
     485  return __folio_try_dup_anon_rmap(folio, page, 1, src_vma,  in folio_try_dup_anon_rmap_pte()
     512  struct page *page, struct vm_area_struct *src_vma)  in folio_try_dup_anon_rmap_pmd() argument
     515  return __folio_try_dup_anon_rmap(folio, page, HPAGE_PMD_NR, src_vma,  in folio_try_dup_anon_rmap_pmd()
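These rmap.h hits show one worker and three thin wrappers: folio_try_dup_anon_rmap_pte(), folio_try_dup_anon_rmap_ptes(), and folio_try_dup_anon_rmap_pmd() all forward to __folio_try_dup_anon_rmap() and differ only in the page count they pass (1, nr_pages, or HPAGE_PMD_NR). Below is a compact sketch of that delegation pattern with hypothetical toy_* names; returning 0 for success mirrors the kernel convention, but the duplication work itself is reduced to a printf.

    #include <stdio.h>

    #define TOY_HPAGE_PMD_NR 512   /* 2 MiB / 4 KiB on x86-64; arch-dependent in reality */

    /* Single worker: the wrappers below vary only nr_pages. */
    static int toy_try_dup_anon_rmap(int nr_pages)
    {
        /* The real worker takes rmap references and can fail for DMA-pinned folios. */
        printf("dup anon rmap for %d page(s)\n", nr_pages);
        return 0;
    }

    static int toy_try_dup_anon_rmap_pte(void)          { return toy_try_dup_anon_rmap(1); }
    static int toy_try_dup_anon_rmap_ptes(int nr_pages) { return toy_try_dup_anon_rmap(nr_pages); }
    static int toy_try_dup_anon_rmap_pmd(void)          { return toy_try_dup_anon_rmap(TOY_HPAGE_PMD_NR); }

    int main(void)
    {
        toy_try_dup_anon_rmap_pte();
        toy_try_dup_anon_rmap_ptes(16);
        toy_try_dup_anon_rmap_pmd();
        return 0;
    }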
userfaultfd_k.h
     145  struct vm_area_struct *src_vma,
huge_mm.h
      14  struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma);
hugetlb.h
     325  struct vm_area_struct *src_vma)  in copy_hugetlb_page_range() argument
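The hugetlb.h hit at line 325 is a header-side copy_hugetlb_page_range() definition taking src_vma as an argument, which is the usual kernel pattern for compiled-out features: the header provides an inline stub with the same signature so call sites build without #ifdef clutter. The sketch below shows that pattern with invented names; the stub body and the exact configuration guard are assumptions, not a copy of the kernel's stub.

    #include <stdio.h>

    struct toy_vma { unsigned long vm_start, vm_end; };

    /* Define this to emulate the feature's CONFIG_* option being enabled. */
    /* #define TOY_CONFIG_HUGETLB */

    #ifdef TOY_CONFIG_HUGETLB
    int toy_copy_hugetlb_range(struct toy_vma *dst_vma, struct toy_vma *src_vma);
    #else
    /* Stub with the identical signature: callers compile either way and are
     * expected to be gated so this is never reached when the feature is off. */
    static inline int toy_copy_hugetlb_range(struct toy_vma *dst_vma,
                                             struct toy_vma *src_vma)
    {
        (void)dst_vma;
        (void)src_vma;
        return 0;
    }
    #endif

    int main(void)
    {
        struct toy_vma a = { 0, 0 }, b = { 0, 0 };
        printf("stub returned %d\n", toy_copy_hugetlb_range(&a, &b));
        return 0;
    }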
mm.h
    2385  copy_page_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma);
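The mm.h prototype at line 2385 ties the mm/memory.c hits together: copy_page_range(dst_vma, src_vma) is the entry point that fork()'s dup_mmap() uses to duplicate page tables for each VMA it clones into the child. Below is a hedged sketch of that caller shape only; the flat array, toy types, and stand-in copy function are illustrative assumptions (the kernel walks a maple tree and copies real page tables).

    #include <stdio.h>

    struct toy_vma { unsigned long vm_start, vm_end; };

    /* Stand-in for copy_page_range(dst_vma, src_vma). */
    static int toy_copy_page_range(struct toy_vma *dst_vma, struct toy_vma *src_vma)
    {
        printf("copy tables for [%#lx, %#lx)\n", src_vma->vm_start, src_vma->vm_end);
        dst_vma->vm_start = src_vma->vm_start;
        dst_vma->vm_end = src_vma->vm_end;
        return 0;
    }

    int main(void)
    {
        /* Parent address space as a flat list of VMAs, purely for illustration. */
        struct toy_vma parent[] = { { 0x1000, 0x5000 }, { 0x8000, 0x9000 } };
        struct toy_vma child[2];

        for (int i = 0; i < 2; i++)   /* cf. dup_mmap() walking the parent's VMAs */
            toy_copy_page_range(&child[i], &parent[i]);
        return 0;
    }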
Completed in 55 milliseconds