Lines matching refs:mapping in mm/hugetlb.c (Linux kernel)
1182 struct address_space *mapping = vma->vm_file->f_mapping; in vma_resv_map() local
1183 struct inode *inode = mapping->host; in vma_resv_map()
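
The pair of lines above (1182-1183) is the idiom used throughout this file to get from a VMA to the backing hugetlbfs inode. A minimal sketch of that navigation, assuming a file-backed (shared) mapping so vma->vm_file is non-NULL; the helper name hugetlb_vma_inode() is made up for illustration:

    #include <linux/mm.h>
    #include <linux/fs.h>

    /* Hypothetical helper: resolve the inode behind a file-backed VMA.
     * Only valid when vma->vm_file != NULL (e.g. a hugetlbfs mapping).
     */
    static struct inode *hugetlb_vma_inode(struct vm_area_struct *vma)
    {
            struct address_space *mapping = vma->vm_file->f_mapping;

            return mapping->host;   /* address_space::host is the owning inode */
    }
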
1676 struct folio, mapping); in free_hpage_workfn()
1678 folio->mapping = NULL; in free_hpage_workfn()
1715 if (llist_add((struct llist_node *)&folio->mapping, &hpage_freelist)) in update_and_free_hugetlb_folio()
1840 folio->mapping = NULL; in free_huge_folio()
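
Lines 1676-1840 all touch the same trick: when a hugetlb folio must be freed from a context that cannot sleep, its otherwise-unused ->mapping word is reused as an llist_node and the folio is queued to a workqueue. A condensed sketch of the pattern, assuming ->mapping carries no page-cache state at this point (the workfn resets it to NULL before freeing); defer_free() is a made-up name:

    #include <linux/llist.h>
    #include <linux/workqueue.h>
    #include <linux/mm.h>

    static LLIST_HEAD(hpage_freelist);

    static void free_hpage_workfn(struct work_struct *work)
    {
            struct llist_node *node = llist_del_all(&hpage_freelist);

            while (node) {
                    /* Recover the folio from the embedded ->mapping word */
                    struct folio *folio = container_of((struct address_space **)node,
                                                       struct folio, mapping);

                    node = node->next;      /* read next before clearing */
                    folio->mapping = NULL;  /* undo the llist reuse */
                    /* ... actually free the folio here ... */
            }
    }
    static DECLARE_WORK(free_hpage_work, free_hpage_workfn);

    static void defer_free(struct folio *folio)
    {
            /* llist_add() returns true only if the list was previously
             * empty, so exactly one caller ends up kicking the worker.
             */
            if (llist_add((struct llist_node *)&folio->mapping, &hpage_freelist))
                    schedule_work(&free_hpage_work);
    }
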
1932 struct address_space *mapping = folio_mapping(folio); in hugetlb_folio_mapping_lock_write() local
1934 if (!mapping) in hugetlb_folio_mapping_lock_write()
1935 return mapping; in hugetlb_folio_mapping_lock_write()
1937 if (i_mmap_trylock_write(mapping)) in hugetlb_folio_mapping_lock_write()
1938 return mapping; in hugetlb_folio_mapping_lock_write()
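
hugetlb_folio_mapping_lock_write() (lines 1932-1938) returns the folio's address_space with i_mmap_rwsem held for write, or NULL when the folio has no mapping or the trylock fails, so callers such as the memory-failure path must be prepared to back off. A hypothetical caller sketch:

    struct address_space *mapping = hugetlb_folio_mapping_lock_write(folio);

    if (mapping) {
            /* i_mmap_rwsem held for write: safe to walk mapping->i_mmap */
            /* ... unmap or poison processing ... */
            i_mmap_unlock_write(mapping);
    } else {
            /* anonymous folio, truncated folio, or lock contention */
    }
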
4037 new_folio->mapping = NULL; in demote_free_hugetlb_folios()
5784 struct address_space *mapping = vma->vm_file->f_mapping; in move_hugetlb_page_tables() local
5806 i_mmap_lock_write(mapping); in move_hugetlb_page_tables()
5836 i_mmap_unlock_write(mapping); in move_hugetlb_page_tables()
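
In move_hugetlb_page_tables() (lines 5784-5836) the whole page-table move runs under i_mmap_lock_write(), which keeps shared-PMD users and rmap walkers out while the entries change home. The bracket, heavily condensed:

    struct address_space *mapping = vma->vm_file->f_mapping;

    i_mmap_lock_write(mapping);
    /* ... for each huge PTE in the old range: unshare any shared PMD,
     * then move the entry to its new address; flush TLBs afterwards ... */
    i_mmap_unlock_write(mapping);
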
6082 struct address_space *mapping; in unmap_ref_private() local
6092 mapping = vma->vm_file->f_mapping; in unmap_ref_private()
6099 i_mmap_lock_write(mapping); in unmap_ref_private()
6100 vma_interval_tree_foreach(iter_vma, &mapping->i_mmap, pgoff, pgoff) { in unmap_ref_private()
6125 i_mmap_unlock_write(mapping); in unmap_ref_private()
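
unmap_ref_private() (lines 6082-6125) shows the standard reverse-mapping walk: with i_mmap_rwsem held for write, vma_interval_tree_foreach() visits every VMA of the file overlapping a given page offset. A condensed sketch, with the unmap call reduced to a comment:

    struct vm_area_struct *iter_vma;

    mapping = vma->vm_file->f_mapping;
    i_mmap_lock_write(mapping);
    vma_interval_tree_foreach(iter_vma, &mapping->i_mmap, pgoff, pgoff) {
            if (iter_vma == vma)
                    continue;       /* skip the faulting VMA itself */
            /* ... unmap the old page from iter_vma so the private COW
             * copy cannot leak to other mappings of the file ... */
    }
    i_mmap_unlock_write(mapping);
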
6227 struct address_space *mapping = vma->vm_file->f_mapping; in hugetlb_wp() local
6242 hash = hugetlb_fault_mutex_hash(mapping, idx); in hugetlb_wp()
6332 struct address_space *mapping = vma->vm_file->f_mapping; in hugetlbfs_pagecache_present() local
6336 folio = filemap_get_folio(mapping, idx); in hugetlbfs_pagecache_present()
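
hugetlbfs_pagecache_present() (lines 6332-6336) only asks whether a folio exists at the index. A sketch of the check; note that filemap_get_folio() returns an ERR_PTR, not NULL, on a miss in current kernels:

    /* Sketch of the presence check; the real function also converts the
     * faulting address to a hugetlb page index first.
     */
    static bool pagecache_present(struct address_space *mapping, pgoff_t idx)
    {
            struct folio *folio = filemap_get_folio(mapping, idx);

            if (IS_ERR(folio))
                    return false;
            folio_put(folio);       /* drop the reference the lookup took */
            return true;
    }
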
6343 int hugetlb_add_to_page_cache(struct folio *folio, struct address_space *mapping, in hugetlb_add_to_page_cache() argument
6346 struct inode *inode = mapping->host; in hugetlb_add_to_page_cache()
6352 err = __filemap_add_folio(mapping, folio, idx, GFP_KERNEL, NULL); in hugetlb_add_to_page_cache()
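
hugetlb_add_to_page_cache() (lines 6343-6352) inserts a locked hugetlb folio into the file's page cache; the inode is pulled from mapping->host for size accounting. A condensed sketch of its shape (reservation handling and i_blocks accounting trimmed; treat it as an outline, not the exact upstream body):

    int hugetlb_add_to_page_cache(struct folio *folio,
                                  struct address_space *mapping, pgoff_t idx)
    {
            struct inode *inode = mapping->host;
            int err;

            __folio_set_locked(folio);
            err = __filemap_add_folio(mapping, folio, idx, GFP_KERNEL, NULL);
            if (unlikely(err)) {
                    __folio_clear_locked(folio);
                    return err;
            }
            /* ... mark the reservation consumed and charge inode->i_blocks ... */
            return 0;
    }
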
6373 struct address_space *mapping, in hugetlb_handle_userfault() argument
6384 hash = hugetlb_fault_mutex_hash(mapping, vmf->pgoff); in hugetlb_handle_userfault()
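
hugetlb_handle_userfault() (lines 6373-6384) recomputes the fault-mutex hash only so it can drop that mutex before parking the task in userfaultfd; handle_userfault() may sleep for as long as the userspace monitor takes to respond. A condensed sketch of the handoff:

    static vm_fault_t hugetlb_handle_userfault(struct vm_fault *vmf,
                                               struct address_space *mapping,
                                               unsigned long reason)
    {
            u32 hash = hugetlb_fault_mutex_hash(mapping, vmf->pgoff);

            /* Drop locks: userspace may take arbitrarily long to resolve
             * the fault, and it will retry through hugetlb_fault().
             */
            hugetlb_vma_unlock_read(vmf->vma);
            mutex_unlock(&hugetlb_fault_mutex_table[hash]);
            return handle_userfault(vmf, reason);
    }
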
6406 static vm_fault_t hugetlb_no_page(struct address_space *mapping, in hugetlb_no_page() argument
6409 u32 hash = hugetlb_fault_mutex_hash(mapping, vmf->pgoff); in hugetlb_no_page()
6437 folio = filemap_lock_hugetlb_folio(h, mapping, vmf->pgoff); in hugetlb_no_page()
6439 size = i_size_read(mapping->host) >> huge_page_shift(h); in hugetlb_no_page()
6466 return hugetlb_handle_userfault(vmf, mapping, in hugetlb_no_page()
6501 int err = hugetlb_add_to_page_cache(folio, mapping, in hugetlb_no_page()
6542 return hugetlb_handle_userfault(vmf, mapping, in hugetlb_no_page()
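
hugetlb_no_page() (lines 6406-6542) is the missing-page path: look up the folio, re-check i_size under the fault mutex (a truncate could have raced), and either hand off to userfaultfd or allocate and insert a fresh folio. The control flow, heavily condensed:

    folio = filemap_lock_hugetlb_folio(h, mapping, vmf->pgoff);
    if (IS_ERR(folio)) {
            size = i_size_read(mapping->host) >> huge_page_shift(h);
            if (vmf->pgoff >= size)
                    goto out;       /* fault beyond EOF: SIGBUS */

            if (userfaultfd_missing(vma))
                    return hugetlb_handle_userfault(vmf, mapping,
                                                    VM_UFFD_MISSING);

            /* ... allocate a folio; for shared mappings publish it with
             * hugetlb_add_to_page_cache() (line 6501) ... */
    }
    /* ... map the folio and install the huge PTE ... */
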
6633 u32 hugetlb_fault_mutex_hash(struct address_space *mapping, pgoff_t idx) in hugetlb_fault_mutex_hash() argument
6638 key[0] = (unsigned long) mapping; in hugetlb_fault_mutex_hash()
6650 u32 hugetlb_fault_mutex_hash(struct address_space *mapping, pgoff_t idx) in hugetlb_fault_mutex_hash() argument
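
Lines 6633 and 6650 are two builds of the same function: under CONFIG_SMP the (mapping, index) pair is hashed to spread concurrent faults across num_fault_mutexes locks; the UP variant simply returns 0 because there is only one mutex to take. The SMP shape, as a sketch matching the upstream structure:

    #include <linux/jhash.h>

    /* num_fault_mutexes and hugetlb_fault_mutex_table are set up at
     * hugetlb_init() time; the table size is a power of two.
     */
    u32 hugetlb_fault_mutex_hash(struct address_space *mapping, pgoff_t idx)
    {
            unsigned long key[2];
            u32 hash;

            key[0] = (unsigned long) mapping;       /* line 6638 */
            key[1] = idx;

            hash = jhash2((u32 *)&key, sizeof(key) / sizeof(u32), 0);

            return hash & (num_fault_mutexes - 1);  /* power-of-two mask */
    }
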
6663 struct address_space *mapping; in hugetlb_fault() local
6685 mapping = vma->vm_file->f_mapping; in hugetlb_fault()
6686 hash = hugetlb_fault_mutex_hash(mapping, vmf.pgoff); in hugetlb_fault()
6725 return hugetlb_no_page(mapping, &vmf); in hugetlb_fault()
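
hugetlb_fault() (lines 6663-6725) is the consumer side: hash the (mapping, pgoff) pair, take that fault mutex, and only then inspect the PTE and page cache. A condensed sketch of the bracket; note that when hugetlb_no_page() is called it releases the mutex itself before returning:

    mapping = vma->vm_file->f_mapping;
    hash = hugetlb_fault_mutex_hash(mapping, vmf.pgoff);
    mutex_lock(&hugetlb_fault_mutex_table[hash]);

    /* ... read the huge PTE into vmf.orig_pte ... */
    if (huge_pte_none(vmf.orig_pte))
            return hugetlb_no_page(mapping, &vmf);  /* drops the mutex */

    /* ... present-PTE handling: write protect, migration entries, ... */
    mutex_unlock(&hugetlb_fault_mutex_table[hash]);
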
6885 struct address_space *mapping = dst_vma->vm_file->f_mapping; in hugetlb_mfill_atomic_pte() local
6916 folio = filemap_lock_hugetlb_folio(h, mapping, idx); in hugetlb_mfill_atomic_pte()
7007 if (idx >= (i_size_read(mapping->host) >> huge_page_shift(h))) in hugetlb_mfill_atomic_pte()
7016 ret = hugetlb_add_to_page_cache(folio, mapping, idx); in hugetlb_mfill_atomic_pte()
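
hugetlb_mfill_atomic_pte() (lines 6885-7016) is the UFFDIO_COPY/CONTINUE path. For shared mappings, a destination index past EOF must fail, and a freshly built folio is published via hugetlb_add_to_page_cache() so later faults find it. A condensed sketch of the checks around lines 7007-7016; the surrounding allocation and PTE installation are omitted:

    if (vm_shared) {
            size = i_size_read(mapping->host) >> huge_page_shift(h);
            if (idx >= size) {
                    ret = -EFAULT;  /* destination is beyond EOF */
                    goto out;
            }

            /* Publish the new folio so subsequent faults find it */
            ret = hugetlb_add_to_page_cache(folio, mapping, idx);
            if (ret)
                    goto out;
    }
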
7540 struct address_space *mapping = vma->vm_file->f_mapping; in huge_pmd_share() local
7548 i_mmap_lock_read(mapping); in huge_pmd_share()
7549 vma_interval_tree_foreach(svma, &mapping->i_mmap, idx, idx) { in huge_pmd_share()
7578 i_mmap_unlock_read(mapping); in huge_pmd_share()
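
huge_pmd_share() (lines 7540-7578) closes the listing: under i_mmap_lock_read(), every other VMA of the same file at the same index is inspected for a page-table page that can be reused, so that processes mapping the same hugetlbfs range share one PMD page. Condensed, with the shareability test reduced to a comment:

    struct vm_area_struct *svma;

    i_mmap_lock_read(mapping);
    vma_interval_tree_foreach(svma, &mapping->i_mmap, idx, idx) {
            if (svma == vma)
                    continue;
            /* ... if svma's range is suitably aligned and sized, take a
             * reference on its PMD page and install it in our PUD ... */
    }
    i_mmap_unlock_read(mapping);
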