/linux/include/linux/
pagemap.h
      22  pgoff_t start, pgoff_t end);
      32  pgoff_t start, pgoff_t end);
     768  pgoff_t index) in filemap_get_folio()
     786  pgoff_t index) in filemap_lock_folio()
     804  pgoff_t index) in filemap_grab_folio()
     847  pgoff_t index) in find_lock_page()
     893  pgoff_t index) in grab_cache_page_nowait()
     977  pgoff_t *start, pgoff_t end, struct folio_batch *fbatch);
     982  pgoff_t index);
    1060  pgoff_t pgoff; in linear_page_index()
    [all …]
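
All of these pagemap.h helpers address the page cache by a pgoff_t index, i.e. the byte offset divided by PAGE_SIZE. A minimal lookup sketch (peek_cached_folio is a hypothetical name), assuming a recent kernel where filemap_get_folio() returns an ERR_PTR rather than NULL on a cache miss:

    #include <linux/pagemap.h>

    /* Return the cached folio covering byte offset @pos, or NULL. */
    static struct folio *peek_cached_folio(struct address_space *mapping,
                                           loff_t pos)
    {
            pgoff_t index = pos >> PAGE_SHIFT;  /* byte offset -> page index */
            struct folio *folio = filemap_get_folio(mapping, index);

            if (IS_ERR(folio))                  /* typically ERR_PTR(-ENOENT) */
                    return NULL;
            /* Caller now holds a reference; drop it with folio_put(). */
            return folio;
    }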
|
shmem_fs.h
      36  pgoff_t fallocend; /* highest fallocate endindex */
     109  pgoff_t index, gfp_t gfp_mask);
     115  struct vm_area_struct *vma, pgoff_t index,
     119  struct vm_area_struct *vma, pgoff_t index, in shmem_allowable_huge_orders()
     135  pgoff_t start, pgoff_t end);
     146  int shmem_get_folio(struct inode *inode, pgoff_t index, loff_t write_end,
     149  pgoff_t index, gfp_t gfp);
     152  pgoff_t index) in shmem_read_folio()
     158  struct address_space *mapping, pgoff_t index) in shmem_read_mapping_page()
     180  static inline pgoff_t shmem_fallocend(struct inode *inode, pgoff_t eof) in shmem_fallocend()
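
shmem_read_folio() and shmem_read_mapping_page() look up, and populate on demand, the page at a pgoff_t index of a tmpfs/shmem file. A sketch of the common driver pattern (pin_shmem_page is a hypothetical name):

    #include <linux/shmem_fs.h>

    /* Pin the page at @index of a shmem-backed file; put_page() when done.
     * Returns an ERR_PTR on failure (e.g. -ENOMEM). */
    static struct page *pin_shmem_page(struct file *filp, pgoff_t index)
    {
            /* Allocates and reads the page in if it is not resident yet. */
            return shmem_read_mapping_page(filp->f_mapping, index);
    }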
|
swapops.h
      86  static inline swp_entry_t swp_entry(unsigned long type, pgoff_t offset) in swp_entry()
     107  static inline pgoff_t swp_offset(swp_entry_t entry) in swp_offset()
     168  static inline swp_entry_t make_readable_device_private_entry(pgoff_t offset) in make_readable_device_private_entry()
     173  static inline swp_entry_t make_writable_device_private_entry(pgoff_t offset) in make_writable_device_private_entry()
     189  static inline swp_entry_t make_readable_device_exclusive_entry(pgoff_t offset) in make_readable_device_exclusive_entry()
     210  static inline swp_entry_t make_readable_device_private_entry(pgoff_t offset) in make_readable_device_private_entry()
     215  static inline swp_entry_t make_writable_device_private_entry(pgoff_t offset) in make_writable_device_private_entry()
     274  static inline swp_entry_t make_readable_migration_entry(pgoff_t offset) in make_readable_migration_entry()
     284  static inline swp_entry_t make_writable_migration_entry(pgoff_t offset) in make_writable_migration_entry()
     339  static inline swp_entry_t make_readable_migration_entry(pgoff_t offset) in make_readable_migration_entry()
    [all …]
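
swp_entry() and swp_offset() are the encode/decode pair for swap entries: a swap device type and a pgoff_t offset packed into one word. A round-trip sketch (swp_type() is the companion decoder for the type half):

    #include <linux/swapops.h>

    static void swp_entry_roundtrip(void)
    {
            /* Pack swap device type 1, page offset 0x1234 into one word. */
            swp_entry_t entry = swp_entry(1, 0x1234);

            WARN_ON(swp_type(entry) != 1);        /* device type back out */
            WARN_ON(swp_offset(entry) != 0x1234); /* pgoff_t offset back out */
    }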
|
dax.h
      28  long (*direct_access)(struct dax_device *, pgoff_t, long,
      37  int (*zero_page_range)(struct dax_device *, pgoff_t, size_t);
      42  size_t (*recovery_write)(struct dax_device *dax_dev, pgoff_t pgoff,
      69  size_t dax_recovery_write(struct dax_device *dax_dev, pgoff_t pgoff,
     125  pgoff_t pgoff, void *addr, size_t bytes, struct iov_iter *i) in dax_recovery_write()
     175  struct page *dax_layout_busy_page_range(struct address_space *mapping, pgoff_t start, pgoff_t nr_pages) in dax_layout_busy_page_range()
     231  long dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff, long nr_pages,
     235  size_t dax_copy_to_iter(struct dax_device *dax_dev, pgoff_t pgoff, void *addr,
     237  int dax_zero_page_range(struct dax_device *dax_dev, pgoff_t pgoff,
     249  int dax_delete_mapping_entry(struct address_space *mapping, pgoff_t index);
    [all …]
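
dax_direct_access() translates a pgoff_t within a DAX device into a directly addressable kernel mapping. A sketch assuming the post-5.19 signature that takes an enum dax_access_mode (dax_kaddr_of is a hypothetical helper; the pfn out-parameter may be NULL when not needed):

    #include <linux/dax.h>

    /* Kernel-mapped address of the page at @pgoff, or NULL on failure. */
    static void *dax_kaddr_of(struct dax_device *dax_dev, pgoff_t pgoff)
    {
            void *kaddr;
            long avail = dax_direct_access(dax_dev, pgoff, 1, DAX_ACCESS,
                                           &kaddr, NULL);

            return avail < 1 ? NULL : kaddr;
    }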
|
mempolicy.h
     117  pgoff_t start, end;
     127  pgoff_t idx);
     131  unsigned long addr, pgoff_t *ilx);
     133  unsigned long addr, int order, pgoff_t *ilx);
     219  mpol_shared_policy_lookup(struct shared_policy *sp, pgoff_t idx) in mpol_shared_policy_lookup()
     225  unsigned long addr, int order, pgoff_t *ilx) in get_vma_policy()
|
memfd.h
       9  struct folio *memfd_alloc_folio(struct file *memfd, pgoff_t idx);
      15  static inline struct folio *memfd_alloc_folio(struct file *memfd, pgoff_t idx) in memfd_alloc_folio()
|
/linux/mm/
mapping_dirty_helpers.c
      67  pgoff_t bitmap_pgoff;
      69  pgoff_t start;
      70  pgoff_t end;
      97  pgoff_t pgoff = ((addr - walk->vma->vm_start) >> PAGE_SHIFT) + in clean_record_pte()
     264  pgoff_t first_index, pgoff_t nr) in wp_shared_mapping_range()
     314  pgoff_t first_index, pgoff_t nr, in clean_record_shared_mapping_range()
     315  pgoff_t bitmap_pgoff, in clean_record_shared_mapping_range()
     317  pgoff_t *start, in clean_record_shared_mapping_range()
     318  pgoff_t *end) in clean_record_shared_mapping_range()
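
The expression at line 97 is the standard linear address-to-pgoff conversion, the same arithmetic linear_page_index() in pagemap.h performs; written out as a standalone sketch (addr_to_pgoff is a hypothetical name):

    #include <linux/mm.h>

    /* File page index backing user address @addr in a linear VMA. */
    static pgoff_t addr_to_pgoff(struct vm_area_struct *vma, unsigned long addr)
    {
            return ((addr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
    }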
|
truncate.c
      32  pgoff_t index, void *entry) in __clear_shadow_entry()
      97  pgoff_t index = indices[i]; in truncate_folio_batch_exceptionals()
     308  pgoff_t end; /* exclusive */ in truncate_inode_pages_range()
     310  pgoff_t indices[PAGEVEC_SIZE]; in truncate_inode_pages_range()
     311  pgoff_t index; in truncate_inode_pages_range()
     473  pgoff_t start, pgoff_t end, unsigned long *nr_failed) in mapping_try_invalidate()
     475  pgoff_t indices[PAGEVEC_SIZE]; in mapping_try_invalidate()
     477  pgoff_t index = start; in mapping_try_invalidate()
     536  pgoff_t start, pgoff_t end) in invalidate_mapping_pages()
     599  pgoff_t start, pgoff_t end) in invalidate_inode_pages2_range()
    [all …]
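
invalidate_mapping_pages() takes an inclusive pgoff_t [start, end] range and drops whatever clean, unlocked pages it can. A sketch covering a whole mapping (drop_clean_cache is a hypothetical name; (pgoff_t)-1 is the conventional "to the end of the file" sentinel):

    #include <linux/pagemap.h>

    /* Best effort: evict every clean, unlocked cached page of the file. */
    static void drop_clean_cache(struct address_space *mapping)
    {
            unsigned long nr = invalidate_mapping_pages(mapping, 0, (pgoff_t)-1);

            pr_debug("invalidated %lu pages\n", nr);
    }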
|
swap.h
      46  static inline pgoff_t swap_cache_index(swp_entry_t entry) in swap_cache_index()
      66  pgoff_t index);
      72  struct mempolicy *mpol, pgoff_t ilx, bool *new_page_allocated,
      75  struct mempolicy *mpol, pgoff_t ilx);
     123  static inline pgoff_t swap_cache_index(swp_entry_t entry) in swap_cache_index()
     133  gfp_t gfp_mask, struct mempolicy *mpol, pgoff_t ilx) in swap_cluster_readahead()
     161  pgoff_t index) in filemap_get_incore_folio()
|
readahead.c
     308  pgoff_t end_index; /* The last page we want to read */ in do_page_cache_ra()
     431  pgoff_t mark, unsigned int order, gfp_t gfp) in ra_alloc_folio()
     456  pgoff_t start = readahead_index(ractl); in page_cache_ra_order()
     457  pgoff_t index = start; in page_cache_ra_order()
     459  pgoff_t limit = (i_size_read(mapping->host) - 1) >> PAGE_SHIFT; in page_cache_ra_order()
     460  pgoff_t mark = index + ra->size - ra->async_size; in page_cache_ra_order()
     541  pgoff_t index = readahead_index(ractl); in page_cache_sync_ra()
     545  pgoff_t prev_index, miss; in page_cache_sync_ra()
     617  pgoff_t index = readahead_index(ractl); in page_cache_async_ra()
     618  pgoff_t expected, start; in page_cache_async_ra()
    [all …]
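
The readahead code tracks its window in pgoff_t units inside struct readahead_control. A sketch of starting synchronous readahead at an index, assuming the DEFINE_READAHEAD() initializer from pagemap.h (start_readahead is a hypothetical name):

    #include <linux/pagemap.h>

    /* Kick off synchronous readahead of @nr pages starting at @index. */
    static void start_readahead(struct file *file, pgoff_t index,
                                unsigned long nr)
    {
            DEFINE_READAHEAD(ractl, file, &file->f_ra, file->f_mapping, index);

            page_cache_sync_ra(&ractl, nr);
    }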
|
filemap.c
    2041  pgoff_t end, struct folio_batch *fbatch, pgoff_t *indices) in find_get_entries()
    2090  pgoff_t end, struct folio_batch *fbatch, pgoff_t *indices) in find_lock_entries()
    2180  pgoff_t *start, pgoff_t end, struct folio_batch *fbatch) in filemap_get_folios_contig()
    2322  pgoff_t index, pgoff_t max, struct folio_batch *fbatch) in filemap_get_read_batch()
    2470  pgoff_t index; in filemap_create_folio()
    2513  pgoff_t last_index) in filemap_readahead()
    2530  pgoff_t last_index; in filemap_get_pages()
    3463  pgoff_t start) in filemap_map_pmd()
    3631  pgoff_t start_pgoff, pgoff_t end_pgoff) in filemap_map_pages()
    3921  pgoff_t index, in read_cache_page_gfp()
    [all …]
|
folio-compat.c
      69  pgoff_t index, gfp_t gfp) in add_to_page_cache_lru()
      76  struct page *pagecache_get_page(struct address_space *mapping, pgoff_t index, in pagecache_get_page()
      89  pgoff_t index) in grab_cache_page_write_begin()
|
swap_cgroup.c
      66  pgoff_t offset) in __lookup_swap_cgroup()
      79  pgoff_t offset = swp_offset(ent); in lookup_swap_cgroup()
     133  pgoff_t offset = swp_offset(ent); in swap_cgroup_record()
     134  pgoff_t end = offset + nr_ents; in swap_cgroup_record()
|
shmem.c
     881  pgoff_t start, pgoff_t end) in shmem_partial_swap_usage()
    1003  pgoff_t index; in shmem_undo_range()
    1457  pgoff_t index; in shmem_writepage()
    1615  pgoff_t index, unsigned int order, pgoff_t *ilx);
    1621  pgoff_t ilx; in shmem_swapin_cluster()
    1765  pgoff_t ilx; in shmem_alloc_folio()
    2663  pgoff_t index; in shmem_get_policy()
    2677  pgoff_t index, unsigned int order, pgoff_t *ilx) in shmem_get_pgoff_policy()
    2689  pgoff_t index, unsigned int order, pgoff_t *ilx) in shmem_get_pgoff_policy()
    2914  pgoff_t max_off; in shmem_mfill_atomic_pte()
    [all …]
|
internal.h
     399  struct file *file, pgoff_t index, unsigned long nr_to_read) in force_page_cache_readahead()
     405  unsigned find_lock_entries(struct address_space *mapping, pgoff_t *start,
     406  pgoff_t end, struct folio_batch *fbatch, pgoff_t *indices);
     407  unsigned find_get_entries(struct address_space *mapping, pgoff_t *start,
     408  pgoff_t end, struct folio_batch *fbatch, pgoff_t *indices);
     415  pgoff_t start, pgoff_t end, unsigned long *nr_failed);
     874  pgoff_t pgoff, addr; in folio_within_range()
     963  pgoff_t pgoff, unsigned long nr_pages) in vma_address()
     989  pgoff_t pgoff; in vma_address_end()
    1370  pgoff_t pgoff) in vma_set_range()
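
vma_address() in internal.h is the inverse of the address-to-pgoff conversion shown above for mapping_dirty_helpers.c: given a file pgoff_t, it recovers the user virtual address within a VMA. The core arithmetic, sketched without the overflow and range checks the real helper performs (pgoff_to_addr is a hypothetical name):

    #include <linux/mm.h>

    /* User address where file page @pgoff is mapped in @vma (unchecked). */
    static unsigned long pgoff_to_addr(struct vm_area_struct *vma, pgoff_t pgoff)
    {
            return vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
    }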
|
swap_state.c
      77  pgoff_t idx = swap_cache_index(entry); in get_shadow_from_swap_cache()
      94  pgoff_t idx = swap_cache_index(entry); in add_to_swap_cache()
     149  pgoff_t idx = swap_cache_index(entry); in __delete_from_swap_cache()
     405  pgoff_t index) in filemap_get_incore_folio()
     433  struct mempolicy *mpol, pgoff_t ilx, bool *new_page_allocated, in __read_swap_cache_async()
     564  pgoff_t ilx; in read_swap_cache_async()
     657  struct mempolicy *mpol, pgoff_t ilx) in swap_cluster_readahead()
     800  struct mempolicy *mpol, pgoff_t targ_ilx, struct vm_fault *vmf) in swap_vma_readahead()
     809  pgoff_t ilx; in swap_vma_readahead()
     876  pgoff_t ilx; in swapin_readahead()
|
/linux/include/trace/events/
fs_dax.h
      12  pgoff_t max_pgoff, int result),
      20  __field(pgoff_t, pgoff)
      21  __field(pgoff_t, max_pgoff)
     160  __field(pgoff_t, pgoff)
     228  TP_PROTO(struct inode *inode, pgoff_t start_index, pgoff_t end_index),
     232  __field(pgoff_t, start_index)
     233  __field(pgoff_t, end_index)
     253  TP_PROTO(struct inode *inode, pgoff_t start_index, pgoff_t end_index),\
     260  TP_PROTO(struct inode *inode, pgoff_t pgoff, pgoff_t pglen),
     264  __field(pgoff_t, pgoff)
    [all …]
|
filemap.h
      63  pgoff_t index,
      64  pgoff_t last_index
      99  pgoff_t index,
     100  pgoff_t last_index
     108  pgoff_t index,
     109  pgoff_t last_index
     115  TP_PROTO(struct address_space *mapping, pgoff_t index),
|
/linux/drivers/gpu/drm/vmwgfx/
vmwgfx_page_dirty.c
      85  pgoff_t offset = drm_vma_node_start(&vbo->tbo.base.vma_node); in vmw_bo_dirty_scan_pagetable()
      87  pgoff_t num_marked; in vmw_bo_dirty_scan_pagetable()
     125  pgoff_t num_marked; in vmw_bo_dirty_scan_mkwrite()
     141  pgoff_t start = 0; in vmw_bo_dirty_scan_mkwrite()
     142  pgoff_t end = dirty->bitmap_size; in vmw_bo_dirty_scan_mkwrite()
     185  pgoff_t start, pgoff_t end) in vmw_bo_dirty_pre_unmap()
     210  pgoff_t start, pgoff_t end) in vmw_bo_dirty_unmap()
     233  pgoff_t num_pages = PFN_UP(vbo->tbo.resource->size); in vmw_bo_dirty_add()
     311  pgoff_t start, cur, end; in vmw_bo_dirty_transfer_to_res()
     422  pgoff_t num_prefault; in vmw_bo_vm_fault()
    [all …]
|
/linux/drivers/gpu/drm/i915/gem/
i915_gem_object.h
     378  pgoff_t n,
     402  static_assert(castable_to_type(n, pgoff_t)); \
     425  __i915_gem_object_get_sg(struct drm_i915_gem_object *obj, pgoff_t n, in __i915_gem_object_get_sg()
     447  static_assert(castable_to_type(n, pgoff_t)); \
     470  __i915_gem_object_get_sg_dma(struct drm_i915_gem_object *obj, pgoff_t n, in __i915_gem_object_get_sg_dma()
     492  static_assert(castable_to_type(n, pgoff_t)); \
     512  __i915_gem_object_get_page(struct drm_i915_gem_object *obj, pgoff_t n);
     528  static_assert(castable_to_type(n, pgoff_t)); \
     563  static_assert(castable_to_type(n, pgoff_t)); \
     601  static_assert(castable_to_type(n, pgoff_t)); \
    [all …]
|
/linux/virt/kvm/
guest_memfd.c
      23  static inline kvm_pfn_t folio_file_pfn(struct folio *folio, pgoff_t index) in folio_file_pfn()
      29  pgoff_t index, struct folio *folio) in __kvm_gmem_prepare_folio()
      60  pgoff_t index; in kvm_gmem_prepare_folio()
     106  pgoff_t end) in kvm_gmem_invalidate_begin()
     114  pgoff_t pgoff = slot->gmem.pgoff; in kvm_gmem_invalidate_begin()
     141  pgoff_t end) in kvm_gmem_invalidate_end()
     155  pgoff_t start = offset >> PAGE_SHIFT; in kvm_gmem_punch_hole()
     156  pgoff_t end = (offset + len) >> PAGE_SHIFT; in kvm_gmem_punch_hole()
     181  pgoff_t start, index, end; in kvm_gmem_allocate()
     328  pgoff_t start, end; in kvm_gmem_error_folio()
    [all …]
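
kvm_gmem_punch_hole() (lines 155-156) shows the usual byte-range to page-range conversion; the resulting pgoff_t range is half-open and only covers the intended bytes exactly when offset and len are page-aligned. As a standalone sketch (byte_range_to_pgoff is a hypothetical name):

    #include <linux/mm.h>

    /* Byte range -> half-open page index range [*start, *end). */
    static void byte_range_to_pgoff(loff_t offset, loff_t len,
                                    pgoff_t *start, pgoff_t *end)
    {
            *start = offset >> PAGE_SHIFT;
            *end = (offset + len) >> PAGE_SHIFT;    /* exclusive */
    }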
|
/linux/drivers/gpu/drm/xe/
xe_pt_walk.h
      59  typedef int (*xe_pt_entry_fn)(struct xe_ptw *parent, pgoff_t offset,
     120  static inline pgoff_t
     139  static inline pgoff_t
|
xe_pt_walk.c
      36  static bool xe_pt_next(pgoff_t *offset, u64 *addr, u64 next, u64 end, in xe_pt_next()
      39  pgoff_t step = 1; in xe_pt_next()
      76  pgoff_t offset = xe_pt_offset(addr, level, walk); in xe_pt_walk_range()
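
xe_pt_offset() reduces a GPU virtual address to a pgoff_t slot index within one level of the page-table tree. A generic sketch of that kind of computation, with hypothetical shift and table-size parameters rather than xe's actual constants:

    #include <linux/types.h>

    /* Slot index at one page-table level: shift out lower levels, mask. */
    static pgoff_t pt_level_offset(u64 addr, unsigned int level_shift,
                                   pgoff_t entries_per_level)
    {
            return (addr >> level_shift) & (entries_per_level - 1);
    }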
|
/linux/fs/nilfs2/
page.c
      29  unsigned long block, pgoff_t index, int blkbits, in __nilfs_get_folio_block()
      52  pgoff_t index = blkoff >> (PAGE_SHIFT - blkbits); in nilfs_grab_buffer()
     249  pgoff_t index = 0; in nilfs_copy_dirty_pages()
     254  if (!filemap_get_folios_tag(smap, &index, (pgoff_t)-1, in nilfs_copy_dirty_pages()
     304  pgoff_t start = 0; in nilfs_copy_back_pages()
     314  pgoff_t index = folio->index; in nilfs_copy_back_pages()
     366  pgoff_t index = 0; in nilfs_clear_dirty_pages()
     370  while (filemap_get_folios_tag(mapping, &index, (pgoff_t)-1, in nilfs_clear_dirty_pages()
     488  pgoff_t index; in nilfs_find_uncommitted_extent()
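
nilfs_copy_dirty_pages() and nilfs_clear_dirty_pages() both use the filemap_get_folios_tag() batching idiom, passing (pgoff_t)-1 as the end index to walk every tagged folio in a mapping. A sketch of that loop shape (walk_dirty_folios is a hypothetical name; the per-folio work here is a placeholder page count):

    #include <linux/pagemap.h>
    #include <linux/pagevec.h>

    /* Visit every dirty folio in @mapping, one batch at a time. */
    static unsigned long walk_dirty_folios(struct address_space *mapping)
    {
            struct folio_batch fbatch;
            unsigned long nr_pages = 0;
            pgoff_t index = 0;
            unsigned int i;

            folio_batch_init(&fbatch);
            while (filemap_get_folios_tag(mapping, &index, (pgoff_t)-1,
                                          PAGECACHE_TAG_DIRTY, &fbatch)) {
                    for (i = 0; i < folio_batch_count(&fbatch); i++)
                            nr_pages += folio_nr_pages(fbatch.folios[i]);
                    folio_batch_release(&fbatch);
                    cond_resched();
            }
            return nr_pages;
    }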
|
/linux/drivers/md/
dm-linear.c
     160  static struct dax_device *linear_dax_pgoff(struct dm_target *ti, pgoff_t *pgoff) in linear_dax_pgoff()
     169  static long linear_dax_direct_access(struct dm_target *ti, pgoff_t pgoff, in linear_dax_direct_access()
     178  static int linear_dax_zero_page_range(struct dm_target *ti, pgoff_t pgoff, in linear_dax_zero_page_range()
     186  static size_t linear_dax_recovery_write(struct dm_target *ti, pgoff_t pgoff, in linear_dax_recovery_write()
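
linear_dax_pgoff() remaps a DAX pgoff_t through the target's sector offset, since device-mapper targets are addressed in 512-byte sectors. A hypothetical sketch of the unit conversion (remap_dax_pgoff is an invented name), assuming PAGE_SECTORS_SHIFT (PAGE_SHIFT - SECTOR_SHIFT) as defined in blkdev.h and a page-aligned target start:

    #include <linux/blkdev.h>

    /* Shift a DAX page index by a (page-aligned) sector offset. */
    static pgoff_t remap_dax_pgoff(pgoff_t pgoff, sector_t target_start)
    {
            sector_t sector = (sector_t)pgoff << PAGE_SECTORS_SHIFT;

            return (sector + target_start) >> PAGE_SECTORS_SHIFT;
    }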
|