/include/linux/
pagemap.h
   248   if (mapping->host)   in mapping_set_error()
   270   return mapping && test_bit(AS_UNEVICTABLE, &mapping->flags);   in mapping_unevictable()
   351   return mapping->gfp_mask;   in mapping_gfp_mask()
   435   mapping->flags = (mapping->flags & ~AS_FOLIO_ORDER_MASK) |   in mapping_set_folio_order_range()
   530   atomic_inc(&mapping->nr_thps);   in filemap_nr_thps_inc()
   579   return folio->mapping->host;   in folio_inode()
   832   mapping_gfp_mask(mapping));   in filemap_grab_folio()
   922   mapping_gfp_mask(mapping));   in grab_cache_page_nowait()
  1295   if (!mapping->nrpages)   in filemap_range_needs_writeback()
  1335   .mapping = m, \
  [all …]
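
Together these helpers cover the common page-cache access pattern. A minimal sketch, assuming a hypothetical myfs_fill_folio() that does the actual I/O: filemap_grab_folio() locks (creating if needed) the folio at @index using the mapping's gfp mask (line 832), and mapping_set_error() (line 248) latches a writeback error for a later fsync() to report.

    #include <linux/pagemap.h>

    /* Sketch: pin or create the folio caching @index, fill it, and
     * record any failure on the mapping for fsync() to report later.
     * myfs_fill_folio() is hypothetical. */
    static int myfs_cache_one(struct address_space *mapping, pgoff_t index)
    {
        struct folio *folio;
        int err;

        folio = filemap_grab_folio(mapping, index);  /* returned locked */
        if (IS_ERR(folio))
            return PTR_ERR(folio);

        err = myfs_fill_folio(folio);                /* hypothetical I/O */
        if (err)
            mapping_set_error(mapping, err);         /* latched error bit */

        folio_unlock(folio);
        folio_put(folio);
        return err;
    }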
|
io-mapping.h
    58   io_mapping_fini(struct io_mapping *mapping)   in io_mapping_fini() (argument)
    60   iomap_free(mapping->base, mapping->size);   in io_mapping_fini()
    70   BUG_ON(offset >= mapping->size);   in io_mapping_map_atomic_wc()
    71   phys_addr = mapping->base + offset;   in io_mapping_map_atomic_wc()
    96   BUG_ON(offset >= mapping->size);   in io_mapping_map_local_wc()
    97   phys_addr = mapping->base + offset;   in io_mapping_map_local_wc()
   113   BUG_ON(offset >= mapping->size);   in io_mapping_map_wc()
   114   phys_addr = mapping->base + offset;   in io_mapping_map_wc()
   147   io_mapping_fini(struct io_mapping *mapping)   in io_mapping_fini() (argument)
   149   iounmap(mapping->iomem);   in io_mapping_fini()
  [all …]
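
Each map/unmap variant above bound-checks the offset against the io_mapping's size. A minimal write-combined access sketch, assuming the caller already set the mapping up (e.g. with io_mapping_init_wc()):

    #include <linux/io.h>
    #include <linux/io-mapping.h>

    /* Sketch: temporarily map one page of a WC BAR window, write a
     * 32-bit value, and unmap. An offset beyond the mapping's size
     * would trip the BUG_ON()s listed above. */
    static void demo_wc_poke(struct io_mapping *wc_map, unsigned long offset,
                             u32 val)
    {
        void __iomem *vaddr = io_mapping_map_wc(wc_map, offset, PAGE_SIZE);

        if (!vaddr)
            return;
        writel(val, vaddr);
        io_mapping_unmap(vaddr);
    }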
|
tpm_eventlog.h
   166   void *mapping = NULL;   in __calc_tpm2_event_size() (local)
   188   if (!mapping) {   in __calc_tpm2_event_size()
   193   mapping = marker_start;   in __calc_tpm2_event_size()
   233   TPM_MEMUNMAP(mapping, mapping_size);   in __calc_tpm2_event_size()
   237   if (!mapping) {   in __calc_tpm2_event_size()
   242   mapping = marker;   in __calc_tpm2_event_size()
   245   memcpy(&halg, mapping, halg_size);   in __calc_tpm2_event_size()
   267   TPM_MEMUNMAP(mapping, mapping_size);   in __calc_tpm2_event_size()
   271   if (!mapping) {   in __calc_tpm2_event_size()
   276   mapping = marker;   in __calc_tpm2_event_size()
  [all …]
|
secretmem.h
     9   static inline bool secretmem_mapping(struct address_space *mapping)   in secretmem_mapping() (argument)
    11   return mapping->a_ops == &secretmem_aops;   in secretmem_mapping()
    24   static inline bool secretmem_mapping(struct address_space *mapping)   in secretmem_mapping() (argument)
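
The two definitions above (the a_ops test and its !CONFIG_SECRETMEM stub) let generic code refuse to operate on memfd_secret() mappings. A hedged sketch of the kind of guard a caller might add; demo_scan_mapping() is made up:

    #include <linux/secretmem.h>

    /* Sketch: bail out before touching the page cache of a secretmem
     * mapping, in the spirit of GUP refusing to pin such pages. */
    static int demo_scan_mapping(struct address_space *mapping)
    {
        if (secretmem_mapping(mapping))
            return -EPERM;  /* contents must stay invisible to the kernel */
        /* ... walk the page cache here ... */
        return 0;
    }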
|
shmem_fs.h
   102   bool shmem_mapping(struct address_space *mapping);
   104   static inline bool shmem_mapping(struct address_space *mapping)   in shmem_mapping() (argument)
   109   void shmem_unlock_mapping(struct address_space *mapping);
   110   struct page *shmem_read_mapping_page_gfp(struct address_space *mapping,
   144   extern unsigned long shmem_partial_swap_usage(struct address_space *mapping,
   158   struct folio *shmem_read_folio_gfp(struct address_space *mapping,
   161   static inline struct folio *shmem_read_folio(struct address_space *mapping,   in shmem_read_folio() (argument)
   164   return shmem_read_folio_gfp(mapping, index, mapping_gfp_mask(mapping));   in shmem_read_folio()
   168   struct address_space *mapping, pgoff_t index)   in shmem_read_mapping_page() (argument)
   170   return shmem_read_mapping_page_gfp(mapping, index,   in shmem_read_mapping_page()
  [all …]
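
As line 164 shows, shmem_read_folio() is shmem_read_folio_gfp() with the mapping's default gfp mask applied. A minimal sketch of the usual caller pattern (GEM drivers pull shmem-backed buffer pages this way):

    #include <linux/shmem_fs.h>

    /* Sketch: read in (allocating or swapping in on demand) page @index
     * of a shmem-backed file. Returns the folio or an ERR_PTR. */
    static struct folio *demo_shmem_page(struct file *shmem_file, pgoff_t index)
    {
        return shmem_read_folio(shmem_file->f_mapping, index);
    }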
|
dax.h
   154   int dax_writeback_mapping_range(struct address_space *mapping,
   157   struct page *dax_layout_busy_page(struct address_space *mapping);
   161   dax_entry_t dax_lock_mapping_entry(struct address_space *mapping,
   163   void dax_unlock_mapping_entry(struct address_space *mapping,
   184   if (IS_DAX(folio->mapping->host))   in dax_lock_folio()
   199   static inline void dax_unlock_mapping_entry(struct address_space *mapping,   in dax_unlock_mapping_entry() (argument)
   264   int dax_delete_mapping_entry(struct address_space *mapping, pgoff_t index);
   265   void dax_delete_mapping_range(struct address_space *mapping,
   267   int dax_invalidate_mapping_entry_sync(struct address_space *mapping,
   285   static inline bool dax_mapping(struct address_space *mapping)   in dax_mapping() (argument)
  [all …]
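
A hedged sketch of how an fsdax filesystem's ->writepages uses dax_writeback_mapping_range() (line 154): there are no page-cache folios to write, only dirty DAX entries whose CPU cache lines need flushing. The dax_device is assumed to come from the superblock.

    #include <linux/dax.h>
    #include <linux/writeback.h>

    static int demo_dax_writepages(struct address_space *mapping,
                                   struct dax_device *dax_dev,
                                   struct writeback_control *wbc)
    {
        if (!dax_mapping(mapping))  /* the inode is not IS_DAX */
            return 0;
        return dax_writeback_mapping_range(mapping, dax_dev, wbc);
    }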
|
writeback.h
   354   void balance_dirty_pages_ratelimited(struct address_space *mapping);
   355   int balance_dirty_pages_ratelimited_flags(struct address_space *mapping,
   360   struct folio *writeback_iter(struct address_space *mapping,
   366   int write_cache_pages(struct address_space *mapping,
   369   int do_writepages(struct address_space *mapping, struct writeback_control *wbc);
   371   void tag_pages_for_writeback(struct address_space *mapping,
   374   bool filemap_dirty_folio(struct address_space *mapping, struct folio *folio);
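
writeback_iter() (line 360) is the loop-style successor to the callback-style write_cache_pages(). A sketch of a ->writepages built on it, with a stub standing in for the real per-folio writer:

    #include <linux/pagemap.h>
    #include <linux/writeback.h>

    /* Stub for the real per-folio writer, which would clear the dirty
     * bit, start I/O, and unlock the folio. */
    static int demo_write_folio(struct folio *folio,
                                struct writeback_control *wbc)
    {
        folio_unlock(folio);
        return 0;
    }

    /* Each iteration hands back the next dirty folio in range, locked;
     * iteration ends when the range or the wbc's budget is exhausted. */
    static int demo_writepages(struct address_space *mapping,
                               struct writeback_control *wbc)
    {
        struct folio *folio = NULL;
        int error = 0;

        while ((folio = writeback_iter(mapping, wbc, folio, &error)))
            error = demo_write_folio(folio, wbc);

        return error;
    }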
|
migrate.h
    66   int migrate_folio(struct address_space *mapping, struct folio *dst,
    75   int migrate_huge_page_move_mapping(struct address_space *mapping,
    80   int folio_migrate_mapping(struct address_space *mapping,
    98   static inline int migrate_huge_page_move_mapping(struct address_space *mapping,   in migrate_huge_page_move_mapping() (argument)
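
For mappings whose folios carry no private state, the generic migrate_folio() (line 66) can be wired straight into the address_space operations, which is how a filesystem opts its pages into compaction and memory migration. A sketch, other fields elided:

    #include <linux/fs.h>
    #include <linux/migrate.h>

    static const struct address_space_operations demo_aops = {
        .migrate_folio = migrate_folio,
    };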
|
mpage.h
    20   int mpage_writepages(struct address_space *mapping,
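
For a classic block-based filesystem, ->writepages can be a thin wrapper over mpage_writepages(). A sketch; demo_get_block is a hypothetical get_block_t callback mapping file blocks to disk blocks:

    #include <linux/mpage.h>

    static int demo_get_block(struct inode *inode, sector_t iblock,
                              struct buffer_head *bh_result, int create);

    static int demo_mpage_writepages(struct address_space *mapping,
                                     struct writeback_control *wbc)
    {
        return mpage_writepages(mapping, wbc, demo_get_block);
    }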
|
fs.h
   547   down_write(&mapping->i_mmap_rwsem);   in i_mmap_lock_write()
   557   up_write(&mapping->i_mmap_rwsem);   in i_mmap_unlock_write()
   567   down_read(&mapping->i_mmap_rwsem);   in i_mmap_lock_read()
   572   up_read(&mapping->i_mmap_rwsem);   in i_mmap_unlock_read()
   615   atomic_dec(&mapping->i_mmap_writable);   in mapping_unmap_writable()
   626   atomic_inc(&mapping->i_mmap_writable);   in mapping_allow_writable()
   929   up_write(&mapping->invalidate_lock);   in filemap_invalidate_unlock()
   934   down_read(&mapping->invalidate_lock);   in filemap_invalidate_lock_shared()
   938   struct address_space *mapping)   in filemap_invalidate_trylock_shared() (argument)
   944   struct address_space *mapping)   in filemap_invalidate_unlock_shared() (argument)
  [all …]
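
A sketch of the invalidate_lock pattern these helpers implement: operations that change the mapping's contents relative to disk (truncate, hole punch) take the lock exclusively, while fault paths take it shared (line 934). The on-disk deallocation step is elided.

    #include <linux/fs.h>
    #include <linux/mm.h>

    static void demo_punch_hole(struct inode *inode, loff_t start, loff_t end)
    {
        struct address_space *mapping = inode->i_mapping;

        filemap_invalidate_lock(mapping);       /* excludes new faults */
        truncate_pagecache_range(inode, start, end);
        /* ... free the underlying blocks here ... */
        filemap_invalidate_unlock(mapping);
    }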
|
fscache.h
   176   struct address_space *mapping,
   583   static inline void fscache_clear_page_bits(struct address_space *mapping,   in fscache_clear_page_bits() (argument)
   588   __fscache_clear_page_bits(mapping, start, len);   in fscache_clear_page_bits()
   617   struct address_space *mapping,   in fscache_write_to_cache() (argument)
   624   __fscache_write_to_cache(cookie, mapping, start, len, i_size,   in fscache_write_to_cache()
|
netfs.h
   229   struct address_space *mapping;  /* The mapping being accessed */   (member)
   410   int netfs_writeback_single(struct address_space *mapping,
   421   int netfs_writepages(struct address_space *mapping,
   423   bool netfs_dirty_folio(struct address_space *mapping, struct folio *folio);
   461   int netfs_alloc_folioq_buffer(struct address_space *mapping,
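
A sketch of how a netfslib-based network filesystem delegates dirty tracking and writeback of its mappings to the helpers declared above (other aops fields elided; field names as used by existing netfs clients):

    #include <linux/fs.h>
    #include <linux/netfs.h>

    static const struct address_space_operations demo_netfs_aops = {
        .dirty_folio = netfs_dirty_folio,
        .writepages  = netfs_writepages,
    };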
|
nfs_page.h
   208   return req->wb_page->mapping->host;   in nfs_page_to_inode()
   209   return folio->mapping->host;   in nfs_page_to_inode()
|
uacce.h
   101   struct address_space *mapping;   (member)
|
buffer_head.h
   261   int block_write_begin(struct address_space *mapping, loff_t pos, unsigned len,
   512   bool block_dirty_folio(struct address_space *mapping, struct folio *folio);
   521   int sync_mapping_buffers(struct address_space *mapping);
   534   static inline int sync_mapping_buffers(struct address_space *mapping) { return 0; }   in sync_mapping_buffers() (argument)
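
A sketch of the classic fsync tail for buffer-head filesystems: flush the metadata buffers attached to the mapping's private list. When buffer heads are configured out, the stub at line 534 reduces this to a no-op returning 0.

    #include <linux/buffer_head.h>
    #include <linux/fs.h>

    static int demo_fsync_metadata(struct file *file)
    {
        struct inode *inode = file->f_mapping->host;

        return sync_mapping_buffers(inode->i_mapping);
    }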
|
nfs_fs.h
   437   extern int nfs_sync_mapping(struct address_space *mapping);
   438   extern void nfs_zap_mapping(struct inode *inode, struct address_space *mapping);
   458   extern int nfs_clear_invalid_mapping(struct address_space *mapping);
   460   extern int nfs_revalidate_mapping(struct inode *inode, struct address_space *mapping);
|
backing-dev.h
   146   static inline bool mapping_can_writeback(struct address_space *mapping)   in mapping_can_writeback() (argument)
   148   return inode_to_bdi(mapping->host)->capabilities & BDI_CAP_WRITEBACK;   in mapping_can_writeback()
|
mm_types.h
   108   struct address_space *mapping;   (member)
   398   struct address_space *mapping;   (member)
   489   FOLIO_MATCH(mapping, mapping);
   591   TABLE_MATCH(mapping, __page_mapping);
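
The FOLIO_MATCH and TABLE_MATCH lines are compile-time layout checks: struct folio (and struct ptdesc) overlay struct page, so every shared field must sit at the same offset in both. The macro is roughly as sketched below (DEMO_ prefix added; the real definition lives in mm_types.h):

    #include <linux/build_bug.h>
    #include <linux/mm_types.h>

    /* Fail the build if the folio field drifts away from the page
     * field it overlays. */
    #define DEMO_FOLIO_MATCH(pg, fl) \
        static_assert(offsetof(struct page, pg) == offsetof(struct folio, fl))

    DEMO_FOLIO_MATCH(mapping, mapping);  /* what line 489 asserts */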
|
/include/trace/events/
filemap.h
    34   if (folio->mapping->host->i_sb)
    62   struct address_space *mapping,
    78   if (mapping->host->i_sb)
    80   mapping->host->i_sb->s_dev;
    98   struct address_space *mapping,
   107   struct address_space *mapping,
   117   TP_ARGS(mapping, index),
   127   if (mapping->host->i_sb)
   129   mapping->host->i_sb->s_dev;
   146   TP_ARGS(mapping, eseq),
  [all …]
|
page_ref.h
    24   __field(void *, mapping)
    34   __entry->mapping = page->mapping;
    43   __entry->mapcount, __entry->mapping, __entry->mt,
    72   __field(void *, mapping)
    83   __entry->mapping = page->mapping;
    93   __entry->mapcount, __entry->mapping, __entry->mt,
|
writeback.h
    65   TP_PROTO(struct folio *folio, struct address_space *mapping),
    67   TP_ARGS(folio, mapping),
    77   bdi_dev_name(mapping ? inode_to_bdi(mapping->host) :
    79   __entry->ino = (mapping && mapping->host) ? mapping->host->i_ino : 0;
    92   TP_PROTO(struct folio *folio, struct address_space *mapping),
    94   TP_ARGS(folio, mapping)
    99   TP_PROTO(struct folio *folio, struct address_space *mapping),
   101   TP_ARGS(folio, mapping)
   261   struct address_space *mapping = folio_mapping(folio);
   262   struct inode *inode = mapping ? mapping->host : NULL;
|
erofs.h
    99   __entry->dev = folio->mapping->host->i_sb->s_dev;
   100   __entry->nid = EROFS_I(folio->mapping->host)->nid;
   101   __entry->dir = S_ISDIR(folio->mapping->host->i_mode);
|
/include/asm-generic/
cacheflush.h
    59   static inline void flush_dcache_mmap_lock(struct address_space *mapping)   in flush_dcache_mmap_lock() (argument)
    65   static inline void flush_dcache_mmap_unlock(struct address_space *mapping)   in flush_dcache_mmap_unlock() (argument)
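
These hooks bracket updates of a mapping's i_mmap interval tree; architectures with aliasing data caches implement them, while the asm-generic versions above are no-ops. A sketch of the pattern as mm's VMA-linking code uses it; the caller is assumed to hold i_mmap_lock_write():

    #include <linux/fs.h>
    #include <linux/mm.h>
    #include <asm/cacheflush.h>

    static void demo_link_vma(struct vm_area_struct *vma,
                              struct address_space *mapping)
    {
        flush_dcache_mmap_lock(mapping);
        vma_interval_tree_insert(vma, &mapping->i_mmap);
        flush_dcache_mmap_unlock(mapping);
    }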
|
/include/uapi/drm/
tegra_drm.h
   759   __u32 mapping;   (member)
   775   __u32 mapping;   (member)
   793   __u32 mapping;   (member)
|
/include/drm/ttm/
ttm_device.h
   293   struct device *dev, struct address_space *mapping,
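
A sketch of the call this parameter belongs to, modeled on how GPU drivers invoke ttm_device_init(): the mapping is the DRM device's anonymous-inode page cache, which TTM uses to back swappable buffer pages. The funcs table and the trailing bool flags are placeholders here.

    #include <drm/drm_device.h>
    #include <drm/ttm/ttm_device.h>

    static int demo_ttm_setup(struct ttm_device *bdev, struct drm_device *drm,
                              const struct ttm_device_funcs *funcs)
    {
        return ttm_device_init(bdev, funcs, drm->dev,
                               drm->anon_inode->i_mapping,
                               drm->vma_offset_manager,
                               false /* use_dma_alloc */,
                               false /* use_dma32 */);
    }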
|