/linux-6.3-rc2/mm/

truncate.c
     67  if (shmem_mapping(mapping))  in truncate_folio_batch_exceptionals()
     77  dax = dax_mapping(mapping);  in truncate_folio_batch_exceptionals()
    117  if (shmem_mapping(mapping) || dax_mapping(mapping))  in invalidate_exceptional_entry()
    131  if (shmem_mapping(mapping))  in invalidate_exceptional_entry2()
    133  if (dax_mapping(mapping))  in invalidate_exceptional_entry2()
    192  if (folio->mapping != mapping)  in truncate_inode_folio()
    258  if (!mapping)  in generic_error_remove_page()
    301  if (!mapping)  in invalidate_inode_page()
    573  if (folio->mapping != mapping)  in invalidate_complete_folio2()
    604  if (folio->mapping != mapping || mapping->a_ops->launder_folio == NULL)  in folio_launder()
    [all …]

filemap.c
    221  struct address_space *mapping = folio->mapping;  in __filemap_remove_folio() local
    252  struct address_space *mapping = folio->mapping;  in filemap_remove_folio() local
    809  struct address_space *mapping = old->mapping;  in replace_page_cache_folio() local
    819  new->mapping = mapping;  in replace_page_cache_folio()
    868  folio->mapping = mapping;  in __filemap_add_folio()
   1940  if (unlikely(folio->mapping != mapping)) {  in __filemap_get_folio()
   2117  if (folio->mapping != mapping ||  in find_lock_entries()
   2987  if (unlikely(folio->mapping != mapping))  in folio_seek_hole_data()
   3307  if (unlikely(folio->mapping != mapping)) {  in filemap_fault()
   3453  if (folio->mapping != mapping)  in next_uptodate_page()
    [all …]

readahead.c
    209  struct address_space *mapping = ractl->mapping;  in page_cache_ra_unbounded() local
    226  filemap_invalidate_lock_shared(mapping);  in page_cache_ra_unbounded()
    271  filemap_invalidate_unlock_shared(mapping);  in page_cache_ra_unbounded()
    285  struct inode *inode = ractl->mapping->host;  in do_page_cache_ra()
    310  struct address_space *mapping = ractl->mapping;  in force_page_cache_ra() local
    315  if (unlikely(!mapping->a_ops->read_folio && !mapping->a_ops->readahead))  in force_page_cache_ra()
    503  struct address_space *mapping = ractl->mapping;  in page_cache_ra_order() local
    508  gfp_t gfp = readahead_gfp_mask(mapping);  in page_cache_ra_order()
    523  filemap_invalidate_lock_shared(mapping);  in page_cache_ra_order()
    550  filemap_invalidate_unlock_shared(mapping);  in page_cache_ra_order()
    [all …]
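
Many of the mm/ hits above share one idiom: a folio found through the page
cache is re-validated against its address_space after the folio lock is
taken, because truncation or invalidation can detach it between lookup and
lock. A minimal sketch of that recheck, assuming the v6.3 folio API from
<linux/pagemap.h> (the wrapper name my_find_and_lock_folio() is
hypothetical, not a kernel function)::

    static struct folio *my_find_and_lock_folio(struct address_space *mapping,
                                                pgoff_t index)
    {
            /* In v6.3, filemap_get_folio() returns NULL when nothing is cached. */
            struct folio *folio = filemap_get_folio(mapping, index);

            if (!folio)
                    return NULL;

            folio_lock(folio);

            /*
             * Truncation may have removed the folio from this mapping while
             * we slept on the lock; recheck before trusting the folio.
             */
            if (unlikely(folio->mapping != mapping)) {
                    folio_unlock(folio);
                    folio_put(folio);
                    return NULL;
            }

            return folio;
    }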

/linux-6.3-rc2/include/linux/

pagemap.h
    227  if (mapping->host)  in mapping_set_error()
    249  return mapping && test_bit(AS_UNEVICTABLE, &mapping->flags);  in mapping_unevictable()
    274  return mapping->gfp_mask;  in mapping_gfp_mask()
    369  return folio->mapping;  in folio_file_mapping()
    400  return folio->mapping->host;  in folio_inode()
    566  mapping_gfp_mask(mapping));  in filemap_grab_folio()
    655  mapping_gfp_mask(mapping));  in grab_cache_page_nowait()
    754  return find_or_create_page(mapping, index, mapping_gfp_mask(mapping));  in grab_cache_page()
   1141  if (!mapping->nrpages)  in filemap_range_needs_writeback()
   1180  .mapping = m, \
    [all …]

io-mapping.h
     58  io_mapping_fini(struct io_mapping *mapping)  in io_mapping_fini() argument
     60  iomap_free(mapping->base, mapping->size);  in io_mapping_fini()
     70  BUG_ON(offset >= mapping->size);  in io_mapping_map_atomic_wc()
     71  phys_addr = mapping->base + offset;  in io_mapping_map_atomic_wc()
     90  BUG_ON(offset >= mapping->size);  in io_mapping_map_local_wc()
     91  phys_addr = mapping->base + offset;  in io_mapping_map_local_wc()
    107  BUG_ON(offset >= mapping->size);  in io_mapping_map_wc()
    108  phys_addr = mapping->base + offset;  in io_mapping_map_wc()
    141  io_mapping_fini(struct io_mapping *mapping)  in io_mapping_fini() argument
    143  iounmap(mapping->iomem);  in io_mapping_fini()
    [all …]

tpm_eventlog.h
    166  void *mapping = NULL;  in __calc_tpm2_event_size() local
    188  if (!mapping) {  in __calc_tpm2_event_size()
    193  mapping = marker_start;  in __calc_tpm2_event_size()
    233  TPM_MEMUNMAP(mapping, mapping_size);  in __calc_tpm2_event_size()
    237  if (!mapping) {  in __calc_tpm2_event_size()
    242  mapping = marker;  in __calc_tpm2_event_size()
    245  memcpy(&halg, mapping, halg_size);  in __calc_tpm2_event_size()
    267  TPM_MEMUNMAP(mapping, mapping_size);  in __calc_tpm2_event_size()
    271  if (!mapping) {  in __calc_tpm2_event_size()
    276  mapping = marker;  in __calc_tpm2_event_size()
    [all …]

shmem_fs.h
     79  static inline bool shmem_mapping(struct address_space *mapping)  in shmem_mapping() argument
     81  return mapping->a_ops == &shmem_aops;  in shmem_mapping()
     84  static inline bool shmem_mapping(struct address_space *mapping)  in shmem_mapping() argument
     89  extern void shmem_unlock_mapping(struct address_space *mapping);
     98  extern unsigned long shmem_partial_swap_usage(struct address_space *mapping,
    112  struct folio *shmem_read_folio_gfp(struct address_space *mapping,
    115  static inline struct folio *shmem_read_folio(struct address_space *mapping,  in shmem_read_folio() argument
    118  return shmem_read_folio_gfp(mapping, index, mapping_gfp_mask(mapping));  in shmem_read_folio()
    122  struct address_space *mapping, pgoff_t index)  in shmem_read_mapping_page() argument
    124  return shmem_read_mapping_page_gfp(mapping, index,  in shmem_read_mapping_page()
    [all …]

secretmem.h
     11  struct address_space *mapping;  in page_is_secretmem() local
     23  mapping = (struct address_space *)  in page_is_secretmem()
     24  ((unsigned long)page->mapping & ~PAGE_MAPPING_FLAGS);  in page_is_secretmem()
     26  if (!mapping || mapping != page->mapping)  in page_is_secretmem()
     29  return mapping->a_ops == &secretmem_aops;  in page_is_secretmem()

dax.h
    157  int dax_writeback_mapping_range(struct address_space *mapping,
    160  struct page *dax_layout_busy_page(struct address_space *mapping);
    164  dax_entry_t dax_lock_mapping_entry(struct address_space *mapping,
    166  void dax_unlock_mapping_entry(struct address_space *mapping,
    179  static inline int dax_writeback_mapping_range(struct address_space *mapping,  in dax_writeback_mapping_range() argument
    187  if (IS_DAX(page->mapping->host))  in dax_lock_page()
    202  static inline void dax_unlock_mapping_entry(struct address_space *mapping,  in dax_unlock_mapping_entry() argument
    248  int dax_delete_mapping_entry(struct address_space *mapping, pgoff_t index);
    249  int dax_invalidate_mapping_entry_sync(struct address_space *mapping,
    259  static inline bool dax_mapping(struct address_space *mapping)  in dax_mapping() argument
    [all …]

/linux-6.3-rc2/drivers/gpu/drm/panfrost/

panfrost_gem.c
     65  mapping = iter;  in panfrost_gem_mapping_get()
     71  return mapping;  in panfrost_gem_mapping_get()
     77  if (mapping->active)  in panfrost_gem_teardown_mapping()
     78  panfrost_mmu_unmap(mapping);  in panfrost_gem_teardown_mapping()
     95  kfree(mapping);  in panfrost_gem_mapping_release()
    100  if (!mapping)  in panfrost_gem_mapping_put()
    124  mapping = kzalloc(sizeof(*mapping), GFP_KERNEL);  in panfrost_gem_open()
    125  if (!mapping)  in panfrost_gem_open()
    131  mapping->obj = bo;  in panfrost_gem_open()
    146  ret = drm_mm_insert_node_generic(&mapping->mmu->mm, &mapping->mmnode,  in panfrost_gem_open()
    [all …]

/linux-6.3-rc2/drivers/gpu/drm/tegra/

uapi.c
     21  host1x_bo_put(mapping->bo);  in tegra_drm_mapping_release()
     23  kfree(mapping);  in tegra_drm_mapping_release()
    206  mapping = kzalloc(sizeof(*mapping), GFP_KERNEL);  in tegra_drm_ioctl_channel_map()
    207  if (!mapping) {  in tegra_drm_ioctl_channel_map()
    220  if (!mapping->bo) {  in tegra_drm_ioctl_channel_map()
    243  mapping->map = host1x_bo_pin(mapping_dev, mapping->bo, direction, NULL);  in tegra_drm_ioctl_channel_map()
    249  mapping->iova = mapping->map->phys;  in tegra_drm_ioctl_channel_map()
    250  mapping->iova_end = mapping->iova + host1x_to_tegra_bo(mapping->bo)->gem.size;  in tegra_drm_ioctl_channel_map()
    266  kfree(mapping);  in tegra_drm_ioctl_channel_map()
    287  mapping = xa_erase(&context->mappings, args->mapping);  in tegra_drm_ioctl_channel_unmap()
    [all …]

/linux-6.3-rc2/drivers/media/usb/uvc/

uvc_ctrl.c
   1019  s32 value = mapping->get(mapping, UVC_GET_CUR, data);  in __uvc_ctrl_get_value()
   1226  return mapping->get(mapping, UVC_GET_RES,  in uvc_get_ctrl_bitmap()
   1230  return mapping->get(mapping, UVC_GET_MAX,  in uvc_get_ctrl_bitmap()
   1910  min = mapping->get(mapping, UVC_GET_MIN,  in uvc_ctrl_set()
   1912  max = mapping->get(mapping, UVC_GET_MAX,  in uvc_ctrl_set()
   1914  step = mapping->get(mapping, UVC_GET_RES,  in uvc_ctrl_set()
   1994  mapping->set(mapping, value,  in uvc_ctrl_set()
   2389  if (mapping->menu_mapping && mapping->menu_mask) {  in __uvc_ctrl_add_mapping()
   2397  if (mapping->menu_names && mapping->menu_mask) {  in __uvc_ctrl_add_mapping()
   2447  uvc_map_get_name(mapping), mapping->id);  in uvc_ctrl_add_mapping()
    [all …]

/linux-6.3-rc2/arch/arm/mm/

dma-mapping.c
    823  BUG_ON(addr < mapping->base || bitmap_index > mapping->extensions);  in __free_iova()
   1567  if (!mapping)  in arm_iommu_create_mapping()
   1582  mapping->base = base;  in arm_iommu_create_mapping()
   1592  return mapping;  in arm_iommu_create_mapping()
   1598  kfree(mapping);  in arm_iommu_create_mapping()
   1614  kfree(mapping);  in release_iommu_mapping()
   1621  if (mapping->nr_bitmaps >= mapping->extensions)  in extend_iommu_mapping()
   1625  mapping->bitmaps[next_bitmap] = kzalloc(mapping->bitmap_size,  in extend_iommu_mapping()
   1637  if (mapping)  in arm_iommu_release_mapping()
   1697  if (!mapping) {  in arm_iommu_detach_device()
    [all …]

flush.c
    251  flush_dcache_mmap_lock(mapping);  in __flush_dcache_aliases()
    265  flush_dcache_mmap_unlock(mapping);  in __flush_dcache_aliases()
    273  struct address_space *mapping;  in __sync_icache_dcache() local
    284  mapping = page_mapping_file(page);  in __sync_icache_dcache()
    286  mapping = NULL;  in __sync_icache_dcache()
    289  __flush_dcache_page(mapping, page);  in __sync_icache_dcache()
    317  struct address_space *mapping;  in flush_dcache_page() local
    332  mapping = page_mapping_file(page);  in flush_dcache_page()
    335  mapping && !page_mapcount(page))  in flush_dcache_page()
    339  if (mapping && cache_is_vivt())  in flush_dcache_page()
    [all …]

/linux-6.3-rc2/drivers/gpu/drm/exynos/

exynos_drm_dma.c
     68  ret = iommu_attach_device(priv->mapping, subdrv_dev);  in drm_iommu_attach_device()
     92  iommu_detach_device(priv->mapping, subdrv_dev);  in drm_iommu_detach_device()
    109  if (!priv->mapping) {  in exynos_drm_register_dma()
    110  void *mapping;  in exynos_drm_register_dma() local
    116  mapping = iommu_get_domain_for_dev(priv->dma_dev);  in exynos_drm_register_dma()
    118  mapping = ERR_PTR(-ENODEV);  in exynos_drm_register_dma()
    120  if (IS_ERR(mapping))  in exynos_drm_register_dma()
    121  return PTR_ERR(mapping);  in exynos_drm_register_dma()
    122  priv->mapping = mapping;  in exynos_drm_register_dma()
    142  arm_iommu_release_mapping(priv->mapping);  in exynos_drm_cleanup_dma()
    [all …]

/linux-6.3-rc2/drivers/gpu/drm/etnaviv/

etnaviv_mmu.c
    143  WARN_ON(mapping->use);  in etnaviv_iommu_reap_mapping()
    147  mapping->context = NULL;  in etnaviv_iommu_reap_mapping()
    297  mapping->iova = iova;  in etnaviv_iommu_map_gem()
    305  node = &mapping->vram_node;  in etnaviv_iommu_map_gem()
    316  mapping->iova = node->start;  in etnaviv_iommu_map_gem()
    336  WARN_ON(mapping->use);  in etnaviv_iommu_unmap_gem()
    341  if (!mapping->context) {  in etnaviv_iommu_unmap_gem()
    418  if (mapping->use > 0) {  in etnaviv_iommu_get_suballoc_va()
    419  mapping->use++;  in etnaviv_iommu_get_suballoc_va()
    455  mapping->use = 1;  in etnaviv_iommu_get_suballoc_va()
    [all …]

etnaviv_gem.c
    223  return mapping;  in etnaviv_gem_get_vram_mapping()
    235  mapping->use -= 1;  in etnaviv_gem_mapping_unreference()
    252  if (mapping) {  in etnaviv_gem_mapping_get()
    271  if (mapping)  in etnaviv_gem_mapping_get()
    290  if (!mapping) {  in etnaviv_gem_mapping_get()
    291  mapping = kzalloc(sizeof(*mapping), GFP_KERNEL);  in etnaviv_gem_mapping_get()
    292  if (!mapping) {  in etnaviv_gem_mapping_get()
    303  mapping->use = 1;  in etnaviv_gem_mapping_get()
    309  kfree(mapping);  in etnaviv_gem_mapping_get()
    321  return mapping;  in etnaviv_gem_mapping_get()
    [all …]

/linux-6.3-rc2/fs/gfs2/

aops.c
    128  struct inode *inode = page->mapping->host;  in __gfs2_jdata_writepage()
    153  struct inode *inode = page->mapping->host;  in gfs2_jdata_writepage()
    211  struct inode *inode = mapping->host;  in gfs2_write_jdata_batch()
    234  if (unlikely(folio->mapping != mapping)) {  in gfs2_write_jdata_batch()
    376  mapping->writeback_index = done_index;  in gfs2_write_cache_jdata()
    396  ret = gfs2_write_cache_jdata(mapping, wbc);  in gfs2_jdata_writepages()
    400  ret = gfs2_write_cache_jdata(mapping, wbc);  in gfs2_jdata_writepages()
    452  struct inode *inode = folio->mapping->host;  in gfs2_read_folio()
    529  struct inode *inode = rac->mapping->host;  in gfs2_readahead()
    585  return block_dirty_folio(mapping, folio);  in jdata_dirty_folio()
    [all …]

/linux-6.3-rc2/fs/

dax.c
    353  if (page->mapping)  in dax_page_share_get()
    387  page->mapping = mapping;  in dax_associate_entry()
    410  WARN_ON_ONCE(page->mapping && page->mapping != mapping);  in dax_disassociate_entry()
    411  page->mapping = NULL;  in dax_disassociate_entry()
    445  struct address_space *mapping = READ_ONCE(page->mapping);  in dax_lock_page() local
    448  if (!mapping || !dax_mapping(mapping))  in dax_lock_page()
    464  if (mapping != page->mapping) {  in dax_lock_page()
    486  struct address_space *mapping = page->mapping;  in dax_unlock_page() local
    707  if (!dax_mapping(mapping) || !mapping_mapped(mapping))  in dax_layout_busy_page_range()
   1674  .inode = mapping->host,  in dax_iomap_pte_fault()
    [all …]

/linux-6.3-rc2/arch/nios2/mm/

cacheflush.c
     82  flush_dcache_mmap_lock(mapping);  in flush_aliases()
     95  flush_dcache_mmap_unlock(mapping);  in flush_aliases()
    175  struct address_space *mapping;  in flush_dcache_page() local
    184  mapping = page_mapping_file(page);  in flush_dcache_page()
    187  if (mapping && !mapping_mapped(mapping)) {  in flush_dcache_page()
    191  if (mapping) {  in flush_dcache_page()
    193  flush_aliases(mapping, page);  in flush_dcache_page()
    207  struct address_space *mapping;  in update_mmu_cache() local
    222  mapping = page_mapping_file(page);  in update_mmu_cache()
    226  if(mapping)  in update_mmu_cache()
    [all …]

/linux-6.3-rc2/Documentation/translations/zh_CN/mm/

page_migration.rst
    143  2. ``int (*migratepage) (struct address_space *mapping,``
    168  void __SetPageMovable(struct page *page, struct address_space *mapping)
    171  PG_movable is not a real flag of struct page. Instead, the VM reuses the low
    175  page->mapping = page->mapping | PAGE_MAPPING_MOVABLE;
    177  so a driver should not access page->mapping directly. Instead, it should use page_mapping(), which
    178  can mask off the low 2 bits of page->mapping under the page lock to obtain the correct struct address_space.
    181  non-LRU movable pages, because the page->mapping field shares a union with other fields of struct page. If
    182  the driver frees the page after the VM has isolated it, then even though page->mapping has PAGE_MAPPING_MOVABLE set,
    185  page->mapping cannot have PAGE_MAPPING_MOVABLE set. Under lock_page() in a pfn scan,
    189  PageMovable() validates page->mapping under lock_page() and
    [all …]
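
The fragments above come from the Chinese translation of
Documentation/mm/page_migration.rst (rendered in English here); they
describe how the VM overloads the low bits of page->mapping to tag
anonymous and non-LRU movable pages. A minimal sketch of that masking
convention, assuming the PAGE_MAPPING_* constants from
<linux/page-flags.h> (my_page_mapping() is a hypothetical stand-in for
the real page_mapping() helper)::

    static struct address_space *my_page_mapping(struct page *page)
    {
            unsigned long raw = (unsigned long)READ_ONCE(page->mapping);

            /* Anonymous pages carry tag bits, not a plain pointer. */
            if (raw & PAGE_MAPPING_ANON)
                    return NULL;

            /*
             * PAGE_MAPPING_FLAGS covers both tag bits
             * (PAGE_MAPPING_ANON | PAGE_MAPPING_MOVABLE); clearing them
             * yields a usable struct address_space pointer.
             */
            return (struct address_space *)(raw & ~PAGE_MAPPING_FLAGS);
    }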

/linux-6.3-rc2/drivers/net/wireless/marvell/mwifiex/

util.h
     57  struct mwifiex_dma_mapping *mapping)  in mwifiex_store_mapping() argument
     61  memcpy(&cb->dma_mapping, mapping, sizeof(*mapping));  in mwifiex_store_mapping()
     65  struct mwifiex_dma_mapping *mapping)  in mwifiex_get_mapping() argument
     69  memcpy(mapping, &cb->dma_mapping, sizeof(*mapping));  in mwifiex_get_mapping()
     74  struct mwifiex_dma_mapping mapping;  in MWIFIEX_SKB_DMA_ADDR() local
     76  mwifiex_get_mapping(skb, &mapping);  in MWIFIEX_SKB_DMA_ADDR()
     78  return mapping.addr;  in MWIFIEX_SKB_DMA_ADDR()

/linux-6.3-rc2/drivers/sh/clk/

core.c
    340  struct clk_mapping *mapping = clk->mapping;  in clk_establish_mapping() local
    345  if (!mapping) {  in clk_establish_mapping()
    361  mapping = clkp->mapping;  in clk_establish_mapping()
    362  BUG_ON(!mapping);  in clk_establish_mapping()
    368  if (!mapping->base && mapping->phys) {  in clk_establish_mapping()
    371  mapping->base = ioremap(mapping->phys, mapping->len);  in clk_establish_mapping()
    378  kref_get(&mapping->ref);  in clk_establish_mapping()
    381  clk->mapping = mapping;  in clk_establish_mapping()
    394  iounmap(mapping->base);  in clk_destroy_mapping()
    399  struct clk_mapping *mapping = clk->mapping;  in clk_teardown_mapping() local
    [all …]

/linux-6.3-rc2/Documentation/driver-api/

io-mapping.rst
      8  The io_mapping functions in linux/io-mapping.h provide an abstraction for
      9  efficiently mapping small regions of an I/O device to the CPU. The initial
     14  A mapping object is created during driver initialization using::
     20  mappable, while 'size' indicates how large a mapping region to
     23  This _wc variant provides a mapping which may only be used with
     31  void *io_mapping_map_local_wc(struct io_mapping *mapping,
     34  void *io_mapping_map_atomic_wc(struct io_mapping *mapping,
     37  'offset' is the offset within the defined mapping region. Accessing
     72  undoes the side effects of the mapping functions.
     77  void *io_mapping_map_wc(struct io_mapping *mapping,
    [all …]
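
Read together, the calls documented above suggest the usual driver
lifecycle: create one io_mapping for a whole BAR at init time, take cheap
short-lived mappings of small regions while running, and free the object
on teardown. A minimal sketch under that reading; io_mapping_create_wc(),
io_mapping_map_local_wc(), io_mapping_unmap_local() and io_mapping_free()
are the real API from <linux/io-mapping.h>, while the BAR address, size
and register offset here are made up::

    static int my_driver_touch_bar(resource_size_t bar_base,
                                   unsigned long bar_size)
    {
            struct io_mapping *iomap;
            void __iomem *regs;

            /* One mapping object for the whole BAR, created once. */
            iomap = io_mapping_create_wc(bar_base, bar_size);
            if (!iomap)
                    return -ENOMEM;

            /* Short-lived, per-CPU mapping of one small region. */
            regs = io_mapping_map_local_wc(iomap, 0x1000);
            writel(1, regs);                /* write-combined MMIO store */
            io_mapping_unmap_local(regs);

            io_mapping_free(iomap);
            return 0;
    }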

/linux-6.3-rc2/fs/afs/

write.c
     32  return fscache_dirty_folio(mapping, folio,  in afs_dirty_folio()
     33  afs_vnode_cache(AFS_FS_I(mapping->host)));  in afs_dirty_folio()
    234  folio = filemap_get_folio(mapping, index);  in afs_kill_pages()
    258  struct address_space *mapping,  in afs_redirty_pages() argument
    478  XA_STATE(xas, &mapping->i_pages, index);  in afs_extend_writeback()
    672  mapping_set_error(mapping, ret);  in afs_write_back_from_locked_folio()
    678  mapping_set_error(mapping, -ENOSPC);  in afs_write_back_from_locked_folio()
    689  afs_kill_pages(mapping, start, len);  in afs_write_back_from_locked_folio()
    690  mapping_set_error(mapping, ret);  in afs_write_back_from_locked_folio()
    745  if (folio->mapping != mapping ||  in afs_writepages_region()
    [all …]