| /linux/mm/ |
| truncate.c |
    48 if (shmem_mapping(mapping) || dax_mapping(mapping)) in clear_shadow_entries()
    52 xa_lock_irq(&mapping->i_pages); in clear_shadow_entries()
    63 inode_add_lru(mapping->host); in clear_shadow_entries()
    79 if (shmem_mapping(mapping)) in truncate_folio_batch_exceptionals()
    89 dax = dax_mapping(mapping); in truncate_folio_batch_exceptionals()
    174 if (folio->mapping != mapping) in truncate_inode_folio()
    240 if (!mapping) in generic_error_remove_folio()
    266 if (!mapping) in mapping_evict_folio()
    552 if (folio->mapping != mapping) in invalidate_complete_folio2()
    582 if (folio->mapping != mapping || mapping->a_ops->launder_folio == NULL) in folio_launder()
    [all …]
|
| filemap.c |
    229 struct address_space *mapping = folio->mapping; in __filemap_remove_folio() local
    260 struct address_space *mapping = folio->mapping; in filemap_remove_folio() local
    816 struct address_space *mapping = old->mapping; in replace_page_cache_folio() local
    826 new->mapping = mapping; in replace_page_cache_folio()
    873 folio->mapping = mapping; in __filemap_add_folio()
    1904 if (unlikely(folio->mapping != mapping)) { in __filemap_get_folio()
    2111 if (folio->mapping != mapping || in find_lock_entries()
    2998 if (unlikely(folio->mapping != mapping)) in folio_seek_hole_data()
    3368 if (unlikely(folio->mapping != mapping)) { in filemap_fault()
    3514 if (folio->mapping != mapping) in next_uptodate_folio()
    [all …]
|
| readahead.c |
    208 struct address_space *mapping = ractl->mapping; in page_cache_ra_unbounded() local
    226 filemap_invalidate_lock_shared(mapping); in page_cache_ra_unbounded()
    264 mapping_min_folio_order(mapping)); in page_cache_ra_unbounded()
    291 filemap_invalidate_unlock_shared(mapping); in page_cache_ra_unbounded()
    330 struct address_space *mapping = ractl->mapping; in force_page_cache_ra() local
    335 if (unlikely(!mapping->a_ops->read_folio && !mapping->a_ops->readahead)) in force_page_cache_ra()
    455 struct address_space *mapping = ractl->mapping; in page_cache_ra_order() local
    463 gfp_t gfp = readahead_gfp_mask(mapping); in page_cache_ra_order()
    484 filemap_invalidate_lock_shared(mapping); in page_cache_ra_order()
    509 filemap_invalidate_unlock_shared(mapping); in page_cache_ra_order()
    [all …]
|
| page-writeback.c |
    2443 if (unlikely(folio->mapping != mapping)) in folio_prepare_writeback()
    2683 ret = mapping->a_ops->writepages(mapping, wbc); in do_writepages()
    2834 if (mapping->host) { in filemap_dirty_folio()
    2857 struct address_space *mapping = folio->mapping; in folio_redirty_for_writepage() local
    2863 if (mapping && mapping_can_writeback(mapping)) { in folio_redirty_for_writepage()
    2895 if (likely(mapping)) { in folio_mark_dirty()
    2909 return mapping->a_ops->dirty_folio(mapping, folio); in folio_mark_dirty()
    2994 if (mapping && mapping_can_writeback(mapping)) { in folio_clear_dirty_for_io()
    3078 if (mapping && mapping_use_writeback_tags(mapping)) { in __folio_end_writeback()
    3096 if (mapping->host && !mapping_tagged(mapping, in __folio_end_writeback()
    [all …]
|
| /linux/include/linux/ |
| pagemap.h |
    247 if (mapping->host) in mapping_set_error()
    269 return mapping && test_bit(AS_UNEVICTABLE, &mapping->flags); in mapping_unevictable()
    340 return mapping->gfp_mask; in mapping_gfp_mask()
    424 mapping->flags = (mapping->flags & ~AS_FOLIO_ORDER_MASK) | in mapping_set_folio_order_range()
    555 return folio->mapping; in folio_file_mapping()
    594 return folio->mapping->host; in folio_inode()
    808 mapping_gfp_mask(mapping)); in filemap_grab_folio()
    990 return find_or_create_page(mapping, index, mapping_gfp_mask(mapping)); in grab_cache_page()
    1321 if (!mapping->nrpages) in filemap_range_needs_writeback()
    1360 .mapping = m, \
    [all …]
|
| io-mapping.h |
    58 io_mapping_fini(struct io_mapping *mapping) in io_mapping_fini() argument
    60 iomap_free(mapping->base, mapping->size); in io_mapping_fini()
    70 BUG_ON(offset >= mapping->size); in io_mapping_map_atomic_wc()
    71 phys_addr = mapping->base + offset; in io_mapping_map_atomic_wc()
    96 BUG_ON(offset >= mapping->size); in io_mapping_map_local_wc()
    97 phys_addr = mapping->base + offset; in io_mapping_map_local_wc()
    113 BUG_ON(offset >= mapping->size); in io_mapping_map_wc()
    114 phys_addr = mapping->base + offset; in io_mapping_map_wc()
    147 io_mapping_fini(struct io_mapping *mapping) in io_mapping_fini() argument
    149 iounmap(mapping->iomem); in io_mapping_fini()
    [all …]
|
| tpm_eventlog.h |
    166 void *mapping = NULL; in __calc_tpm2_event_size() local
    188 if (!mapping) { in __calc_tpm2_event_size()
    193 mapping = marker_start; in __calc_tpm2_event_size()
    233 TPM_MEMUNMAP(mapping, mapping_size); in __calc_tpm2_event_size()
    237 if (!mapping) { in __calc_tpm2_event_size()
    242 mapping = marker; in __calc_tpm2_event_size()
    245 memcpy(&halg, mapping, halg_size); in __calc_tpm2_event_size()
    267 TPM_MEMUNMAP(mapping, mapping_size); in __calc_tpm2_event_size()
    271 if (!mapping) { in __calc_tpm2_event_size()
    276 mapping = marker; in __calc_tpm2_event_size()
    [all …]
|
| shmem_fs.h |
    100 bool shmem_mapping(struct address_space *mapping);
    102 static inline bool shmem_mapping(struct address_space *mapping) in shmem_mapping() argument
    107 extern void shmem_unlock_mapping(struct address_space *mapping);
    108 extern struct page *shmem_read_mapping_page_gfp(struct address_space *mapping,
    134 extern unsigned long shmem_partial_swap_usage(struct address_space *mapping,
    148 struct folio *shmem_read_folio_gfp(struct address_space *mapping,
    151 static inline struct folio *shmem_read_folio(struct address_space *mapping, in shmem_read_folio() argument
    154 return shmem_read_folio_gfp(mapping, index, mapping_gfp_mask(mapping)); in shmem_read_folio()
    158 struct address_space *mapping, pgoff_t index) in shmem_read_mapping_page() argument
    160 return shmem_read_mapping_page_gfp(mapping, index, in shmem_read_mapping_page()
    [all …]
|
| /linux/drivers/gpu/drm/panfrost/ |
| panfrost_gem.c |
    65 mapping = iter; in panfrost_gem_mapping_get()
    71 return mapping; in panfrost_gem_mapping_get()
    77 if (mapping->active) in panfrost_gem_teardown_mapping()
    78 panfrost_mmu_unmap(mapping); in panfrost_gem_teardown_mapping()
    95 kfree(mapping); in panfrost_gem_mapping_release()
    100 if (!mapping) in panfrost_gem_mapping_put()
    124 mapping = kzalloc(sizeof(*mapping), GFP_KERNEL); in panfrost_gem_open()
    125 if (!mapping) in panfrost_gem_open()
    131 mapping->obj = bo; in panfrost_gem_open()
    146 ret = drm_mm_insert_node_generic(&mapping->mmu->mm, &mapping->mmnode, in panfrost_gem_open()
    [all …]
|
| /linux/drivers/gpu/drm/tegra/ |
| uapi.c |
    21 host1x_bo_put(mapping->bo); in tegra_drm_mapping_release()
    23 kfree(mapping); in tegra_drm_mapping_release()
    206 mapping = kzalloc(sizeof(*mapping), GFP_KERNEL); in tegra_drm_ioctl_channel_map()
    207 if (!mapping) { in tegra_drm_ioctl_channel_map()
    220 if (!mapping->bo) { in tegra_drm_ioctl_channel_map()
    243 mapping->map = host1x_bo_pin(mapping_dev, mapping->bo, direction, NULL); in tegra_drm_ioctl_channel_map()
    249 mapping->iova = mapping->map->phys; in tegra_drm_ioctl_channel_map()
    250 mapping->iova_end = mapping->iova + host1x_to_tegra_bo(mapping->bo)->gem.size; in tegra_drm_ioctl_channel_map()
    266 kfree(mapping); in tegra_drm_ioctl_channel_map()
    287 mapping = xa_erase(&context->mappings, args->mapping); in tegra_drm_ioctl_channel_unmap()
    [all …]
|
| /linux/include/trace/events/ |
| filemap.h |
    34 if (folio->mapping->host->i_sb)
    62 struct address_space *mapping,
    78 if (mapping->host->i_sb)
    80 mapping->host->i_sb->s_dev;
    98 struct address_space *mapping,
    107 struct address_space *mapping,
    117 TP_ARGS(mapping, index),
    127 if (mapping->host->i_sb)
    129 mapping->host->i_sb->s_dev;
    146 TP_ARGS(mapping, eseq),
    [all …]
|
| /linux/drivers/media/usb/uvc/ |
| uvc_ctrl.c |
    1273 return mapping->get(mapping, UVC_GET_RES, in uvc_get_ctrl_bitmap()
    1277 return mapping->get(mapping, UVC_GET_MAX, in uvc_get_ctrl_bitmap()
    1962 min = mapping->get(mapping, UVC_GET_MIN, in uvc_ctrl_set()
    1964 max = mapping->get(mapping, UVC_GET_MAX, in uvc_ctrl_set()
    1966 step = mapping->get(mapping, UVC_GET_RES, in uvc_ctrl_set()
    2046 mapping->set(mapping, value, in uvc_ctrl_set()
    2448 if (mapping->menu_mapping && mapping->menu_mask) { in __uvc_ctrl_add_mapping()
    2456 if (mapping->menu_names && mapping->menu_mask) { in __uvc_ctrl_add_mapping()
    2506 uvc_map_get_name(mapping), mapping->id); in uvc_ctrl_add_mapping()
    2553 uvc_map_get_name(mapping), mapping->id); in uvc_ctrl_add_mapping()
    [all …]
|
| /linux/arch/arm/mm/ |
| dma-mapping.c |
    827 BUG_ON(addr < mapping->base || bitmap_index > mapping->extensions); in __free_iova()
    1568 if (!mapping) in arm_iommu_create_mapping()
    1583 mapping->base = base; in arm_iommu_create_mapping()
    1595 return mapping; in arm_iommu_create_mapping()
    1601 kfree(mapping); in arm_iommu_create_mapping()
    1617 kfree(mapping); in release_iommu_mapping()
    1624 if (mapping->nr_bitmaps >= mapping->extensions) in extend_iommu_mapping()
    1628 mapping->bitmaps[next_bitmap] = kzalloc(mapping->bitmap_size, in extend_iommu_mapping()
    1640 if (mapping) in arm_iommu_release_mapping()
    1700 if (!mapping) { in arm_iommu_detach_device()
    [all …]
|
| flush.c |
    253 flush_dcache_mmap_lock(mapping); in __flush_dcache_aliases()
    281 flush_dcache_mmap_unlock(mapping); in __flush_dcache_aliases()
    289 struct address_space *mapping; in __sync_icache_dcache() local
    303 mapping = folio_flush_mapping(folio); in __sync_icache_dcache()
    305 mapping = NULL; in __sync_icache_dcache()
    308 __flush_dcache_folio(mapping, folio); in __sync_icache_dcache()
    336 struct address_space *mapping; in flush_dcache_folio() local
    351 mapping = folio_flush_mapping(folio); in flush_dcache_folio()
    354 mapping && !folio_mapped(folio)) in flush_dcache_folio()
    358 if (mapping && cache_is_vivt()) in flush_dcache_folio()
    [all …]
|
| /linux/drivers/gpu/drm/etnaviv/ |
| etnaviv_mmu.c |
    143 WARN_ON(mapping->use); in etnaviv_iommu_reap_mapping()
    147 mapping->context = NULL; in etnaviv_iommu_reap_mapping()
    297 mapping->iova = iova; in etnaviv_iommu_map_gem()
    305 node = &mapping->vram_node; in etnaviv_iommu_map_gem()
    316 mapping->iova = node->start; in etnaviv_iommu_map_gem()
    336 WARN_ON(mapping->use); in etnaviv_iommu_unmap_gem()
    341 if (!mapping->context) { in etnaviv_iommu_unmap_gem()
    418 if (mapping->use > 0) { in etnaviv_iommu_get_suballoc_va()
    419 mapping->use++; in etnaviv_iommu_get_suballoc_va()
    455 mapping->use = 1; in etnaviv_iommu_get_suballoc_va()
    [all …]
|
| etnaviv_gem.c |
    222 return mapping; in etnaviv_gem_get_vram_mapping()
    234 mapping->use -= 1; in etnaviv_gem_mapping_unreference()
    251 if (mapping) { in etnaviv_gem_mapping_get()
    270 if (mapping) in etnaviv_gem_mapping_get()
    289 if (!mapping) { in etnaviv_gem_mapping_get()
    290 mapping = kzalloc(sizeof(*mapping), GFP_KERNEL); in etnaviv_gem_mapping_get()
    291 if (!mapping) { in etnaviv_gem_mapping_get()
    302 mapping->use = 1; in etnaviv_gem_mapping_get()
    308 kfree(mapping); in etnaviv_gem_mapping_get()
    320 return mapping; in etnaviv_gem_mapping_get()
    [all …]
|
| /linux/drivers/gpu/drm/exynos/ |
| exynos_drm_dma.c |
    66 ret = arm_iommu_attach_device(subdrv_dev, priv->mapping); in drm_iommu_attach_device()
    68 ret = iommu_attach_device(priv->mapping, subdrv_dev); in drm_iommu_attach_device()
    92 iommu_detach_device(priv->mapping, subdrv_dev); in drm_iommu_detach_device()
    109 if (!priv->mapping) { in exynos_drm_register_dma()
    110 void *mapping = NULL; in exynos_drm_register_dma() local
    113 mapping = arm_iommu_create_mapping(dev, in exynos_drm_register_dma()
    116 mapping = iommu_get_domain_for_dev(priv->dma_dev); in exynos_drm_register_dma()
    118 if (!mapping) in exynos_drm_register_dma()
    120 priv->mapping = mapping; in exynos_drm_register_dma()
    140 arm_iommu_release_mapping(priv->mapping); in exynos_drm_cleanup_dma()
    [all …]
|
| /linux/fs/gfs2/ |
| aops.c |
    126 struct inode *inode = folio->mapping->host; in __gfs2_jdata_write_folio()
    182 struct inode *inode = mapping->host; in gfs2_write_jdata_batch()
    205 if (unlikely(folio->mapping != mapping)) { in gfs2_write_jdata_batch()
    315 tag_pages_for_writeback(mapping, index, end); in gfs2_write_cache_jdata()
    346 mapping->writeback_index = done_index; in gfs2_write_cache_jdata()
    366 ret = gfs2_write_cache_jdata(mapping, wbc); in gfs2_jdata_writepages()
    370 ret = gfs2_write_cache_jdata(mapping, wbc); in gfs2_jdata_writepages()
    418 struct inode *inode = folio->mapping->host; in gfs2_read_folio()
    492 struct inode *inode = rac->mapping->host; in gfs2_readahead()
    548 return block_dirty_folio(mapping, folio); in jdata_dirty_folio()
    [all …]
|
| /linux/Documentation/arch/powerpc/ |
| vmemmap_dedup.rst |
    14 With 2M PMD level mapping, we require 32 struct pages and a single 64K vmemmap
    18 With 1G PUD level mapping, we require 16384 struct pages and a single 64K
    20 require 16 64K pages in vmemmap to map the struct page for 1G PUD level mapping.
    23 +-----------+ ---virt_to_page---> +-----------+ mapping to +-----------+
    35 | mapping | +-----------+ | |
    46 With 4K page size, 2M PMD level mapping requires 512 struct pages and a single
    48 require 8 4K pages in vmemmap to map the struct page for 2M pmd level mapping.
    52 +-----------+ ---virt_to_page---> +-----------+ mapping to +-----------+
    64 | mapping | +-----------+ | |
    74 With 1G PUD level mapping, we require 262144 struct pages and a single 4K
    [all …]
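The vmemmap_dedup.rst hits above quote the vmemmap cost of PMD- and PUD-level huge mappings for 64K and 4K base pages. As a rough, self-contained illustration of where those counts come from (assuming sizeof(struct page) == 64, as on common 64-bit configurations; this is only a sketch, not kernel code)::

    #include <stdio.h>

    /* vmemmap cost for one huge mapping: how many struct pages it needs,
     * and how many base pages of vmemmap those struct pages occupy. */
    static void vmemmap_cost(const char *label, unsigned long long map_size,
                             unsigned long long base_page)
    {
            unsigned long long nr_struct_pages = map_size / base_page;
            unsigned long long vmemmap_bytes = nr_struct_pages * 64;   /* assumed sizeof(struct page) */
            unsigned long long vmemmap_pages =
                    (vmemmap_bytes + base_page - 1) / base_page;

            printf("%s: %llu struct pages, %llu vmemmap page(s)\n",
                   label, nr_struct_pages, vmemmap_pages);
    }

    int main(void)
    {
            vmemmap_cost("64K pages, 2M PMD", 2ULL << 20, 64ULL << 10); /*     32 /    1 */
            vmemmap_cost("64K pages, 1G PUD", 1ULL << 30, 64ULL << 10); /*  16384 /   16 */
            vmemmap_cost(" 4K pages, 2M PMD", 2ULL << 20,  4ULL << 10); /*    512 /    8 */
            vmemmap_cost(" 4K pages, 1G PUD", 1ULL << 30,  4ULL << 10); /* 262144 / 4096 */
            return 0;
    }

The first three results reproduce the 32/1, 16384/16 and 512/8 figures quoted in the excerpt; the fourth follows from the same arithmetic under the stated struct-page-size assumption.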
|
| /linux/Documentation/translations/zh_CN/mm/ |
| page_migration.rst |
    143 2. ``int (*migratepage) (struct address_space *mapping,``
    168 void __SetPageMovable(struct page *page, struct address_space *mapping)
    171 PG_movable is not a real flag of struct page. Rather, the VM reuses the low bits of page->mapping:
    175 page->mapping = page->mapping | PAGE_MAPPING_MOVABLE;
    177 so a driver should not access page->mapping directly. Instead, the driver should use page_mapping(), which
    178 masks off the low 2 bits of page->mapping under the page lock and so yields the correct struct address_space.
    181 non-LRU movable pages, because the page->mapping field is unified with other variables in struct page. If
    182 the driver frees the page after the VM has isolated it, then even though page->mapping had PAGE_MAPPING_MOVABLE set,
    185 page->mapping cannot have PAGE_MAPPING_MOVABLE set. Under the lock_page() used in pfn scanning,
    189 differs: under lock_page(), PageMovable() validates page->mapping and
    [all …]
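The page_migration.rst lines above (translated from the zh_CN copy) describe how the movable marker lives in the low bits of page->mapping and why page_mapping() must mask those bits off. A minimal sketch of that tagging scheme; the stub type and helper names are hypothetical, and only the two flag values mirror what the excerpt describes::

    #define PAGE_MAPPING_MOVABLE    0x2UL
    #define PAGE_MAPPING_FLAGS      0x3UL   /* low two bits of page->mapping */

    struct page_stub {
            unsigned long mapping;          /* stand-in for page->mapping */
    };

    /* Conceptually what __SetPageMovable() does: tag the mapping pointer. */
    static void stub_set_page_movable(struct page_stub *page, unsigned long mapping)
    {
            page->mapping = mapping | PAGE_MAPPING_MOVABLE;
    }

    /* Conceptually what page_mapping() does: mask the flag bits back off. */
    static unsigned long stub_page_mapping(const struct page_stub *page)
    {
            return page->mapping & ~PAGE_MAPPING_FLAGS;
    }

    /* __PageMovable()-style check: only the movable bit is set. */
    static int stub_page_is_movable(const struct page_stub *page)
    {
            return (page->mapping & PAGE_MAPPING_FLAGS) == PAGE_MAPPING_MOVABLE;
    }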
|
| /linux/drivers/net/wireless/marvell/mwifiex/ |
| util.h |
    57 struct mwifiex_dma_mapping *mapping) in mwifiex_store_mapping() argument
    61 memcpy(&cb->dma_mapping, mapping, sizeof(*mapping)); in mwifiex_store_mapping()
    65 struct mwifiex_dma_mapping *mapping) in mwifiex_get_mapping() argument
    69 memcpy(mapping, &cb->dma_mapping, sizeof(*mapping)); in mwifiex_get_mapping()
    74 struct mwifiex_dma_mapping mapping; in MWIFIEX_SKB_DMA_ADDR() local
    76 mwifiex_get_mapping(skb, &mapping); in MWIFIEX_SKB_DMA_ADDR()
    78 return mapping.addr; in MWIFIEX_SKB_DMA_ADDR()
|
| /linux/drivers/sh/clk/ |
| core.c |
    340 struct clk_mapping *mapping = clk->mapping; in clk_establish_mapping() local
    345 if (!mapping) { in clk_establish_mapping()
    361 mapping = clkp->mapping; in clk_establish_mapping()
    362 BUG_ON(!mapping); in clk_establish_mapping()
    368 if (!mapping->base && mapping->phys) { in clk_establish_mapping()
    371 mapping->base = ioremap(mapping->phys, mapping->len); in clk_establish_mapping()
    378 kref_get(&mapping->ref); in clk_establish_mapping()
    381 clk->mapping = mapping; in clk_establish_mapping()
    394 iounmap(mapping->base); in clk_destroy_mapping()
    399 struct clk_mapping *mapping = clk->mapping; in clk_teardown_mapping() local
    [all …]
|
| /linux/fs/ |
| dax.c |
    339 if (page->mapping) in dax_page_share_get()
    373 page->mapping = mapping; in dax_associate_entry()
    396 WARN_ON_ONCE(page->mapping && page->mapping != mapping); in dax_disassociate_entry()
    397 page->mapping = NULL; in dax_disassociate_entry()
    431 struct address_space *mapping = READ_ONCE(folio->mapping); in dax_lock_folio() local
    434 if (!mapping || !dax_mapping(mapping)) in dax_lock_folio()
    450 if (mapping != folio->mapping) { in dax_lock_folio()
    472 struct address_space *mapping = folio->mapping; in dax_unlock_folio() local
    693 if (!dax_mapping(mapping) || !mapping_mapped(mapping)) in dax_layout_busy_page_range()
    1718 .inode = mapping->host, in dax_iomap_pte_fault()
    [all …]
|
| /linux/Documentation/driver-api/ |
| io-mapping.rst |
    8 The io_mapping functions in linux/io-mapping.h provide an abstraction for
    9 efficiently mapping small regions of an I/O device to the CPU. The initial
    14 A mapping object is created during driver initialization using::
    20 mappable, while 'size' indicates how large a mapping region to
    23 This _wc variant provides a mapping which may only be used with
    31 void *io_mapping_map_local_wc(struct io_mapping *mapping,
    34 void *io_mapping_map_atomic_wc(struct io_mapping *mapping,
    37 'offset' is the offset within the defined mapping region. Accessing
    72 undoes the side effects of the mapping functions.
    77 void *io_mapping_map_wc(struct io_mapping *mapping,
    [all …]
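The io-mapping.rst lines above outline the intended call pattern: create one io_mapping object at initialization time, then map small, short-lived windows of it on demand. A hedged sketch of that pattern; the my_dev structure, its helpers, and the BAR offsets are made up for illustration and are not from a real driver::

    #include <linux/errno.h>
    #include <linux/io.h>
    #include <linux/io-mapping.h>

    struct my_dev {
            struct io_mapping *vram;        /* created once at probe time */
    };

    static int my_dev_init(struct my_dev *dev, resource_size_t bar_base,
                           unsigned long bar_size)
    {
            /* One write-combined mapping object covers the whole region. */
            dev->vram = io_mapping_create_wc(bar_base, bar_size);
            return dev->vram ? 0 : -ENOMEM;
    }

    static void my_dev_write32(struct my_dev *dev, unsigned long offset, u32 val)
    {
            /* Temporarily map the page containing 'offset'. */
            void __iomem *vaddr =
                    io_mapping_map_local_wc(dev->vram, offset & PAGE_MASK);

            writel(val, vaddr + (offset & ~PAGE_MASK));
            io_mapping_unmap_local(vaddr);  /* undo the temporary mapping */
    }

    static void my_dev_fini(struct my_dev *dev)
    {
            io_mapping_free(dev->vram);
    }

For longer-lived windows the excerpt also lists io_mapping_map_wc(), which takes an explicit size and is paired with io_mapping_unmap() rather than the local variant used in this sketch.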
|
| /linux/arch/nios2/include/asm/ |
| cacheflush.h |
    54 #define flush_dcache_mmap_lock(mapping) xa_lock_irq(&mapping->i_pages) argument
    55 #define flush_dcache_mmap_unlock(mapping) xa_unlock_irq(&mapping->i_pages) argument
    56 #define flush_dcache_mmap_lock_irqsave(mapping, flags) \ argument
    57 xa_lock_irqsave(&mapping->i_pages, flags)
    58 #define flush_dcache_mmap_unlock_irqrestore(mapping, flags) \ argument
    59 xa_unlock_irqrestore(&mapping->i_pages, flags)
|