| /drivers/iommu/ |
| iommu-pages.c |
    40  struct folio *folio;  in iommu_alloc_pages_node_sz() local
    59  folio = __folio_alloc_node(gfp | __GFP_ZERO, order, nid);  in iommu_alloc_pages_node_sz()
    60  if (unlikely(!folio))  in iommu_alloc_pages_node_sz()
    73  mod_node_page_state(folio_pgdat(folio), NR_IOMMU_PAGES, pgcnt);  in iommu_alloc_pages_node_sz()
    74  lruvec_stat_mod_folio(folio, NR_SECONDARY_PAGETABLE, pgcnt);  in iommu_alloc_pages_node_sz()
    76  return folio_address(folio);  in iommu_alloc_pages_node_sz()
    82  struct folio *folio = ioptdesc_folio(iopt);  in __iommu_free_desc() local
    83  const unsigned long pgcnt = 1UL << folio_order(folio);  in __iommu_free_desc()
    85  mod_node_page_state(folio_pgdat(folio), NR_IOMMU_PAGES, -pgcnt);  in __iommu_free_desc()
    86  lruvec_stat_mod_folio(folio, NR_SECONDARY_PAGETABLE, -pgcnt);  in __iommu_free_desc()
    [all …]
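The hits above show iommu-pages.c backing page-table memory with a zeroed, NUMA-local folio and accounting it against NR_IOMMU_PAGES and NR_SECONDARY_PAGETABLE. Below is a minimal sketch of that pattern; the wrapper names and the virt_to_folio() lookup on free are illustrative assumptions, while the allocation, accounting, and folio calls follow the excerpt.

```c
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/vmstat.h>

static void *iommu_pt_alloc_sketch(gfp_t gfp, unsigned int order, int nid)
{
	struct folio *folio;

	folio = __folio_alloc_node(gfp | __GFP_ZERO, order, nid);
	if (unlikely(!folio))
		return NULL;

	/* Make the memory visible in /proc/vmstat and per-memcg stats. */
	mod_node_page_state(folio_pgdat(folio), NR_IOMMU_PAGES, 1L << order);
	lruvec_stat_mod_folio(folio, NR_SECONDARY_PAGETABLE, 1L << order);

	return folio_address(folio);
}

static void iommu_pt_free_sketch(void *table)
{
	struct folio *folio = virt_to_folio(table);
	const long pgcnt = 1L << folio_order(folio);

	/* Undo the accounting before the last reference goes away. */
	mod_node_page_state(folio_pgdat(folio), NR_IOMMU_PAGES, -pgcnt);
	lruvec_stat_mod_folio(folio, NR_SECONDARY_PAGETABLE, -pgcnt);
	folio_put(folio);
}
```

Accounting through both mod_node_page_state() and lruvec_stat_mod_folio() keeps the pages visible per NUMA node and per memory cgroup.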
|
| iommu-pages.h |
    34  static inline struct ioptdesc *folio_ioptdesc(struct folio *folio)  in folio_ioptdesc() argument
    36  return (struct ioptdesc *)folio;  in folio_ioptdesc()
    39  static inline struct folio *ioptdesc_folio(struct ioptdesc *iopt)  in ioptdesc_folio()
    41  return (struct folio *)iopt;  in ioptdesc_folio()
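iommu-pages.h converts between the page-table descriptor and its backing folio with plain casts: struct ioptdesc is an overlay on struct folio, the same approach struct ptdesc takes for CPU page tables. A sketch of that overlay, with a hypothetical struct layout and helper names; only the cast pattern is taken from the excerpt.

```c
#include <linux/mm.h>
#include <linux/mm_types.h>

/* Hypothetical overlay; the real struct ioptdesc has driver-specific fields. */
struct ioptdesc_sketch {
	unsigned long __page_flags;	/* overlays folio->flags */
	/* remaining words reuse the rest of the struct page area */
};

static inline struct ioptdesc_sketch *folio_to_desc_sketch(struct folio *folio)
{
	return (struct ioptdesc_sketch *)folio;
}

static inline struct folio *desc_to_folio_sketch(struct ioptdesc_sketch *iopt)
{
	return (struct folio *)iopt;
}

/* Recover the descriptor for a page-table page from its kernel address. */
static inline struct ioptdesc_sketch *virt_to_desc_sketch(void *table)
{
	return folio_to_desc_sketch(virt_to_folio(table));
}
```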
|
| /drivers/gpu/drm/i915/gem/ |
| i915_gem_shmem.c |
    37   struct folio *last = NULL;  in shmem_sg_free_table()
    44   struct folio *folio = page_folio(page);  in shmem_sg_free_table() local
    46   if (folio == last)  in shmem_sg_free_table()
    48   last = folio;  in shmem_sg_free_table()
    50   folio_mark_dirty(folio);  in shmem_sg_free_table()
    52   folio_mark_accessed(folio);  in shmem_sg_free_table()
    102  struct folio *folio;  in shmem_sg_alloc_table() local
    113  if (!IS_ERR(folio))  in shmem_sg_alloc_table()
    117  ret = PTR_ERR(folio);  in shmem_sg_alloc_table()
    307  struct folio *folio = NULL;  in __shmem_writeback() local
    [all …]
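In shmem_sg_free_table(), consecutive pages of a large folio share one struct folio, so the loop remembers the last folio it touched and marks each folio dirty/accessed only once. A reduced sketch of that dedup-by-last-folio loop, assuming a plain page array in place of the driver's sg_table walk:

```c
#include <linux/mm.h>
#include <linux/swap.h>

static void release_backing_folios_sketch(struct page **pages, unsigned long npages,
					  bool dirty, bool backup)
{
	struct folio *last = NULL;
	unsigned long i;

	for (i = 0; i < npages; i++) {
		struct folio *folio = page_folio(pages[i]);

		if (folio == last)
			continue;	/* this large folio was already handled */
		last = folio;

		if (dirty)
			folio_mark_dirty(folio);
		if (backup)
			folio_mark_accessed(folio);
	}
}
```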
|
| /drivers/scsi/ |
| scsicam.c |
    37  struct folio *folio;  in scsi_bios_ptable() local
    39  folio = read_mapping_folio(mapping, 0, NULL);  in scsi_bios_ptable()
    40  if (IS_ERR(folio))  in scsi_bios_ptable()
    43  res = kmemdup(folio_address(folio) + 0x1be, 66, GFP_KERNEL);  in scsi_bios_ptable()
    44  folio_put(folio);  in scsi_bios_ptable()
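scsi_bios_ptable() reads the drive's boot sector through the page cache and duplicates the 66-byte legacy partition table that starts at byte offset 0x1be. A self-contained sketch of the same sequence (the wrapper name is hypothetical):

```c
#include <linux/err.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/string.h>

static unsigned char *read_ptable_sketch(struct address_space *mapping)
{
	struct folio *folio;
	unsigned char *res;

	/* Read the folio containing byte 0 of the device (the boot sector). */
	folio = read_mapping_folio(mapping, 0, NULL);
	if (IS_ERR(folio))
		return NULL;

	/* The DOS partition table is 66 bytes starting at offset 0x1be. */
	res = kmemdup(folio_address(folio) + 0x1be, 66, GFP_KERNEL);
	folio_put(folio);
	return res;
}
```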
|
| /drivers/gpu/drm/ |
| drm_gem.c |
    622  struct folio *folio;  in drm_gem_get_pages() local
    651  if (IS_ERR(folio))  in drm_gem_get_pages()
    655  pages[i] = folio_file_page(folio, i);  in drm_gem_get_pages()
    663  (folio_pfn(folio) >= 0x00100000UL));  in drm_gem_get_pages()
    682  return ERR_CAST(folio);  in drm_gem_get_pages()
    713  struct folio *folio;  in drm_gem_put_pages() local
    717  folio = page_folio(pages[i]);  in drm_gem_put_pages()
    720  folio_mark_dirty(folio);  in drm_gem_put_pages()
    723  folio_mark_accessed(folio);  in drm_gem_put_pages()
    726  if (!folio_batch_add(&fbatch, folio))  in drm_gem_put_pages()
    [all …]
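The drm_gem_put_pages() hits show the release side: each page is converted back to its folio, optionally marked dirty/accessed, and references are dropped in batches through a folio_batch rather than one folio_put() per page. A sketch of that loop, assuming the pages of a large folio sit consecutively in the array (as drm_gem_get_pages() arranges); parameter names are illustrative.

```c
#include <linux/mm.h>
#include <linux/pagevec.h>
#include <linux/swap.h>

static void put_object_pages_sketch(struct page **pages, unsigned long npages,
				    bool dirty, bool accessed)
{
	struct folio_batch fbatch;
	unsigned long i;

	folio_batch_init(&fbatch);

	for (i = 0; i < npages; i++) {
		struct folio *folio = page_folio(pages[i]);

		if (dirty)
			folio_mark_dirty(folio);
		if (accessed)
			folio_mark_accessed(folio);

		/* folio_batch_add() returns the space left; flush when the batch fills. */
		if (!folio_batch_add(&fbatch, folio))
			folio_batch_release(&fbatch);

		/* A large folio backs several consecutive array slots; handle it once. */
		i += folio_nr_pages(folio) - 1;
	}

	if (folio_batch_count(&fbatch))
		folio_batch_release(&fbatch);
}
```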
|
| /drivers/misc/lkdtm/ |
| usercopy.c |
    405  struct folio *folio;  in lkdtm_USERCOPY_FOLIO() local
    412  folio = folio_alloc(GFP_KERNEL | __GFP_ZERO, 1);  in lkdtm_USERCOPY_FOLIO()
    413  if (!folio) {  in lkdtm_USERCOPY_FOLIO()
    417  addr = folio_address(folio);  in lkdtm_USERCOPY_FOLIO()
    422  folio_put(folio);  in lkdtm_USERCOPY_FOLIO()
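lkdtm_USERCOPY_FOLIO() allocates an order-1 (two page) zeroed folio so a buffer can deliberately straddle a page boundary for the hardened-usercopy test. A sketch of just the allocation and cleanup, with an illustrative memset standing in for the test's copy:

```c
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/string.h>

static int folio_boundary_buffer_sketch(void)
{
	struct folio *folio;
	unsigned char *addr;

	/* Order 1 gives two contiguous, zeroed pages. */
	folio = folio_alloc(GFP_KERNEL | __GFP_ZERO, 1);
	if (!folio)
		return -ENOMEM;

	addr = folio_address(folio);
	/* Place data so it crosses from the first page into the second. */
	memset(addr + PAGE_SIZE - 16, 0xaa, 32);

	folio_put(folio);
	return 0;
}
```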
|
| /drivers/gpu/drm/ttm/ |
| ttm_backup.c |
    55   struct folio *from_folio;  in ttm_backup_copy_page()
    102  struct folio *to_folio;  in ttm_backup_backup_page()
|
| /drivers/dax/ |
| device.c |
    91  struct folio *folio = pfn_folio(pfn + i);  in dax_set_mapping() local
    93  if (folio->mapping)  in dax_set_mapping()
    96  folio->mapping = filp->f_mapping;  in dax_set_mapping()
    97  folio->index = pgoff + i;  in dax_set_mapping()
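dax_set_mapping() associates each PFN inserted by a fault with the file it belongs to by filling in folio->mapping and folio->index, so reverse mapping can later find the owner. A sketch with hypothetical parameters standing in for the driver's vm_fault context:

```c
#include <linux/fs.h>
#include <linux/mm.h>

static void set_dax_mapping_sketch(struct file *filp, unsigned long pfn,
				   unsigned long nr_pages, pgoff_t pgoff)
{
	unsigned long i;

	for (i = 0; i < nr_pages; i++) {
		struct folio *folio = pfn_folio(pfn + i);

		if (folio->mapping)
			continue;	/* already associated with the file */

		folio->mapping = filp->f_mapping;
		folio->index = pgoff + i;
	}
}
```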
|
| /drivers/iommu/iommufd/ |
| pages.c |
    644  struct folio **folios = *folios_p;  in batch_from_folios()
    648  struct folio *folio = *folios;  in batch_from_folios() local
    649  unsigned long nr = folio_nr_pages(folio) - offset;  in batch_from_folios()
    658  rc = folio_add_pins(folio, nr - 1);  in batch_from_folios()
    759  struct folio **ufolios;
    762  struct folio **ufolios_next;
    826  struct folio *folio = user->ufolios[i];  in pin_memfd_pages() local
    827  unsigned long nr = folio_nr_pages(folio);  in pin_memfd_pages()
    835  *upages++ = folio_page(folio, offset);  in pin_memfd_pages()
    837  int rc = folio_add_pins(folio, npin - 1);  in pin_memfd_pages()
    [all …]
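In iommufd, a folio pinned from a memfd carries a single pin, but the caller hands out one page pointer per small page, so folio_add_pins() adds one extra pin for every additional page that will be unpinned individually later. A reduced sketch of that step; the surrounding batch bookkeeping and the function name are assumptions.

```c
#include <linux/mm.h>

static int expand_folio_pins_sketch(struct folio *folio, unsigned long offset,
				    unsigned long npin, struct page **upages)
{
	unsigned long i;

	/* The folio arrived with one pin; add one per extra page handed out. */
	if (npin > 1) {
		int rc = folio_add_pins(folio, npin - 1);

		if (rc)
			return rc;
	}

	for (i = 0; i < npin; i++)
		upages[i] = folio_page(folio, offset + i);

	return 0;
}
```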
|
| /drivers/gpu/drm/nouveau/ |
| nouveau_svm.c |
    593  struct folio *folio;  in nouveau_atomic_range_fault() local
    613  page = make_device_exclusive(mm, start, drm->dev, &folio);  in nouveau_atomic_range_fault()
    619  folio = page_folio(page);  in nouveau_atomic_range_fault()
    627  folio_unlock(folio);  in nouveau_atomic_range_fault()
    628  folio_put(folio);  in nouveau_atomic_range_fault()
    644  folio_unlock(folio);  in nouveau_atomic_range_fault()
    645  folio_put(folio);  in nouveau_atomic_range_fault()
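nouveau_atomic_range_fault() uses make_device_exclusive() to revoke CPU access to the page and receive it back together with its locked folio; once the GPU page table has been updated, the folio is unlocked and released. A sketch assuming the ERR_PTR-style return used with the signature shown above, with the actual PTE update elided:

```c
#include <linux/err.h>
#include <linux/mm.h>
#include <linux/rmap.h>

static int map_exclusive_sketch(struct mm_struct *mm, unsigned long start,
				void *owner)
{
	struct folio *folio;
	struct page *page;

	/* Revoke CPU access; on success the folio comes back locked. */
	page = make_device_exclusive(mm, start, owner, &folio);
	if (IS_ERR(page))
		return PTR_ERR(page);

	/* ... program the device page table with page_to_pfn(page) here ... */

	folio_unlock(folio);
	folio_put(folio);
	return 0;
}
```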
|
| /drivers/gpu/drm/i915/ |
| i915_gpu_error.c |
    199  struct folio *folio;  in pool_refill() local
    201  folio = folio_alloc(gfp, 0);  in pool_refill()
    202  if (!folio)  in pool_refill()
    205  folio_batch_add(fbatch, folio);  in pool_refill()
    226  struct folio *folio;  in pool_alloc() local
    228  folio = folio_alloc(gfp, 0);  in pool_alloc()
    230  folio = fbatch->folios[--fbatch->nr];  in pool_alloc()
    232  return folio ? folio_address(folio) : NULL;  in pool_alloc()
    237  struct folio *folio = virt_to_folio(addr);  in pool_free() local
    240  folio_batch_add(fbatch, folio);  in pool_free()
    [all …]
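i915_gpu_error.c keeps a small reserve of order-0 folios in a folio_batch so the error-capture path can still get pages when folio_alloc() fails under memory pressure, and it returns pages to the batch on free until the batch is full. A reduced sketch with a hypothetical pool struct; the refill step is only hinted at.

```c
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/pagevec.h>

struct folio_pool_sketch {
	struct folio_batch fbatch;	/* spare order-0 folios */
};

static void pool_init_sketch(struct folio_pool_sketch *pool)
{
	folio_batch_init(&pool->fbatch);
	/* A real pool would pre-fill the batch here (cf. pool_refill()). */
}

static void *pool_alloc_sketch(struct folio_pool_sketch *pool, gfp_t gfp)
{
	struct folio *folio;

	folio = folio_alloc(gfp, 0);
	if (!folio && folio_batch_count(&pool->fbatch))
		folio = pool->fbatch.folios[--pool->fbatch.nr];

	return folio ? folio_address(folio) : NULL;
}

static void pool_free_sketch(struct folio_pool_sketch *pool, void *addr)
{
	struct folio *folio = virt_to_folio(addr);

	/* Stash the folio for reuse if the batch has room, otherwise drop it. */
	if (folio_batch_space(&pool->fbatch))
		folio_batch_add(&pool->fbatch, folio);
	else
		folio_put(folio);
}
```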
|
| /drivers/dma-buf/ |
| udmabuf.c |
    29   struct folio **folios;
    40   struct folio **pinned_folios;
    326  loff_t start, loff_t size, struct folio **folios)  in udmabuf_pin_folios()
    376  struct folio **folios = NULL;  in udmabuf_create()
|
| /drivers/s390/net/ |
| ism_drv.c |
    304  struct folio *folio;  in ism_alloc_dmb() local
    323  folio = folio_alloc(GFP_KERNEL | __GFP_NOWARN | __GFP_NOMEMALLOC |  in ism_alloc_dmb()
    326  if (!folio) {  in ism_alloc_dmb()
    331  dmb->cpu_addr = folio_address(folio);  in ism_alloc_dmb()
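ism_alloc_dmb() backs a DMB with a single physically contiguous folio and uses folio_address() as its CPU mapping. A sketch of the allocation; the excerpt truncates the GFP flag list, so only the flags visible above are used here, and deriving the order from get_order() on the buffer length is an assumption.

```c
#include <linux/gfp.h>
#include <linux/mm.h>

static void *alloc_dmb_buffer_sketch(size_t dmb_len)
{
	struct folio *folio;

	/* The excerpt truncates the GFP flag list; only the visible flags appear here. */
	folio = folio_alloc(GFP_KERNEL | __GFP_NOWARN | __GFP_NOMEMALLOC,
			    get_order(dmb_len));
	if (!folio)
		return NULL;

	return folio_address(folio);
}
```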
|
| /drivers/md/bcache/ |
| super.c |
    171  struct folio *folio;  in read_super() local
    174  folio = mapping_read_folio_gfp(bdev->bd_mapping,  in read_super()
    176  if (IS_ERR(folio))  in read_super()
    178  s = folio_address(folio) + offset_in_folio(folio, SB_OFFSET);  in read_super()
    275  folio_put(folio);  in read_super()
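bcache's read_super() pulls the superblock in through the block device's page cache with mapping_read_folio_gfp() and then addresses it with offset_in_folio(), which works regardless of the folio size the mapping returns. A sketch with a hypothetical offset constant; the caller keeps the folio reference and drops it with folio_put() once done, as the hit at the end of the excerpt shows.

```c
#include <linux/blkdev.h>
#include <linux/err.h>
#include <linux/pagemap.h>

#define SB_OFFSET_SKETCH 4096	/* hypothetical superblock byte offset */

static void *read_super_sketch(struct block_device *bdev, struct folio **foliop)
{
	struct folio *folio;

	folio = mapping_read_folio_gfp(bdev->bd_mapping,
				       SB_OFFSET_SKETCH >> PAGE_SHIFT, GFP_KERNEL);
	if (IS_ERR(folio))
		return ERR_CAST(folio);

	/* The caller holds the folio and calls folio_put() when it is done. */
	*foliop = folio;
	return folio_address(folio) + offset_in_folio(folio, SB_OFFSET_SKETCH);
}
```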
|
| /drivers/md/ |
| dm-flakey.c |
    413  folio_put(fi.folio);  in clone_free()
|
| dm-crypt.c |
    1782  if (folio_test_large(fi.folio)) {  in crypt_free_buffer_pages()
    1784  1 << folio_order(fi.folio));  in crypt_free_buffer_pages()
    1785  folio_put(fi.folio);  in crypt_free_buffer_pages()
    1787  mempool_free(&fi.folio->page, &cc->page_pool);  in crypt_free_buffer_pages()
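crypt_free_buffer_pages() distinguishes how a bio's pages were obtained: pages belonging to a large folio came from a high-order allocation and are released with folio_put(), while order-0 pages are returned to the target's mempool. A reduced sketch of that decision; the percpu counter adjustment visible in the excerpt is elided and the parameter names are illustrative.

```c
#include <linux/mempool.h>
#include <linux/mm.h>

static void free_crypt_folio_sketch(struct folio *folio, mempool_t *page_pool)
{
	if (folio_test_large(folio)) {
		/* High-order allocations bypass the mempool entirely. */
		folio_put(folio);
	} else {
		/* Order-0 pages go back to the pool they were drawn from. */
		mempool_free(&folio->page, page_pool);
	}
}
```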
|