| /linux/drivers/iommu/iommufd/ |
| pages.c |
     620  for (; pages != end; pages++)  in batch_from_pages()
     879  pages->last_npinned = pages->npinned;  in do_update_pinned()
     891  if (pages->npinned == pages->last_npinned)  in update_unpinned()
     893  do_update_pinned(pages, pages->last_npinned - pages->npinned, false,  in update_unpinned()
     912  if (pages->npinned == pages->last_npinned)  in pfn_reader_user_update_pinned()
     962  struct iopt_pages *pages = pfns->pages;  in pfn_reader_unpin() (local)
    1070  pfns->pages = pages;  in pfn_reader_init()
    1094  struct iopt_pages *pages = pfns->pages;  in pfn_reader_release_pins() (local)
    1114  struct iopt_pages *pages = pfns->pages;  in pfn_reader_destroy() (local)
    1920  struct iopt_pages *pages = area->pages;  in iopt_area_add_access() (local)
    [all …]
|
| io_pagetable.c |
     377  elm->area->pages = elm->pages;  in iopt_map_pages()
     631  elm->pages = area->pages;  in iopt_get_pages()
     704  pages = area->pages;  in iopt_unmap_iova_range()
     895  struct iopt_pages *pages = area->pages;  in iopt_unfill_domain() (local)
     914  struct iopt_pages *pages = area->pages;  in iopt_unfill_domain() (local)
     916  if (!pages)  in iopt_unfill_domain()
     948  struct iopt_pages *pages = area->pages;  in iopt_fill_domain() (local)
     973  struct iopt_pages *pages = area->pages;  in iopt_fill_domain() (local)
    1183  struct iopt_pages *pages = area->pages;  in iopt_area_split() (local)
    1254  lhs->pages = area->pages;  in iopt_area_split()
    [all …]
|
| /linux/net/ceph/ |
| pagevec.c |
      19  set_page_dirty_lock(pages[i]);  in ceph_put_page_vector()
      20  put_page(pages[i]);  in ceph_put_page_vector()
      22  kvfree(pages);  in ceph_put_page_vector()
      31  __free_pages(pages[i], 0);  in ceph_release_page_vector()
      32  kfree(pages);  in ceph_release_page_vector()
      41  struct page **pages;  in ceph_alloc_page_vector() (local)
      44  pages = kmalloc_array(num_pages, sizeof(*pages), flags);  in ceph_alloc_page_vector()
      45  if (!pages)  in ceph_alloc_page_vector()
      49  if (pages[i] == NULL) {  in ceph_alloc_page_vector()
      50  ceph_release_page_vector(pages, i);  in ceph_alloc_page_vector()
    [all …]
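The ceph_alloc_page_vector()/ceph_release_page_vector() hits above trace a common allocate-with-rollback pattern: allocate the pointer array, fill it one page at a time, and on failure free exactly the pages allocated so far before freeing the array. A minimal sketch of that pattern, assuming kernel context; the demo_* names are illustrative, not the ceph API:

    #include <linux/err.h>
    #include <linux/gfp.h>
    #include <linux/mm.h>
    #include <linux/slab.h>

    /* Free the first num_pages entries, then the array itself. */
    static void demo_release_page_vector(struct page **pages, int num_pages)
    {
            int i;

            for (i = 0; i < num_pages; i++)
                    __free_pages(pages[i], 0);
            kfree(pages);
    }

    static struct page **demo_alloc_page_vector(int num_pages, gfp_t flags)
    {
            struct page **pages;
            int i;

            pages = kmalloc_array(num_pages, sizeof(*pages), flags);
            if (!pages)
                    return ERR_PTR(-ENOMEM);
            for (i = 0; i < num_pages; i++) {
                    pages[i] = alloc_page(flags);
                    if (!pages[i]) {
                            /* roll back only the i pages allocated so far */
                            demo_release_page_vector(pages, i);
                            return ERR_PTR(-ENOMEM);
                    }
            }
            return pages;
    }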
|
| /linux/io_uring/ |
| memmap.c |
      33  pages[i] = page + i;  in io_mem_alloc_compound()
      46  if (!pages[i])  in io_mem_alloc_single()
      69  if (!pages)  in io_pages_map()
      84  kvfree(pages);  in io_pages_map()
     115  kvfree(*pages);  in io_pages_unmap()
     116  *pages = NULL;  in io_pages_unmap()
     129  *pages = NULL;  in io_pages_free()
     145  if (!pages)  in io_pin_pages()
     149  pages);  in io_pin_pages()
     153  return pages;  in io_pin_pages()
    [all …]
|
| /linux/mm/ |
| percpu-vm.c |
      34  static struct page **pages;  in pcpu_get_pages() (local)
      39  if (!pages)  in pcpu_get_pages()
      41  return pages;  in pcpu_get_pages()
     165  pages[pcpu_page_idx(cpu, i)] = page;  in pcpu_unmap_pages()
     197  PAGE_KERNEL, pages, PAGE_SHIFT);  in __pcpu_map_pages()
     279  struct page **pages;  in pcpu_populate_chunk() (local)
     281  pages = pcpu_get_pages();  in pcpu_populate_chunk()
     282  if (!pages)  in pcpu_populate_chunk()
     315  struct page **pages;  in pcpu_depopulate_chunk() (local)
     322  pages = pcpu_get_pages();  in pcpu_depopulate_chunk()
    [all …]
|
| gup_test.c |
      19  put_page(pages[i]);  in put_back_pages()
      32  put_page(pages[i]);  in put_back_pages()
     106  struct page **pages;  in __gup_test_ioctl() (local)
     116  if (!pages)  in __gup_test_ioctl()
     140  pages + i);  in __gup_test_ioctl()
     147  pages + i);  in __gup_test_ioctl()
     155  pages + i);  in __gup_test_ioctl()
     160  pages + i);  in __gup_test_ioctl()
     202  kvfree(pages);  in __gup_test_ioctl()
     249  if (!pages)  in pin_longterm_test_start()
    [all …]
|
| gup.c |
    1507  if (pages) {  in __get_user_pages()
    1520  if (pages) {  in __get_user_pages()
    1784  if (likely(pages))  in __get_user_pages_locked()
    1785  pages += ret;  in __get_user_pages_locked()
    1832  if (likely(pages))  in __get_user_pages_locked()
    1833  pages++;  in __get_user_pages_locked()
    2083  if (pages) {  in __get_user_pages_locked()
    2085  if (pages[i])  in __get_user_pages_locked()
    2487  .pages = pages,  in check_and_migrate_movable_pages()
    3242  pages, nr))  in gup_fast_pmd_range()
    [all …]
|
| /linux/drivers/gpu/drm/i915/gem/selftests/ |
| huge_gem_object.c |
      12  struct sg_table *pages)  in huge_free_pages() (argument)
      24  sg_free_table(pages);  in huge_free_pages()
      25  kfree(pages);  in huge_free_pages()
      34  struct sg_table *pages;  in huge_get_pages() (local)
      41  pages = kmalloc(sizeof(*pages), GFP);  in huge_get_pages()
      42  if (!pages)  in huge_get_pages()
      46  kfree(pages);  in huge_get_pages()
      50  sg = pages->sgl;  in huge_get_pages()
      68  src = pages->sgl;  in huge_get_pages()
      80  huge_free_pages(obj, pages);  in huge_get_pages()
    [all …]
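The huge_get_pages()/huge_free_pages() hits above outline the usual sg_table lifecycle: kmalloc the table, allocate its entries, walk pages->sgl, and tear down with sg_free_table() plus kfree(). A hedged sketch of that lifecycle for a pre-existing page array; demo_pages_to_sgt() is an illustrative name, not an i915 function:

    #include <linux/err.h>
    #include <linux/mm.h>
    #include <linux/scatterlist.h>
    #include <linux/slab.h>

    static struct sg_table *demo_pages_to_sgt(struct page **pages, unsigned int n)
    {
            struct sg_table *sgt;
            struct scatterlist *sg;
            unsigned int i;

            sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
            if (!sgt)
                    return ERR_PTR(-ENOMEM);
            if (sg_alloc_table(sgt, n, GFP_KERNEL)) {       /* allocates sgt->sgl */
                    kfree(sgt);
                    return ERR_PTR(-ENOMEM);
            }
            for_each_sg(sgt->sgl, sg, n, i)
                    sg_set_page(sg, pages[i], PAGE_SIZE, 0);
            return sgt;
    }

Teardown mirrors huge_free_pages() above: sg_free_table(sgt) followed by kfree(sgt).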
|
| /linux/fs/isofs/ |
| compress.c |
      68  if (!pages[i])  in zisofs_uncompress_block()
     121  if (pages[curpage]) {  in zisofs_uncompress_block()
     174  if (pages[curpage]) {  in zisofs_uncompress_block()
     290  if (poffset && *pages) {  in zisofs_fill_pages()
     313  struct page **pages;  in zisofs_read_folio() (local)
     340  if (!pages) {  in zisofs_read_folio()
     344  pages[full_page] = page;  in zisofs_read_folio()
     355  if (pages[i]) {  in zisofs_read_folio()
     357  unlock_page(pages[i]);  in zisofs_read_folio()
     359  put_page(pages[i]);  in zisofs_read_folio()
    [all …]
|
| /linux/drivers/xen/ |
| xlate_mmu.c |
      71  struct page **pages;  (member)
     148  struct page **pages)  in xen_xlate_remap_gfn_array() (argument)
     163  data.pages = pages;  in xen_xlate_remap_gfn_array()
     217  struct page **pages;  in xen_xlate_map_ballooned_pages() (local)
     226  pages = kcalloc(nr_pages, sizeof(pages[0]), GFP_KERNEL);  in xen_xlate_map_ballooned_pages()
     227  if (!pages)  in xen_xlate_map_ballooned_pages()
     232  kfree(pages);  in xen_xlate_map_ballooned_pages()
     239  kfree(pages);  in xen_xlate_map_ballooned_pages()
     253  kfree(pages);  in xen_xlate_map_ballooned_pages()
     257  kfree(pages);  in xen_xlate_map_ballooned_pages()
    [all …]
|
| /linux/drivers/gpu/drm/xen/ |
| xen_drm_front_gem.c |
      30  struct page **pages;  (member)
      56  kvfree(xen_obj->pages);  in gem_free_pages_array()
      57  xen_obj->pages = NULL;  in gem_free_pages_array()
     177  if (IS_ERR(xen_obj->pages)) {  in gem_create()
     179  xen_obj->pages = NULL;  in gem_create()
     210  if (xen_obj->pages) {  in xen_drm_front_gem_free_object_unlocked()
     213  xen_obj->pages);  in xen_drm_front_gem_free_object_unlocked()
     229  return xen_obj->pages;  in xen_drm_front_gem_get_pages()
     236  if (!xen_obj->pages)  in xen_drm_front_gem_get_sg_table()
     272  xen_obj->pages);  in xen_drm_front_gem_import_sg_table()
    [all …]
|
| /linux/kernel/dma/ |
| remap.c |
      17  return area->pages;  in dma_common_find_pages()
      24  void *dma_common_pages_remap(struct page **pages, size_t size,  in dma_common_pages_remap() (argument)
      29  vaddr = vmap(pages, PAGE_ALIGN(size) >> PAGE_SHIFT,  in dma_common_pages_remap()
      32  find_vm_area(vaddr)->pages = pages;  in dma_common_pages_remap()
      44  struct page **pages;  in dma_common_contiguous_remap() (local)
      48  pages = kvmalloc_array(count, sizeof(struct page *), GFP_KERNEL);  in dma_common_contiguous_remap()
      49  if (!pages)  in dma_common_contiguous_remap()
      52  pages[i] = nth_page(page, i);  in dma_common_contiguous_remap()
      53  vaddr = vmap(pages, count, VM_DMA_COHERENT, prot);  in dma_common_contiguous_remap()
      54  kvfree(pages);  in dma_common_contiguous_remap()
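dma_common_contiguous_remap() above shows how a physically contiguous allocation gets a kernel virtual mapping: build a temporary struct page array with nth_page(), vmap() it, then free the array, which is only needed while the mapping is constructed. A minimal sketch of the same idea, assuming kernel context; it uses the generic VM_MAP flag rather than the DMA-layer VM_DMA_COHERENT, and demo_contiguous_remap() is an illustrative name:

    #include <linux/mm.h>
    #include <linux/slab.h>
    #include <linux/vmalloc.h>

    static void *demo_contiguous_remap(struct page *page, size_t size, pgprot_t prot)
    {
            unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
            struct page **pages;
            void *vaddr;
            unsigned int i;

            pages = kvmalloc_array(count, sizeof(struct page *), GFP_KERNEL);
            if (!pages)
                    return NULL;
            for (i = 0; i < count; i++)
                    pages[i] = nth_page(page, i);   /* i-th page of the contiguous run */
            vaddr = vmap(pages, count, VM_MAP, prot);
            kvfree(pages);          /* the mapping keeps no reference to the array */
            return vaddr;
    }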
|
| /linux/arch/s390/hypfs/ |
| hypfs_diag.c |
      59  *pages = diag204_buf_pages;  in diag204_get_buffer()
      63  *pages = 1;  in diag204_get_buffer()
      65  *pages = diag204((unsigned long)DIAG204_SUBC_RSI |  in diag204_get_buffer()
      67  if (*pages <= 0)  in diag204_get_buffer()
      70  diag204_buf = __vmalloc_node(array_size(*pages, PAGE_SIZE),  in diag204_get_buffer()
      75  diag204_buf_pages = *pages;  in diag204_get_buffer()
      96  int pages, rc;  in diag204_probe() (local)
      98  buf = diag204_get_buffer(DIAG204_INFO_EXT, &pages);  in diag204_probe()
     117  buf = diag204_get_buffer(DIAG204_INFO_SIMPLE, &pages);  in diag204_probe()
     139  int diag204_store(void *buf, int pages)  in diag204_store() (argument)
    [all …]
|
| /linux/drivers/media/pci/intel/ipu6/ |
| ipu6-dma.c |
      23  struct page **pages;  (member)
      68  if (!pages)  in __dma_alloc_buffer()
      79  if (!pages[i])  in __dma_alloc_buffer()
      86  pages[i + j] = pages[i] + j;  in __dma_alloc_buffer()
      94  return pages;  in __dma_alloc_buffer()
      97  if (pages[i])  in __dma_alloc_buffer()
      99  kvfree(pages);  in __dma_alloc_buffer()
     114  kvfree(pages);  in __dma_free_buffer()
     177  if (!pages)  in ipu6_dma_alloc()
     212  info->pages = pages;  in ipu6_dma_alloc()
    [all …]
|
| /linux/drivers/staging/media/ipu3/ |
| ipu3-dmamap.c |
      26  __free_page(pages[count]);  in imgu_dmamap_free_buffer()
      27  kvfree(pages);  in imgu_dmamap_free_buffer()
      36  struct page **pages;  in imgu_dmamap_alloc_buffer() (local)
      42  pages = kvmalloc_array(count, sizeof(*pages), GFP_KERNEL);  in imgu_dmamap_alloc_buffer()
      44  if (!pages)  in imgu_dmamap_alloc_buffer()
      77  pages[i++] = page++;  in imgu_dmamap_alloc_buffer()
      80  return pages;  in imgu_dmamap_alloc_buffer()
     100  struct page **pages;  in imgu_dmamap_alloc() (local)
     113  if (!pages)  in imgu_dmamap_alloc()
     131  map->pages = pages;  in imgu_dmamap_alloc()
    [all …]
|
| /linux/drivers/media/common/videobuf2/ |
| frame_vector.c |
      82  struct page **pages;  in put_vaddr_frames() (local)
      86  pages = frame_vector_pages(vec);  in put_vaddr_frames()
      92  if (WARN_ON(IS_ERR(pages)))  in put_vaddr_frames()
      95  unpin_user_pages(pages, vec->nr_frames);  in put_vaddr_frames()
     114  struct page **pages;  in frame_vector_to_pages() (local)
     122  pages = (struct page **)nums;  in frame_vector_to_pages()
     124  pages[i] = pfn_to_page(nums[i]);  in frame_vector_to_pages()
     140  struct page **pages;  in frame_vector_to_pfns() (local)
     144  pages = (struct page **)(vec->ptrs);  in frame_vector_to_pfns()
     145  nums = (unsigned long *)pages;  in frame_vector_to_pfns()
    [all …]
|
| /linux/Documentation/admin-guide/mm/ |
| hugetlbpage.rst |
      30  and surplus huge pages in the pool of huge pages of default size.
      46  is the size of the pool of huge pages.
      69  pages of all sizes.
      80  pages in the kernel's huge page pool. "Persistent" huge pages will be
     171  default sized persistent huge pages::
     205  huge pages can grow, if more huge pages than ``/proc/sys/vm/nr_hugepages`` are
     213  surplus pages will first be promoted to persistent huge pages. Then, additional
     226  of the in-use huge pages to surplus huge pages. This will occur even if
     247  pages may exist::
     270  pages size are allowed.
    [all …]
|
| /linux/include/linux/ |
| balloon_compaction.h |
      57  struct list_head pages; /* Pages enqueued & handled to Host */  (member)
      67  struct list_head *pages);
      69  struct list_head *pages, size_t n_req_pages);
      75  INIT_LIST_HEAD(&balloon->pages);  in balloon_devinfo_init()
      97  list_add(&page->lru, &balloon->pages);  in balloon_page_insert()
     141  list_add(&page->lru, &balloon->pages);  in balloon_page_insert()
     164  static inline void balloon_page_push(struct list_head *pages, struct page *page)  in balloon_page_push() (argument)
     166  list_add(&page->lru, pages);  in balloon_page_push()
     176  static inline struct page *balloon_page_pop(struct list_head *pages)  in balloon_page_pop() (argument)
     178  struct page *page = list_first_entry_or_null(pages, struct page, lru);  in balloon_page_pop()
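balloon_page_push() and balloon_page_pop() above are thin wrappers over the generic list API: push is a list_add() onto page->lru, pop takes list_first_entry_or_null() and removes the entry from the list. A small hedged usage sketch; the local staging list is illustrative, not a real balloon driver:

    #include <linux/balloon_compaction.h>
    #include <linux/gfp.h>

    static void demo_balloon_roundtrip(void)
    {
            LIST_HEAD(pages);               /* local staging list */
            struct page *page = alloc_page(GFP_KERNEL);

            if (!page)
                    return;
            balloon_page_push(&pages, page);                /* enqueue via page->lru */
            while ((page = balloon_page_pop(&pages)))       /* NULL once the list is empty */
                    __free_page(page);
    }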
|
| /linux/net/rds/ |
| info.c |
      65  struct page **pages;  (member)
     122  iter->addr = kmap_atomic(*iter->pages);  in rds_info_copy()
     127  "bytes %lu\n", *iter->pages, iter->addr,  in rds_info_copy()
     140  iter->pages++;  in rds_info_copy()
     166  struct page **pages = NULL;  in rds_info_getsockopt() (local)
     191  if (!pages) {  in rds_info_getsockopt()
     195  ret = pin_user_pages_fast(start, nr_pages, FOLL_WRITE, pages);  in rds_info_getsockopt()
     214  iter.pages = pages;  in rds_info_getsockopt()
     237  if (pages)  in rds_info_getsockopt()
     238  unpin_user_pages(pages, nr_pages);  in rds_info_getsockopt()
    [all …]
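rds_info_getsockopt() above runs the full pin/use/unpin round trip: kcalloc a page array, pin with pin_user_pages_fast() and FOLL_WRITE, access the pages through a temporary kernel mapping, then drop the pins with unpin_user_pages(). A condensed sketch of the same round trip; it uses kmap_local_page() where the older code above uses kmap_atomic(), and demo_pin_user_range() is an illustrative name:

    #include <linux/errno.h>
    #include <linux/highmem.h>
    #include <linux/mm.h>
    #include <linux/slab.h>
    #include <linux/string.h>

    static int demo_pin_user_range(unsigned long start, int nr_pages)
    {
            struct page **pages;
            int pinned;
            void *addr;

            pages = kcalloc(nr_pages, sizeof(*pages), GFP_KERNEL);
            if (!pages)
                    return -ENOMEM;

            pinned = pin_user_pages_fast(start, nr_pages, FOLL_WRITE, pages);
            if (pinned <= 0) {
                    kfree(pages);
                    return pinned;                  /* 0, or e.g. -EFAULT */
            }

            addr = kmap_local_page(pages[0]);       /* map the first pinned page */
            memset(addr, 0, PAGE_SIZE);             /* safe to write while pinned */
            kunmap_local(addr);

            unpin_user_pages(pages, pinned);        /* drop the pin references */
            kfree(pages);
            return pinned;                          /* may be fewer than nr_pages */
    }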
|
| /linux/drivers/tee/ |
| tee_shm.c |
      23  put_page(pages[n]);  in shm_put_kernel_pages()
      31  get_page(pages[n]);  in shm_get_kernel_pages()
      36  if (shm->pages) {  in release_registered_pages()
      42  kfree(shm->pages);  in release_registered_pages()
     230  pages = kcalloc(nr_pages, sizeof(*pages), GFP_KERNEL);  in tee_dyn_shm_alloc_helper()
     231  if (!pages) {  in tee_dyn_shm_alloc_helper()
     239  shm->pages = pages;  in tee_dyn_shm_alloc_helper()
     265  kfree(shm->pages);  in tee_dyn_shm_free_helper()
     266  shm->pages = NULL;  in tee_dyn_shm_free_helper()
     311  shm->pages = kcalloc(num_pages, sizeof(*shm->pages), GFP_KERNEL);  in register_shm_helper()
    [all …]
|
| /linux/Documentation/arch/powerpc/ |
| vmemmap_dedup.rst |
      14  With 2M PMD level mapping, we require 32 struct pages and a single 64K vmemmap
      15  page can contain 1024 struct pages (64K/sizeof(struct page)). Hence there is no
      18  With 1G PUD level mapping, we require 16384 struct pages and a single 64K
      19  vmemmap page can contain 1024 struct pages (64K/sizeof(struct page)). Hence we
      20  require 16 64K pages in vmemmap to map the struct page for 1G PUD level mapping.
      46  With 4K page size, 2M PMD level mapping requires 512 struct pages and a single
      47  4K vmemmap page contains 64 struct pages (4K/sizeof(struct page)). Hence we
      48  require 8 4K pages in vmemmap to map the struct page for 2M pmd level mapping.
      74  With 1G PUD level mapping, we require 262144 struct pages and a single 4K
      75  vmemmap page can contain 64 struct pages (4K/sizeof(struct page)). Hence we
    [all …]
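The arithmetic behind these lines follows directly from the sizes involved, assuming sizeof(struct page) = 64 bytes (the value the 64K/sizeof(struct page) = 1024 figure implies):

    \[
    \frac{2\,\mathrm{MiB}}{64\,\mathrm{KiB}} = 32 \ \text{struct pages per 2M PMD mapping},
    \qquad
    \frac{64\,\mathrm{KiB}}{64\,\mathrm{B}} = 1024 \ \text{struct pages per 64K vmemmap page}
    \]
    \[
    \frac{1\,\mathrm{GiB}}{64\,\mathrm{KiB}} = 16384 \ \text{struct pages},
    \qquad
    \frac{16384}{1024} = 16 \ \text{vmemmap pages per 1G PUD mapping}
    \]

So with a 64K base page, a 2M mapping's struct pages fit inside a single vmemmap page, while a 1G mapping needs the 16 vmemmap pages quoted above.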
|
| /linux/drivers/gpu/drm/i915/gem/ |
| i915_gem_pages.c |
      20  struct sg_table *pages)  in __i915_gem_object_set_pages() (argument)
      37  drm_clflush_sg(pages);  in __i915_gem_object_set_pages()
      46  obj->mm.pages = pages;  in __i915_gem_object_set_pages()
     216  pages = fetch_and_zero(&obj->mm.pages);  in __i915_gem_object_unset_pages()
     218  return pages;  in __i915_gem_object_unset_pages()
     236  return pages;  in __i915_gem_object_unset_pages()
     313  pages = kvmalloc_array(n_pages, sizeof(*pages), GFP_KERNEL);  in i915_gem_object_map_page()
     314  if (!pages)  in i915_gem_object_map_page()
     320  pages[i++] = page;  in i915_gem_object_map_page()
     322  if (pages != stack)  in i915_gem_object_map_page()
    [all …]
|
| i915_gem_phys.c |
      99  struct sg_table *pages)  in i915_gem_object_put_pages_phys() (argument)
     101  dma_addr_t dma = sg_dma_address(pages->sgl);  in i915_gem_object_put_pages_phys()
     102  void *vaddr = sg_page(pages->sgl);  in i915_gem_object_put_pages_phys()
     131  sg_free_table(pages);  in i915_gem_object_put_pages_phys()
     132  kfree(pages);  in i915_gem_object_put_pages_phys()
     192  struct sg_table *pages;  in i915_gem_object_shmem_to_phys() (local)
     195  pages = __i915_gem_object_unset_pages(obj);  in i915_gem_object_shmem_to_phys()
     204  if (!IS_ERR_OR_NULL(pages))  in i915_gem_object_shmem_to_phys()
     205  i915_gem_object_put_pages_shmem(obj, pages);  in i915_gem_object_shmem_to_phys()
     211  if (!IS_ERR_OR_NULL(pages))  in i915_gem_object_shmem_to_phys()
    [all …]
|
| /linux/arch/m68k/mm/ |
| sun3kmap.c |
      47  unsigned long type, int pages)  in do_pmeg_mapin() (argument)
      53  while(pages) {  in do_pmeg_mapin()
      57  pages--;  in do_pmeg_mapin()
      66  int pages;  in sun3_ioremap() (local)
      85  pages = size / PAGE_SIZE;  in sun3_ioremap()
      89  while(pages) {  in sun3_ioremap()
      93  if(seg_pages > pages)  in sun3_ioremap()
      94  seg_pages = pages;  in sun3_ioremap()
      98  pages -= seg_pages;  in sun3_ioremap()
|
| /linux/Documentation/mm/ |
| vmemmap_dedup.rst |
      17  HugeTLB pages consist of multiple base page size pages and is supported by many
      21  consists of 512 base pages and a 1GB HugeTLB page consists of 262144 base pages.
      51  structs which size is (unit: pages)::
      74  = 8 (pages)
      89  = PAGE_SIZE / 8 * 8 (pages)
      90  = PAGE_SIZE (pages)
     104  HugeTLB struct pages(8 pages) page frame(8 pages)
     133  7 pages to the buddy allocator.
     137  HugeTLB struct pages(8 pages) page frame(8 pages)
     199  to 4 on HugeTLB pages.
    [all …]
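For the 4K variants quoted above, the same bookkeeping (again assuming sizeof(struct page) = 64 bytes) reproduces the "8 (pages)" and "7 pages to the buddy allocator" figures:

    \[
    \frac{2\,\mathrm{MiB}}{4\,\mathrm{KiB}} = 512 \ \text{struct pages},
    \qquad
    \frac{4\,\mathrm{KiB}}{64\,\mathrm{B}} = 64 \ \text{struct pages per vmemmap page},
    \qquad
    \frac{512}{64} = 8 \ \text{vmemmap pages}
    \]
    \[
    8 - 1 = 7 \ \text{vmemmap pages freed back to the buddy allocator once deduplicated}
    \]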
|