| /drivers/gpu/drm/vmwgfx/ |
| A D | vmwgfx_gmr.c |
      39  unsigned long num_pages,  in vmw_gmr2_bind() argument
      47  uint32_t remap_num = num_pages / VMW_PPN_PER_REMAP + ((num_pages % VMW_PPN_PER_REMAP) > 0);  in vmw_gmr2_bind()
      48  uint32_t remap_size = VMW_PPN_SIZE * num_pages + (sizeof(remap_cmd) + sizeof(*cmd)) * remap_num;  in vmw_gmr2_bind()
      58  define_cmd.numPages = num_pages;  in vmw_gmr2_bind()
      73  while (num_pages > 0) {  in vmw_gmr2_bind()
      74  unsigned long nr = min_t(unsigned long, num_pages, VMW_PPN_PER_REMAP);  in vmw_gmr2_bind()
      94  num_pages -= nr;  in vmw_gmr2_bind()
     128  unsigned long num_pages,  in vmw_gmr_bind() argument
     141  return vmw_gmr2_bind(dev_priv, &data_iter, num_pages, gmr_id);  in vmw_gmr_bind()
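Lines 47 and 73-94 of vmw_gmr2_bind() show a common pairing: a round-up division to size the command buffer, then a loop that consumes the page list in chunks of at most VMW_PPN_PER_REMAP. A minimal userspace sketch of that pattern; PPN_PER_REMAP and its value are illustrative, not the driver's constant:

```c
#include <stdio.h>

#define PPN_PER_REMAP 1024 /* hypothetical chunk capacity, stands in for VMW_PPN_PER_REMAP */

static unsigned long min_ul(unsigned long a, unsigned long b)
{
	return a < b ? a : b;
}

int main(void)
{
	unsigned long num_pages = 2500;
	/* Round-up division, written the way line 47 writes it. */
	unsigned long remap_num = num_pages / PPN_PER_REMAP + ((num_pages % PPN_PER_REMAP) > 0);

	printf("%lu pages -> %lu remap commands\n", num_pages, remap_num);

	/* Consume the page list in chunks of at most PPN_PER_REMAP, as lines 73-94 do. */
	while (num_pages > 0) {
		unsigned long nr = min_ul(num_pages, PPN_PER_REMAP);

		printf("emit remap command covering %lu pages\n", nr);
		num_pages -= nr;
	}
	return 0;
}
```

For 2500 pages this emits three commands (1024, 1024, 452), matching the count the round-up division predicted.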
|
| A D | vmwgfx_ttm_buffer.c |
      70  return ++(viter->i) < viter->num_pages;  in __vmw_piter_non_sg_next()
     107  viter->num_pages = vsgt->num_pages;  in vmw_piter_start()
     184  vsgt->num_pages = vmw_tt->dma_ttm.num_pages;  in vmw_ttm_map_dma()
     196  vsgt->pages, vsgt->num_pages, 0,  in vmw_ttm_map_dma()
     197  (unsigned long)vsgt->num_pages << PAGE_SHIFT,  in vmw_ttm_map_dma()
     296  ttm->num_pages, vmw_be->gmr_id);  in vmw_ttm_bind()
     301  vmw_mob_create(ttm->num_pages);  in vmw_ttm_bind()
     307  &vmw_be->vsgt, ttm->num_pages,  in vmw_ttm_bind()
     373  ttm->num_pages);  in vmw_ttm_populate()
|
| /drivers/gpu/drm/ttm/ |
| A D | ttm_pool.c |
     224  if (!num_pages)  in ttm_pool_apply_caching()
     375  unsigned int num_pages;  in ttm_pool_shrink() local
     389  num_pages = 0;  in ttm_pool_shrink()
     393  return num_pages;  in ttm_pool_shrink()
     974  pgoff_t i, num_pages;  in ttm_pool_backup() local
     990  for (i = 0; i < tt->num_pages; i += num_pages) {  in ttm_pool_backup()
     995  num_pages = 1;  in ttm_pool_backup()
    1003  num_pages);  in ttm_pool_backup()
    1024  num_pages = tt->num_pages;  in ttm_pool_backup()
    1028  num_pages = DIV_ROUND_UP(num_pages, 2);  in ttm_pool_backup()
    [all …]
|
| A D | ttm_tt.c |
     131  ttm->dma_address = (void *)(ttm->pages + ttm->num_pages);  in ttm_dma_tt_alloc_page_directory()
     239  for (i = 0; i < ttm->num_pages; ++i) {  in ttm_tt_swapin()
     323  loff_t size = (loff_t)ttm->num_pages << PAGE_SHIFT;  in ttm_tt_swapout()
     339  for (i = 0; i < ttm->num_pages; ++i) {  in ttm_tt_swapout()
     359  return ttm->num_pages;  in ttm_tt_swapout()
     380  atomic_long_add(ttm->num_pages, &ttm_pages_allocated);  in ttm_tt_populate()
     382  atomic_long_add(ttm->num_pages,  in ttm_tt_populate()
     418  atomic_long_sub(ttm->num_pages, &ttm_pages_allocated);  in ttm_tt_populate()
     420  atomic_long_sub(ttm->num_pages,  in ttm_tt_populate()
     443  atomic_long_sub(ttm->num_pages,  in ttm_tt_unpopulate()
    [all …]
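Line 131 points dma_address just past the end of the pages array, so a single allocation backs both per-page directories. A sketch of that co-allocation trick; struct tt, the stub dma_addr_t, and tt_alloc_directory() are stand-ins, and it assumes dma_addr_t needs no stricter alignment than a pointer slot provides:

```c
#include <stdint.h>
#include <stdlib.h>

struct page; /* opaque, as in the kernel */
typedef uint64_t dma_addr_t;

struct tt {
	struct page **pages;
	dma_addr_t *dma_address;
	unsigned long num_pages;
};

/* One allocation holds both directories; dma_address starts right after
 * the pages array, mirroring line 131 of ttm_dma_tt_alloc_page_directory(). */
static int tt_alloc_directory(struct tt *tt)
{
	tt->pages = calloc(tt->num_pages,
			   sizeof(*tt->pages) + sizeof(*tt->dma_address));
	if (!tt->pages)
		return -1;
	tt->dma_address = (dma_addr_t *)(tt->pages + tt->num_pages);
	return 0;
}
```

The payoff is one failure path and one free() for two arrays that always live and die together.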
|
| A D | ttm_device.c |
      72  unsigned long num_pages, num_dma32;  in ttm_global_init() local
      90  num_pages = ((u64)si.totalram * si.mem_unit) >> PAGE_SHIFT;  in ttm_global_init()
      91  num_pages /= 2;  in ttm_global_init()
      98  ttm_pool_mgr_init(num_pages);  in ttm_global_init()
      99  ttm_tt_mgr_init(num_pages, num_dma32);  in ttm_global_init()
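Lines 90-91 derive TTM's default page limit from half of total RAM: multiply totalram by mem_unit to get bytes, shift down to pages, halve. A userspace approximation of the same arithmetic using sysinfo(2), which exposes the same totalram/mem_unit pair:

```c
#include <stdio.h>
#include <unistd.h>
#include <sys/sysinfo.h>

int main(void)
{
	struct sysinfo si;
	unsigned long long num_pages;
	long page_size;

	if (sysinfo(&si) != 0)
		return 1;
	page_size = sysconf(_SC_PAGESIZE);

	/* Total RAM in bytes is totalram * mem_unit; converting to pages and
	 * halving mirrors lines 90-91 of ttm_global_init(). */
	num_pages = (unsigned long long)si.totalram * si.mem_unit / (unsigned long)page_size;
	num_pages /= 2;

	printf("page limit: %llu pages\n", num_pages);
	return 0;
}
```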
|
| A D | ttm_bo_util.c |
      89  u32 num_pages,  in ttm_move_memcpy() argument
     104  for (i = 0; i < num_pages; ++i) {  in ttm_move_memcpy()
     116  for (i = 0; i < num_pages; ++i) {  in ttm_move_memcpy()
     342  unsigned long num_pages,  in ttm_bo_kmap_ttm() argument
     362  if (num_pages == 1 && ttm->caching == ttm_cached &&  in ttm_bo_kmap_ttm()
     379  map->virtual = vmap(ttm->pages + start_page, num_pages,  in ttm_bo_kmap_ttm()
     428  unsigned long start_page, unsigned long num_pages,  in ttm_bo_kmap() argument
     436  if (num_pages > PFN_UP(bo->resource->size))  in ttm_bo_kmap()
     445  return ttm_bo_kmap_ttm(bo, start_page, num_pages, map);  in ttm_bo_kmap()
     448  size = num_pages << PAGE_SHIFT;  in ttm_bo_kmap()
    [all …]
|
| A D | ttm_range_manager.c |
     124  u32 num_pages = PFN_UP(size);  in ttm_range_man_intersects() local
     127  if (place->fpfn >= (node->start + num_pages) ||  in ttm_range_man_intersects()
     140  u32 num_pages = PFN_UP(size);  in ttm_range_man_compatible() local
     143  (place->lpfn && (node->start + num_pages) > place->lpfn))  in ttm_range_man_compatible()
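Both helpers reduce to interval tests over the half-open page-frame range [start, start + num_pages), where an lpfn of 0 means "no upper bound". A sketch of the two predicates as they read at lines 127 and 143; the function names are illustrative:

```c
#include <stdbool.h>
#include <stdio.h>

/* Does the placement window [fpfn, lpfn) overlap the node's pages at all?
 * Line 127 tests the two disjoint cases and negates. */
static bool ranges_intersect(unsigned long fpfn, unsigned long lpfn,
			     unsigned long start, unsigned long num_pages)
{
	return !(fpfn >= start + num_pages || (lpfn && lpfn <= start));
}

/* Does the node sit wholly inside the placement window? Line 143 rejects
 * when it starts too early or (with a bound set) ends too late. */
static bool range_compatible(unsigned long fpfn, unsigned long lpfn,
			     unsigned long start, unsigned long num_pages)
{
	return fpfn <= start && (!lpfn || start + num_pages <= lpfn);
}

int main(void)
{
	/* A 50-page node at pfn 100 both overlaps and fits inside [0, 256). */
	printf("%d %d\n", ranges_intersect(0, 256, 100, 50),
	       range_compatible(0, 256, 100, 50));
	return 0;
}
```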
|
| /drivers/xen/ |
| A D | xen-front-pgdir-shbuf.c |
     160  return DIV_ROUND_UP(buf->num_pages, XEN_NUM_GREFS_PER_PAGE);  in get_num_pages_dir()
     187  buf->num_grefs = get_num_pages_dir(buf) + buf->num_pages;  in guest_calc_num_grefs()
     208  unmap_ops = kcalloc(buf->num_pages, sizeof(*unmap_ops),  in backend_unmap()
     213  for (i = 0; i < buf->num_pages; i++) {  in backend_unmap()
     222  buf->num_pages);  in backend_unmap()
     224  for (i = 0; i < buf->num_pages; i++) {  in backend_unmap()
     257  buf->backend_map_handles = kcalloc(buf->num_pages,  in backend_map()
     271  grefs_left = buf->num_pages;  in backend_map()
     372  grefs_left = buf->num_pages;  in guest_fill_page_dir()
     408  for (i = 0; i < buf->num_pages; i++) {  in guest_grant_refs_for_buffer()
    [all …]
|
| /drivers/gpu/drm/xen/ |
| A D | xen_drm_front_gem.c |
      29  size_t num_pages;  member
      48  xen_obj->num_pages = DIV_ROUND_UP(buf_size, PAGE_SIZE);  in gem_alloc_pages_array()
      49  xen_obj->pages = kvmalloc_array(xen_obj->num_pages,  in gem_alloc_pages_array()
      93  ret = vm_map_pages(vma, xen_obj->pages, xen_obj->num_pages);  in xen_drm_front_gem_object_mmap()
     159  ret = xen_alloc_unpopulated_pages(xen_obj->num_pages,  in gem_create()
     163  xen_obj->num_pages, ret);  in gem_create()
     175  xen_obj->num_pages = DIV_ROUND_UP(size, PAGE_SIZE);  in gem_create()
     212  xen_free_unpopulated_pages(xen_obj->num_pages,  in xen_drm_front_gem_free_object_unlocked()
     240  xen_obj->pages, xen_obj->num_pages);  in xen_drm_front_gem_get_sg_table()
     265  xen_obj->num_pages);  in xen_drm_front_gem_import_sg_table()
    [all …]
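Lines 48-49 show the canonical byte-size-to-page-count conversion: DIV_ROUND_UP for the count, then an overflow-checked array allocation (kvmalloc_array multiplies count by element size safely rather than open-coding the product). A userspace equivalent, with calloc playing the kvmalloc_array role and a 4 KiB PAGE_SIZE assumed:

```c
#include <stdio.h>
#include <stdlib.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
#define PAGE_SIZE 4096UL /* assumption: 4 KiB pages */

struct page; /* opaque */

int main(void)
{
	size_t buf_size = 10000; /* arbitrary, deliberately not page aligned */
	size_t num_pages = DIV_ROUND_UP(buf_size, PAGE_SIZE);
	/* calloc, like kvmalloc_array(), checks the count * size multiply
	 * for overflow instead of trusting the caller's arithmetic. */
	struct page **pages = calloc(num_pages, sizeof(*pages));

	if (!pages)
		return 1;
	printf("%zu bytes -> %zu pages\n", buf_size, num_pages); /* 10000 -> 3 */
	free(pages);
	return 0;
}
```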
|
| /drivers/media/common/videobuf2/ |
| A D | videobuf2-dma-sg.c |
      49  unsigned int num_pages;  member
     107  int num_pages;  in vb2_dma_sg_alloc() local
     121  buf->num_pages = size >> PAGE_SHIFT;  in vb2_dma_sg_alloc()
     162  __func__, buf->num_pages);  in vb2_dma_sg_alloc()
     169  num_pages = buf->num_pages;  in vb2_dma_sg_alloc()
     170  while (num_pages--)  in vb2_dma_sg_alloc()
     171  __free_page(buf->pages[num_pages]);  in vb2_dma_sg_alloc()
     183  int i = buf->num_pages;  in vb2_dma_sg_put()
     187  buf->num_pages);  in vb2_dma_sg_put()
     288  int i = buf->num_pages;  in vb2_dma_sg_put_userptr()
    [all …]
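The unwind at lines 169-171 walks the pages array backwards with a post-decrement loop so exactly the already-allocated entries are freed, newest first. A self-contained sketch of that error path; alloc_page_stub/free_page_stub are hypothetical stand-ins for the kernel's page allocator:

```c
#include <stdlib.h>

struct page; /* opaque */
static struct page *alloc_page_stub(void) { return malloc(1); }
static void free_page_stub(struct page *p) { free(p); }

/* Allocate num_pages pages; on failure, unwind only the pages already
 * allocated, in reverse, the way lines 169-171 do. */
static struct page **alloc_pages_array(unsigned int num_pages)
{
	struct page **pages = calloc(num_pages, sizeof(*pages));
	unsigned int i;

	if (!pages)
		return NULL;
	for (i = 0; i < num_pages; i++) {
		pages[i] = alloc_page_stub();
		if (!pages[i]) {
			while (i--) /* post-decrement: frees indices i-1 .. 0 */
				free_page_stub(pages[i]);
			free(pages);
			return NULL;
		}
	}
	return pages;
}
```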
|
| /drivers/infiniband/sw/siw/ |
| A D | siw_mem.c |
      44  int i, num_pages = umem->num_pages;  in siw_umem_release() local
      49  for (i = 0; num_pages > 0; i++) {  in siw_umem_release()
      51  num_pages -= PAGES_PER_CHUNK;  in siw_umem_release()
     341  int num_pages, num_chunks, i, rv = 0;  in siw_umem_get() local
     347  num_pages = PAGE_ALIGN(start + len - first_page_va) >> PAGE_SHIFT;  in siw_umem_get()
     348  num_chunks = (num_pages >> CHUNK_SHIFT) + 1;  in siw_umem_get()
     376  for (i = 0; num_pages > 0; i++) {  in siw_umem_get()
     377  int nents = min_t(int, num_pages, PAGES_PER_CHUNK);  in siw_umem_get()
     388  umem->num_pages++;  in siw_umem_get()
     389  num_pages--;  in siw_umem_get()
|
| /drivers/tee/ |
| A D | tee_shm.c |
      38  unpin_user_pages(shm->pages, shm->num_pages);  in release_registered_pages()
      40  shm_put_kernel_pages(shm->pages, shm->num_pages);  in release_registered_pages()
     210  size_t num_pages,  in tee_dyn_shm_alloc_helper() argument
     240  shm->num_pages = nr_pages;  in tee_dyn_shm_alloc_helper()
     277  size_t num_pages, off;  in register_shm_helper() local
     305  num_pages = iov_iter_npages(iter, INT_MAX);  in register_shm_helper()
     306  if (!num_pages) {  in register_shm_helper()
     329  shm_get_kernel_pages(shm->pages, num_pages);  in register_shm_helper()
     333  shm->num_pages = num_pages;  in register_shm_helper()
     336  shm->num_pages, start);  in register_shm_helper()
    [all …]
|
| /drivers/gpu/drm/gma500/ |
| A D | mmu.c |
     497  rows = num_pages / desired_tile_stride;  in psb_mmu_flush_ptes()
     499  desired_tile_stride = num_pages;  in psb_mmu_flush_ptes()
     538  end = addr + (num_pages << PAGE_SHIFT);  in psb_mmu_remove_pfn_sequence()
     580  rows = num_pages / desired_tile_stride;  in psb_mmu_remove_pages()
     582  desired_tile_stride = num_pages;  in psb_mmu_remove_pages()
     612  psb_mmu_flush_ptes(pd, f_address, num_pages,  in psb_mmu_remove_pages()
     622  unsigned long address, uint32_t num_pages,  in psb_mmu_insert_pfn_sequence() argument
     636  end = addr + (num_pages << PAGE_SHIFT);  in psb_mmu_insert_pfn_sequence()
     685  if (num_pages % desired_tile_stride != 0)  in psb_mmu_insert_pages()
     687  rows = num_pages / desired_tile_stride;  in psb_mmu_insert_pages()
    [all …]
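These gma500 helpers treat num_pages as a 2D region: rows of desired_tile_stride pages each, with consecutive rows hw_tile_stride pages apart, collapsing to a single untiled run when no hardware stride is set (lines 497-499, 580-582). A sketch of that traversal under those assumed semantics; all names and the 4 KiB PAGE_SHIFT are illustrative:

```c
#include <stdio.h>

#define PAGE_SHIFT 12 /* assumption: 4 KiB pages */

/* Walk num_pages as `rows` runs of desired_tile_stride pages, rows spaced
 * hw_tile_stride pages apart, echoing the rows/stride setup quoted above. */
static void walk_tiled(unsigned long address, unsigned int num_pages,
		       unsigned int desired_tile_stride,
		       unsigned int hw_tile_stride)
{
	unsigned int rows = 1;
	unsigned int i;

	if (hw_tile_stride)
		rows = num_pages / desired_tile_stride;
	else
		desired_tile_stride = num_pages; /* one untiled run */

	for (i = 0; i < rows; ++i) {
		unsigned long row_end =
			address + ((unsigned long)desired_tile_stride << PAGE_SHIFT);

		printf("row %u: 0x%lx - 0x%lx\n", i, address, row_end);
		address += (unsigned long)hw_tile_stride << PAGE_SHIFT;
	}
}

int main(void)
{
	walk_tiled(0x100000, 64, 16, 32); /* 4 rows of 16 pages, 32 pages apart */
	return 0;
}
```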
|
| A D | mmu.h |
      69  uint32_t num_pages);
      73  uint32_t num_pages, int type);
      76  unsigned long address, uint32_t num_pages,
      80  unsigned long address, uint32_t num_pages,
|
| /drivers/gpu/drm/i915/gem/ |
| A D | i915_gem_userptr.c |
      91  const unsigned long num_pages = obj->base.size >> PAGE_SHIFT;  in i915_gem_object_userptr_drop_ref() local
      93  unpin_user_pages(pvec, num_pages);  in i915_gem_object_userptr_drop_ref()
     103  unsigned int num_pages; /* limited by sg_alloc_table_from_pages_segment */  in i915_gem_userptr_get_pages() local
     106  if (overflows_type(obj->base.size >> PAGE_SHIFT, num_pages))  in i915_gem_userptr_get_pages()
     109  num_pages = obj->base.size >> PAGE_SHIFT;  in i915_gem_userptr_get_pages()
     123  ret = sg_alloc_table_from_pages_segment(st, pvec, num_pages, 0,  in i915_gem_userptr_get_pages()
     124  num_pages << PAGE_SHIFT,  in i915_gem_userptr_get_pages()
     234  const unsigned long num_pages = obj->base.size >> PAGE_SHIFT;  in i915_gem_object_userptr_submit_init() local
     259  pvec = kvmalloc_array(num_pages, sizeof(struct page *), GFP_KERNEL);  in i915_gem_object_userptr_submit_init()
     267  while (pinned < num_pages) {  in i915_gem_object_userptr_submit_init()
    [all …]
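Line 106 guards the narrowing store at line 109: the page count is computed in the object's wide size type and rejected up front if it cannot fit the unsigned int the sg-table helper effectively limits it to. A userspace sketch of the same check-before-narrow idiom; size_to_num_pages is a hypothetical helper:

```c
#include <limits.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12 /* assumption: 4 KiB pages */

/* Compute the page count wide, then refuse to narrow it if it would not
 * fit, the check overflows_type() performs at line 106. */
static int size_to_num_pages(uint64_t size, unsigned int *num_pages)
{
	uint64_t pages = size >> PAGE_SHIFT;

	if (pages > UINT_MAX)
		return -1; /* the driver returns -E2BIG here */
	*num_pages = (unsigned int)pages;
	return 0;
}

int main(void)
{
	unsigned int n;

	if (size_to_num_pages(1ULL << 45, &n) == 0) /* 2^33 pages: too many */
		printf("num_pages = %u\n", n);
	else
		printf("too big\n");
	return 0;
}
```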
|
| /drivers/gpu/drm/xe/ |
| A D | xe_shrinker.c |
     142  unsigned long num_pages;  in xe_shrinker_count() local
     145  num_pages = ttm_backup_bytes_avail() >> PAGE_SHIFT;  in xe_shrinker_count()
     149  num_pages = min_t(unsigned long, num_pages, shrinker->shrinkable_pages);  in xe_shrinker_count()
     151  num_pages = 0;  in xe_shrinker_count()
     153  num_pages += shrinker->purgeable_pages;  in xe_shrinker_count()
     156  return num_pages ? num_pages : SHRINK_EMPTY;  in xe_shrinker_count()
|
| /drivers/hv/ |
| A D | hv_proc.c |
      19  int hv_call_deposit_pages(int node, u64 partition_id, u32 num_pages)  in hv_call_deposit_pages() argument
      32  if (num_pages > HV_DEPOSIT_MAX)  in hv_call_deposit_pages()
      34  if (!num_pages)  in hv_call_deposit_pages()
      52  while (num_pages) {  in hv_call_deposit_pages()
      54  order = 31 - __builtin_clz(num_pages);  in hv_call_deposit_pages()
      70  num_pages -= counts[i];  in hv_call_deposit_pages()
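Line 54 picks the allocation order as the highest power of two not exceeding the remaining count: for a u32, 31 - __builtin_clz(num_pages) is floor(log2(num_pages)). A stripped-down sketch of the deposit loop; the real function also caps the order and batches the pages into hypercalls:

```c
#include <stdio.h>

int main(void)
{
	unsigned int num_pages = 1000;

	/* Deposit in power-of-two chunks, largest first, as lines 52-70 do.
	 * __builtin_clz(0) is undefined, hence the while (num_pages) guard. */
	while (num_pages) {
		unsigned int order = 31 - __builtin_clz(num_pages);
		unsigned int chunk = 1u << order;

		printf("allocate order-%u chunk (%u pages)\n", order, chunk);
		num_pages -= chunk;
	}
	return 0;
}
```

For 1000 pages this yields chunks of 512, 256, 128, 64, 32, and 8, i.e. one allocation per set bit of the count.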
|
| A D | hv_balloon.c |
     276  __u32 num_pages;  member
     443  __u32 num_pages;  member
    1185  int num_pages = range_array->finfo.page_cnt;  in free_balloon_pages() local
    1190  for (i = 0; i < num_pages; i++) {  in free_balloon_pages()
    1201  unsigned int num_pages,  in alloc_balloon_pages() argument
    1208  for (i = 0; i < num_pages / alloc_unit; i++) {  in alloc_balloon_pages()
    1253  unsigned int num_pages = dm_device.balloon_wrk.num_pages;  in balloon_up() local
    1273  if (avail_pages < num_pages || avail_pages - num_pages < floor) {  in balloon_up()
    1288  num_pages -= num_ballooned;  in balloon_up()
    1299  num_pages, dm_device.balloon_wrk.num_pages);  in balloon_up()
    [all …]
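The check at line 1273 is ordered so the unsigned subtraction can never wrap: avail_pages - num_pages is evaluated only after avail_pages < num_pages has been ruled out by short-circuit evaluation. A sketch of that idiom; request_too_large is a hypothetical name:

```c
#include <stdbool.h>
#include <stdio.h>

/* Refuse a balloon request that would leave fewer than `floor` pages.
 * The first comparison guarantees the subtraction cannot underflow,
 * matching the shape of the test at line 1273. */
static bool request_too_large(unsigned long avail_pages,
			      unsigned long num_pages, unsigned long floor)
{
	return avail_pages < num_pages || avail_pages - num_pages < floor;
}

int main(void)
{
	printf("%d\n", request_too_large(1000, 900, 200)); /* 1: dips below floor */
	printf("%d\n", request_too_large(1000, 700, 200)); /* 0: 300 pages remain */
	return 0;
}
```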
|
| /drivers/gpu/drm/imagination/ |
| A D | pvr_free_list.c |
     223  struct sg_table *sgt, u32 offset, u32 num_pages)  in pvr_free_list_insert_pages_locked() argument
     250  num_pages--;  in pvr_free_list_insert_pages_locked()
     251  if (!num_pages)  in pvr_free_list_insert_pages_locked()
     255  if (!num_pages)  in pvr_free_list_insert_pages_locked()
     280  free_list_node->num_pages;  in pvr_free_list_insert_node_locked()
     289  offset, free_list_node->num_pages);  in pvr_free_list_insert_node_locked()
     291  free_list->current_pages += free_list_node->num_pages;  in pvr_free_list_insert_node_locked()
     297  pvr_free_list_grow(struct pvr_free_list *free_list, u32 num_pages)  in pvr_free_list_grow() argument
     305  if (num_pages & FREE_LIST_ALIGNMENT) {  in pvr_free_list_grow()
     316  free_list_node->num_pages = num_pages;  in pvr_free_list_grow()
    [all …]
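Line 305 rejects growth requests whose page count has low bits set, which reads as an alignment test with FREE_LIST_ALIGNMENT acting as an alignment-minus-one mask. A sketch under that assumption; the 16-page granularity is invented for illustration:

```c
#include <stdbool.h>
#include <stdio.h>

#define ALIGNMENT_MASK (16u - 1) /* assumption: 16-page granularity, in mask form */

/* A page count is suitably aligned iff its low bits are clear; the test
 * at line 305 reads the same way, so FREE_LIST_ALIGNMENT is presumably
 * defined as alignment - 1. */
static bool num_pages_aligned(unsigned int num_pages)
{
	return (num_pages & ALIGNMENT_MASK) == 0;
}

int main(void)
{
	printf("%d %d\n", num_pages_aligned(48), num_pages_aligned(50)); /* 1 0 */
	return 0;
}
```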
|
| /drivers/gpu/drm/radeon/ |
| A D | radeon_ttm.c |
     141  unsigned num_pages;  in radeon_move_blit() local
     348  unsigned num_pages = ttm->num_pages - pinned;  in radeon_ttm_tt_pin_userptr() local
     359  } while (pinned < ttm->num_pages);  in radeon_ttm_tt_pin_userptr()
     362  (u64)ttm->num_pages << PAGE_SHIFT,  in radeon_ttm_tt_pin_userptr()
     372  ttm->num_pages);  in radeon_ttm_tt_pin_userptr()
     439  if (!ttm->num_pages) {  in radeon_ttm_backend_bind()
     441  ttm->num_pages, bo_mem, ttm);  in radeon_ttm_backend_bind()
     445  r = radeon_gart_bind(rdev, gtt->offset, ttm->num_pages,  in radeon_ttm_backend_bind()
     449  ttm->num_pages, (unsigned)gtt->offset);  in radeon_ttm_backend_bind()
     467  radeon_gart_unbind(rdev, gtt->offset, ttm->num_pages);  in radeon_ttm_backend_unbind()
    [all …]
|
| /drivers/net/ethernet/huawei/hinic3/ |
| A D | hinic3_queue_common.c |
      18  qpages->num_pages = max(q_depth / elem_per_page, 1);  in hinic3_queue_pages_init()
      38  __queue_pages_free(hwdev, qpages, qpages->num_pages);  in hinic3_queue_pages_free()
      47  qpages->pages = kcalloc(qpages->num_pages, sizeof(qpages->pages[0]),  in hinic3_queue_pages_alloc()
      55  for (pg_idx = 0; pg_idx < qpages->num_pages; pg_idx++) {  in hinic3_queue_pages_alloc()
|
| /drivers/gpu/drm/ttm/tests/ |
| A D | ttm_tt_test.c |
      47  int num_pages = params->size >> PAGE_SHIFT;  in ttm_tt_init_basic() local
      58  KUNIT_ASSERT_EQ(test, tt->num_pages, num_pages + extra_pages);  in ttm_tt_init_basic()
      73  int num_pages = (size + SZ_4K) >> PAGE_SHIFT;  in ttm_tt_init_misaligned() local
      87  KUNIT_ASSERT_EQ(test, tt->num_pages, num_pages);  in ttm_tt_init_misaligned()
     344  int err, num_pages;  in ttm_tt_swapin_basic() local
     358  num_pages = ttm_tt_swapout(devs->ttm_dev, tt, GFP_KERNEL);  in ttm_tt_swapin_basic()
     359  KUNIT_ASSERT_EQ(test, num_pages, expected_num_pages);  in ttm_tt_swapin_basic()
|
| /drivers/gpu/drm/ |
| A D | drm_cache.c |
      67  unsigned long num_pages)  in drm_cache_flush_clflush() argument
      72  for (i = 0; i < num_pages; i++)  in drm_cache_flush_clflush()
      87  drm_clflush_pages(struct page *pages[], unsigned long num_pages)  in drm_clflush_pages()
      92  drm_cache_flush_clflush(pages, num_pages);  in drm_clflush_pages()
     101  for (i = 0; i < num_pages; i++) {  in drm_clflush_pages()
|
| /drivers/firmware/efi/ |
| A D | memattr.c |
      75  u64 in_size = in->num_pages << EFI_PAGE_SHIFT;  in entry_is_valid()
      88  !PAGE_ALIGNED(in->num_pages << EFI_PAGE_SHIFT))) {  in entry_is_valid()
     102  u64 md_size = md->num_pages << EFI_PAGE_SHIFT;  in entry_is_valid()
     185  size = md.num_pages << EFI_PAGE_SHIFT;  in efi_memattr_apply_permissions()
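These conversions use EFI_PAGE_SHIFT (always 12: EFI pages are 4 KiB by specification) rather than the kernel's PAGE_SHIFT, which is why line 88 must separately check that the resulting size is aligned to the kernel's page size when the two differ. A sketch of that mismatch check; the 64 KiB kernel page size is an example, as on arm64 built with 64K pages:

```c
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define EFI_PAGE_SHIFT 12       /* EFI pages are always 4 KiB */
#define PAGE_SIZE      65536UL  /* assumption: 64 KiB kernel pages */

/* An EFI region is usable for permission changes only if both its start
 * and its byte size land on kernel-page boundaries, echoing the
 * PAGE_ALIGNED() tests around lines 75-88. */
static bool entry_is_page_aligned(uint64_t phys_addr, uint64_t num_pages)
{
	uint64_t size = num_pages << EFI_PAGE_SHIFT;

	return (phys_addr % PAGE_SIZE == 0) && (size % PAGE_SIZE == 0);
}

int main(void)
{
	printf("%d\n", entry_is_page_aligned(0x10000, 16)); /* 1: 64 KiB region */
	printf("%d\n", entry_is_page_aligned(0x10000, 4));  /* 0: only 16 KiB */
	return 0;
}
```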
|
| /drivers/gpu/drm/i915/gt/ |
| A D | selftest_reset.c |
      24  resource_size_t num_pages, page;  in __igt_reset_stolen() local
      37  num_pages = resource_size(dsm) >> PAGE_SHIFT;  in __igt_reset_stolen()
      38  if (!num_pages)  in __igt_reset_stolen()
      41  crc = kmalloc_array(num_pages, sizeof(u32), GFP_KERNEL);  in __igt_reset_stolen()
      82  for (page = 0; page < num_pages; page++) {  in __igt_reset_stolen()
     124  for (page = 0; page < num_pages; page++) {  in __igt_reset_stolen()
|