| /linux/tools/testing/selftests/mm/ |
| A D | hmm-tests.c |
    189  cmd.npages = npages;  in hmm_dmirror_cmd()
    295  unsigned long npages;  in TEST_F() local
    303  ASSERT_NE(npages, 0);  in TEST_F()
    1159  npages = 6;  in TEST_F()
    1335  npages);  in TEST_F()
    1404  npages);  in TEST_F()
    1432  npages = 1;  in TEST_F()
    1477  npages = 7;  in TEST_F()
    1652  npages = 6;  in TEST_F()
    1913  npages = 4;  in TEST_F()
    [all …]
|
| /linux/drivers/gpu/drm/i915/selftests/ |
| A D | scatterlist.c |
    70  pfn += npages;  in expect_pfn_sg()
    209  unsigned long npages)  in page_contiguous() argument
    211  return first + npages == last;  in page_contiguous()
    246  pfn_to_page(pfn + npages),  in alloc_table()
    247  npages)) {  in alloc_table()
    260  pfn += npages;  in alloc_table()
    292  const npages_fn_t *npages;  in igt_sg_alloc() local
    296  for (npages = npages_funcs; *npages; npages++) {  in igt_sg_alloc()
    334  const npages_fn_t *npages;  in igt_sg_trim() local
    337  for (npages = npages_funcs; *npages; npages++) {  in igt_sg_trim()
    [all …]
|
| /linux/lib/ |
| A D | kunit_iov_iter.c |
    50  size_t npages)  in iov_kunit_create_buffer() argument
    61  if (got != npages) {  in iov_kunit_create_buffer()
    66  for (int i = 0; i < npages; i++)  in iov_kunit_create_buffer()
    113  npages = bufsize / PAGE_SIZE;  in iov_kunit_copy_to_kvec()
    163  npages = bufsize / PAGE_SIZE;  in iov_kunit_copy_from_kvec()
    272  npages = bufsize / PAGE_SIZE;  in iov_kunit_copy_to_bvec()
    326  npages = bufsize / PAGE_SIZE;  in iov_kunit_copy_from_bvec()
    430  npages = bufsize / PAGE_SIZE;  in iov_kunit_copy_to_folioq()
    492  npages = bufsize / PAGE_SIZE;  in iov_kunit_copy_from_folioq()
    592  npages = bufsize / PAGE_SIZE;  in iov_kunit_copy_to_xarray()
    [all …]
|
| /linux/io_uring/ |
| A D | memmap.c |
    80  *npages = nr_pages;  in io_pages_map()
    86  *npages = 0;  in io_pages_map()
    98  if (put_pages && *npages) {  in io_pages_unmap()
    107  *npages = 1;  in io_pages_unmap()
    108  else if (*npages > 1)  in io_pages_unmap()
    110  for (i = 0; i < *npages; i++)  in io_pages_unmap()
    117  *npages = 0;  in io_pages_unmap()
    152  *npages = nr_pages;  in io_pin_pages()
    174  *npages = 0;  in __io_uaddr_map()
    187  *npages = nr_pages;  in __io_uaddr_map()
    [all …]
|
| A D | memmap.h |
    4  struct page **io_pin_pages(unsigned long ubuf, unsigned long len, int *npages);
    5  void io_pages_free(struct page ***pages, int npages);
    7  struct page **pages, int npages);
    9  void *io_pages_map(struct page ***out_pages, unsigned short *npages,
    11  void io_pages_unmap(void *ptr, struct page ***pages, unsigned short *npages,
    14  void *__io_uaddr_map(struct page ***pages, unsigned short *npages,
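The prototypes above show io_uring helpers that hand back the page count through an `npages` out-parameter. As a rough illustration of the pin-and-count pattern behind an interface shaped like `io_pin_pages()`, here is a hedged sketch; it is not the io_uring implementation, and the function name, GUP flags, and error handling are illustrative only.

```c
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/err.h>

/*
 * Sketch only: pin a user buffer and report how many pages back it
 * through *npages, loosely modeled on the io_pin_pages() prototype
 * listed above.
 */
static struct page **example_pin_pages(unsigned long ubuf, unsigned long len,
				       int *npages)
{
	unsigned long end = (ubuf + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
	unsigned long start = ubuf >> PAGE_SHIFT;
	int nr_pages = end - start;
	struct page **pages;
	int ret;

	pages = kvmalloc_array(nr_pages, sizeof(*pages), GFP_KERNEL);
	if (!pages)
		return ERR_PTR(-ENOMEM);

	ret = pin_user_pages_fast(ubuf, nr_pages, FOLL_WRITE | FOLL_LONGTERM,
				  pages);
	if (ret != nr_pages) {
		/* Unwind a partial pin before failing. */
		if (ret > 0)
			unpin_user_pages(pages, ret);
		kvfree(pages);
		return ERR_PTR(ret < 0 ? ret : -EFAULT);
	}

	*npages = nr_pages;
	return pages;
}
```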
|
| /linux/drivers/infiniband/hw/hfi1/ |
| A D | user_pages.c |
    30  u32 nlocked, u32 npages)  in hfi1_can_pin_pages() argument
    47  if (atomic64_read(&mm->pinned_vm) + npages > ulimit_pages)  in hfi1_can_pin_pages()
    66  if (nlocked + npages > (ulimit_pages / usr_ctxts / 4))  in hfi1_can_pin_pages()
    74  if (nlocked + npages > cache_limit_pages)  in hfi1_can_pin_pages()
    80  int hfi1_acquire_user_pages(struct mm_struct *mm, unsigned long vaddr, size_t npages,  in hfi1_acquire_user_pages() argument
    86  ret = pin_user_pages_fast(vaddr, npages, gup_flags, pages);  in hfi1_acquire_user_pages()
    96  size_t npages, bool dirty)  in hfi1_release_user_pages() argument
    98  unpin_user_pages_dirty_lock(p, npages, dirty);  in hfi1_release_user_pages()
    101  atomic64_sub(npages, &mm->pinned_vm);  in hfi1_release_user_pages()
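The hfi1 hits above show the usual bracket around long-term pinning: check `mm->pinned_vm` against the RLIMIT_MEMLOCK budget, pin with `pin_user_pages_fast()`, and undo the accounting on release with `unpin_user_pages_dirty_lock()`. A condensed, hedged sketch of that pattern follows; the function names are illustrative, not the hfi1 code, and the limit check is simplified to the rlimit alone.

```c
#include <linux/mm.h>
#include <linux/sched/mm.h>
#include <linux/sched/signal.h>

/* Sketch: would pinning @npages more pages exceed RLIMIT_MEMLOCK? */
static bool example_can_pin(struct mm_struct *mm, size_t npages)
{
	unsigned long limit_pages = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;

	return atomic64_read(&mm->pinned_vm) + npages <= limit_pages;
}

/* Sketch: pin and account; returns the number pinned or a negative errno. */
static int example_acquire(struct mm_struct *mm, unsigned long vaddr,
			   size_t npages, struct page **pages)
{
	int pinned = pin_user_pages_fast(vaddr, npages,
					 FOLL_WRITE | FOLL_LONGTERM, pages);

	if (pinned > 0)
		atomic64_add(pinned, &mm->pinned_vm);
	return pinned;
}

/* Sketch: release and un-account, dirtying the pages if they were written. */
static void example_release(struct mm_struct *mm, struct page **pages,
			    size_t npages, bool dirty)
{
	unpin_user_pages_dirty_lock(pages, npages, dirty);
	atomic64_sub(npages, &mm->pinned_vm);
}
```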
|
| A D | pin_system.c |
    20  unsigned int npages;  member
    60  evict_data.target = npages;  in sdma_cache_evict()
    79  if (node->npages) {  in free_system_node()
    81  node->npages);  in free_system_node()
    128  npages)) {  in pin_system_pages()
    132  if (cleared >= npages)  in pin_system_pages()
    137  start_address, node->npages, npages);  in pin_system_pages()
    146  if (pinned != npages) {  in pin_system_pages()
    154  node->npages = npages;  in pin_system_pages()
    306  page_index, cache_entry->npages);  in add_mapping_to_sdma_packet()
    [all …]
|
| A D | user_exp_rcv.c |
    20  u16 pageidx, unsigned int npages);
    136  unsigned int npages,  in unpin_rcv_pages() argument
    153  fd->tid_n_pinned -= npages;  in unpin_rcv_pages()
    162  unsigned int npages = tidbuf->npages;  in pin_rcv_pages() local
    572  if (!npages)  in find_phys_blocks()
    699  npages = tbuf->psets[setidx].count;  in program_rcvarray()
    704  npages);  in program_rcvarray()
    707  mapped += npages;  in program_rcvarray()
    755  node->npages = npages;  in set_rcvarray_entry()
    829  node->npages,  in __clear_tid_node()
    [all …]
|
| /linux/drivers/net/ethernet/mellanox/mlx5/core/ |
| A D | pagealloc.c |
    52  s32 npages;  member
    199  s32 *npages, int boot)  in mlx5_cmd_query_pages() argument
    440  int npages = 0;  in release_all_pages() local
    465  u32 npages)  in fwp_fill_manage_pages_out() argument
    475  if (!--npages)  in fwp_fill_manage_pages_out()
    490  u32 npages;  in reclaim_pages_cmd() local
    506  while (p && i < npages) {  in reclaim_pages_cmd()
    626  s32 npages;  in req_pages_handler() local
    656  req->npages = max_t(s32, npages, MAX_RECLAIM_NPAGES);  in req_pages_handler()
    667  s32 npages;  in mlx5_satisfy_startup_pages() local
    [all …]
|
| /linux/arch/sparc/kernel/ |
| A D | iommu.c |
    158  unsigned long npages)  in alloc_npages() argument
    204  int npages, nid;  in dma_4u_alloc_coherent() local
    235  while (npages--) {  in dma_4u_alloc_coherent()
    251  unsigned long order, npages;  in dma_4u_free_coherent() local
    284  npages >>= IO_PAGE_SHIFT;  in dma_4u_map_page()
    383  vaddr, ctx, npages);  in strbuf_flush()
    405  npages >>= IO_PAGE_SHIFT;  in dma_4u_unmap_page()
    420  npages, direction);  in dma_4u_unmap_page()
    510  while (npages--) {  in dma_4u_map_sg()
    642  npages, direction);  in dma_4u_unmap_sg()
    [all …]
|
| A D | pci_sun4v.c |
    75  p->npages = 0;  in iommu_batch_start()
    92  unsigned long npages = p->npages;  in iommu_batch_flush() local
    101  while (npages != 0) {  in iommu_batch_flush()
    105  npages,  in iommu_batch_flush()
    136  npages -= num;  in iommu_batch_flush()
    141  p->npages = 0;  in iommu_batch_flush()
    308  npages);  in dma_4v_iommu_demap()
    318  npages -= num;  in dma_4v_iommu_demap()
    319  } while (npages != 0);  in dma_4v_iommu_demap()
    437  unsigned long npages;  in dma_4v_unmap_page() local
    [all …]
|
| /linux/drivers/gpu/drm/i915/gem/selftests/ |
| A D | mock_dmabuf.c |
    22  err = sg_alloc_table(st, mock->npages, GFP_KERNEL);  in mock_map_dma_buf()
    27  for (i = 0; i < mock->npages; i++) {  in mock_map_dma_buf()
    59  for (i = 0; i < mock->npages; i++)  in mock_dmabuf_release()
    70  vaddr = vm_map_ram(mock->pages, mock->npages, 0);  in mock_dmabuf_vmap()
    82  vm_unmap_ram(map->vaddr, mock->npages);  in mock_dmabuf_vunmap()
    99  static struct dma_buf *mock_dmabuf(int npages)  in mock_dmabuf() argument
    106  mock = kmalloc(sizeof(*mock) + npages * sizeof(struct page *),  in mock_dmabuf()
    111  mock->npages = npages;  in mock_dmabuf()
    112  for (i = 0; i < npages; i++) {  in mock_dmabuf()
    119  exp_info.size = npages * PAGE_SIZE;  in mock_dmabuf()
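The `mock_dmabuf()` hits above size a single allocation as `sizeof(*mock) + npages * sizeof(struct page *)`, i.e. a header followed by a trailing array of page pointers. A hedged sketch of that layout follows; the struct and function names are illustrative (not the i915 code), and `struct_size()` from `<linux/overflow.h>` is shown only as an overflow-checked way of writing the same size computation.

```c
#include <linux/overflow.h>
#include <linux/slab.h>
#include <linux/mm_types.h>

/* Illustrative stand-in: a small header plus npages page pointers. */
struct example_buf {
	int npages;
	struct page *pages[];
};

static struct example_buf *example_buf_alloc(int npages)
{
	struct example_buf *buf;

	/*
	 * struct_size(buf, pages, npages) computes
	 * sizeof(*buf) + npages * sizeof(buf->pages[0]) with overflow checking.
	 */
	buf = kmalloc(struct_size(buf, pages, npages), GFP_KERNEL);
	if (!buf)
		return NULL;

	buf->npages = npages;
	return buf;
}
```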
|
| /linux/drivers/gpu/drm/xe/ |
| A D | xe_hmm.c |
    34  u64 i, npages;  in xe_mark_range_accessed() local
    36  npages = xe_npages_in_range(range->start, range->end);  in xe_mark_range_accessed()
    37  for (i = 0; i < npages; i++) {  in xe_mark_range_accessed()
    85  u64 i, npages;  in xe_build_sg() local
    88  npages = xe_npages_in_range(range->start, range->end);  in xe_build_sg()
    89  pages = kvmalloc_array(npages, sizeof(*pages), GFP_KERNEL);  in xe_build_sg()
    93  for (i = 0; i < npages; i++) {  in xe_build_sg()
    98  ret = sg_alloc_table_from_pages_segment(st, pages, npages, 0, npages << PAGE_SHIFT,  in xe_build_sg()
    178  u64 npages;  in xe_hmm_userptr_populate_range() local
    196  npages = xe_npages_in_range(userptr_start, userptr_end);  in xe_hmm_userptr_populate_range()
    [all …]
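`xe_build_sg()` above collects `npages` page pointers and converts them into a scatter-gather table covering `npages << PAGE_SHIFT` bytes. A hedged sketch of just that conversion step; it assumes the caller already holds `npages` valid pages, and the helper name and the `UINT_MAX` segment limit are illustrative rather than taken from the xe driver.

```c
#include <linux/scatterlist.h>
#include <linux/limits.h>
#include <linux/mm.h>

/*
 * Sketch: wrap an array of @npages pages into an sg_table spanning
 * npages << PAGE_SHIFT bytes, letting the core coalesce physically
 * contiguous pages into larger segments.
 */
static int example_build_sg(struct sg_table *st, struct page **pages,
			    unsigned long npages)
{
	return sg_alloc_table_from_pages_segment(st, pages, npages, 0,
						 npages << PAGE_SHIFT,
						 UINT_MAX, GFP_KERNEL);
}
```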
|
| /linux/drivers/iommu/iommufd/ |
| A D | iova_bitmap.c |
    48  unsigned long npages;  member
    169  unsigned long npages;  in iova_bitmap_get() local
    179  npages = DIV_ROUND_UP((bitmap->mapped_total_index -  in iova_bitmap_get()
    193  npages = min(npages + !!offset_in_page(addr),  in iova_bitmap_get()
    196  ret = pin_user_pages_fast((unsigned long)addr, npages,  in iova_bitmap_get()
    201  mapped->npages = (unsigned long)ret;  in iova_bitmap_get()
    224  if (mapped->npages) {  in iova_bitmap_put()
    225  unpin_user_pages(mapped->pages, mapped->npages);  in iova_bitmap_put()
    226  mapped->npages = 0;  in iova_bitmap_put()
    346  return mapped->npages &&  in iova_bitmap_mapped_range()
    [all …]
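`iova_bitmap_get()` above adds one extra page when the user address is not page aligned (`npages + !!offset_in_page(addr)`). A small hedged sketch of the same page-count arithmetic for an arbitrary user pointer and byte length; the helper name is illustrative.

```c
#include <linux/kernel.h>
#include <linux/mm.h>

/*
 * Sketch: number of pages spanned by @len bytes starting at user
 * address @addr, accounting for the offset into the first page.
 * Example with 4 KiB pages: addr = 0x1ffc, len = 8 spans 2 pages,
 * since offset_in_page(addr) = 4092 and DIV_ROUND_UP(4100, 4096) = 2.
 */
static unsigned long example_span_npages(unsigned long addr, size_t len)
{
	return DIV_ROUND_UP(offset_in_page(addr) + len, PAGE_SIZE);
}
```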
|
| /linux/drivers/gpu/drm/amd/amdkfd/ |
| A D | kfd_migrate.c |
    65  num_bytes = npages * 8;  in svm_migrate_gart_map()
    138  while (npages) {  in svm_migrate_copy_memory_gart()
    164  npages -= size;  in svm_migrate_copy_memory_gart()
    165  if (npages) {  in svm_migrate_copy_memory_gart()
    404  buf = kvcalloc(npages,  in svm_migrate_vma_to_vram()
    432  if (cpages != npages)  in svm_migrate_vma_to_vram()
    434  cpages, npages);  in svm_migrate_vma_to_vram()
    707  buf = kvcalloc(npages,  in svm_migrate_vma_to_ram()
    737  if (cpages != npages)  in svm_migrate_vma_to_ram()
    739  cpages, npages);  in svm_migrate_vma_to_ram()
    [all …]
|
| /linux/mm/ |
| A D | migrate_device.c |
    28  migrate->dst[migrate->npages] = 0;  in migrate_vma_collect_skip()
    29  migrate->src[migrate->npages++] = 0;  in migrate_vma_collect_skip()
    49  migrate->dst[migrate->npages] = 0;  in migrate_vma_collect_hole()
    50  migrate->npages++;  in migrate_vma_collect_hole()
    363  unsigned long npages,  in migrate_device_unmap() argument
    372  for (i = 0; i < npages; i++) {  in migrate_device_unmap()
    542  args->npages = 0;  in migrate_vma_setup()
    691  for (i = 0; i < npages; i++) {  in __migrate_device_pages()
    782  unsigned long npages)  in migrate_device_pages() argument
    817  for (i = 0; i < npages; i++) {  in migrate_device_finalize()
    [all …]
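The `migrate_vma_collect_*()` hits above fill the caller-supplied `src[]`/`dst[]` arrays one slot per page and advance `migrate->npages` as they go. A hedged sketch of how a driver typically sizes those arrays before calling into this code; the setup/pages/finalize sequence follows the documented device-migration flow, but the surrounding function, flag choice, and error handling are illustrative, not taken from any driver.

```c
#include <linux/migrate.h>
#include <linux/slab.h>

/*
 * Sketch: prepare a migrate_vma request covering [start, end).
 * One src and one dst slot is needed per page, hence the npages sizing.
 */
static int example_migrate_range(struct vm_area_struct *vma,
				 unsigned long start, unsigned long end,
				 void *pgmap_owner)
{
	unsigned long npages = (end - start) >> PAGE_SHIFT;
	struct migrate_vma args = {
		.vma		= vma,
		.start		= start,
		.end		= end,
		.flags		= MIGRATE_VMA_SELECT_SYSTEM,
		.pgmap_owner	= pgmap_owner,
	};
	int ret;

	args.src = kvcalloc(npages, sizeof(*args.src), GFP_KERNEL);
	args.dst = kvcalloc(npages, sizeof(*args.dst), GFP_KERNEL);
	if (!args.src || !args.dst) {
		ret = -ENOMEM;
		goto out;
	}

	ret = migrate_vma_setup(&args);
	if (ret)
		goto out;

	/* ... allocate destination pages, copy data, fill args.dst ... */

	migrate_vma_pages(&args);
	migrate_vma_finalize(&args);
out:
	kvfree(args.src);
	kvfree(args.dst);
	return ret;
}
```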
|
| /linux/arch/powerpc/kernel/ |
| A D | iommu.c |
    223  int largealloc = npages > 15;  in iommu_range_alloc()
    235  if (unlikely(npages == 0)) {  in iommu_range_alloc()
    325  end = n + npages;  in iommu_range_alloc()
    390  unsigned int npages)  in iommu_free_check() argument
    437  unsigned int npages)  in __iommu_free() argument
    459  unsigned int npages)  in iommu_free() argument
    526  npages);  in ppc_iommu_map_sg()
    627  unsigned int npages;  in ppc_iommu_unmap_sg() local
    861  unsigned int npages, align;  in iommu_map_page() local
    883  npages);  in iommu_map_page()
    [all …]
|
| /linux/drivers/fpga/ |
| A D | dfl-afu-dma-region.c |
    37  int npages = region->length >> PAGE_SHIFT;  in afu_dma_pin_pages() local
    41  ret = account_locked_vm(current->mm, npages, true);  in afu_dma_pin_pages()
    45  region->pages = kcalloc(npages, sizeof(struct page *), GFP_KERNEL);  in afu_dma_pin_pages()
    56  } else if (pinned != npages) {  in afu_dma_pin_pages()
    70  account_locked_vm(current->mm, npages, false);  in afu_dma_pin_pages()
    85  long npages = region->length >> PAGE_SHIFT;  in afu_dma_unpin_pages() local
    88  unpin_user_pages(region->pages, npages);  in afu_dma_unpin_pages()
    90  account_locked_vm(current->mm, npages, false);  in afu_dma_unpin_pages()
    92  dev_dbg(dev, "%ld pages unpinned\n", npages);  in afu_dma_unpin_pages()
    104  int npages = region->length >> PAGE_SHIFT;  in afu_dma_check_continuous_pages() local
    [all …]
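The DFL AFU hits above use `account_locked_vm()` instead of open-coded `pinned_vm` arithmetic: charge the pages against the locked-memory budget before pinning, and un-charge on failure or when unpinning. A hedged sketch of that bracket follows; the function name, GUP flags, and error handling are illustrative and condensed.

```c
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/slab.h>

/* Sketch: pin a page-aligned user region of @length bytes at @uaddr. */
static int example_pin_region(unsigned long uaddr, unsigned long length,
			      struct page ***pagesp)
{
	int npages = length >> PAGE_SHIFT;
	struct page **pages;
	int pinned, ret;

	/* Charge the pages against RLIMIT_MEMLOCK up front. */
	ret = account_locked_vm(current->mm, npages, true);
	if (ret)
		return ret;

	pages = kcalloc(npages, sizeof(*pages), GFP_KERNEL);
	if (!pages) {
		ret = -ENOMEM;
		goto unaccount;
	}

	pinned = pin_user_pages_fast(uaddr, npages, FOLL_WRITE, pages);
	if (pinned != npages) {
		if (pinned > 0)
			unpin_user_pages(pages, pinned);
		ret = pinned < 0 ? pinned : -EFAULT;
		goto free_pages;
	}

	*pagesp = pages;
	return 0;

free_pages:
	kfree(pages);
unaccount:
	account_locked_vm(current->mm, npages, false);
	return ret;
}
```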
|
| /linux/drivers/infiniband/hw/mthca/ |
| A D | mthca_memfree.c |
    72  for (i = 0; i < chunk->npages; ++i)  in mthca_free_icm_pages()
    81  for (i = 0; i < chunk->npages; ++i) {  in mthca_free_icm_coherent()
    157  while (npages > 0) {  in mthca_alloc_icm()
    165  chunk->npages = 0;  in mthca_alloc_icm()
    170  while (1 << cur_order > npages)  in mthca_alloc_icm()
    182  ++chunk->npages;  in mthca_alloc_icm()
    189  chunk->npages,  in mthca_alloc_icm()
    199  npages -= 1 << cur_order;  in mthca_alloc_icm()
    528  int npages;  in mthca_init_user_db_tab() local
    540  for (i = 0; i < npages; ++i) {  in mthca_init_user_db_tab()
    [all …]
|
| A D | mthca_allocator.c |
    195  int npages, shift;  in mthca_buf_alloc() local
    202  npages = 1;  in mthca_buf_alloc()
    214  npages *= 2;  in mthca_buf_alloc()
    217  dma_list = kmalloc_array(npages, sizeof(*dma_list),  in mthca_buf_alloc()
    222  for (i = 0; i < npages; ++i)  in mthca_buf_alloc()
    226  npages = (size + PAGE_SIZE - 1) / PAGE_SIZE;  in mthca_buf_alloc()
    229  dma_list = kmalloc_array(npages, sizeof(*dma_list),  in mthca_buf_alloc()
    234  buf->page_list = kmalloc_array(npages,  in mthca_buf_alloc()
    240  for (i = 0; i < npages; ++i)  in mthca_buf_alloc()
    243  for (i = 0; i < npages; ++i) {  in mthca_buf_alloc()
    [all …]
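The open-coded `(size + PAGE_SIZE - 1) / PAGE_SIZE` on line 226 above is the usual round-up page count: with 4 KiB pages, size = 10000 gives (10000 + 4095) / 4096 = 3 pages. The same value can be written with the kernel's helpers, as in this hedged one-liner (function name illustrative):

```c
#include <linux/kernel.h>
#include <linux/bug.h>
#include <linux/mm.h>

/* Sketch: three equivalent ways to count the pages backing @size bytes. */
static unsigned long example_npages(unsigned long size)
{
	unsigned long a = (size + PAGE_SIZE - 1) / PAGE_SIZE; /* open-coded   */
	unsigned long b = DIV_ROUND_UP(size, PAGE_SIZE);       /* helper       */
	unsigned long c = PAGE_ALIGN(size) >> PAGE_SHIFT;      /* align, shift */

	WARN_ON(a != b || b != c);
	return b;
}
```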
|
| /linux/arch/x86/mm/ |
| A D | cpu_entry_area.c |
    108  unsigned int npages;  in percpu_setup_debug_store() local
    115  npages = sizeof(struct debug_store) / PAGE_SIZE;  in percpu_setup_debug_store()
    117  cea_map_percpu_pages(cea, &per_cpu(cpu_debug_store, cpu), npages,  in percpu_setup_debug_store()
    125  npages = sizeof(struct debug_store_buffers) / PAGE_SIZE;  in percpu_setup_debug_store()
    126  for (; npages; npages--, cea += PAGE_SIZE)  in percpu_setup_debug_store()
    134  npages = sizeof(estacks->name## _stack) / PAGE_SIZE; \
    136  estacks->name## _stack, npages, PAGE_KERNEL); \
    143  unsigned int npages;  in percpu_setup_exception_stacks() local
|
| /linux/drivers/infiniband/core/ |
| A D | ib_core_uverbs.c |
    141  pgoff, entry->npages);  in rdma_user_mmap_entry_get_pgoff()
    171  if (entry->npages * PAGE_SIZE != vma->vm_end - vma->vm_start) {  in rdma_user_mmap_entry_get()
    191  for (i = 0; i < entry->npages; i++)  in rdma_user_mmap_entry_free()
    196  entry->start_pgoff, entry->npages);  in rdma_user_mmap_entry_free()
    269  u32 xa_first, xa_last, npages;  in rdma_user_mmap_entry_insert_range() local
    290  npages = (u32)DIV_ROUND_UP(length, PAGE_SIZE);  in rdma_user_mmap_entry_insert_range()
    291  entry->npages = npages;  in rdma_user_mmap_entry_insert_range()
    301  if (check_add_overflow(xa_first, npages, &xa_last))  in rdma_user_mmap_entry_insert_range()
    328  entry->start_pgoff, npages);  in rdma_user_mmap_entry_insert_range()
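`rdma_user_mmap_entry_insert_range()` above derives `npages` with `DIV_ROUND_UP(length, PAGE_SIZE)` and then guards the page-offset range with `check_add_overflow()` so `[xa_first, xa_first + npages)` cannot wrap. A hedged sketch of that guard in isolation; the function and parameter names are illustrative.

```c
#include <linux/overflow.h>
#include <linux/kernel.h>
#include <linux/mm.h>

/*
 * Sketch: validate that a mapping of @length bytes starting at page
 * offset @first_pgoff fits without the end offset overflowing u32.
 */
static int example_check_pgoff_range(u32 first_pgoff, u64 length,
				     u32 *last_pgoff)
{
	u32 npages = (u32)DIV_ROUND_UP(length, PAGE_SIZE);

	/* check_add_overflow() evaluates to true when the sum wraps. */
	if (check_add_overflow(first_pgoff, npages, last_pgoff))
		return -EINVAL;

	return 0;
}
```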
|
| A D | umem.c |
    152  unsigned long npages;  in ib_umem_get() local
    191  npages = ib_umem_num_pages(umem);  in ib_umem_get()
    192  if (npages == 0 || npages > UINT_MAX) {  in ib_umem_get()
    199  new_pinned = atomic64_add_return(npages, &mm->pinned_vm);  in ib_umem_get()
    201  atomic64_sub(npages, &mm->pinned_vm);  in ib_umem_get()
    211  while (npages) {  in ib_umem_get()
    214  min_t(unsigned long, npages,  in ib_umem_get()
    224  npages -= pinned;  in ib_umem_get()
    228  npages, GFP_KERNEL);  in ib_umem_get()
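`ib_umem_get()` above pins a potentially large region in batches: it keeps a running `npages` count, pins at most a fixed chunk per iteration, and subtracts what was actually pinned. A hedged sketch of that loop shape; the chunk size and names are illustrative, and the real code also appends each batch to a scatter-gather list as it goes.

```c
#include <linux/mm.h>
#include <linux/minmax.h>

#define EXAMPLE_PIN_CHUNK 512UL	/* arbitrary batch size for the sketch */

/* Sketch: pin @npages pages starting at @addr, one chunk at a time. */
static long example_pin_all(unsigned long addr, unsigned long npages,
			    struct page **pages)
{
	unsigned long done = 0;

	while (npages) {
		long pinned = pin_user_pages_fast(addr + done * PAGE_SIZE,
						  min(npages, EXAMPLE_PIN_CHUNK),
						  FOLL_WRITE | FOLL_LONGTERM,
						  pages + done);

		if (pinned <= 0) {
			/* Unwind everything pinned so far before failing. */
			if (done)
				unpin_user_pages(pages, done);
			return pinned ? pinned : -EFAULT;
		}
		done += pinned;
		npages -= pinned;
	}
	return done;
}
```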
|
| /linux/tools/testing/selftests/kvm/ |
| A D | memslot_perf_test.c |
    88  uint64_t npages;  member
    267  mempages = data->npages;  in get_max_slots()
    300  data->npages = mempages;  in prepare_vm()
    319  uint64_t npages;  in prepare_vm() local
    321  npages = data->pages_per_slot;  in prepare_vm()
    323  npages += rempages;  in prepare_vm()
    333  uint64_t npages;  in prepare_vm() local
    338  npages += rempages;  in prepare_vm()
    644  uint64_t npages;  in test_memslot_do_unmap() local
    650  npages = min(npages, count - ctr);  in test_memslot_do_unmap()
    [all …]
|
| /linux/drivers/infiniband/hw/vmw_pvrdma/ |
| A D | pvrdma_misc.c |
    53  u64 npages, bool alloc_pages)  in pvrdma_page_dir_init() argument
    57  if (npages > PVRDMA_PAGE_DIR_MAX_PAGES)  in pvrdma_page_dir_init()
    67  pdir->ntables = PVRDMA_PAGE_DIR_TABLE(npages - 1) + 1;  in pvrdma_page_dir_init()
    81  pdir->npages = npages;  in pvrdma_page_dir_init()
    84  pdir->pages = kcalloc(npages, sizeof(*pdir->pages),  in pvrdma_page_dir_init()
    89  for (i = 0; i < pdir->npages; i++) {  in pvrdma_page_dir_init()
    127  for (i = 0; i < pdir->npages && pdir->pages[i]; i++) {  in pvrdma_page_dir_cleanup_pages()
    173  if (idx >= pdir->npages)  in pvrdma_page_dir_insert_dma()
    189  if (offset >= pdir->npages)  in pvrdma_page_dir_insert_umem()
    212  if (num_pages > pdir->npages)  in pvrdma_page_dir_insert_page_list()
|