
Searched refs: umem (Results 1 – 25 of 103) sorted by relevance

/linux/net/xdp/
xdp_umem.c
26 unpin_user_pages_dirty_lock(umem->pgs, umem->npgs, true); in xdp_umem_unpin_pages()
29 umem->pgs = NULL; in xdp_umem_unpin_pages()
34 if (umem->user) { in xdp_umem_unaccount_pages()
35 atomic_long_sub(umem->npgs, &umem->user->locked_vm); in xdp_umem_unaccount_pages()
64 kfree(umem); in xdp_umem_release()
81 if (!umem) in xdp_put_umem()
100 umem->pgs = kvcalloc(umem->npgs, sizeof(*umem->pgs), GFP_KERNEL | __GFP_NOWARN); in xdp_umem_pin_pages()
234 err = xdp_umem_addr_map(umem, umem->pgs, umem->npgs); in xdp_umem_reg()
252 umem = kzalloc(sizeof(*umem), GFP_KERNEL); in xdp_umem_create()
253 if (!umem) in xdp_umem_create()
[all …]
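
The pattern in xdp_umem.c above is the classic long-term pin with memlock accounting: allocate a page array with kvcalloc(), pin with FOLL_LONGTERM, and on teardown unpin dirty and subtract from the owner's locked_vm. A minimal sketch of that pattern, assuming hypothetical helper names (my_pin_pages/my_unpin_pages) and simplified error handling:

    #include <linux/mm.h>
    #include <linux/slab.h>

    static int my_pin_pages(struct page ***pgs_out, unsigned long addr, u32 npgs)
    {
        struct page **pgs;
        int pinned;

        pgs = kvcalloc(npgs, sizeof(*pgs), GFP_KERNEL | __GFP_NOWARN);
        if (!pgs)
            return -ENOMEM;

        /* Long-term pin: pages stay resident until explicitly unpinned. */
        pinned = pin_user_pages_fast(addr, npgs, FOLL_WRITE | FOLL_LONGTERM, pgs);
        if (pinned < 0) {
            kvfree(pgs);
            return pinned;
        }
        if ((u32)pinned != npgs) {
            unpin_user_pages_dirty_lock(pgs, pinned, false);
            kvfree(pgs);
            return -EFAULT;
        }
        *pgs_out = pgs;
        return 0;
    }

    static void my_unpin_pages(struct page **pgs, u32 npgs)
    {
        /* Mirrors xdp_umem_unpin_pages(): mark dirty, then drop the pins. */
        unpin_user_pages_dirty_lock(pgs, npgs, true);
        kvfree(pgs);
    }

The accounting half (xdp_umem_unaccount_pages() above) is just an atomic_long_sub() of npgs against user->locked_vm, the mirror of the add performed at registration time.
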
xsk_buff_pool.c
56 struct xdp_umem *umem) in xp_create_and_assign_umem() argument
77 pool->addrs_cnt = umem->size; in xp_create_and_assign_umem()
78 pool->heads_cnt = umem->chunks; in xp_create_and_assign_umem()
80 pool->headroom = umem->headroom; in xp_create_and_assign_umem()
84 pool->frame_len = umem->chunk_size - umem->headroom - in xp_create_and_assign_umem()
86 pool->umem = umem; in xp_create_and_assign_umem()
87 pool->addrs = umem->addrs; in xp_create_and_assign_umem()
103 xskb->xdp.frame_sz = umem->chunk_size - umem->headroom; in xp_create_and_assign_umem()
147 if (pool->umem->zc) { in xp_disable_drv_zc()
232 pool->umem->zc = true; in xp_assign_dev()
[all …]
xsk_diag.c
50 struct xdp_umem *umem = xs->umem; in xsk_diag_put_umem() local
54 if (!umem) in xsk_diag_put_umem()
57 du.id = umem->id; in xsk_diag_put_umem()
58 du.size = umem->size; in xsk_diag_put_umem()
59 du.num_pages = umem->npgs; in xsk_diag_put_umem()
60 du.chunk_size = umem->chunk_size; in xsk_diag_put_umem()
61 du.headroom = umem->headroom; in xsk_diag_put_umem()
65 if (umem->zc) in xsk_diag_put_umem()
67 du.refs = refcount_read(&umem->users); in xsk_diag_put_umem()
/linux/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/
umem.c
46 list_for_each_entry(umem, &master->umem, head) { in nvkm_umem_search()
67 if (!umem->map) in nvkm_umem_unmap()
70 if (umem->io) { in nvkm_umem_unmap()
75 umem->bar = NULL; in nvkm_umem_unmap()
79 umem->map = NULL; in nvkm_umem_unmap()
94 if (umem->map) in nvkm_umem_map()
98 int ret = nvkm_mem_map_host(umem->memory, &umem->map); in nvkm_umem_map()
131 return umem; in nvkm_umem_dtor()
164 if (!(umem = kzalloc(sizeof(*umem), GFP_KERNEL))) in nvkm_umem_new()
167 umem->mmu = mmu; in nvkm_umem_new()
[all …]
/linux/drivers/infiniband/core/
umem.c
90 if (umem->is_odp) { in ib_umem_find_best_pgsz()
170 umem = kzalloc(sizeof(*umem), GFP_KERNEL); in ib_umem_get()
171 if (!umem) in ib_umem_get()
180 umem->iova = addr; in ib_umem_get()
252 kfree(umem); in ib_umem_get()
264 if (!umem) in ib_umem_release()
268 if (umem->is_odp) in ib_umem_release()
271 __ib_umem_release(umem->ibdev, umem, 1); in ib_umem_release()
273 atomic64_sub(ib_umem_num_pages(umem), &umem->owning_mm->pinned_vm); in ib_umem_release()
275 kfree(umem); in ib_umem_release()
[all …]
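
The core helpers above define the whole lifecycle a driver sees: ib_umem_get() pins the user range and builds the scatter list, ib_umem_release() undoes it (and, per line 264, tolerates NULL). A hedged sketch of the usual driver-side call pattern; my_reg_user_mr() is a hypothetical wrapper, not a real verb handler:

    #include <linux/err.h>
    #include <rdma/ib_umem.h>

    static struct ib_umem *my_reg_user_mr(struct ib_device *ibdev,
                                          unsigned long start, size_t length,
                                          int access)
    {
        struct ib_umem *umem;

        umem = ib_umem_get(ibdev, start, length, access);
        if (IS_ERR(umem))
            return umem; /* caller unwraps with PTR_ERR() */

        pr_debug("pinned %zu pages\n", ib_umem_num_pages(umem));
        return umem;
        /* teardown, possibly much later: ib_umem_release(umem); */
    }
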
umem_odp.c
119 struct ib_umem *umem; in ib_umem_odp_alloc_implicit() local
129 umem = &umem_odp->umem; in ib_umem_odp_alloc_implicit()
130 umem->ibdev = device; in ib_umem_odp_alloc_implicit()
167 struct ib_umem *umem; in ib_umem_odp_alloc_child() local
176 umem = &odp_data->umem; in ib_umem_odp_alloc_child()
177 umem->ibdev = root->umem.ibdev; in ib_umem_odp_alloc_child()
178 umem->length = size; in ib_umem_odp_alloc_child()
180 umem->writable = root->umem.writable; in ib_umem_odp_alloc_child()
181 umem->owning_mm = root->umem.owning_mm; in ib_umem_odp_alloc_child()
198 mmput(umem->owning_mm); in ib_umem_odp_alloc_child()
[all …]
umem_dmabuf.c
40 end = ALIGN(umem_dmabuf->umem.address + umem_dmabuf->umem.length, in ib_umem_dmabuf_map_pages()
66 umem_dmabuf->umem.sgt_append.sgt.nents = nmap; in ib_umem_dmabuf_map_pages()
125 struct ib_umem *umem; in ib_umem_dmabuf_get_with_dma_device() local
148 umem = &umem_dmabuf->umem; in ib_umem_dmabuf_get_with_dma_device()
149 umem->ibdev = device; in ib_umem_dmabuf_get_with_dma_device()
150 umem->length = size; in ib_umem_dmabuf_get_with_dma_device()
151 umem->address = offset; in ib_umem_dmabuf_get_with_dma_device()
152 umem->writable = ib_access_writable(access); in ib_umem_dmabuf_get_with_dma_device()
153 umem->is_dmabuf = 1; in ib_umem_dmabuf_get_with_dma_device()
155 if (!ib_umem_num_pages(umem)) in ib_umem_dmabuf_get_with_dma_device()
[all …]
/linux/include/rdma/
ib_umem.h
32 struct ib_umem umem; member
46 return container_of(umem, struct ib_umem_dmabuf, umem); in to_ib_umem_dmabuf()
52 return umem->address & ~PAGE_MASK; in ib_umem_offset()
58 return (sg_dma_address(umem->sgt_append.sgt.sgl) + ib_umem_offset(umem)) & in ib_umem_dma_offset()
65 return (size_t)((ALIGN(umem->iova + umem->length, pgsz) - in ib_umem_num_dma_blocks()
66 ALIGN_DOWN(umem->iova, pgsz))) / in ib_umem_num_dma_blocks()
72 return ib_umem_num_dma_blocks(umem, PAGE_SIZE); in ib_umem_num_pages()
76 struct ib_umem *umem, in __rdma_umem_block_iter_start() argument
80 umem->sgt_append.sgt.nents, pgsz); in __rdma_umem_block_iter_start()
109 void ib_umem_release(struct ib_umem *umem);
[all …]
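
ib_umem_num_dma_blocks() above rounds the IOVA range outward to the block size before dividing, so every partially covered block counts. A user-space restatement of that arithmetic for illustration (pgsz must be a power of two, as in the kernel macros):

    #include <stdint.h>
    #include <stdio.h>

    /* Same computation as ib_umem_num_dma_blocks(): ALIGN the end up,
     * ALIGN_DOWN the start, divide by the block size. */
    static size_t num_dma_blocks(uint64_t iova, uint64_t length, uint64_t pgsz)
    {
        uint64_t first = iova & ~(pgsz - 1);                      /* ALIGN_DOWN */
        uint64_t last = (iova + length + pgsz - 1) & ~(pgsz - 1); /* ALIGN */
        return (size_t)((last - first) / pgsz);
    }

    int main(void)
    {
        /* 8 KiB starting 1 KiB into a 4 KiB page straddles 3 pages. */
        printf("%zu\n", num_dma_blocks(0x1400, 8192, 4096)); /* prints 3 */
        return 0;
    }
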
ib_umem_odp.h
13 struct ib_umem umem; member
47 static inline struct ib_umem_odp *to_ib_umem_odp(struct ib_umem *umem) in to_ib_umem_odp() argument
49 return container_of(umem, struct ib_umem_odp, umem); in to_ib_umem_odp()
/linux/tools/testing/selftests/bpf/
xsk.c
94 return umem ? umem->fd : -EINVAL; in xsk_umem__fd()
237 umem = calloc(1, sizeof(*umem)); in xsk_umem__create()
238 if (!umem) in xsk_umem__create()
277 free(umem); in xsk_umem__create()
478 struct xsk_umem *umem = ctx->umem; in xsk_put_ctx() local
529 ctx->umem = umem; in xsk_create_ctx()
713 if (!umem) in xsk_socket__create()
726 if (!umem) in xsk_umem__delete()
733 if (!err && umem->fill_save && umem->comp_save) { in xsk_umem__delete()
741 free(umem); in xsk_umem__delete()
[all …]
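
The xsk_umem__create()/xsk_umem__delete() pair above is the user-space entry point for AF_XDP umems (the selftests carry their own copy of these helpers). A minimal sketch of creating one, assuming the selftests' local xsk.h; ring and frame counts are illustrative:

    #include <stdlib.h>
    #include <sys/mman.h>
    #include "xsk.h" /* selftests' local helper header */

    #define NUM_FRAMES 4096

    static struct xsk_umem *create_umem(struct xsk_ring_prod *fq,
                                        struct xsk_ring_cons *cq)
    {
        size_t size = (size_t)NUM_FRAMES * XSK_UMEM__DEFAULT_FRAME_SIZE;
        struct xsk_umem *umem = NULL;
        void *buf;

        /* The umem area must be page-aligned; mmap() guarantees that. */
        buf = mmap(NULL, size, PROT_READ | PROT_WRITE,
                   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (buf == MAP_FAILED)
            return NULL;

        /* NULL config selects the default fill/completion ring sizes. */
        if (xsk_umem__create(&umem, buf, size, fq, cq, NULL)) {
            munmap(buf, size);
            return NULL;
        }
        return umem; /* destroy later with xsk_umem__delete(umem) */
    }
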
xskxceiver.c
175 return !!ifobj->umem->umem; in is_umem_valid()
185 return umem->num_frames * umem->frame_size; in umem_size()
210 &umem->fq, &umem->cq, &cfg); in xsk_configure_umem()
216 umem->base_addr = umem_size(umem); in xsk_configure_umem()
217 umem->next_buffer = umem_size(umem); in xsk_configure_umem()
228 umem->next_buffer += umem->frame_size; in umem_alloc_buffer()
229 if (umem->next_buffer >= umem->base_addr + umem_size(umem)) in umem_alloc_buffer()
230 umem->next_buffer = umem->base_addr; in umem_alloc_buffer()
267 xsk->umem = umem; in __xsk_configure_socket()
323 xsk_umem__delete(umem->umem); in ifobj_zc_avail()
[all …]
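
The allocator at lines 228–230 above is a bump pointer over the umem area that wraps back to base_addr when it runs past the end. Restated self-contained, with field names taken from the listing (struct my_umem is a stand-in for the test harness type):

    #include <stdint.h>

    struct my_umem {
        uint64_t base_addr;   /* first byte of the umem area */
        uint64_t next_buffer; /* bump pointer */
        uint32_t num_frames;
        uint32_t frame_size;
    };

    static uint64_t umem_size(const struct my_umem *u)
    {
        return (uint64_t)u->num_frames * u->frame_size;
    }

    /* Hand out the next frame address, wrapping at the end of the area. */
    static uint64_t umem_alloc_buffer(struct my_umem *u)
    {
        uint64_t addr = u->next_buffer;

        u->next_buffer += u->frame_size;
        if (u->next_buffer >= u->base_addr + umem_size(u))
            u->next_buffer = u->base_addr;
        return addr;
    }
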
/linux/drivers/infiniband/sw/siw/
siw_mem.c
70 if (umem->base_mem) in siw_umem_release()
77 kfree(umem->page_chunk); in siw_umem_release()
78 kfree(umem); in siw_umem_release()
360 struct siw_umem *umem; in siw_umem_get() local
374 umem = kzalloc(sizeof(*umem), GFP_KERNEL); in siw_umem_get()
375 if (!umem) in siw_umem_get()
378 umem->page_chunk = in siw_umem_get()
380 if (!umem->page_chunk) { in siw_umem_get()
412 umem->num_pages++; in siw_umem_get()
419 return umem; in siw_umem_get()
[all …]
siw_mem.h
11 void siw_umem_release(struct siw_umem *umem);
59 static inline struct page *siw_get_upage(struct siw_umem *umem, u64 addr) in siw_get_upage() argument
61 unsigned int page_idx = (addr - umem->fp_addr) >> PAGE_SHIFT, in siw_get_upage()
65 if (likely(page_idx < umem->num_pages)) in siw_get_upage()
66 return umem->page_chunk[chunk_idx].plist[page_in_chunk]; in siw_get_upage()
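
siw_get_upage() above resolves a virtual address through a two-level table: a linear page index, derived from the offset past fp_addr, is split into a chunk index and a position within the chunk. A generic restatement with hypothetical constants (the real chunk size lives in the elided part of siw_mem.h):

    #include <stddef.h>
    #include <stdint.h>

    #define MY_PAGE_SHIFT      12
    #define MY_PAGES_PER_CHUNK 512 /* hypothetical; siw defines its own */

    struct my_chunk { void *plist[MY_PAGES_PER_CHUNK]; };

    struct my_umem {
        uint64_t fp_addr; /* first page address of the mapped range */
        unsigned int num_pages;
        struct my_chunk *page_chunk;
    };

    static void *my_get_upage(const struct my_umem *umem, uint64_t addr)
    {
        unsigned int page_idx = (addr - umem->fp_addr) >> MY_PAGE_SHIFT;
        unsigned int chunk_idx = page_idx / MY_PAGES_PER_CHUNK;
        unsigned int page_in_chunk = page_idx % MY_PAGES_PER_CHUNK;

        if (page_idx < umem->num_pages)
            return umem->page_chunk[chunk_idx].plist[page_in_chunk];
        return NULL; /* out of range */
    }
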
/linux/lib/
usercopy_kunit.c
36 char __user *umem; member
50 char __user *umem = priv->umem; in usercopy_test_check_nonzero_user() local
66 umem += start; in usercopy_test_check_nonzero_user()
88 KUNIT_EXPECT_EQ_MSG(test, copy_to_user(umem, kmem, size), 0, in usercopy_test_check_nonzero_user()
94 int retval = check_zeroed_user(umem + start, len); in usercopy_test_check_nonzero_user()
109 char __user *umem = priv->umem; in usercopy_test_copy_struct_from_user() local
122 KUNIT_ASSERT_EQ_MSG(test, copy_to_user(umem, umem_src, size), 0, in usercopy_test_copy_struct_from_user()
163 KUNIT_EXPECT_EQ_MSG(test, clear_user(umem + ksize, usize - ksize), 0, in usercopy_test_copy_struct_from_user()
179 char __user *usermem = priv->umem; in usercopy_test_valid()
220 char __user *usermem = priv->umem; in usercopy_test_invalid()
[all …]
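
The KUnit cases above lean on check_zeroed_user(), which returns 1 if the user range is entirely zero, 0 if any byte is set, and a negative errno on fault; copy_struct_from_user() uses it to reject structs whose tail the kernel does not understand. A hedged sketch of that idiom (validate_user_tail() is a hypothetical name):

    #include <linux/uaccess.h>

    /* Accept a userspace struct only if the bytes past what the kernel
     * understands (ksize) are zero, mirroring copy_struct_from_user(). */
    static int validate_user_tail(const void __user *uptr,
                                  size_t ksize, size_t usize)
    {
        int ret;

        if (usize <= ksize)
            return 0;

        ret = check_zeroed_user((const char __user *)uptr + ksize,
                                usize - ksize);
        if (ret < 0)
            return ret;          /* fault while reading the tail */
        return ret ? 0 : -E2BIG; /* non-zero tail: reject */
    }
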
/linux/drivers/infiniband/hw/mlx5/
mem.c
40 void mlx5_ib_populate_pas(struct ib_umem *umem, size_t page_size, __be64 *pas, in mlx5_ib_populate_pas() argument
45 rdma_umem_for_each_dma_block (umem, &biter, page_size) { in mlx5_ib_populate_pas()
58 struct ib_umem *umem, unsigned long pgsz_bitmap, in __mlx5_umem_find_best_quantized_pgoff() argument
66 page_size = ib_umem_find_best_pgoff(umem, pgsz_bitmap, pgoff_bitmask); in __mlx5_umem_find_best_quantized_pgoff()
77 page_offset = ib_umem_dma_offset(umem, page_size); in __mlx5_umem_find_best_quantized_pgoff()
80 page_offset = ib_umem_dma_offset(umem, page_size); in __mlx5_umem_find_best_quantized_pgoff()
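
mlx5_ib_populate_pas() above walks the umem in device-page-size steps via rdma_umem_for_each_dma_block() and records each block's DMA address. A stripped-down sketch of that loop (my_fill_pas() is a hypothetical name; the pas array must hold ib_umem_num_dma_blocks() entries):

    #include <rdma/ib_umem.h>
    #include <rdma/ib_verbs.h>

    static void my_fill_pas(struct ib_umem *umem, size_t page_size, __be64 *pas)
    {
        struct ib_block_iter biter;

        /* One iteration per page_size-sized DMA block of the umem. */
        rdma_umem_for_each_dma_block(umem, &biter, page_size)
            *pas++ = cpu_to_be64(rdma_block_iter_dma_address(&biter));
    }
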
doorbell.c
42 struct ib_umem *umem; member
69 page->umem = ib_umem_get(context->ibucontext.device, virt & PAGE_MASK, in mlx5_ib_db_map_user()
71 if (IS_ERR(page->umem)) { in mlx5_ib_db_map_user()
72 err = PTR_ERR(page->umem); in mlx5_ib_db_map_user()
82 db->dma = sg_dma_address(page->umem->sgt_append.sgt.sgl) + in mlx5_ib_db_map_user()
100 ib_umem_release(db->u.user_page->umem); in mlx5_ib_db_unmap_user()
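
The doorbell helpers above (mlx4 and hns have near-identical copies below) pin the single page containing a user doorbell and record its DMA address plus the doorbell's offset within the page. In outline, with error paths trimmed and a hypothetical function name:

    #include <linux/err.h>
    #include <rdma/ib_umem.h>

    static struct ib_umem *my_db_map_user(struct ib_device *ibdev,
                                          unsigned long virt, u64 *dma)
    {
        struct ib_umem *umem;

        /* Pin the whole page the doorbell lives in. */
        umem = ib_umem_get(ibdev, virt & PAGE_MASK, PAGE_SIZE, 0);
        if (IS_ERR(umem))
            return umem;

        /* DMA address of the pinned page + offset of the doorbell. */
        *dma = sg_dma_address(umem->sgt_append.sgt.sgl) + (virt & ~PAGE_MASK);
        return umem; /* released later with ib_umem_release() */
    }
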
mr.c
1057 mr->umem = NULL; in mlx5_ib_get_dma_mr()
1106 umem->iova = iova; in mlx5_umem_dmabuf_default_pgsz()
1120 if (umem->is_dmabuf) in alloc_cacheable_mr()
1152 mr->umem = umem; in alloc_cacheable_mr()
1304 mr->umem = umem; in reg_create()
1541 if (IS_ERR(umem)) in mlx5_ib_reg_user_mr()
1849 umem = mr->umem; in mlx5_ib_rereg_user_mr()
1850 mr->umem = NULL; in mlx5_ib_rereg_user_mr()
2096 if (mr->umem) { in __mlx5_ib_dereg_mr()
2229 mr->umem = NULL; in mlx5_ib_alloc_pi_mr()
[all …]
/linux/drivers/infiniband/hw/mlx4/
mr.c
77 mr->umem = NULL; in mlx4_ib_get_dma_mr()
183 struct ib_umem *umem) in mlx4_ib_umem_write_mtt() argument
421 if (IS_ERR(mr->umem)) { in mlx4_ib_reg_user_mr()
422 err = PTR_ERR(mr->umem); in mlx4_ib_reg_user_mr()
450 ib_umem_release(mr->umem); in mlx4_ib_reg_user_mr()
507 if (IS_ERR(mmr->umem)) { in mlx4_ib_rereg_user_mr()
508 err = PTR_ERR(mmr->umem); in mlx4_ib_rereg_user_mr()
510 mmr->umem = NULL; in mlx4_ib_rereg_user_mr()
606 if (mr->umem) in mlx4_ib_dereg_mr()
607 ib_umem_release(mr->umem); in mlx4_ib_dereg_mr()
[all …]
doorbell.c
40 struct ib_umem *umem; member
67 page->umem = ib_umem_get(context->ibucontext.device, virt & PAGE_MASK, in mlx4_ib_db_map_user()
69 if (IS_ERR(page->umem)) { in mlx4_ib_db_map_user()
70 err = PTR_ERR(page->umem); in mlx4_ib_db_map_user()
78 db->dma = sg_dma_address(page->umem->sgt_append.sgt.sgl) + in mlx4_ib_db_map_user()
95 ib_umem_release(db->u.user_page->umem); in mlx4_ib_db_unmap_user()
/linux/drivers/infiniband/hw/vmw_pvrdma/
pvrdma_mr.c
117 struct ib_umem *umem; in pvrdma_reg_user_mr() local
129 umem = ib_umem_get(pd->device, start, length, access_flags); in pvrdma_reg_user_mr()
130 if (IS_ERR(umem)) { in pvrdma_reg_user_mr()
133 return ERR_CAST(umem); in pvrdma_reg_user_mr()
136 npages = ib_umem_num_dma_blocks(umem, PAGE_SIZE); in pvrdma_reg_user_mr()
152 mr->umem = umem; in pvrdma_reg_user_mr()
161 ret = pvrdma_page_dir_insert_umem(&mr->pdir, mr->umem, 0); in pvrdma_reg_user_mr()
190 ib_umem_release(umem); in pvrdma_reg_user_mr()
257 mr->umem = NULL; in pvrdma_alloc_mr()
294 ib_umem_release(mr->umem); in pvrdma_dereg_mr()
pvrdma_srq.c
149 srq->umem = ib_umem_get(ibsrq->device, ucmd.buf_addr, ucmd.buf_size, 0); in pvrdma_create_srq()
150 if (IS_ERR(srq->umem)) { in pvrdma_create_srq()
151 ret = PTR_ERR(srq->umem); in pvrdma_create_srq()
155 srq->npages = ib_umem_num_dma_blocks(srq->umem, PAGE_SIZE); in pvrdma_create_srq()
171 pvrdma_page_dir_insert_umem(&srq->pdir, srq->umem, 0); in pvrdma_create_srq()
209 ib_umem_release(srq->umem); in pvrdma_create_srq()
229 ib_umem_release(srq->umem); in pvrdma_free_srq()
/linux/drivers/infiniband/hw/mana/
mr.c
123 mr->umem = ib_umem_get(ibdev, start, length, access_flags); in mana_ib_reg_user_mr()
124 if (IS_ERR(mr->umem)) { in mana_ib_reg_user_mr()
125 err = PTR_ERR(mr->umem); in mana_ib_reg_user_mr()
131 err = mana_ib_create_dma_region(dev, mr->umem, &dma_region_handle, iova); in mana_ib_reg_user_mr()
165 ib_umem_release(mr->umem); in mana_ib_reg_user_mr()
185 if (mr->umem) in mana_ib_dereg_mr()
186 ib_umem_release(mr->umem); in mana_ib_dereg_mr()
main.c
243 struct ib_umem *umem; in mana_ib_create_queue() local
246 queue->umem = NULL; in mana_ib_create_queue()
251 if (IS_ERR(umem)) { in mana_ib_create_queue()
252 err = PTR_ERR(umem); in mana_ib_create_queue()
262 queue->umem = umem; in mana_ib_create_queue()
268 ib_umem_release(umem); in mana_ib_create_queue()
278 ib_umem_release(queue->umem); in mana_ib_destroy_queue()
384 create_req->length = umem->length; in mana_ib_gd_create_dma_region()
390 umem->length, num_pages_total); in mana_ib_gd_create_dma_region()
399 rdma_umem_for_each_dma_block(umem, &biter, page_sz) { in mana_ib_gd_create_dma_region()
[all …]
/linux/drivers/infiniband/hw/hns/
hns_roce_db.c
32 page->umem = ib_umem_get(context->ibucontext.device, page_addr, in hns_roce_db_map_user()
34 if (IS_ERR(page->umem)) { in hns_roce_db_map_user()
35 ret = PTR_ERR(page->umem); in hns_roce_db_map_user()
44 db->dma = sg_dma_address(page->umem->sgt_append.sgt.sgl) + offset; in hns_roce_db_map_user()
45 db->virt_addr = sg_virt(page->umem->sgt_append.sgt.sgl) + offset; in hns_roce_db_map_user()
63 ib_umem_release(db->u.user_page->umem); in hns_roce_db_unmap_user()
/linux/drivers/vdpa/vdpa_user/
vduse_dev.c
1037 if (!dev->umem) in vduse_dev_dereg_umem()
1050 atomic64_sub(dev->umem->npages, &dev->umem->mm->pinned_vm); in vduse_dev_dereg_umem()
1053 kfree(dev->umem); in vduse_dev_dereg_umem()
1054 dev->umem = NULL; in vduse_dev_dereg_umem()
1077 if (dev->umem) in vduse_dev_reg_umem()
1084 umem = kzalloc(sizeof(*umem), GFP_KERNEL); in vduse_dev_reg_umem()
1114 dev->umem = umem; in vduse_dev_reg_umem()
1123 kfree(umem); in vduse_dev_reg_umem()
1332 if (copy_from_user(&umem, argp, sizeof(umem))) in vduse_dev_ioctl()
1342 umem.uaddr, umem.size); in vduse_dev_ioctl()
[all …]
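
On the user-space side, VDUSE umem registration is an ioctl on the device fd, matching the copy_from_user() of the umem descriptor at line 1332 above. A hedged sketch assuming the VDUSE_IOTLB_REG_UMEM ioctl and struct vduse_iova_umem from <linux/vduse.h>; verify both against your kernel's uapi header, as this interface is comparatively new:

    #include <stdint.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <linux/vduse.h>

    /* Register a user buffer as backing memory for a VDUSE IOVA range.
     * Returns 0 on success, -1 with errno set otherwise. */
    static int vduse_reg_umem(int dev_fd, void *buf, uint64_t iova,
                              uint64_t size)
    {
        struct vduse_iova_umem um;

        memset(&um, 0, sizeof(um));
        um.uaddr = (uintptr_t)buf;
        um.iova = iova;
        um.size = size;

        return ioctl(dev_fd, VDUSE_IOTLB_REG_UMEM, &um);
    }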
