
Searched refs:umem (Results 1 – 25 of 104) sorted by relevance


/linux-6.3-rc2/net/xdp/
xdp_umem.c
26 unpin_user_pages_dirty_lock(umem->pgs, umem->npgs, true); in xdp_umem_unpin_pages()
29 umem->pgs = NULL; in xdp_umem_unpin_pages()
34 if (umem->user) { in xdp_umem_unaccount_pages()
35 atomic_long_sub(umem->npgs, &umem->user->locked_vm); in xdp_umem_unaccount_pages()
64 kfree(umem); in xdp_umem_release()
81 if (!umem) in xdp_put_umem()
100 umem->pgs = kvcalloc(umem->npgs, sizeof(*umem->pgs), GFP_KERNEL | __GFP_NOWARN); in xdp_umem_pin_pages()
221 err = xdp_umem_addr_map(umem, umem->pgs, umem->npgs); in xdp_umem_reg()
239 umem = kzalloc(sizeof(*umem), GFP_KERNEL); in xdp_umem_create()
240 if (!umem) in xdp_umem_create()
[all …]
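The hits above sketch the whole umem lifetime in xdp_umem.c: kzalloc the struct, kvcalloc a page array, pin the pages, and unwind with unpin_user_pages_dirty_lock()/kfree() on release. A minimal sketch of that pin/unpin pattern, using pin_user_pages_fast() for brevity (the real xdp_umem_pin_pages() pins under mmap_read_lock() and also charges locked_vm):

/* Sketch only: the error unwinding matches the hits above, but the
 * locked-memory accounting seen in xdp_umem_unaccount_pages()
 * is omitted. */
static int pin_umem_pages(struct xdp_umem *umem, unsigned long address)
{
	long pinned;

	umem->pgs = kvcalloc(umem->npgs, sizeof(*umem->pgs),
			     GFP_KERNEL | __GFP_NOWARN);
	if (!umem->pgs)
		return -ENOMEM;

	pinned = pin_user_pages_fast(address, umem->npgs,
				     FOLL_WRITE | FOLL_LONGTERM, umem->pgs);
	if (pinned != umem->npgs) {
		if (pinned > 0)
			unpin_user_pages_dirty_lock(umem->pgs, pinned, true);
		kvfree(umem->pgs);
		umem->pgs = NULL;
		return pinned < 0 ? pinned : -ENOMEM;
	}
	return 0;
}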
xsk_buff_pool.c
56 struct xdp_umem *umem) in xp_create_and_assign_umem() argument
77 pool->addrs_cnt = umem->size; in xp_create_and_assign_umem()
78 pool->heads_cnt = umem->chunks; in xp_create_and_assign_umem()
80 pool->headroom = umem->headroom; in xp_create_and_assign_umem()
84 pool->frame_len = umem->chunk_size - umem->headroom - in xp_create_and_assign_umem()
86 pool->umem = umem; in xp_create_and_assign_umem()
87 pool->addrs = umem->addrs; in xp_create_and_assign_umem()
100 xskb->xdp.frame_sz = umem->chunk_size - umem->headroom; in xp_create_and_assign_umem()
131 if (pool->umem->zc) { in xp_disable_drv_zc()
203 pool->umem->zc = true; in xp_assign_dev()
[all …]
xsk_diag.c
50 struct xdp_umem *umem = xs->umem; in xsk_diag_put_umem() local
54 if (!umem) in xsk_diag_put_umem()
57 du.id = umem->id; in xsk_diag_put_umem()
58 du.size = umem->size; in xsk_diag_put_umem()
59 du.num_pages = umem->npgs; in xsk_diag_put_umem()
60 du.chunk_size = umem->chunk_size; in xsk_diag_put_umem()
61 du.headroom = umem->headroom; in xsk_diag_put_umem()
65 if (umem->zc) in xsk_diag_put_umem()
67 du.refs = refcount_read(&umem->users); in xsk_diag_put_umem()
/linux-6.3-rc2/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/
umem.c
46 list_for_each_entry(umem, &master->umem, head) { in nvkm_umem_search()
67 if (!umem->map) in nvkm_umem_unmap()
70 if (umem->io) { in nvkm_umem_unmap()
75 umem->bar = NULL; in nvkm_umem_unmap()
79 umem->map = NULL; in nvkm_umem_unmap()
94 if (umem->map) in nvkm_umem_map()
98 int ret = nvkm_mem_map_host(umem->memory, &umem->map); in nvkm_umem_map()
131 return umem; in nvkm_umem_dtor()
164 if (!(umem = kzalloc(sizeof(*umem), GFP_KERNEL))) in nvkm_umem_new()
167 umem->mmu = mmu; in nvkm_umem_new()
[all …]
/linux-6.3-rc2/drivers/infiniband/core/
umem.c
88 if (umem->is_odp) { in ib_umem_find_best_pgsz()
175 umem = kzalloc(sizeof(*umem), GFP_KERNEL); in ib_umem_get()
176 if (!umem) in ib_umem_get()
185 umem->iova = addr; in ib_umem_get()
257 kfree(umem); in ib_umem_get()
269 if (!umem) in ib_umem_release()
273 if (umem->is_odp) in ib_umem_release()
276 __ib_umem_release(umem->ibdev, umem, 1); in ib_umem_release()
278 atomic64_sub(ib_umem_num_pages(umem), &umem->owning_mm->pinned_vm); in ib_umem_release()
280 kfree(umem); in ib_umem_release()
[all …]
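Every driver block below (mana, mlx4/mlx5, pvrdma, hns) funnels through this pair: ib_umem_get() pins and DMA-maps a user range, ib_umem_release() undoes it. A minimal caller sketch; struct my_mr and register_with_hw() are hypothetical stand-ins for a driver's own MR type and hardware hook:

/* Typical caller pattern for the core API above (sketch). */
static int reg_user_buffer(struct ib_device *ibdev, u64 start, u64 length,
			   int access_flags, struct my_mr *mr)
{
	struct ib_umem *umem;
	int err;

	umem = ib_umem_get(ibdev, start, length, access_flags);
	if (IS_ERR(umem))
		return PTR_ERR(umem);

	err = register_with_hw(mr, umem);	/* hypothetical hook */
	if (err) {
		ib_umem_release(umem);
		return err;
	}
	mr->umem = umem;
	return 0;
}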
umem_odp.c
119 struct ib_umem *umem; in ib_umem_odp_alloc_implicit() local
129 umem = &umem_odp->umem; in ib_umem_odp_alloc_implicit()
130 umem->ibdev = device; in ib_umem_odp_alloc_implicit()
167 struct ib_umem *umem; in ib_umem_odp_alloc_child() local
176 umem = &odp_data->umem; in ib_umem_odp_alloc_child()
177 umem->ibdev = root->umem.ibdev; in ib_umem_odp_alloc_child()
178 umem->length = size; in ib_umem_odp_alloc_child()
180 umem->writable = root->umem.writable; in ib_umem_odp_alloc_child()
181 umem->owning_mm = root->umem.owning_mm; in ib_umem_odp_alloc_child()
198 mmput(umem->owning_mm); in ib_umem_odp_alloc_child()
[all …]
umem_dmabuf.c
37 end = ALIGN(umem_dmabuf->umem.address + umem_dmabuf->umem.length, in ib_umem_dmabuf_map_pages()
63 umem_dmabuf->umem.sgt_append.sgt.nents = nmap; in ib_umem_dmabuf_map_pages()
120 struct ib_umem *umem; in ib_umem_dmabuf_get() local
143 umem = &umem_dmabuf->umem; in ib_umem_dmabuf_get()
144 umem->ibdev = device; in ib_umem_dmabuf_get()
145 umem->length = size; in ib_umem_dmabuf_get()
146 umem->address = offset; in ib_umem_dmabuf_get()
147 umem->writable = ib_access_writable(access); in ib_umem_dmabuf_get()
148 umem->is_dmabuf = 1; in ib_umem_dmabuf_get()
150 if (!ib_umem_num_pages(umem)) in ib_umem_dmabuf_get()
[all …]
/linux-6.3-rc2/tools/testing/selftests/bpf/
xsk.c
86 return umem ? umem->fd : -EINVAL; in xsk_umem__fd()
227 umem = calloc(1, sizeof(*umem)); in xsk_umem__create()
228 if (!umem) in xsk_umem__create()
266 free(umem); in xsk_umem__create()
342 struct xsk_umem *umem = ctx->umem; in xsk_put_ctx() local
393 ctx->umem = umem; in xsk_create_ctx()
577 if (!umem) in xsk_socket__create()
590 if (!umem) in xsk_umem__delete()
597 if (!err && umem->fill_save && umem->comp_save) { in xsk_umem__delete()
605 free(umem); in xsk_umem__delete()
[all …]
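From user space, the three entry points hit above (xsk_umem__create(), xsk_umem__fd(), xsk_umem__delete()) wrap an ordinary page-aligned buffer. A sketch with assumed sizes; NUM_FRAMES is illustrative and the header path depends on whether libbpf's xsk.h or libxdp's xdp/xsk.h is in use:

#include <errno.h>
#include <stdlib.h>
#include <unistd.h>
#include <bpf/xsk.h>		/* assumption: libbpf's copy of xsk.h */

#define NUM_FRAMES 4096		/* illustrative */

static int create_umem(struct xsk_umem **umem,
		       struct xsk_ring_prod *fq, struct xsk_ring_cons *cq)
{
	size_t size = (size_t)NUM_FRAMES * XSK_UMEM__DEFAULT_FRAME_SIZE;
	void *buf;
	int ret;

	ret = posix_memalign(&buf, getpagesize(), size);
	if (ret)
		return -ret;

	/* NULL config selects the library defaults */
	return xsk_umem__create(umem, buf, size, fq, cq, NULL);
}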
xskxceiver.c
257 return !!ifobj->umem->umem; in is_umem_valid()
286 ret = xsk_umem__create(&umem->umem, buffer, size, in xsk_configure_umem()
287 &umem->fq, &umem->cq, &cfg); in xsk_configure_umem()
322 xsk->umem = umem; in __xsk_configure_socket()
372 xsk_umem__delete(umem->umem); in ifobj_zc_avail()
478 memset(ifobj->umem, 0, sizeof(*ifobj->umem)); in __test_spec_init()
636 pkt_set(umem, &pkt_stream->pkts[i], (i % umem->num_frames) * umem->frame_size, in pkt_stream_generate()
663 struct xsk_umem_info *umem = ifobj->umem; in __pkt_stream_replace_half() local
898 struct xsk_umem_info *umem = xsk->umem; in receive_pkts() local
1365 xsk_umem__delete(ifobj->umem->umem); in testapp_clean_xsk_umem()
[all …]
/linux-6.3-rc2/include/rdma/
ib_umem.h
32 struct ib_umem umem; member
45 return container_of(umem, struct ib_umem_dmabuf, umem); in to_ib_umem_dmabuf()
51 return umem->address & ~PAGE_MASK; in ib_umem_offset()
57 return (sg_dma_address(umem->sgt_append.sgt.sgl) + ib_umem_offset(umem)) & in ib_umem_dma_offset()
64 return (size_t)((ALIGN(umem->iova + umem->length, pgsz) - in ib_umem_num_dma_blocks()
65 ALIGN_DOWN(umem->iova, pgsz))) / in ib_umem_num_dma_blocks()
71 return ib_umem_num_dma_blocks(umem, PAGE_SIZE); in ib_umem_num_pages()
75 struct ib_umem *umem, in __rdma_umem_block_iter_start() argument
79 umem->sgt_append.sgt.nents, pgsz); in __rdma_umem_block_iter_start()
101 void ib_umem_release(struct ib_umem *umem);
[all …]
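The ib_umem_num_dma_blocks() formula above widens the range to pgsz boundaries on both ends before dividing, so an unaligned range can cost an extra block. A worked case:

/* iova = 0x1234, length = 0x2000 (8 KiB), pgsz = 0x1000 (4 KiB):
 *   ALIGN(0x1234 + 0x2000, 0x1000) = 0x4000
 *   ALIGN_DOWN(0x1234, 0x1000)     = 0x1000
 *   (0x4000 - 0x1000) / 0x1000     = 3 DMA blocks
 * and ib_umem_num_pages() is just this with pgsz = PAGE_SIZE. */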
ib_umem_odp.h
13 struct ib_umem umem; member
47 static inline struct ib_umem_odp *to_ib_umem_odp(struct ib_umem *umem) in to_ib_umem_odp() argument
49 return container_of(umem, struct ib_umem_odp, umem); in to_ib_umem_odp()
/linux-6.3-rc2/drivers/infiniband/sw/siw/
siw_mem.c
85 kfree(umem->page_chunk); in siw_umem_release()
86 kfree(umem); in siw_umem_release()
367 struct siw_umem *umem; in siw_umem_get() local
384 umem = kzalloc(sizeof(*umem), GFP_KERNEL); in siw_umem_get()
385 if (!umem) in siw_umem_get()
389 umem->owning_mm = mm_s; in siw_umem_get()
390 umem->writable = writable; in siw_umem_get()
407 umem->page_chunk = in siw_umem_get()
409 if (!umem->page_chunk) { in siw_umem_get()
429 umem->num_pages += rv; in siw_umem_get()
[all …]
siw_mem.h
10 void siw_umem_release(struct siw_umem *umem, bool dirty);
58 static inline struct page *siw_get_upage(struct siw_umem *umem, u64 addr) in siw_get_upage() argument
60 unsigned int page_idx = (addr - umem->fp_addr) >> PAGE_SHIFT, in siw_get_upage()
64 if (likely(page_idx < umem->num_pages)) in siw_get_upage()
65 return umem->page_chunk[chunk_idx].plist[page_in_chunk]; in siw_get_upage()
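siw_get_upage() above is a two-level lookup: a linear page index derived from the address, split into a chunk index and an offset inside the chunk. An equivalent sketch using division and modulo; PAGES_PER_CHUNK = 512 is an assumed value standing in for the shift/mask constants the real header uses:

#define PAGES_PER_CHUNK 512	/* assumption for illustration */

static inline struct page *get_upage(struct siw_umem *umem, u64 addr)
{
	unsigned int page_idx = (addr - umem->fp_addr) >> PAGE_SHIFT;
	unsigned int chunk_idx = page_idx / PAGES_PER_CHUNK;
	unsigned int page_in_chunk = page_idx % PAGES_PER_CHUNK;

	if (likely(page_idx < umem->num_pages))
		return umem->page_chunk[chunk_idx].plist[page_in_chunk];
	return NULL;
}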
/linux-6.3-rc2/drivers/infiniband/hw/mana/
wq.c
16 struct ib_umem *umem; in mana_ib_create_wq() local
35 umem = ib_umem_get(pd->device, ucmd.wq_buf_addr, ucmd.wq_buf_size, in mana_ib_create_wq()
37 if (IS_ERR(umem)) { in mana_ib_create_wq()
38 err = PTR_ERR(umem); in mana_ib_create_wq()
44 wq->umem = umem; in mana_ib_create_wq()
49 err = mana_ib_gd_create_dma_region(mdev, wq->umem, &wq->gdma_region); in mana_ib_create_wq()
66 ib_umem_release(umem); in mana_ib_create_wq()
90 ib_umem_release(wq->umem); in mana_ib_destroy_wq()
cq.c
35 cq->umem = ib_umem_get(ibdev, ucmd.buf_addr, cq->cqe * COMP_ENTRY_SIZE, in mana_ib_create_cq()
37 if (IS_ERR(cq->umem)) { in mana_ib_create_cq()
38 err = PTR_ERR(cq->umem); in mana_ib_create_cq()
44 err = mana_ib_gd_create_dma_region(mdev, cq->umem, &cq->gdma_region); in mana_ib_create_cq()
63 ib_umem_release(cq->umem); in mana_ib_create_cq()
76 ib_umem_release(cq->umem); in mana_ib_destroy_cq()
mr.c
128 mr->umem = ib_umem_get(ibdev, start, length, access_flags); in mana_ib_reg_user_mr()
129 if (IS_ERR(mr->umem)) { in mana_ib_reg_user_mr()
130 err = PTR_ERR(mr->umem); in mana_ib_reg_user_mr()
136 err = mana_ib_gd_create_dma_region(dev, mr->umem, &dma_region_handle); in mana_ib_reg_user_mr()
171 ib_umem_release(mr->umem); in mana_ib_reg_user_mr()
191 if (mr->umem) in mana_ib_dereg_mr()
192 ib_umem_release(mr->umem); in mana_ib_dereg_mr()
/linux-6.3-rc2/drivers/infiniband/hw/mlx4/
mr.c
77 mr->umem = NULL; in mlx4_ib_get_dma_mr()
183 struct ib_umem *umem) in mlx4_ib_umem_write_mtt() argument
421 if (IS_ERR(mr->umem)) { in mlx4_ib_reg_user_mr()
422 err = PTR_ERR(mr->umem); in mlx4_ib_reg_user_mr()
450 ib_umem_release(mr->umem); in mlx4_ib_reg_user_mr()
507 if (IS_ERR(mmr->umem)) { in mlx4_ib_rereg_user_mr()
508 err = PTR_ERR(mmr->umem); in mlx4_ib_rereg_user_mr()
510 mmr->umem = NULL; in mlx4_ib_rereg_user_mr()
606 if (mr->umem) in mlx4_ib_dereg_mr()
607 ib_umem_release(mr->umem); in mlx4_ib_dereg_mr()
[all …]
doorbell.c
40 struct ib_umem *umem; member
67 page->umem = ib_umem_get(context->ibucontext.device, virt & PAGE_MASK, in mlx4_ib_db_map_user()
69 if (IS_ERR(page->umem)) { in mlx4_ib_db_map_user()
70 err = PTR_ERR(page->umem); in mlx4_ib_db_map_user()
78 db->dma = sg_dma_address(page->umem->sgt_append.sgt.sgl) + in mlx4_ib_db_map_user()
95 ib_umem_release(db->u.user_page->umem); in mlx4_ib_db_unmap_user()
/linux-6.3-rc2/drivers/infiniband/hw/mlx5/
doorbell.c
42 struct ib_umem *umem; member
69 page->umem = ib_umem_get(context->ibucontext.device, virt & PAGE_MASK, in mlx5_ib_db_map_user()
71 if (IS_ERR(page->umem)) { in mlx5_ib_db_map_user()
72 err = PTR_ERR(page->umem); in mlx5_ib_db_map_user()
82 db->dma = sg_dma_address(page->umem->sgt_append.sgt.sgl) + in mlx5_ib_db_map_user()
100 ib_umem_release(db->u.user_page->umem); in mlx5_ib_db_unmap_user()
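The mlx4 and mlx5 doorbell hits above (and the hns ones further down) repeat one idiom: pin the page containing the user's doorbell address, then add the sub-page offset back onto that page's DMA address. A condensed sketch; struct my_db_page is a hypothetical stand-in for each driver's private page struct:

/* Sketch of the shared user-doorbell mapping pattern. */
static int db_map_user(struct ib_ucontext *uctx, unsigned long virt,
		       struct my_db_page *page, dma_addr_t *dma)
{
	page->umem = ib_umem_get(uctx->device, virt & PAGE_MASK,
				 PAGE_SIZE, 0);
	if (IS_ERR(page->umem))
		return PTR_ERR(page->umem);

	*dma = sg_dma_address(page->umem->sgt_append.sgt.sgl) +
	       (virt & ~PAGE_MASK);
	return 0;
}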
mr.c
1084 mr->umem = NULL; in mlx5_ib_get_dma_mr()
1133 umem->iova = iova; in mlx5_umem_dmabuf_default_pgsz()
1180 mr->umem = umem; in alloc_cacheable_mr()
1263 mr->umem = umem; in reg_create()
1491 if (IS_ERR(umem)) in mlx5_ib_reg_user_mr()
1704 umem = mr->umem; in mlx5_ib_rereg_user_mr()
1705 mr->umem = NULL; in mlx5_ib_rereg_user_mr()
1888 if (mr->umem && mlx5r_umr_can_load_pas(dev, mr->umem->length)) in mlx5_ib_dereg_mr()
1899 if (mr->umem) { in mlx5_ib_dereg_mr()
1998 mr->umem = NULL; in mlx5_ib_alloc_pi_mr()
[all …]
/linux-6.3-rc2/lib/
test_user_copy.c
47 static int test_check_nonzero_user(char *kmem, char __user *umem, size_t size) in test_check_nonzero_user() argument
65 umem += start; in test_check_nonzero_user()
87 ret |= test(copy_to_user(umem, kmem, size), in test_check_nonzero_user()
93 int retval = check_zeroed_user(umem + start, len); in test_check_nonzero_user()
105 static int test_copy_struct_from_user(char *kmem, char __user *umem, in test_copy_struct_from_user() argument
124 ret |= test(copy_to_user(umem, umem_src, size), in test_copy_struct_from_user()
134 ret |= test(copy_struct_from_user(kmem, ksize, umem, usize), in test_copy_struct_from_user()
147 ret |= test(copy_struct_from_user(kmem, ksize, umem, usize), in test_copy_struct_from_user()
157 ret |= test(copy_struct_from_user(kmem, ksize, umem, usize) != -E2BIG, in test_copy_struct_from_user()
165 ret |= test(clear_user(umem + ksize, usize - ksize), in test_copy_struct_from_user()
[all …]
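The selftest above exercises the copy_struct_from_user() contract: copy min(ksize, usize) bytes, zero-fill any kernel-side tail, and fail with -E2BIG only when the oversized user tail contains non-zero bytes. A sketch of a caller leaning on that contract; the struct and function names are hypothetical:

/* Extensible-ABI argument parsing (sketch). */
struct my_args {
	u64 flags;
	u64 addr;	/* later ABI revisions may append fields */
};

static int parse_args(struct my_args *args, void __user *uptr, size_t usize)
{
	/* Old userspace (usize < ksize): tail of *args is zeroed.
	 * New userspace (usize > ksize): -E2BIG unless the extra
	 * bytes are all zero. */
	return copy_struct_from_user(args, sizeof(*args), uptr, usize);
}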
/linux-6.3-rc2/drivers/infiniband/hw/vmw_pvrdma/
pvrdma_mr.c
117 struct ib_umem *umem; in pvrdma_reg_user_mr() local
129 umem = ib_umem_get(pd->device, start, length, access_flags); in pvrdma_reg_user_mr()
130 if (IS_ERR(umem)) { in pvrdma_reg_user_mr()
133 return ERR_CAST(umem); in pvrdma_reg_user_mr()
136 npages = ib_umem_num_dma_blocks(umem, PAGE_SIZE); in pvrdma_reg_user_mr()
152 mr->umem = umem; in pvrdma_reg_user_mr()
161 ret = pvrdma_page_dir_insert_umem(&mr->pdir, mr->umem, 0); in pvrdma_reg_user_mr()
190 ib_umem_release(umem); in pvrdma_reg_user_mr()
257 mr->umem = NULL; in pvrdma_alloc_mr()
294 ib_umem_release(mr->umem); in pvrdma_dereg_mr()
pvrdma_srq.c
149 srq->umem = ib_umem_get(ibsrq->device, ucmd.buf_addr, ucmd.buf_size, 0); in pvrdma_create_srq()
150 if (IS_ERR(srq->umem)) { in pvrdma_create_srq()
151 ret = PTR_ERR(srq->umem); in pvrdma_create_srq()
155 srq->npages = ib_umem_num_dma_blocks(srq->umem, PAGE_SIZE); in pvrdma_create_srq()
171 pvrdma_page_dir_insert_umem(&srq->pdir, srq->umem, 0); in pvrdma_create_srq()
209 ib_umem_release(srq->umem); in pvrdma_create_srq()
229 ib_umem_release(srq->umem); in pvrdma_free_srq()
/linux-6.3-rc2/drivers/vdpa/vdpa_user/
vduse_dev.c
943 if (!dev->umem) in vduse_dev_dereg_umem()
953 atomic64_sub(dev->umem->npages, &dev->umem->mm->pinned_vm); in vduse_dev_dereg_umem()
956 kfree(dev->umem); in vduse_dev_dereg_umem()
957 dev->umem = NULL; in vduse_dev_dereg_umem()
980 if (dev->umem) in vduse_dev_reg_umem()
987 umem = kzalloc(sizeof(*umem), GFP_KERNEL); in vduse_dev_reg_umem()
1017 dev->umem = umem; in vduse_dev_reg_umem()
1026 kfree(umem); in vduse_dev_reg_umem()
1208 if (copy_from_user(&umem, argp, sizeof(umem))) in vduse_dev_ioctl()
1217 umem.uaddr, umem.size); in vduse_dev_ioctl()
[all …]
/linux-6.3-rc2/drivers/infiniband/hw/hns/
hns_roce_db.c
32 page->umem = ib_umem_get(context->ibucontext.device, page_addr, in hns_roce_db_map_user()
34 if (IS_ERR(page->umem)) { in hns_roce_db_map_user()
35 ret = PTR_ERR(page->umem); in hns_roce_db_map_user()
44 db->dma = sg_dma_address(page->umem->sgt_append.sgt.sgl) + offset; in hns_roce_db_map_user()
45 db->virt_addr = sg_virt(page->umem->sgt_append.sgt.sgl) + offset; in hns_roce_db_map_user()
63 ib_umem_release(db->u.user_page->umem); in hns_roce_db_unmap_user()

