
Searched refs:umem (Results 1 – 25 of 79) sorted by relevance

/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/
umem.c
46 list_for_each_entry(umem, &master->umem, head) { in nvkm_umem_search()
67 if (!umem->map) in nvkm_umem_unmap()
70 if (umem->io) { in nvkm_umem_unmap()
75 umem->bar = NULL; in nvkm_umem_unmap()
79 umem->map = NULL; in nvkm_umem_unmap()
94 if (umem->map) in nvkm_umem_map()
98 int ret = nvkm_mem_map_host(umem->memory, &umem->map); in nvkm_umem_map()
131 return umem; in nvkm_umem_dtor()
164 if (!(umem = kzalloc(sizeof(*umem), GFP_KERNEL))) in nvkm_umem_new()
167 umem->mmu = mmu; in nvkm_umem_new()
[all …]
/drivers/infiniband/core/
umem.c
93 if (umem->is_odp) { in ib_umem_find_best_pgsz()
186 umem = kzalloc(sizeof(*umem), GFP_KERNEL); in ib_umem_get()
187 if (!umem) in ib_umem_get()
196 umem->iova = addr; in ib_umem_get()
268 kfree(umem); in ib_umem_get()
280 if (!umem) in ib_umem_release()
284 if (umem->is_odp) in ib_umem_release()
287 __ib_umem_release(umem->ibdev, umem, 1); in ib_umem_release()
289 atomic64_sub(ib_umem_num_pages(umem), &umem->owning_mm->pinned_vm); in ib_umem_release()
291 kfree(umem); in ib_umem_release()
[all …]
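The core umem.c hits above outline the usual lifecycle: a driver pins user memory with ib_umem_get() and later drops it with ib_umem_release(), which tolerates a NULL umem (line 280 above) and unwinds the pinned_vm accounting. A minimal sketch of that pattern from the driver side; struct my_mr and the my_* helpers are hypothetical names, only the ib_umem_* calls come from the results above.

```c
#include <linux/err.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_verbs.h>

/* Hypothetical driver-side MR wrapper, for illustration only. */
struct my_mr {
	struct ib_mr ibmr;
	struct ib_umem *umem;
};

/* Pin a user buffer: the pattern behind most *_reg_user_mr() hits below. */
static int my_pin_user_buf(struct ib_device *ibdev, struct my_mr *mr,
			   unsigned long start, size_t length, int access)
{
	struct ib_umem *umem;

	umem = ib_umem_get(ibdev, start, length, access);
	if (IS_ERR(umem))
		return PTR_ERR(umem);

	mr->umem = umem;
	return 0;
}

static void my_unpin_user_buf(struct my_mr *mr)
{
	/* NULL-safe; unpins the pages and drops the pinned_vm accounting. */
	ib_umem_release(mr->umem);
	mr->umem = NULL;
}
```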
umem_odp.c
54 umem_odp->umem.is_odp = 1; in ib_init_umem_implicit_odp()
137 struct ib_umem *umem; in ib_umem_odp_alloc_implicit() local
146 umem = &umem_odp->umem; in ib_umem_odp_alloc_implicit()
147 umem->ibdev = device; in ib_umem_odp_alloc_implicit()
178 struct ib_umem *umem; in ib_umem_odp_alloc_child() local
187 umem = &odp_data->umem; in ib_umem_odp_alloc_child()
188 umem->ibdev = root->umem.ibdev; in ib_umem_odp_alloc_child()
191 umem->writable = root->umem.writable; in ib_umem_odp_alloc_child()
192 umem->owning_mm = root->umem.owning_mm; in ib_umem_odp_alloc_child()
209 mmput(umem->owning_mm); in ib_umem_odp_alloc_child()
[all …]
umem_dmabuf.c
40 end = ALIGN(umem_dmabuf->umem.address + umem_dmabuf->umem.length, in ib_umem_dmabuf_map_pages()
66 umem_dmabuf->umem.sgt_append.sgt.nents = nmap; in ib_umem_dmabuf_map_pages()
125 struct ib_umem *umem; in ib_umem_dmabuf_get_with_dma_device() local
148 umem = &umem_dmabuf->umem; in ib_umem_dmabuf_get_with_dma_device()
149 umem->ibdev = device; in ib_umem_dmabuf_get_with_dma_device()
150 umem->length = size; in ib_umem_dmabuf_get_with_dma_device()
151 umem->address = offset; in ib_umem_dmabuf_get_with_dma_device()
152 umem->writable = ib_access_writable(access); in ib_umem_dmabuf_get_with_dma_device()
153 umem->is_dmabuf = 1; in ib_umem_dmabuf_get_with_dma_device()
155 if (!ib_umem_num_pages(umem)) in ib_umem_dmabuf_get_with_dma_device()
[all …]
uverbs_std_types_cq.c
72 struct ib_umem *umem = NULL; in UVERBS_HANDLER() local
138 umem = ib_umem_get(ib_dev, buffer_va, buffer_length, IB_ACCESS_LOCAL_WRITE); in UVERBS_HANDLER()
139 if (IS_ERR(umem)) { in UVERBS_HANDLER()
140 ret = PTR_ERR(umem); in UVERBS_HANDLER()
169 umem = &umem_dmabuf->umem; in UVERBS_HANDLER()
180 ib_umem_release(umem); in UVERBS_HANDLER()
194 ret = umem ? ib_dev->ops.create_cq_umem(cq, &attr, umem, attrs) : in UVERBS_HANDLER()
/drivers/infiniband/sw/siw/
siw_mem.c
46 if (umem->base_mem) in siw_umem_release()
53 kfree(umem->page_chunk); in siw_umem_release()
54 kfree(umem); in siw_umem_release()
336 struct siw_umem *umem; in siw_umem_get() local
350 umem = kzalloc(sizeof(*umem), GFP_KERNEL); in siw_umem_get()
351 if (!umem) in siw_umem_get()
354 umem->page_chunk = in siw_umem_get()
356 if (!umem->page_chunk) { in siw_umem_get()
388 umem->num_pages++; in siw_umem_get()
395 return umem; in siw_umem_get()
[all …]
siw_mem.h
11 void siw_umem_release(struct siw_umem *umem);
58 static inline struct page *siw_get_upage(struct siw_umem *umem, u64 addr) in siw_get_upage() argument
60 unsigned int page_idx = (addr - umem->fp_addr) >> PAGE_SHIFT, in siw_get_upage()
64 if (likely(page_idx < umem->num_pages)) in siw_get_upage()
65 return umem->page_chunk[chunk_idx].plist[page_in_chunk]; in siw_get_upage()
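The siw_get_upage() hit shows only the first index calculation; the chunk_idx and page_in_chunk terms fall on lines the search output elides. As a rough sketch of the same idea, assuming a power-of-two chunk capacity (PAGES_PER_CHUNK and lookup_chunked_page() are made-up names, not siw's), a flat page index splits into a chunk index and an offset within that chunk:

```c
#include <linux/mm.h>	/* PAGE_SHIFT, struct page */

#define PAGES_PER_CHUNK 256	/* assumed, power of two */

/* Illustrative chunked page-list lookup, same shape as siw_get_upage(). */
static struct page *lookup_chunked_page(struct page **chunk[],
					unsigned int num_pages,
					u64 base_addr, u64 addr)
{
	unsigned int page_idx = (addr - base_addr) >> PAGE_SHIFT;
	unsigned int chunk_idx = page_idx / PAGES_PER_CHUNK;
	unsigned int page_in_chunk = page_idx % PAGES_PER_CHUNK;

	if (likely(page_idx < num_pages))
		return chunk[chunk_idx][page_in_chunk];
	return NULL;
}
```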
/drivers/infiniband/hw/mlx4/
mr.c
77 mr->umem = NULL; in mlx4_ib_get_dma_mr()
91 struct ib_umem *umem) in mlx4_ib_umem_write_mtt() argument
159 if (IS_ERR(mr->umem)) { in mlx4_ib_reg_user_mr()
160 err = PTR_ERR(mr->umem); in mlx4_ib_reg_user_mr()
192 ib_umem_release(mr->umem); in mlx4_ib_reg_user_mr()
249 if (IS_ERR(mmr->umem)) { in mlx4_ib_rereg_user_mr()
250 err = PTR_ERR(mmr->umem); in mlx4_ib_rereg_user_mr()
252 mmr->umem = NULL; in mlx4_ib_rereg_user_mr()
348 if (mr->umem) in mlx4_ib_dereg_mr()
349 ib_umem_release(mr->umem); in mlx4_ib_dereg_mr()
[all …]
doorbell.c
40 struct ib_umem *umem; member
67 page->umem = ib_umem_get(context->ibucontext.device, virt & PAGE_MASK, in mlx4_ib_db_map_user()
69 if (IS_ERR(page->umem)) { in mlx4_ib_db_map_user()
70 err = PTR_ERR(page->umem); in mlx4_ib_db_map_user()
78 db->dma = sg_dma_address(page->umem->sgt_append.sgt.sgl) + in mlx4_ib_db_map_user()
95 ib_umem_release(db->u.user_page->umem); in mlx4_ib_db_unmap_user()
srq.c
117 srq->umem = in mlx4_ib_create_srq()
119 if (IS_ERR(srq->umem)) in mlx4_ib_create_srq()
120 return PTR_ERR(srq->umem); in mlx4_ib_create_srq()
123 dev->dev, ib_umem_num_dma_blocks(srq->umem, PAGE_SIZE), in mlx4_ib_create_srq()
128 err = mlx4_ib_umem_write_mtt(dev, &srq->mtt, srq->umem); in mlx4_ib_create_srq()
213 if (!srq->umem) in mlx4_ib_create_srq()
215 ib_umem_release(srq->umem); in mlx4_ib_create_srq()
289 ib_umem_release(msrq->umem); in mlx4_ib_destroy_srq()
/drivers/infiniband/hw/mlx5/
mem.c
40 void mlx5_ib_populate_pas(struct ib_umem *umem, size_t page_size, __be64 *pas, in mlx5_ib_populate_pas() argument
45 rdma_umem_for_each_dma_block (umem, &biter, page_size) { in mlx5_ib_populate_pas()
58 struct ib_umem *umem, unsigned long pgsz_bitmap, in __mlx5_umem_find_best_quantized_pgoff() argument
66 page_size = ib_umem_find_best_pgoff(umem, pgsz_bitmap, pgoff_bitmask); in __mlx5_umem_find_best_quantized_pgoff()
77 page_offset = ib_umem_dma_offset(umem, page_size); in __mlx5_umem_find_best_quantized_pgoff()
80 page_offset = ib_umem_dma_offset(umem, page_size); in __mlx5_umem_find_best_quantized_pgoff()
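mlx5_ib_populate_pas() above is a thin wrapper around the generic DMA-block iterator: it walks the umem in page_size-sized blocks and records each block's bus address. A reduced sketch of that loop; fill_pas() is a made-up name, and mlx5's __be64 encoding and access-flag bits are left out.

```c
#include <rdma/ib_umem.h>
#include <rdma/ib_verbs.h>

/* Walk a umem in page_size-sized DMA blocks and record each block address. */
static void fill_pas(struct ib_umem *umem, size_t page_size, u64 *pas)
{
	struct ib_block_iter biter;

	rdma_umem_for_each_dma_block(umem, &biter, page_size)
		*pas++ = rdma_block_iter_dma_address(&biter);
}
```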
doorbell.c
42 struct ib_umem *umem; member
69 page->umem = ib_umem_get(context->ibucontext.device, virt & PAGE_MASK, in mlx5_ib_db_map_user()
71 if (IS_ERR(page->umem)) { in mlx5_ib_db_map_user()
72 err = PTR_ERR(page->umem); in mlx5_ib_db_map_user()
82 db->dma = sg_dma_address(page->umem->sgt_append.sgt.sgl) + in mlx5_ib_db_map_user()
100 ib_umem_release(db->u.user_page->umem); in mlx5_ib_db_unmap_user()
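The mlx4 and mlx5 doorbell.c hits follow the same pattern: pin the single user page holding the doorbell record with ib_umem_get(), then take the DMA address straight from the first scatterlist entry plus the in-page offset. A condensed sketch; map_user_db_page() is a made-up helper, and the real drivers also keep the page on a per-context list and release it in *_db_unmap_user().

```c
#include <linux/err.h>
#include <linux/scatterlist.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_verbs.h>

/* Pin the page holding a user doorbell record and compute its bus address. */
static int map_user_db_page(struct ib_device *ibdev, unsigned long virt,
			    struct ib_umem **umem_out, dma_addr_t *dma)
{
	struct ib_umem *umem;

	umem = ib_umem_get(ibdev, virt & PAGE_MASK, PAGE_SIZE, 0);
	if (IS_ERR(umem))
		return PTR_ERR(umem);

	/* Single sg entry covers the pinned page. */
	*dma = sg_dma_address(umem->sgt_append.sgt.sgl) + (virt & ~PAGE_MASK);
	*umem_out = umem;	/* caller releases with ib_umem_release() */
	return 0;
}
```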
mr.c
1088 mr->umem = NULL; in mlx5_ib_get_dma_mr()
1137 umem->iova = iova; in mlx5_umem_dmabuf_default_pgsz()
1152 if (umem->is_dmabuf) in alloc_cacheable_mr()
1188 mr->umem = umem; in alloc_cacheable_mr()
1348 mr->umem = umem; in reg_create()
1602 if (IS_ERR(umem)) in mlx5_ib_reg_user_mr()
1927 umem = mr->umem; in mlx5_ib_rereg_user_mr()
1928 mr->umem = NULL; in mlx5_ib_rereg_user_mr()
2234 if (mr->umem) { in __mlx5_ib_dereg_mr()
2367 mr->umem = NULL; in mlx5_ib_alloc_pi_mr()
[all …]
mlx5_ib.h
83 struct ib_umem *umem, unsigned long pgsz_bitmap,
406 struct ib_umem *umem; member
424 struct ib_umem *umem; member
556 struct ib_umem *umem; member
609 struct ib_umem *umem; member
691 struct ib_umem *umem; member
745 mr->umem->is_odp; in is_odp_mr()
751 mr->umem->is_dmabuf; in is_dmabuf_mr()
1735 struct ib_umem *umem, int access_flags) in mlx5_umem_needs_ats() argument
1813 &umem_dmabuf->umem, in mlx5_umem_dmabuf_find_best_pgsz()
[all …]
srq_cmd.c
125 if (in->umem) { in create_srq_cmd()
142 if (in->umem) in create_srq_cmd()
144 in->umem, in create_srq_cmd()
228 if (in->umem) { in create_xrc_srq_cmd()
247 if (in->umem) in create_xrc_srq_cmd()
249 in->umem, in create_xrc_srq_cmd()
341 if (in->umem) { in create_rmp_cmd()
365 if (in->umem) in create_rmp_cmd()
367 in->umem, in create_rmp_cmd()
504 if (in->umem) in create_xrq_cmd()
[all …]
/drivers/infiniband/sw/rxe/
rxe_odp.c
46 struct ib_umem_odp *umem_odp = to_ib_umem_odp(mr->umem); in rxe_odp_do_pagefault_and_lock()
66 struct ib_umem_odp *umem_odp = to_ib_umem_odp(mr->umem); in rxe_odp_init_pages()
69 ret = rxe_odp_do_pagefault_and_lock(mr, mr->umem->address, in rxe_odp_init_pages()
70 mr->umem->length, in rxe_odp_init_pages()
109 mr->umem = &umem_odp->umem; in rxe_odp_mr_init_user()
113 mr->page_offset = ib_umem_offset(&umem_odp->umem); in rxe_odp_mr_init_user()
162 struct ib_umem_odp *umem_odp = to_ib_umem_odp(mr->umem); in rxe_odp_map_range_and_lock()
235 if (unlikely(!mr->umem->is_odp)) in rxe_odp_mr_copy()
447 umem_odp = to_ib_umem_odp(work->frags[i].mr->umem); in rxe_ib_prefetch_mr_work()
480 !mr->umem->writable) { in rxe_ib_prefetch_sg_list()
[all …]
/drivers/infiniband/hw/vmw_pvrdma/
pvrdma_mr.c
119 struct ib_umem *umem; in pvrdma_reg_user_mr() local
134 umem = ib_umem_get(pd->device, start, length, access_flags); in pvrdma_reg_user_mr()
135 if (IS_ERR(umem)) { in pvrdma_reg_user_mr()
138 return ERR_CAST(umem); in pvrdma_reg_user_mr()
141 npages = ib_umem_num_dma_blocks(umem, PAGE_SIZE); in pvrdma_reg_user_mr()
157 mr->umem = umem; in pvrdma_reg_user_mr()
166 ret = pvrdma_page_dir_insert_umem(&mr->pdir, mr->umem, 0); in pvrdma_reg_user_mr()
195 ib_umem_release(umem); in pvrdma_reg_user_mr()
262 mr->umem = NULL; in pvrdma_alloc_mr()
299 ib_umem_release(mr->umem); in pvrdma_dereg_mr()
pvrdma_srq.c
149 srq->umem = ib_umem_get(ibsrq->device, ucmd.buf_addr, ucmd.buf_size, 0); in pvrdma_create_srq()
150 if (IS_ERR(srq->umem)) { in pvrdma_create_srq()
151 ret = PTR_ERR(srq->umem); in pvrdma_create_srq()
155 srq->npages = ib_umem_num_dma_blocks(srq->umem, PAGE_SIZE); in pvrdma_create_srq()
171 pvrdma_page_dir_insert_umem(&srq->pdir, srq->umem, 0); in pvrdma_create_srq()
209 ib_umem_release(srq->umem); in pvrdma_create_srq()
229 ib_umem_release(srq->umem); in pvrdma_free_srq()
pvrdma_cq.c
142 cq->umem = ib_umem_get(ibdev, ucmd.buf_addr, ucmd.buf_size, in pvrdma_create_cq()
144 if (IS_ERR(cq->umem)) { in pvrdma_create_cq()
145 ret = PTR_ERR(cq->umem); in pvrdma_create_cq()
149 npages = ib_umem_num_dma_blocks(cq->umem, PAGE_SIZE); in pvrdma_create_cq()
177 pvrdma_page_dir_insert_umem(&cq->pdir, cq->umem, 0); in pvrdma_create_cq()
220 ib_umem_release(cq->umem); in pvrdma_create_cq()
232 ib_umem_release(cq->umem); in pvrdma_free_cq()
/drivers/infiniband/hw/mana/
mr.c
137 mr->umem = ib_umem_get(ibdev, start, length, access_flags); in mana_ib_reg_user_mr()
138 if (IS_ERR(mr->umem)) { in mana_ib_reg_user_mr()
139 err = PTR_ERR(mr->umem); in mana_ib_reg_user_mr()
145 err = mana_ib_create_dma_region(dev, mr->umem, &dma_region_handle, iova); in mana_ib_reg_user_mr()
186 ib_umem_release(mr->umem); in mana_ib_reg_user_mr()
227 mr->umem = &umem_dmabuf->umem; in mana_ib_reg_user_mr_dmabuf()
229 err = mana_ib_create_dma_region(dev, mr->umem, &dma_region_handle, iova); in mana_ib_reg_user_mr_dmabuf()
259 ib_umem_release(mr->umem); in mana_ib_reg_user_mr_dmabuf()
311 if (mr->umem) in mana_ib_dereg_mr()
312 ib_umem_release(mr->umem); in mana_ib_dereg_mr()
main.c
267 struct ib_umem *umem; in mana_ib_create_queue() local
270 queue->umem = NULL; in mana_ib_create_queue()
275 if (IS_ERR(umem)) { in mana_ib_create_queue()
276 err = PTR_ERR(umem); in mana_ib_create_queue()
286 queue->umem = umem; in mana_ib_create_queue()
292 ib_umem_release(umem); in mana_ib_create_queue()
302 ib_umem_release(queue->umem); in mana_ib_destroy_queue()
392 num_pages_total = ib_umem_num_dma_blocks(umem, page_sz); in mana_ib_gd_create_dma_region()
410 create_req->length = umem->length; in mana_ib_gd_create_dma_region()
416 umem->length, num_pages_total); in mana_ib_gd_create_dma_region()
[all …]
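mana_ib_gd_create_dma_region() above sizes its requests with ib_umem_num_dma_blocks(), i.e. the number of page_sz-sized blocks the device must map for the umem. A small sketch of that sizing step, with a made-up count_device_blocks() helper and a hypothetical page-size bitmap standing in for whatever the real device advertises:

```c
#include <linux/errno.h>
#include <linux/sizes.h>
#include <rdma/ib_umem.h>

/* Hypothetical set of page sizes a device might support. */
#define EXAMPLE_PGSZ_BITMAP	(SZ_4K | SZ_64K | SZ_2M)

/* Pick the best supported page size for the umem, then count its DMA blocks. */
static long count_device_blocks(struct ib_umem *umem, u64 iova)
{
	unsigned long page_sz;

	page_sz = ib_umem_find_best_pgsz(umem, EXAMPLE_PGSZ_BITMAP, iova);
	if (!page_sz)
		return -EINVAL;

	return ib_umem_num_dma_blocks(umem, page_sz);
}
```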
/drivers/infiniband/hw/hns/
hns_roce_mr.c
571 if (mtr->umem) { in mtr_free_bufs()
572 ib_umem_release(mtr->umem); in mtr_free_bufs()
573 mtr->umem = NULL; in mtr_free_bufs()
596 if (IS_ERR(mtr->umem)) { in mtr_alloc_bufs()
598 PTR_ERR(mtr->umem)); in mtr_alloc_bufs()
602 mtr->umem = NULL; in mtr_alloc_bufs()
657 if (mtr->umem) in mtr_map_bufs()
862 if (mtr->umem) in get_best_hop_num()
929 cfg->buf_pg_count = mtr->umem ? in mtr_init_buf_cfg()
933 pgoff = mtr->umem ? mtr->umem->address & ~PAGE_MASK : 0; in mtr_init_buf_cfg()
[all …]
hns_roce_db.c
32 page->umem = ib_umem_get(context->ibucontext.device, page_addr, in hns_roce_db_map_user()
34 if (IS_ERR(page->umem)) { in hns_roce_db_map_user()
35 ret = PTR_ERR(page->umem); in hns_roce_db_map_user()
44 db->dma = sg_dma_address(page->umem->sgt_append.sgt.sgl) + offset; in hns_roce_db_map_user()
45 db->virt_addr = sg_virt(page->umem->sgt_append.sgt.sgl) + offset; in hns_roce_db_map_user()
63 ib_umem_release(db->u.user_page->umem); in hns_roce_db_unmap_user()
/drivers/vdpa/vdpa_user/
vduse_dev.c
1038 if (!dev->umem) in vduse_dev_dereg_umem()
1051 atomic64_sub(dev->umem->npages, &dev->umem->mm->pinned_vm); in vduse_dev_dereg_umem()
1054 kfree(dev->umem); in vduse_dev_dereg_umem()
1055 dev->umem = NULL; in vduse_dev_dereg_umem()
1078 if (dev->umem) in vduse_dev_reg_umem()
1085 umem = kzalloc(sizeof(*umem), GFP_KERNEL); in vduse_dev_reg_umem()
1115 dev->umem = umem; in vduse_dev_reg_umem()
1124 kfree(umem); in vduse_dev_reg_umem()
1333 if (copy_from_user(&umem, argp, sizeof(umem))) in vduse_dev_ioctl()
1343 umem.uaddr, umem.size); in vduse_dev_ioctl()
[all …]
/drivers/infiniband/sw/rdmavt/
mr.c
343 struct ib_umem *umem; in rvt_reg_user_mr() local
354 umem = ib_umem_get(pd->device, start, length, mr_access_flags); in rvt_reg_user_mr()
355 if (IS_ERR(umem)) in rvt_reg_user_mr()
356 return ERR_CAST(umem); in rvt_reg_user_mr()
358 n = ib_umem_num_pages(umem); in rvt_reg_user_mr()
369 mr->mr.offset = ib_umem_offset(umem); in rvt_reg_user_mr()
371 mr->umem = umem; in rvt_reg_user_mr()
376 for_each_sgtable_page (&umem->sgt_append.sgt, &sg_iter, 0) { in rvt_reg_user_mr()
398 ib_umem_release(umem); in rvt_reg_user_mr()
526 ib_umem_release(mr->umem); in rvt_dereg_mr()
[all …]
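rvt_reg_user_mr() above visits every pinned page through the umem's appended sg table with for_each_sgtable_page(). A stripped-down sketch of that walk; count_umem_pages() is a made-up helper, while the real code stores page_address() of each page into its MR map.

```c
#include <linux/scatterlist.h>
#include <rdma/ib_umem.h>

/* Visit each pinned page of a umem via its appended sg table. */
static unsigned long count_umem_pages(struct ib_umem *umem)
{
	struct sg_page_iter sg_iter;
	unsigned long n = 0;

	for_each_sgtable_page(&umem->sgt_append.sgt, &sg_iter, 0) {
		struct page *page = sg_page_iter_page(&sg_iter);

		(void)page;	/* a real driver would record this page */
		n++;
	}

	return n;	/* matches ib_umem_num_pages(umem) for a pinned umem */
}
```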
