Search results for refs:uctx (1 – 25 of 40, sorted by relevance). Per-file hit lists below are truncated, marked [all …].

/drivers/gpu/drm/vmwgfx/
vmwgfx_context.c
131 res = uctx->cotables[i]; in vmw_context_cotables_unref()
132 uctx->cotables[i] = NULL; in vmw_context_cotables_unref()
200 if (IS_ERR(uctx->man)) { in vmw_gb_context_init()
201 ret = PTR_ERR(uctx->man); in vmw_gb_context_init()
202 uctx->man = NULL; in vmw_gb_context_init()
208 if (IS_ERR(uctx->cbs)) { in vmw_gb_context_init()
209 ret = PTR_ERR(uctx->cbs); in vmw_gb_context_init()
220 &uctx->res, i); in vmw_gb_context_init()
604 if (uctx->dx_query_mob && uctx->dx_query_mob->dx_query_ctx && in vmw_dx_context_unbind()
876 if (uctx->dx_query_mob && uctx->dx_query_mob != mob) in vmw_context_bind_dx_query()
[all …]
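
The vmw_gb_context_init() hits above show the standard kernel error-pointer idiom: a constructor returns either a valid pointer or an ERR_PTR()-encoded errno, and the caller extracts the code and nulls the field before bailing out. A minimal sketch of that idiom; struct manager, struct ctx, and make_manager() are hypothetical names, not vmwgfx's:

#include <linux/err.h>

struct manager;                         /* hypothetical object type */
struct manager *make_manager(void);     /* returns a pointer or ERR_PTR(-errno) */

struct ctx {
	struct manager *man;            /* hypothetical, mirrors uctx->man above */
};

static int ctx_init(struct ctx *c)
{
	int ret;

	c->man = make_manager();
	if (IS_ERR(c->man)) {
		ret = PTR_ERR(c->man);  /* recover the negative errno */
		c->man = NULL;          /* never leave an ERR_PTR in the struct */
		return ret;
	}
	return 0;
}
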
/drivers/fwctl/
main.c
27 struct fwctl_uctx *uctx; member
186 ucmd.uctx = uctx; in fwctl_fops_ioctl()
202 if (!uctx->fwctl->ops) in fwctl_fops_ioctl()
219 if (!uctx) in fwctl_fops_open()
222 uctx->fwctl = fwctl; in fwctl_fops_open()
239 list_del(&uctx->uctx_list_entry); in fwctl_destroy_uctx()
240 uctx->fwctl->ops->close_uctx(uctx); in fwctl_destroy_uctx()
255 fwctl_destroy_uctx(uctx); in fwctl_fops_release()
259 kfree(uctx); in fwctl_fops_release()
374 struct fwctl_uctx *uctx; in fwctl_unregister() local
[all …]
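
Read together, these hits outline the fwctl core's per-open lifecycle: open allocates a uctx and points it at the device, and teardown unlinks it from the device list, lets the driver clean up via ops->close_uctx(), then frees it. A hedged sketch of that flow, with trimmed stand-in struct layouts (uctx_list is an assumed field name; uctx_list_entry and the ops hooks are visible above):

#include <linux/list.h>
#include <linux/slab.h>

struct fwctl_uctx;

struct fwctl_ops {
	int (*open_uctx)(struct fwctl_uctx *uctx);
	void (*close_uctx)(struct fwctl_uctx *uctx);
};

struct fwctl_device {
	const struct fwctl_ops *ops;
	struct list_head uctx_list;             /* assumed name for the per-device list */
};

struct fwctl_uctx {
	struct fwctl_device *fwctl;
	struct list_head uctx_list_entry;
};

static struct fwctl_uctx *uctx_open(struct fwctl_device *fwctl)
{
	struct fwctl_uctx *uctx = kzalloc(sizeof(*uctx), GFP_KERNEL);

	if (!uctx)
		return NULL;
	uctx->fwctl = fwctl;
	if (fwctl->ops->open_uctx(uctx)) {      /* driver hook */
		kfree(uctx);
		return NULL;
	}
	list_add(&uctx->uctx_list_entry, &fwctl->uctx_list);
	return uctx;
}

static void uctx_destroy(struct fwctl_uctx *uctx)
{
	list_del(&uctx->uctx_list_entry);       /* as in fwctl_destroy_uctx() above */
	uctx->fwctl->ops->close_uctx(uctx);     /* driver hook, then free */
	kfree(uctx);
}

In the real driver model the uctx is larger than struct fwctl_uctx (the mlx5 and pds blocks below embed it in driver structs), so the core presumably allocates a driver-specified size; this sketch ignores that.
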
/drivers/fwctl/mlx5/
main.c
19 struct fwctl_uctx uctx; member
94 void *uctx; in mlx5ctl_alloc_uid() local
98 uctx = MLX5_ADDR_OF(create_uctx_in, in, uctx); in mlx5ctl_alloc_uid()
102 MLX5_SET(uctx, uctx, cap, cap); in mlx5ctl_alloc_uid()
126 static int mlx5ctl_open_uctx(struct fwctl_uctx *uctx) in mlx5ctl_open_uctx() argument
129 container_of(uctx, struct mlx5ctl_uctx, uctx); in mlx5ctl_open_uctx()
131 container_of(uctx->fwctl, struct mlx5ctl_dev, fwctl); in mlx5ctl_open_uctx()
151 static void mlx5ctl_close_uctx(struct fwctl_uctx *uctx) in mlx5ctl_close_uctx() argument
156 container_of(uctx, struct mlx5ctl_uctx, uctx); in mlx5ctl_close_uctx()
164 container_of(uctx, struct mlx5ctl_uctx, uctx); in mlx5ctl_info()
[all …]
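
mlx5ctl embeds the core's struct fwctl_uctx inside its own context and recovers the outer struct with container_of(); the pds and cxl drivers below use the same embedding idiom. A minimal sketch, where the stand-in core struct and the uid field are assumptions (the MLX5_ADDR_OF()/MLX5_SET() hits above are the separate mlx5 device-interface accessors for building the create_uctx command):

#include <linux/container_of.h>
#include <linux/types.h>

struct fwctl_uctx { int unused; };      /* stand-in for the real core struct */

struct mlx5ctl_uctx {
	struct fwctl_uctx uctx;         /* core object embedded in the driver's */
	u32 uctx_uid;                   /* assumed driver-private field */
};

static struct mlx5ctl_uctx *to_mlx5ctl(struct fwctl_uctx *uctx)
{
	/* walk from the embedded member back to the containing struct */
	return container_of(uctx, struct mlx5ctl_uctx, uctx);
}
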
/drivers/scsi/qla2xxx/
qla_nvme.c
248 if (!uctx) { in qla_nvme_release_lsrsp_cmd_kref()
254 uctx->sp = NULL; in qla_nvme_release_lsrsp_cmd_kref()
263 kfree(uctx); in qla_nvme_release_lsrsp_cmd_kref()
395 uctx->sp = sp; in qla_nvme_xmt_ls_rsp()
416 uctx->ox_id, uctx->nport_handle); in qla_nvme_xmt_ls_rsp()
444 kfree(uctx); in qla_nvme_xmt_ls_rsp()
1200 kfree(uctx); in qla2xxx_process_purls_pkt()
1289 uctx = kzalloc(sizeof(*uctx), GFP_ATOMIC); in qla2xxx_process_purls_iocb()
1290 if (!uctx) { in qla2xxx_process_purls_iocb()
1299 uctx->vha = vha; in qla2xxx_process_purls_iocb()
[all …]
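
qla_nvme ties the LS-response context's lifetime to a kref: the PURLS IOCB path allocates it with GFP_ATOMIC (it can run in atomic context), and the final reference drop lands in a release callback that clears the back-pointer and frees it. A generic sketch of that pattern; the struct shape and helper names here are illustrative, not the driver's:

#include <linux/container_of.h>
#include <linux/kref.h>
#include <linux/slab.h>

struct ls_ctx {
	struct kref kref;
	void *sp;                       /* back-pointer, cleared before the free */
};

static void ls_ctx_release(struct kref *kref)
{
	struct ls_ctx *uctx = container_of(kref, struct ls_ctx, kref);

	uctx->sp = NULL;
	kfree(uctx);
}

static struct ls_ctx *ls_ctx_alloc(void)
{
	/* GFP_ATOMIC: this path may be entered from a context that cannot sleep */
	struct ls_ctx *uctx = kzalloc(sizeof(*uctx), GFP_ATOMIC);

	if (uctx)
		kref_init(&uctx->kref);
	return uctx;
}

/* dropping a reference: kref_put(&uctx->kref, ls_ctx_release); */
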
/drivers/infiniband/hw/cxgb4/
resource.c
114 mutex_lock(&uctx->lock); in c4iw_get_cqid()
115 if (!list_empty(&uctx->cqids)) { in c4iw_get_cqid()
154 mutex_unlock(&uctx->lock); in c4iw_get_cqid()
164 struct c4iw_dev_ucontext *uctx) in c4iw_put_cqid() argument
173 mutex_lock(&uctx->lock); in c4iw_put_cqid()
175 mutex_unlock(&uctx->lock); in c4iw_put_cqid()
184 mutex_lock(&uctx->lock); in c4iw_get_qpid()
185 if (!list_empty(&uctx->qpids)) { in c4iw_get_qpid()
228 mutex_unlock(&uctx->lock); in c4iw_get_qpid()
247 mutex_lock(&uctx->lock); in c4iw_put_qpid()
[all …]
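
c4iw_get_cqid()/c4iw_put_cqid() (and the qpid twins) implement a per-ucontext free list: get pops a recycled ID under uctx->lock when one is available, and put pushes the ID back rather than returning it to the shared pool. A simplified sketch of that list discipline; the entry layout and the hardware-pool fallback are assumptions, and the real driver carves IDs out of hardware blocks:

#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/types.h>

struct id_entry {                       /* assumed entry layout */
	struct list_head entry;
	u32 qid;
};

struct dev_ucontext {
	struct mutex lock;
	struct list_head cqids;         /* recycled CQ IDs */
};

u32 hw_alloc_cqid(void);                /* hypothetical fallback allocator */

static u32 get_cqid(struct dev_ucontext *uctx)
{
	struct id_entry *e;
	u32 qid;

	mutex_lock(&uctx->lock);
	if (!list_empty(&uctx->cqids)) {
		e = list_first_entry(&uctx->cqids, struct id_entry, entry);
		list_del(&e->entry);
		qid = e->qid;
		kfree(e);
	} else {
		qid = hw_alloc_cqid();  /* fall back to the global pool */
	}
	mutex_unlock(&uctx->lock);
	return qid;
}

static void put_cqid(struct dev_ucontext *uctx, u32 qid)
{
	struct id_entry *e = kmalloc(sizeof(*e), GFP_KERNEL);

	if (!e)
		return;                 /* on ENOMEM the ID simply isn't recycled */
	e->qid = qid;
	mutex_lock(&uctx->lock);
	list_add_tail(&e->entry, &uctx->cqids);
	mutex_unlock(&uctx->lock);
}
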
cq.c
38 struct c4iw_dev_ucontext *uctx, struct sk_buff *skb, in destroy_cq() argument
67 c4iw_put_cqid(rdev, cq->cqid, uctx); in destroy_cq()
71 struct c4iw_dev_ucontext *uctx, in create_cq() argument
77 int user = (uctx != &rdev->uctx); in create_cq()
83 ucontext = container_of(uctx, struct c4iw_ucontext, uctx); in create_cq()
85 cq->cqid = c4iw_get_cqid(rdev, uctx); in create_cq()
179 c4iw_put_cqid(rdev, cq->cqid, uctx); in create_cq()
991 ucontext ? &ucontext->uctx : &chp->cq.rdev->uctx, in c4iw_destroy_cq()
1080 ucontext ? &ucontext->uctx : &rhp->rdev.uctx, in c4iw_create_cq()
1157 ucontext ? &ucontext->uctx : &rhp->rdev.uctx, in c4iw_create_cq()
iw_cxgb4.h
175 struct c4iw_dev_ucontext uctx; member
523 struct c4iw_dev_ucontext uctx; member
970 struct c4iw_dev_ucontext *uctx);
987 struct c4iw_dev_ucontext *uctx);
989 struct c4iw_dev_ucontext *uctx);
1044 u32 c4iw_get_cqid(struct c4iw_rdev *rdev, struct c4iw_dev_ucontext *uctx);
1046 struct c4iw_dev_ucontext *uctx);
1047 u32 c4iw_get_qpid(struct c4iw_rdev *rdev, struct c4iw_dev_ucontext *uctx);
1049 struct c4iw_dev_ucontext *uctx);
qp.c
159 c4iw_put_qpid(rdev, wq->sq.qid, uctx); in destroy_qp()
167 c4iw_put_qpid(rdev, wq->rq.qid, uctx); in destroy_qp()
201 struct c4iw_dev_ucontext *uctx, in create_qp() argument
205 int user = (uctx != &rdev->uctx); in create_qp()
2111 ucontext ? &ucontext->uctx : &rhp->rdev.uctx, !qhp->srq); in c4iw_destroy_qp()
2185 ucontext ? &ucontext->uctx : &rhp->rdev.uctx, in c4iw_create_qp()
2377 ucontext ? &ucontext->uctx : &rhp->rdev.uctx, !attrs->srq); in c4iw_create_qp()
2541 int user = (uctx != &rdev->uctx); in alloc_srq_queue()
2755 &rhp->rdev.uctx, srq->wr_waitp); in c4iw_create_srq()
2819 free_srq_queue(srq, ucontext ? &ucontext->uctx : &rhp->rdev.uctx, in c4iw_create_srq()
[all …]
device.c
748 struct c4iw_dev_ucontext *uctx) in c4iw_release_dev_ucontext() argument
753 mutex_lock(&uctx->lock); in c4iw_release_dev_ucontext()
754 list_for_each_safe(pos, nxt, &uctx->qpids) { in c4iw_release_dev_ucontext()
767 list_for_each_safe(pos, nxt, &uctx->cqids) { in c4iw_release_dev_ucontext()
772 mutex_unlock(&uctx->lock); in c4iw_release_dev_ucontext()
776 struct c4iw_dev_ucontext *uctx) in c4iw_init_dev_ucontext() argument
778 INIT_LIST_HEAD(&uctx->qpids); in c4iw_init_dev_ucontext()
779 INIT_LIST_HEAD(&uctx->cqids); in c4iw_init_dev_ucontext()
780 mutex_init(&uctx->lock); in c4iw_init_dev_ucontext()
789 c4iw_init_dev_ucontext(rdev, &rdev->uctx); in c4iw_rdev_open()
[all …]
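
c4iw_init_dev_ucontext() and c4iw_release_dev_ucontext() bracket the structure's lifetime: plain list and mutex init on open, then a _safe list walk on release because each entry is freed while iterating. A condensed sketch, reusing the assumed entry layout from the resource.c sketch above:

#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/types.h>

struct id_entry {                       /* assumed entry layout */
	struct list_head entry;
	u32 qid;
};

struct dev_ucontext {
	struct mutex lock;
	struct list_head qpids;
	struct list_head cqids;
};

static void init_dev_ucontext(struct dev_ucontext *uctx)
{
	INIT_LIST_HEAD(&uctx->qpids);
	INIT_LIST_HEAD(&uctx->cqids);
	mutex_init(&uctx->lock);
}

static void release_dev_ucontext(struct dev_ucontext *uctx)
{
	struct id_entry *e, *tmp;

	mutex_lock(&uctx->lock);
	/* _safe variants: e is freed inside each loop body */
	list_for_each_entry_safe(e, tmp, &uctx->qpids, entry) {
		list_del(&e->entry);
		kfree(e);
	}
	list_for_each_entry_safe(e, tmp, &uctx->cqids, entry) {
		list_del(&e->entry);
		kfree(e);
	}
	mutex_unlock(&uctx->lock);
}
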
/drivers/infiniband/hw/ocrdma/
ocrdma_verbs.c
418 status = _ocrdma_alloc_pd(dev, uctx->cntxt_pd, uctx, udata); in ocrdma_alloc_ucontext_pd()
424 uctx->cntxt_pd->uctx = uctx; in ocrdma_alloc_ucontext_pd()
451 pd = uctx->cntxt_pd; in ocrdma_get_ucontext_pd()
529 ocrdma_del_mmap(uctx, uctx->ah_tbl.pa, uctx->ah_tbl.len); in ocrdma_dealloc_ucontext()
530 dma_free_coherent(&pdev->dev, uctx->ah_tbl.len, uctx->ah_tbl.va, in ocrdma_dealloc_ucontext()
618 pd->uctx = uctx; in ocrdma_copy_pd_uresp()
676 uctx = pd->uctx; in ocrdma_dealloc_pd()
677 if (uctx) { in ocrdma_dealloc_pd()
1715 if (!pd->uctx) { in ocrdma_destroy_qp()
1721 if (pd->uctx) { in ocrdma_destroy_qp()
[all …]
ocrdma_verbs.h
64 int ocrdma_alloc_ucontext(struct ib_ucontext *uctx, struct ib_udata *udata);
65 void ocrdma_dealloc_ucontext(struct ib_ucontext *uctx);
ocrdma_ah.c
196 if ((pd->uctx) && (pd->uctx->ah_tbl.va)) { in ocrdma_create_ah()
197 ahid_addr = pd->uctx->ah_tbl.va + rdma_ah_get_dlid(attr); in ocrdma_create_ah()
/drivers/fwctl/pds/
main.c
20 struct fwctl_uctx uctx; member
41 static int pdsfc_open_uctx(struct fwctl_uctx *uctx) in pdsfc_open_uctx() argument
43 struct pdsfc_dev *pdsfc = container_of(uctx->fwctl, struct pdsfc_dev, fwctl); in pdsfc_open_uctx()
44 struct pdsfc_uctx *pdsfc_uctx = container_of(uctx, struct pdsfc_uctx, uctx); in pdsfc_open_uctx()
51 static void pdsfc_close_uctx(struct fwctl_uctx *uctx) in pdsfc_close_uctx() argument
55 static void *pdsfc_info(struct fwctl_uctx *uctx, size_t *length) in pdsfc_info() argument
57 struct pdsfc_uctx *pdsfc_uctx = container_of(uctx, struct pdsfc_uctx, uctx); in pdsfc_info()
349 static void *pdsfc_fw_rpc(struct fwctl_uctx *uctx, enum fwctl_rpc_scope scope, in pdsfc_fw_rpc() argument
352 struct pdsfc_dev *pdsfc = container_of(uctx->fwctl, struct pdsfc_dev, fwctl); in pdsfc_fw_rpc()
353 struct device *dev = &uctx->fwctl->dev; in pdsfc_fw_rpc()
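pdsfc_info() serves the fwctl core's info query. Judging from the signature above, the op returns a heap buffer and reports its size through *length; the sketch below assumes the core copies that buffer to userspace and kfree()s it afterwards, and the caps field name is invented:

#include <linux/container_of.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/types.h>

struct fwctl_uctx { int unused; };      /* stand-in, as before */

struct pdsfc_uctx {
	struct fwctl_uctx uctx;
	u32 uctx_caps;                  /* assumed field name */
};

static void *example_info(struct fwctl_uctx *uctx, size_t *length)
{
	struct pdsfc_uctx *p = container_of(uctx, struct pdsfc_uctx, uctx);
	u32 *info;

	info = kmemdup(&p->uctx_caps, sizeof(p->uctx_caps), GFP_KERNEL);
	if (!info)
		return ERR_PTR(-ENOMEM);
	*length = sizeof(p->uctx_caps); /* size reported back to the core */
	return info;
}
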
/drivers/infiniband/hw/bnxt_re/
ib_verbs.c
640 entry->uctx = uctx; in bnxt_re_mmap_entry_insert()
4308 uctx->rdev = rdev; in bnxt_re_alloc_ucontext()
4311 if (!uctx->shpg) { in bnxt_re_alloc_ucontext()
4372 uctx->shpg = NULL; in bnxt_re_alloc_ucontext()
4387 if (uctx->shpg) in bnxt_re_dealloc_ucontext()
4390 if (uctx->dpi.dbr) { in bnxt_re_dealloc_ucontext()
4553 if (IS_ERR(uctx)) in UVERBS_HANDLER()
4560 rdev = uctx->rdev; in UVERBS_HANDLER()
4623 struct bnxt_re_ucontext *uctx = entry->uctx; in alloc_page_obj_cleanup() local
4627 if (uctx && uctx->wcdpi.dbr) { in alloc_page_obj_cleanup()
[all …]
ib_verbs.h
162 struct bnxt_re_ucontext *uctx; member
182 static inline u32 bnxt_re_init_depth(u32 ent, struct bnxt_re_ucontext *uctx) in bnxt_re_init_depth() argument
184 return uctx ? (uctx->cmask & BNXT_RE_UCNTX_CAP_POW2_DISABLED) ? in bnxt_re_init_depth()
189 struct bnxt_re_ucontext *uctx) in bnxt_re_is_var_size_supported() argument
191 if (uctx) in bnxt_re_is_var_size_supported()
192 return uctx->cmask & BNXT_RE_UCNTX_CAP_VAR_WQE_ENABLED; in bnxt_re_is_var_size_supported()
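
Both inline helpers key off capability bits negotiated into uctx->cmask at alloc_ucontext time. bnxt_re_init_depth()'s nested ternary is truncated above; spelled out flat, with the flag value and the no-ucontext fallback assumed:

#include <linux/log2.h>
#include <linux/types.h>

#define BNXT_RE_UCNTX_CAP_POW2_DISABLED 0x1     /* bit value assumed */

struct bnxt_re_ucontext { u32 cmask; };         /* trimmed stand-in */

static inline u32 init_depth(u32 ent, struct bnxt_re_ucontext *uctx)
{
	/* Userspace that negotiated POW2_DISABLED gets its requested depth
	 * verbatim; everyone else gets the next power of two. */
	if (uctx && (uctx->cmask & BNXT_RE_UCNTX_CAP_POW2_DISABLED))
		return ent;
	return roundup_pow_of_two(ent);
}
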
/drivers/infiniband/hw/hns/
hns_roce_main.c
350 static int hns_roce_alloc_uar_entry(struct ib_ucontext *uctx) in hns_roce_alloc_uar_entry() argument
352 struct hns_roce_ucontext *context = to_hr_ucontext(uctx); in hns_roce_alloc_uar_entry()
357 uctx, address, PAGE_SIZE, HNS_ROCE_MMAP_TYPE_DB); in hns_roce_alloc_uar_entry()
364 static int hns_roce_alloc_ucontext(struct ib_ucontext *uctx, in hns_roce_alloc_ucontext() argument
367 struct hns_roce_ucontext *context = to_hr_ucontext(uctx); in hns_roce_alloc_ucontext()
368 struct hns_roce_dev *hr_dev = to_hr_dev(uctx->device); in hns_roce_alloc_ucontext()
411 ret = hns_roce_alloc_uar_entry(uctx); in hns_roce_alloc_ucontext()
459 static int hns_roce_mmap(struct ib_ucontext *uctx, struct vm_area_struct *vma) in hns_roce_mmap() argument
461 struct hns_roce_dev *hr_dev = to_hr_dev(uctx->device); in hns_roce_mmap()
473 rdma_entry = rdma_user_mmap_entry_get_pgoff(uctx, vma->vm_pgoff); in hns_roce_mmap()
[all …]
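
hns_roce_alloc_uar_entry() inserts an rdma_user_mmap_entry at context-alloc time, and hns_roce_mmap() later resolves vm_pgoff back to that entry. A hedged sketch of the lookup half, using the core rdma_user_mmap_* API with a hypothetical wrapper struct:

#include <linux/pfn.h>
#include <rdma/ib_verbs.h>

struct my_mmap_entry {                          /* hypothetical wrapper */
	struct rdma_user_mmap_entry rdma_entry;
	u64 address;                            /* physical address to expose */
};

static int example_mmap(struct ib_ucontext *uctx, struct vm_area_struct *vma)
{
	struct rdma_user_mmap_entry *rdma_entry;
	struct my_mmap_entry *e;
	int ret;

	rdma_entry = rdma_user_mmap_entry_get_pgoff(uctx, vma->vm_pgoff);
	if (!rdma_entry)
		return -EINVAL;

	e = container_of(rdma_entry, struct my_mmap_entry, rdma_entry);
	ret = rdma_user_mmap_io(uctx, vma, PFN_DOWN(e->address), PAGE_SIZE,
				pgprot_noncached(vma->vm_page_prot),
				rdma_entry);
	rdma_user_mmap_entry_put(rdma_entry);   /* drop the lookup reference */
	return ret;
}
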
hns_roce_cq.c
229 struct hns_roce_ucontext *uctx; in alloc_cq_db() local
235 uctx = rdma_udata_to_drv_context(udata, in alloc_cq_db()
237 err = hns_roce_db_map_user(uctx, addr, &hr_cq->db); in alloc_cq_db()
262 struct hns_roce_ucontext *uctx; in free_cq_db() local
269 uctx = rdma_udata_to_drv_context(udata, in free_cq_db()
272 hns_roce_db_unmap_user(uctx, &hr_cq->db); in free_cq_db()
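
alloc_cq_db()/free_cq_db() recover the driver ucontext from the udata of the in-flight verbs call via rdma_udata_to_drv_context(), which resolves the attached ib_ucontext and container_of()s it into the driver type. A minimal sketch of the call shape; the stand-in struct keeps only the embedded member:

#include <rdma/uverbs_ioctl.h>

struct hns_roce_ucontext {
	struct ib_ucontext ibucontext;  /* stand-in; the real struct is larger */
};

static struct hns_roce_ucontext *get_uctx(struct ib_udata *udata)
{
	/* a NULL/empty udata (kernel consumer) yields a NULL driver context */
	return rdma_udata_to_drv_context(udata, struct hns_roce_ucontext,
					 ibucontext);
}
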
hns_roce_srq.c
409 struct hns_roce_ucontext *uctx; in free_srq_db() local
416 uctx = rdma_udata_to_drv_context(udata, in free_srq_db()
419 hns_roce_db_unmap_user(uctx, &srq->rdb); in free_srq_db()
430 struct hns_roce_ucontext *uctx; in alloc_srq_db() local
440 uctx = rdma_udata_to_drv_context(udata, in alloc_srq_db()
442 ret = hns_roce_db_map_user(uctx, ucmd.db_addr, in alloc_srq_db()
hns_roce_qp.c
840 struct hns_roce_ucontext *uctx = in qp_mmap_entry() local
849 hns_roce_user_mmap_entry_insert(&uctx->ibucontext, address, in qp_mmap_entry()
872 struct hns_roce_ucontext *uctx = rdma_udata_to_drv_context(udata, in alloc_user_qp_db() local
879 ret = hns_roce_db_map_user(uctx, ucmd->sdb_addr, &hr_qp->sdb); in alloc_user_qp_db()
890 ret = hns_roce_db_map_user(uctx, ucmd->db_addr, &hr_qp->rdb); in alloc_user_qp_db()
904 hns_roce_db_unmap_user(uctx, &hr_qp->sdb); in alloc_user_qp_db()
981 struct hns_roce_ucontext *uctx = rdma_udata_to_drv_context( in free_qp_db() local
986 hns_roce_db_unmap_user(uctx, &hr_qp->rdb); in free_qp_db()
988 hns_roce_db_unmap_user(uctx, &hr_qp->sdb); in free_qp_db()
1095 struct hns_roce_ucontext *uctx; in set_qp_param() local
[all …]
/drivers/infiniband/hw/erdma/
erdma_verbs.c
118 if (uctx->ext_db.enable) { in create_qp_cmd()
123 uctx->ext_db.sdb_off) | in create_qp_cmd()
125 uctx->ext_db.rdb_off); in create_qp_cmd()
243 if (uctx->ext_db.enable) { in create_cq_cmd()
247 uctx->ext_db.cdb_off); in create_cq_cmd()
937 ret = erdma_map_user_dbrecords(uctx, dbrec_va, in init_user_qp()
1012 if (uctx) { in erdma_create_qp()
1047 ret = create_qp_cmd(uctx, qp); in erdma_create_qp()
1056 if (uctx) in erdma_create_qp()
1057 free_user_qp(qp, uctx); in erdma_create_qp()
[all …]
/drivers/infiniband/sw/siw/
siw_verbs.c
56 struct siw_ucontext *uctx = to_siw_ctx(ctx); in siw_mmap() local
124 struct siw_ucontext *uctx = to_siw_ctx(base_ctx); in siw_dealloc_ucontext() local
126 atomic_dec(&uctx->sdev->num_ctx); in siw_dealloc_ucontext()
273 siw_mmap_entry_insert(struct siw_ucontext *uctx, in siw_mmap_entry_insert() argument
286 rv = rdma_user_mmap_entry_insert(&uctx->base_ucontext, in siw_mmap_entry_insert()
316 struct siw_ucontext *uctx = in siw_create_qp() local
451 siw_mmap_entry_insert(uctx, qp->sendq, in siw_create_qp()
462 siw_mmap_entry_insert(uctx, qp->recvq, in siw_create_qp()
495 if (uctx) { in siw_create_qp()
604 struct siw_ucontext *uctx = in siw_destroy_qp() local
[all …]
/drivers/infiniband/hw/qedr/
verbs.h
45 int qedr_alloc_ucontext(struct ib_ucontext *uctx, struct ib_udata *udata);
46 void qedr_dealloc_ucontext(struct ib_ucontext *uctx);
/drivers/infiniband/hw/usnic/
usnic_ib_verbs.c
637 int usnic_ib_alloc_ucontext(struct ib_ucontext *uctx, struct ib_udata *udata) in usnic_ib_alloc_ucontext() argument
639 struct ib_device *ibdev = uctx->device; in usnic_ib_alloc_ucontext()
640 struct usnic_ib_ucontext *context = to_ucontext(uctx); in usnic_ib_alloc_ucontext()
667 struct usnic_ib_ucontext *uctx = to_ucontext(context); in usnic_ib_mmap() local
686 list_for_each_entry(qp_grp, &uctx->qp_grp_list, link) { in usnic_ib_mmap()
/drivers/cxl/core/
features.c
359 static int cxlctl_open_uctx(struct fwctl_uctx *uctx) in cxlctl_open_uctx() argument
364 static void cxlctl_close_uctx(struct fwctl_uctx *uctx) in cxlctl_close_uctx() argument
642 static void *cxlctl_fw_rpc(struct fwctl_uctx *uctx, enum fwctl_rpc_scope scope, in cxlctl_fw_rpc() argument
645 struct fwctl_device *fwctl_dev = uctx->fwctl; in cxlctl_fw_rpc()
/drivers/infiniband/hw/vmw_pvrdma/
pvrdma_verbs.c
285 int pvrdma_alloc_ucontext(struct ib_ucontext *uctx, struct ib_udata *udata) in pvrdma_alloc_ucontext() argument
287 struct ib_device *ibdev = uctx->device; in pvrdma_alloc_ucontext()
289 struct pvrdma_ucontext *context = to_vucontext(uctx); in pvrdma_alloc_ucontext()

Completed in 88 milliseconds
