Searched refs:ibcq (Results 1 – 25 of 67) sorted by relevance

/linux/drivers/infiniband/sw/rdmavt/
cq.c
56 head = cq->ibcq.cqe; in rvt_cq_enter()
69 if (cq->ibcq.event_handler) { in rvt_cq_enter()
73 ev.element.cq = &cq->ibcq; in rvt_cq_enter()
75 cq->ibcq.event_handler(&ev, cq->ibcq.cq_context); in rvt_cq_enter()
141 cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context); in send_complete()
251 cq->ibcq.cqe = entries; in rvt_create_cq()
396 head = (u32)cq->ibcq.cqe; in rvt_resize_cq()
398 tail = (u32)cq->ibcq.cqe; in rvt_resize_cq()
417 cq->ibcq.cqe = cqe; in rvt_resize_cq()
495 tail = (u32)cq->ibcq.cqe; in rvt_poll_cq()
[all …]
cq.h
12 int rvt_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
14 int rvt_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata);
15 int rvt_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags notify_flags);
16 int rvt_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata);
17 int rvt_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry);
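
Every driver in these results uses the same embedding idiom: the core's struct ib_cq sits inside a driver-private CQ struct, and container_of() recovers the container from the ib_cq pointer the core passes in (rvt_create_cq() and friends above all start this way). A minimal userspace sketch of the idiom, reusing the rdmavt names but with stubbed types; the real definitions live in include/rdma/ib_verbs.h and include/rdma/rdmavt_cq.h:

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct ib_cq { int cqe; };              /* stub of the core CQ */

struct rvt_cq {                         /* stub of the driver CQ */
        struct ib_cq ibcq;              /* embedded core object */
        int driver_private;
};

static struct rvt_cq *ibcq_to_rvtcq(struct ib_cq *ibcq)
{
        return container_of(ibcq, struct rvt_cq, ibcq);
}

int main(void)
{
        struct rvt_cq cq = { .ibcq = { .cqe = 255 }, .driver_private = 42 };

        /* the core only ever sees &cq.ibcq; the driver converts back */
        struct rvt_cq *back = ibcq_to_rvtcq(&cq.ibcq);
        printf("private = %d\n", back->driver_private);   /* prints 42 */
        return 0;
}
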
/linux/drivers/infiniband/hw/mlx4/
cq.c
45 struct ib_cq *ibcq = &to_mibcq(cq)->ibcq; in mlx4_ib_cq_comp() local
46 ibcq->comp_handler(ibcq, ibcq->cq_context); in mlx4_ib_cq_comp()
52 struct ib_cq *ibcq; in mlx4_ib_cq_event() local
60 ibcq = &to_mibcq(cq)->ibcq; in mlx4_ib_cq_event()
61 if (ibcq->event_handler) { in mlx4_ib_cq_event()
64 event.element.cq = ibcq; in mlx4_ib_cq_event()
65 ibcq->event_handler(&event, ibcq->cq_context); in mlx4_ib_cq_event()
405 if (ibcq->uobject) { in mlx4_ib_resize_cq()
429 if (ibcq->uobject) { in mlx4_ib_resize_cq()
446 tmp_cqe = cq->ibcq.cqe; in mlx4_ib_resize_cq()
[all …]
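
The mlx4 hits show the two consumer callbacks stored on struct ib_cq: comp_handler fires on work completions, event_handler on asynchronous CQ events. A hedged sketch of that dispatch with stub types; the event constant is a placeholder for a real enum ib_event_type value:

#include <stdio.h>

struct ib_cq;

struct ib_event {
        struct ib_cq *cq;               /* stands in for event.element.cq */
        int type;                       /* stands in for enum ib_event_type */
};

struct ib_cq {
        void (*comp_handler)(struct ib_cq *cq, void *cq_context);
        void (*event_handler)(struct ib_event *ev, void *cq_context);
        void *cq_context;
};

/* the shape of mlx4_ib_cq_comp(): unconditional completion dispatch */
static void cq_comp(struct ib_cq *ibcq)
{
        ibcq->comp_handler(ibcq, ibcq->cq_context);
}

/* the shape of mlx4_ib_cq_event(): the async handler is optional */
static void cq_event(struct ib_cq *ibcq, int type)
{
        if (ibcq->event_handler) {
                struct ib_event ev = { .cq = ibcq, .type = type };
                ibcq->event_handler(&ev, ibcq->cq_context);
        }
}

static void my_comp(struct ib_cq *cq, void *ctx)
{
        (void)cq; (void)ctx;
        puts("completion");
}

int main(void)
{
        struct ib_cq cq = { .comp_handler = my_comp };
        cq_comp(&cq);                   /* prints "completion" */
        cq_event(&cq, 1);               /* no event_handler set: skipped */
        return 0;
}
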
/linux/drivers/infiniband/hw/mana/
cq.c
8 int mana_ib_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr, in mana_ib_create_cq() argument
12 struct mana_ib_cq *cq = container_of(ibcq, struct mana_ib_cq, ibcq); in mana_ib_create_cq()
15 struct ib_device *ibdev = ibcq->device; in mana_ib_create_cq()
88 int mana_ib_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata) in mana_ib_destroy_cq() argument
90 struct mana_ib_cq *cq = container_of(ibcq, struct mana_ib_cq, ibcq); in mana_ib_destroy_cq()
91 struct ib_device *ibdev = ibcq->device; in mana_ib_destroy_cq()
112 if (cq->ibcq.comp_handler) in mana_ib_cq_handler()
113 cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context); in mana_ib_cq_handler()
qp.c
108 struct ib_cq *ibcq; in mana_ib_create_qp_rss() local
183 ibcq = ibwq->cq; in mana_ib_create_qp_rss()
184 cq = container_of(ibcq, struct mana_ib_cq, ibcq); in mana_ib_create_qp_rss()
249 ibcq = ibwq->cq; in mana_ib_create_qp_rss()
251 cq = container_of(ibcq, struct mana_ib_cq, ibcq); in mana_ib_create_qp_rss()
271 container_of(attr->send_cq, struct mana_ib_cq, ibcq); in mana_ib_create_qp_raw()
/linux/drivers/infiniband/hw/vmw_pvrdma/
pvrdma_cq.c
67 struct pvrdma_cq *cq = to_vcq(ibcq); in pvrdma_req_notify_cq()
83 cq->ibcq.cqe, &head); in pvrdma_req_notify_cq()
133 cq->ibcq.cqe = entries; in pvrdma_create_cq()
196 cq->ibcq.cqe = resp->cqe; in pvrdma_create_cq()
289 cq->ibcq.cqe, &head); in _pvrdma_flush_cqe()
294 cq->ibcq.cqe); in _pvrdma_flush_cqe()
299 (cq->ibcq.cqe - head + tail); in _pvrdma_flush_cqe()
303 curr = cq->ibcq.cqe - 1; in _pvrdma_flush_cqe()
305 tail = cq->ibcq.cqe - 1; in _pvrdma_flush_cqe()
316 cq->ibcq.cqe); in _pvrdma_flush_cqe()
[all …]
pvrdma_main.c
186 INIT_RDMA_OBJ_SIZE(ib_cq, pvrdma_cq, ibcq),
321 if (cq && cq->ibcq.event_handler) { in pvrdma_cq_event()
322 struct ib_cq *ibcq = &cq->ibcq; in pvrdma_cq_event() local
325 e.device = ibcq->device; in pvrdma_cq_event()
326 e.element.cq = ibcq; in pvrdma_cq_event()
328 ibcq->event_handler(&e, ibcq->cq_context); in pvrdma_cq_event()
492 if (cq && cq->ibcq.comp_handler) in pvrdma_intrx_handler()
493 cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context); in pvrdma_intrx_handler()
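
The pvrdma flush path above works on head/tail indices that wrap modulo the ring size kept in ibcq.cqe. A simplified sketch of that wrap-around arithmetic; the real driver also consults shared ring state and an overflow flag, omitted here:

#include <stdio.h>

/* entries currently queued between head and tail, handling wrap */
static unsigned int cq_used(unsigned int head, unsigned int tail,
                            unsigned int cqe)
{
        return (tail >= head) ? tail - head : cqe - head + tail;
}

int main(void)
{
        unsigned int cqe = 256, head = 250, tail = 10;

        printf("used = %u\n", cq_used(head, tail, cqe));  /* 16 */

        /* advancing an index wraps modulo the ring size */
        tail = (tail + 1) % cqe;
        printf("tail = %u\n", tail);                      /* 11 */
        return 0;
}
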
/linux/drivers/infiniband/sw/rxe/
rxe_cq.c
68 cq->ibcq.cqe = cqe; in rxe_cq_from_init()
82 cq->ibcq.cqe = cqe; in rxe_cq_resize_queue()
101 if (cq->ibcq.event_handler) { in rxe_cq_post()
102 ev.device = cq->ibcq.device; in rxe_cq_post()
103 ev.element.cq = &cq->ibcq; in rxe_cq_post()
105 cq->ibcq.event_handler(&ev, cq->ibcq.cq_context); in rxe_cq_post()
119 cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context); in rxe_cq_post()
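
rxe_req_notify_cq() and rxe_cq_post() together implement one-shot completion notification: an armed CQ fires comp_handler on the next posted completion and stays quiet until re-armed. A simplified model of that flow, without the spinlock the real driver holds around this state:

#include <stdbool.h>
#include <stdio.h>

struct cq {
        bool armed;
        void (*comp_handler)(struct cq *cq);
};

static void req_notify(struct cq *cq)
{
        cq->armed = true;
}

static void cq_post(struct cq *cq)
{
        /* ...the work completion would be queued into the ring here... */
        if (cq->armed && cq->comp_handler) {
                cq->armed = false;      /* one-shot: consume the arm */
                cq->comp_handler(cq);
        }
}

static void on_comp(struct cq *cq)
{
        (void)cq;
        puts("comp_handler fired");
}

int main(void)
{
        struct cq cq = { .comp_handler = on_comp };

        cq_post(&cq);                   /* not armed: silent */
        req_notify(&cq);
        cq_post(&cq);                   /* armed: fires once */
        cq_post(&cq);                   /* silent again until re-armed */
        return 0;
}
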
rxe_verbs.c
1059 struct ib_device *dev = ibcq->device; in rxe_create_cq()
1061 struct rxe_cq *cq = to_rcq(ibcq); in rxe_create_cq()
1112 struct rxe_cq *cq = to_rcq(ibcq); in rxe_resize_cq()
1113 struct rxe_dev *rxe = to_rdev(ibcq->device); in rxe_resize_cq()
1148 struct rxe_cq *cq = to_rcq(ibcq); in rxe_poll_cq()
1166 static int rxe_peek_cq(struct ib_cq *ibcq, int wc_cnt) in rxe_peek_cq() argument
1168 struct rxe_cq *cq = to_rcq(ibcq); in rxe_peek_cq()
1178 struct rxe_cq *cq = to_rcq(ibcq); in rxe_req_notify_cq()
1195 static int rxe_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata) in rxe_destroy_cq() argument
1197 struct rxe_cq *cq = to_rcq(ibcq); in rxe_destroy_cq()
[all …]
/linux/drivers/infiniband/hw/mthca/
mthca_cq.c
230 cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context); in mthca_cq_completion()
254 event.element.cq = &cq->ibcq; in mthca_cq_event()
255 if (cq->ibcq.event_handler) in mthca_cq_event()
256 cq->ibcq.event_handler(&event, cq->ibcq.cq_context); in mthca_cq_event()
337 cq->cons_index &= cq->ibcq.cqe; in mthca_cq_resize_copy_cqes()
339 cq->cons_index -= cq->ibcq.cqe + 1; in mthca_cq_resize_copy_cqes()
658 struct mthca_cq *cq = to_mcq(ibcq); in mthca_poll_cq()
698 cq->cons_index &= cq->ibcq.cqe; in mthca_poll_cq()
706 tcqe = cq->ibcq.cqe; in mthca_poll_cq()
738 struct mthca_cq *cq = to_mcq(ibcq); in mthca_arbel_arm_cq()
[all …]
mthca_provider.h
184 struct ib_cq ibcq; member
301 static inline struct mthca_cq *to_mcq(struct ib_cq *ibcq) in to_mcq() argument
303 return container_of(ibcq, struct mthca_cq, ibcq); in to_mcq()
mthca_provider.c
575 static int mthca_create_cq(struct ib_cq *ibcq, in mthca_create_cq() argument
580 struct ib_device *ibdev = ibcq->device; in mthca_create_cq()
612 cq = to_mcq(ibcq); in mthca_create_cq()
697 static int mthca_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata) in mthca_resize_cq() argument
699 struct mthca_dev *dev = to_mdev(ibcq->device); in mthca_resize_cq()
700 struct mthca_cq *cq = to_mcq(ibcq); in mthca_resize_cq()
711 if (entries == ibcq->cqe + 1) { in mthca_resize_cq()
751 tcqe = cq->ibcq.cqe; in mthca_resize_cq()
753 cq->ibcq.cqe = cq->resize_buf->cqe; in mthca_resize_cq()
765 ibcq->cqe = entries - 1; in mthca_resize_cq()
[all …]
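
The assignments cq->ibcq.cqe = entries - 1 in the mthca and mlx5 hits reflect a common sizing convention: the hardware ring is a power of two and ibcq.cqe reports one less than the ring size, which is also why mthca_resize_cq() treats entries == ibcq->cqe + 1 as a no-op. A sketch of the arithmetic, with a small helper standing in for the kernel's roundup_pow_of_two():

#include <stdio.h>

static unsigned int round_up_pow2(unsigned int n)
{
        unsigned int p = 1;
        while (p < n)
                p <<= 1;
        return p;
}

int main(void)
{
        unsigned int requested = 100;   /* consumer asks for 100 CQEs */
        unsigned int entries = round_up_pow2(requested + 1); /* 128-slot ring */
        unsigned int cqe = entries - 1; /* value stored in ibcq.cqe */

        printf("ring = %u, ibcq.cqe = %u\n", entries, cqe);

        /* resizing to the current ring size changes nothing */
        unsigned int resize_to = 128;
        if (resize_to == cqe + 1)
                puts("resize is a no-op");
        return 0;
}
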
/linux/drivers/infiniband/hw/mlx5/
cq.c
46 struct ib_cq *ibcq = &to_mibcq(cq)->ibcq; in mlx5_ib_cq_comp() local
48 ibcq->comp_handler(ibcq, ibcq->cq_context); in mlx5_ib_cq_comp()
55 struct ib_cq *ibcq = &cq->ibcq; in mlx5_ib_cq_event() local
64 if (ibcq->event_handler) { in mlx5_ib_cq_event()
67 event.element.cq = ibcq; in mlx5_ib_cq_event()
68 ibcq->event_handler(&event, ibcq->cq_context); in mlx5_ib_cq_event()
949 cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context); in notify_soft_wc_handler()
981 cq->ibcq.cqe = entries - 1; in mlx5_ib_create_cq()
1377 cq->ibcq.cqe = entries - 1; in mlx5_ib_resize_cq()
1424 if (!ibcq) in mlx5_ib_get_cqe_size()
[all …]
restrack.c
151 static int fill_res_cq_entry_raw(struct sk_buff *msg, struct ib_cq *ibcq) in fill_res_cq_entry_raw() argument
153 struct mlx5_ib_dev *dev = to_mdev(ibcq->device); in fill_res_cq_entry_raw()
154 struct mlx5_ib_cq *cq = to_mcq(ibcq); in fill_res_cq_entry_raw()
/linux/include/rdma/
rdmavt_cq.h
47 struct ib_cq ibcq; member
60 static inline struct rvt_cq *ibcq_to_rvtcq(struct ib_cq *ibcq) in ibcq_to_rvtcq() argument
62 return container_of(ibcq, struct rvt_cq, ibcq); in ibcq_to_rvtcq()
/linux/drivers/infiniband/hw/cxgb4/
ev.c
105 event.device = chp->ibcq.device; in post_qp_event()
107 event.element.cq = &chp->ibcq; in post_qp_event()
115 (*chp->ibcq.comp_handler)(&chp->ibcq, chp->ibcq.cq_context); in post_qp_event()
234 (*chp->ibcq.comp_handler)(&chp->ibcq, chp->ibcq.cq_context); in c4iw_ev_handler()
/linux/drivers/infiniband/hw/erdma/
erdma_eq.c
59 event.device = cq->ibcq.device; in erdma_aeq_event_handler()
60 event.element.cq = &cq->ibcq; in erdma_aeq_event_handler()
62 if (cq->ibcq.event_handler) in erdma_aeq_event_handler()
63 cq->ibcq.event_handler(&event, in erdma_aeq_event_handler()
64 cq->ibcq.cq_context); in erdma_aeq_event_handler()
164 if (rdma_is_kernel_res(&cq->ibcq.res)) in erdma_ceq_completion_handler()
167 if (cq->ibcq.comp_handler) in erdma_ceq_completion_handler()
168 cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context); in erdma_ceq_completion_handler()
erdma_verbs.h
263 struct ib_cq ibcq; member
314 static inline struct erdma_cq *to_ecq(struct ib_cq *ibcq) in to_ecq() argument
316 return container_of(ibcq, struct erdma_cq, ibcq); in to_ecq()
331 int erdma_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
346 int erdma_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata);
348 int erdma_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags);
362 int erdma_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc);
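
These erdma prototypes form the standard CQ verb set: create, destroy, arm (req_notify), and poll. Consumers drive them with a drain loop: poll in batches until the CQ is empty, re-arm, then poll once more to close the race with a completion that landed in between. A stubbed userspace sketch of that loop shape; ib_poll_cq_stub is a stand-in for the kernel's ib_poll_cq():

#include <stdio.h>

#define BATCH 4

struct ib_wc { int wr_id; };            /* stub work completion */
struct ib_cq { int pending; };          /* a counter stands in for the ring */

static int ib_poll_cq_stub(struct ib_cq *cq, int num, struct ib_wc *wc)
{
        int n = cq->pending < num ? cq->pending : num;
        for (int i = 0; i < n; i++)
                wc[i].wr_id = --cq->pending;
        return n;                       /* completions written into wc[] */
}

int main(void)
{
        struct ib_cq cq = { .pending = 10 };
        struct ib_wc wc[BATCH];
        int n;

        /* drain in batches: the shape of every CQ polling loop */
        while ((n = ib_poll_cq_stub(&cq, BATCH, wc)) > 0)
                for (int i = 0; i < n; i++)
                        printf("completed wr_id = %d\n", wc[i].wr_id);

        /* a real consumer would now call ib_req_notify_cq() and poll
         * one final time to avoid missing a racing completion */
        return 0;
}
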
erdma_cq.c
33 int erdma_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags) in erdma_req_notify_cq() argument
35 struct erdma_cq *cq = to_ecq(ibcq); in erdma_req_notify_cq()
112 struct erdma_dev *dev = to_edev(cq->ibcq.device); in erdma_poll_one_cqe()
181 int erdma_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc) in erdma_poll_cq() argument
183 struct erdma_cq *cq = to_ecq(ibcq); in erdma_poll_cq()
/linux/drivers/infiniband/hw/hns/
hns_roce_cq.c
457 struct ib_cq *ibcq; in hns_roce_cq_completion() local
468 ibcq = &hr_cq->ib_cq; in hns_roce_cq_completion()
469 if (ibcq->comp_handler) in hns_roce_cq_completion()
470 ibcq->comp_handler(ibcq, ibcq->cq_context); in hns_roce_cq_completion()
478 struct ib_cq *ibcq; in hns_roce_cq_event() local
499 ibcq = &hr_cq->ib_cq; in hns_roce_cq_event()
500 if (ibcq->event_handler) { in hns_roce_cq_event()
501 event.device = ibcq->device; in hns_roce_cq_event()
502 event.element.cq = ibcq; in hns_roce_cq_event()
504 ibcq->event_handler(&event, ibcq->cq_context); in hns_roce_cq_event()
/linux/drivers/infiniband/hw/qedr/
qedr_roce_cm.c
82 cq->ibcq.comp_handler ? "Yes" : "No"); in qedr_ll2_complete_tx_packet()
92 if (cq->ibcq.comp_handler) in qedr_ll2_complete_tx_packet()
93 (*cq->ibcq.comp_handler) (&cq->ibcq, cq->ibcq.cq_context); in qedr_ll2_complete_tx_packet()
121 if (cq->ibcq.comp_handler) in qedr_ll2_complete_rx_packet()
122 (*cq->ibcq.comp_handler) (&cq->ibcq, cq->ibcq.cq_context); in qedr_ll2_complete_rx_packet()
670 int qedr_gsi_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc) in qedr_gsi_poll_cq() argument
672 struct qedr_dev *dev = get_qedr_dev(ibcq->device); in qedr_gsi_poll_cq()
673 struct qedr_cq *cq = get_qedr_cq(ibcq); in qedr_gsi_poll_cq()
verbs.h
54 int qedr_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
56 int qedr_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata);
57 int qedr_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags);
main.c
233 INIT_RDMA_OBJ_SIZE(ib_cq, qedr_cq, ibcq),
475 if (!cq->destroyed && cq->ibcq.comp_handler) in qedr_irq_handler()
476 (*cq->ibcq.comp_handler) in qedr_irq_handler()
477 (&cq->ibcq, cq->ibcq.cq_context); in qedr_irq_handler()
656 struct ib_cq *ibcq; in qedr_affiliated_event() local
727 ibcq = &cq->ibcq; in qedr_affiliated_event()
728 if (ibcq->event_handler) { in qedr_affiliated_event()
729 event.device = ibcq->device; in qedr_affiliated_event()
730 event.element.cq = ibcq; in qedr_affiliated_event()
731 ibcq->event_handler(&event, ibcq->cq_context); in qedr_affiliated_event()
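
The INIT_RDMA_OBJ_SIZE(ib_cq, qedr_cq, ibcq) entry (also visible in pvrdma_main.c above) tells the core how large the driver's container struct is and at what offset the embedded ib_cq lives, so the core can allocate the container and hand the driver a pointer to the member. A simplified sketch of that allocation scheme; the real plumbing lives in ib_core's object machinery:

#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

struct ib_cq { int cqe; };

struct qedr_cq {                        /* stub of the driver container */
        long hw_state;
        struct ib_cq ibcq;              /* member named in INIT_RDMA_OBJ_SIZE */
};

/* core-side allocation: struct size and member offset are all it needs */
static struct ib_cq *core_alloc_cq(size_t obj_size, size_t ibcq_off)
{
        char *obj = calloc(1, obj_size);
        return obj ? (struct ib_cq *)(obj + ibcq_off) : NULL;
}

int main(void)
{
        struct ib_cq *ibcq = core_alloc_cq(sizeof(struct qedr_cq),
                                           offsetof(struct qedr_cq, ibcq));
        if (!ibcq)
                return 1;
        ibcq->cqe = 63;
        printf("core-allocated CQ, cqe = %d\n", ibcq->cqe);
        free((char *)ibcq - offsetof(struct qedr_cq, ibcq));
        return 0;
}
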
/linux/drivers/infiniband/hw/efa/
efa.h
100 struct ib_cq ibcq; member
162 int efa_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata);
163 int efa_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
/linux/drivers/infiniband/hw/ocrdma/
ocrdma_verbs.h
72 int ocrdma_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
75 int ocrdma_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata);
