Home
last modified time | relevance | path

Searched refs: send_cq (Results 1 – 25 of 54) sorted by relevance

Pages: 1 2 3

/drivers/infiniband/core/
uverbs_std_types_qp.c:95  struct ib_cq *send_cq = NULL; in UVERBS_HANDLER() local
168 send_cq = uverbs_attr_get_obj(attrs, in UVERBS_HANDLER()
170 if (IS_ERR(send_cq)) in UVERBS_HANDLER()
171 return PTR_ERR(send_cq); in UVERBS_HANDLER()
175 send_cq = uverbs_attr_get_obj(attrs, in UVERBS_HANDLER()
177 if (IS_ERR(send_cq)) in UVERBS_HANDLER()
178 return PTR_ERR(send_cq); in UVERBS_HANDLER()
234 attr.send_cq = send_cq; in UVERBS_HANDLER()
verbs.c:1190  qp->send_cq = qp->recv_cq = NULL; in create_xrc_qp_user()
1244 qp->send_cq = attr->send_cq; in create_qp()
1258 qp->send_cq = attr->send_cq; in create_qp()
1318 if (qp->send_cq) in ib_qp_usecnt_inc()
1319 atomic_inc(&qp->send_cq->usecnt); in ib_qp_usecnt_inc()
1337 if (qp->send_cq) in ib_qp_usecnt_dec()
1338 atomic_dec(&qp->send_cq->usecnt); in ib_qp_usecnt_dec()
2834 struct ib_cq *cq = qp->send_cq; in __ib_drain_sq()
2993 trace_cq_drain_complete(qp->send_cq); in ib_drain_sq()
/drivers/infiniband/hw/hns/
hns_roce_qp.c:289  hr_send_cq = send_cq ? to_hr_cq(send_cq) : NULL; in add_qp_to_list()
1488 __acquire(&send_cq->lock); in hns_roce_lock_cqs()
1491 spin_lock(&send_cq->lock); in hns_roce_lock_cqs()
1495 __acquire(&send_cq->lock); in hns_roce_lock_cqs()
1497 spin_lock(&send_cq->lock); in hns_roce_lock_cqs()
1500 spin_lock(&send_cq->lock); in hns_roce_lock_cqs()
1514 __release(&send_cq->lock); in hns_roce_unlock_cqs()
1517 spin_unlock(&send_cq->lock); in hns_roce_unlock_cqs()
1519 __release(&send_cq->lock); in hns_roce_unlock_cqs()
1523 spin_unlock(&send_cq->lock); in hns_roce_unlock_cqs()
[all …]
hns_roce_hw_v2.c:2717  free_mr->rsv_qp[i]->ibqp.send_cq = cq; in free_mr_init_qp()
2775 qp_init_attr.send_cq = cq; in free_mr_alloc_res()
5298 if (ibqp->send_cq) in clear_qp()
5299 hns_roce_v2_cq_clean(to_hr_cq(ibqp->send_cq), in clear_qp()
5610 qp_init_attr->send_cq = ibqp->send_cq; in hns_roce_v2_query_qp()
5634 struct hns_roce_cq *send_cq, *recv_cq; in hns_roce_v2_destroy_qp_common() local
5648 send_cq = hr_qp->ibqp.send_cq ? to_hr_cq(hr_qp->ibqp.send_cq) : NULL; in hns_roce_v2_destroy_qp_common()
5652 hns_roce_lock_cqs(send_cq, recv_cq); in hns_roce_v2_destroy_qp_common()
5661 if (send_cq && send_cq != recv_cq) in hns_roce_v2_destroy_qp_common()
5662 __hns_roce_v2_cq_clean(send_cq, hr_qp->qpn, NULL); in hns_roce_v2_destroy_qp_common()
[all …]
/drivers/infiniband/hw/mana/
qp.c:270  struct mana_ib_cq *send_cq = in mana_ib_create_qp_raw() local
340 cq_spec.gdma_region = send_cq->queue.gdma_region; in mana_ib_create_qp_raw()
343 eq_vec = send_cq->comp_vector; in mana_ib_create_qp_raw()
361 send_cq->queue.id = cq_spec.queue_index; in mana_ib_create_qp_raw()
364 err = mana_ib_install_cq_cb(mdev, send_cq); in mana_ib_create_qp_raw()
373 resp.cqid = send_cq->queue.id; in mana_ib_create_qp_raw()
387 mana_ib_remove_cq_cb(mdev, send_cq); in mana_ib_create_qp_raw()
605 struct mana_ib_cq *send_cq = container_of(qp->ibqp.send_cq, struct mana_ib_cq, ibcq); in mana_add_qp_to_cqs() local
609 spin_lock_irqsave(&send_cq->cq_lock, flags); in mana_add_qp_to_cqs()
620 struct mana_ib_cq *send_cq = container_of(qp->ibqp.send_cq, struct mana_ib_cq, ibcq); in mana_remove_qp_from_cqs() local
[all …]
main.c:1023  struct mana_ib_cq *send_cq = container_of(qp->ibqp.send_cq, struct mana_ib_cq, ibcq); in mana_ib_gd_create_rc_qp() local
1035 req.send_cq_handle = send_cq->cq_handle; in mana_ib_gd_create_rc_qp()
1082 struct mana_ib_cq *send_cq = container_of(qp->ibqp.send_cq, struct mana_ib_cq, ibcq); in mana_ib_gd_create_ud_qp() local
1094 req.send_cq_handle = send_cq->cq_handle; in mana_ib_gd_create_ud_qp()
/drivers/infiniband/ulp/ipoib/
ipoib_verbs.c:187  priv->send_cq = ib_create_cq(priv->ca, ipoib_ib_tx_completion, NULL, in ipoib_transport_dev_init()
189 if (IS_ERR(priv->send_cq)) { in ipoib_transport_dev_init()
197 init_attr.send_cq = priv->send_cq; in ipoib_transport_dev_init()
218 if (ib_req_notify_cq(priv->send_cq, IB_CQ_NEXT_COMP)) in ipoib_transport_dev_init()
244 ib_destroy_cq(priv->send_cq); in ipoib_transport_dev_init()
266 ib_destroy_cq(priv->send_cq); in ipoib_transport_dev_cleanup()
ipoib_ib.c:440  n = ib_poll_cq(priv->send_cq, MAX_SEND_CQE, priv->send_wc); in poll_tx()
508 n = ib_poll_cq(priv->send_cq, MAX_SEND_CQE, priv->send_wc); in ipoib_tx_poll()
520 if (unlikely(ib_req_notify_cq(priv->send_cq, IB_CQ_NEXT_COMP | in ipoib_tx_poll()
674 if (ib_req_notify_cq(priv->send_cq, IB_CQ_NEXT_COMP | in ipoib_send()
ipoib_cm.c:255  .send_cq = priv->recv_cq, /* For drain WR */ in ipoib_cm_create_rx_qp()
770 rc = ib_req_notify_cq(priv->send_cq, IB_CQ_NEXT_COMP | in ipoib_cm_send()
1061 .send_cq = priv->send_cq, in ipoib_cm_create_tx_qp()
/drivers/infiniband/hw/mlx5/
qp.c:1314  send_cq ? get_ts_format(dev, send_cq, fr_sup, rt_sup) : in get_qp_ts_format()
2217 if (send_cq) in create_dci()
2420 if (send_cq) in create_user_qp()
2553 if (send_cq) in create_kernel_qp()
2570 if (send_cq) { in mlx5_ib_lock_cqs()
2600 if (send_cq) { in mlx5_ib_unlock_cqs()
2695 get_cqs(qp->type, qp->ibqp.send_cq, qp->ibqp.recv_cq, &send_cq, in destroy_qp_common()
2702 if (send_cq) in destroy_qp_common()
3154 params->attr->send_cq ? to_mcq(params->attr->send_cq)->mcq.cqn : in create_qp()
4284 if (send_cq) in __mlx5_ib_modify_qp()
[all …]
gsi.c:50  struct ib_cq *gsi_cq = mqp->ibqp.send_cq; in generate_completions()
141 hw_init_attr.send_cq = gsi->cq; in mlx5_ib_create_gsi()
205 .send_cq = gsi->cq, in create_gsi_ud_qp()
/drivers/infiniband/hw/mthca/
mthca_qp.c:1164  struct mthca_cq *send_cq, in mthca_alloc_qp_common() argument
1293 struct mthca_cq *send_cq, in mthca_alloc_qp() argument
1339 if (send_cq == recv_cq) { in mthca_lock_cqs()
1340 spin_lock_irq(&send_cq->lock); in mthca_lock_cqs()
1343 spin_lock_irq(&send_cq->lock); in mthca_lock_cqs()
1354 if (send_cq == recv_cq) { in mthca_unlock_cqs()
1361 spin_unlock(&send_cq->lock); in mthca_unlock_cqs()
1368 struct mthca_cq *send_cq, in mthca_alloc_sqp() argument
1448 struct mthca_cq *send_cq; in mthca_free_qp() local
1451 send_cq = to_mcq(qp->ibqp.send_cq); in mthca_free_qp()
[all …]
mthca_dev.h:535  struct mthca_cq *send_cq,
544 struct mthca_cq *send_cq,
/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/
send.h:120  struct mlx5hws_send_ring_cq send_cq; member
228 struct mlx5hws_send_ring_cq *send_cq = &queue->send_ring.send_cq; in mlx5hws_send_engine_empty() local
230 return ((send_sq->cur_post & send_sq->buf_mask) == send_cq->poll_wqe); in mlx5hws_send_engine_empty()
send.c:603  struct mlx5hws_send_ring_cq *cq = &send_ring->send_cq; in hws_send_engine_poll_cq()
998 hws_send_ring_close_cq(&queue->send_ring.send_cq); in hws_send_ring_close()
1008 err = hws_send_ring_open_cq(ctx->mdev, queue, numa_node, &ring->send_cq); in mlx5hws_send_ring_open()
1013 &ring->send_cq); in mlx5hws_send_ring_open()
1020 hws_send_ring_close_cq(&ring->send_cq); in mlx5hws_send_ring_open()
/drivers/infiniband/hw/mlx4/
qp.c:1313  if (send_cq == recv_cq) { in mlx4_ib_lock_cqs()
1314 spin_lock(&send_cq->lock); in mlx4_ib_lock_cqs()
1317 spin_lock(&send_cq->lock); in mlx4_ib_lock_cqs()
1328 if (send_cq == recv_cq) { in mlx4_ib_unlock_cqs()
1364 *recv_cq = *send_cq; in get_cqs()
1367 *send_cq = to_mcq(qp->ibqp.send_cq); in get_cqs()
1368 *recv_cq = *send_cq; in get_cqs()
1373 *send_cq = (src == MLX4_IB_QP_SRC) ? to_mcq(qp->ibqp.send_cq) : in get_cqs()
1455 if (send_cq != recv_cq) in destroy_qp_common()
2367 recv_cq = send_cq; in __mlx4_ib_modify_qp()
[all …]
/drivers/net/ethernet/ibm/ehea/
ehea_main.c:804  struct ehea_cq *send_cq = pr->send_cq; in ehea_proc_cqes() local
813 cqe = ehea_poll_cq(send_cq); in ehea_proc_cqes()
815 ehea_inc_cq(send_cq); in ehea_proc_cqes()
855 cqe = ehea_poll_cq(send_cq); in ehea_proc_cqes()
893 ehea_reset_cq_ep(pr->send_cq); in ehea_poll()
895 ehea_reset_cq_n1(pr->send_cq); in ehea_poll()
898 cqe_skb = ehea_poll_cq(pr->send_cq); in ehea_poll()
1480 if (!pr->send_cq) { in ehea_init_port_res()
1487 pr->send_cq->attr.act_nr_of_cqes, in ehea_init_port_res()
1562 ehea_destroy_cq(pr->send_cq); in ehea_init_port_res()
[all …]
ehea.h:351  struct ehea_cq *send_cq; member
/drivers/infiniband/hw/vmw_pvrdma/
pvrdma_qp.c:58  static inline void get_cqs(struct pvrdma_qp *qp, struct pvrdma_cq **send_cq, in get_cqs() argument
61 *send_cq = to_vcq(qp->ibqp.send_cq); in get_cqs()
359 cmd->send_cq_handle = to_vcq(init_attr->send_cq)->cq_handle; in pvrdma_create_qp()
1034 init_attr->send_cq = qp->ibqp.send_cq; in pvrdma_query_qp()
/drivers/infiniband/sw/rxe/
rxe_qp.c:75  if (!init->recv_cq || !init->send_cq) { in rxe_qp_chk_init()
356 struct rxe_cq *scq = to_rcq(init->send_cq); in rxe_qp_from_init()
417 init->send_cq = qp->ibqp.send_cq; in rxe_qp_to_init()
/drivers/infiniband/ulp/srp/
ib_srp.c:516  ib_process_cq_direct(ch->send_cq, -1); in srp_destroy_qp()
529 struct ib_cq *recv_cq, *send_cq; in srp_create_ch_ib() local
549 if (IS_ERR(send_cq)) { in srp_create_ch_ib()
550 ret = PTR_ERR(send_cq); in srp_create_ch_ib()
561 init_attr->send_cq = send_cq; in srp_create_ch_ib()
599 if (ch->send_cq) in srp_create_ch_ib()
600 ib_free_cq(ch->send_cq); in srp_create_ch_ib()
604 ch->send_cq = send_cq; in srp_create_ch_ib()
622 ib_free_cq(send_cq); in srp_create_ch_ib()
667 ib_free_cq(ch->send_cq); in srp_free_ch_ib()
[all …]
ib_srp.h:157  struct ib_cq *send_cq; member
/drivers/infiniband/sw/siw/
siw_verbs.c:361  if (!attrs->send_cq || (!attrs->recv_cq && !attrs->srq)) { in siw_create_qp()
405 qp->scq = to_siw_cq(attrs->send_cq); in siw_create_qp()
542 qp_init_attr->send_cq = base_qp->send_cq; in siw_query_qp()
/drivers/infiniband/hw/qedr/
qedr_roce_cm.c:62  dev->gsi_sqcq = get_qedr_cq(attrs->send_cq); in qedr_store_gsi_qp_cq()
139 cq = get_qedr_cq(attrs->send_cq); in qedr_destroy_gsi_cq()
/drivers/infiniband/hw/hfi1/
qp.c:653  ib_cq_head(qp->ibqp.send_cq), in qp_iter_print()
654 ib_cq_tail(qp->ibqp.send_cq), in qp_iter_print()

Completed in 759 milliseconds

Pages: 1 2 3