Lines Matching refs:ibqp

215 struct ib_qp *ibqp = &to_mibqp(qpe_work->qp)->ibqp; in mlx4_ib_handle_qp_event() local
218 event.device = ibqp->device; in mlx4_ib_handle_qp_event()
219 event.element.qp = ibqp; in mlx4_ib_handle_qp_event()
252 ibqp->event_handler(&event, ibqp->qp_context); in mlx4_ib_handle_qp_event()
261 struct ib_qp *ibqp = &to_mibqp(qp)->ibqp; in mlx4_ib_qp_event() local
267 if (!ibqp->event_handler) in mlx4_ib_qp_event()
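
The matches in mlx4_ib_handle_qp_event() and mlx4_ib_qp_event() (lines 215-267) show the async-event path: the driver recovers the ib_qp embedded in its mlx4_ib_qp from the low-level mlx4_qp and, only if a handler was registered, dispatches the event to the consumer. A minimal sketch of that pattern, with stand-in struct definitions and to_mibqp() assumed to be the usual container_of() wrapper:

/* Sketch only: struct layouts are stand-ins mirroring the matches above. */
#include <linux/kernel.h>
#include <rdma/ib_verbs.h>

struct mlx4_qp { u32 qpn; };                    /* stand-in for the mlx4_core QP  */

struct mlx4_ib_qp {
        struct ib_qp    ibqp;                   /* embedded verbs QP (the "ibqp") */
        struct mlx4_qp  mqp;                    /* low-level firmware QP          */
};

static inline struct mlx4_ib_qp *to_mibqp(struct mlx4_qp *mqp)
{
        return container_of(mqp, struct mlx4_ib_qp, mqp);
}

static void handle_qp_event_sketch(struct mlx4_qp *mqp, enum ib_event_type type)
{
        struct ib_qp *ibqp = &to_mibqp(mqp)->ibqp;
        struct ib_event event = {
                .device         = ibqp->device,
                .element.qp     = ibqp,
                .event          = type,
        };

        /* Line 267 above: events are dropped when no handler is registered. */
        if (ibqp->event_handler)
                ibqp->event_handler(&event, ibqp->qp_context);
}
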
770 qp->ibqp.qp_num = qp->mqp.qpn; in _mlx4_ib_create_qp_rss()
1340 if (qp->ibqp.qp_type == IB_QPT_XRC_TGT) in get_pd()
1341 return to_mpd(to_mxrcd(qp->ibqp.xrcd)->pd); in get_pd()
1343 return to_mpd(qp->ibqp.pd); in get_pd()
1349 switch (qp->ibqp.qp_type) { in get_cqs()
1351 *send_cq = to_mcq(to_mxrcd(qp->ibqp.xrcd)->cq); in get_cqs()
1355 *send_cq = to_mcq(qp->ibqp.send_cq); in get_cqs()
1359 *recv_cq = (src == MLX4_IB_QP_SRC) ? to_mcq(qp->ibqp.recv_cq) : in get_cqs()
1361 *send_cq = (src == MLX4_IB_QP_SRC) ? to_mcq(qp->ibqp.send_cq) : in get_cqs()
1372 for (i = 0; i < (1 << qp->ibqp.rwq_ind_tbl->log_ind_tbl_size); in destroy_qp_rss()
1374 struct ib_wq *ibwq = qp->ibqp.rwq_ind_tbl->ind_tbl[i]; in destroy_qp_rss()
1442 qp->ibqp.srq ? to_msrq(qp->ibqp.srq): NULL); in destroy_qp_common()
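
get_pd() and get_cqs() (lines 1340-1361) resolve the protection domain and completion queues through the embedded ibqp, borrowing the XRC domain's objects for XRC target QPs; destroy_qp_common() (line 1442) then hands the result, plus the optional SRQ, to the teardown path. A condensed sketch of that selection logic, assuming the to_mpd()/to_mcq()/to_mxrcd() helpers from mlx4_ib.h and ignoring the WQ-source ternaries visible at lines 1359-1361:

#include "mlx4_ib.h"            /* struct mlx4_ib_qp/pd/cq, to_mpd(), to_mcq(), to_mxrcd() */

static struct mlx4_ib_pd *get_pd_sketch(struct mlx4_ib_qp *qp)
{
        /* XRC target QPs have no PD of their own; borrow the XRC domain's. */
        if (qp->ibqp.qp_type == IB_QPT_XRC_TGT)
                return to_mpd(to_mxrcd(qp->ibqp.xrcd)->pd);

        return to_mpd(qp->ibqp.pd);
}

static void get_cqs_sketch(struct mlx4_ib_qp *qp,
                           struct mlx4_ib_cq **send_cq,
                           struct mlx4_ib_cq **recv_cq)
{
        switch (qp->ibqp.qp_type) {
        case IB_QPT_XRC_TGT:
                *send_cq = to_mcq(to_mxrcd(qp->ibqp.xrcd)->cq);
                *recv_cq = *send_cq;
                break;
        case IB_QPT_XRC_INI:
                *send_cq = to_mcq(qp->ibqp.send_cq);
                *recv_cq = *send_cq;
                break;
        default:
                *send_cq = to_mcq(qp->ibqp.send_cq);
                *recv_cq = to_mcq(qp->ibqp.recv_cq);
                break;
        }
}
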
1576 qp->ibqp.qp_num = qp->mqp.qpn; in _mlx4_ib_create_qp()
1604 rdma_restrack_no_track(&qp->ibqp.res); in _mlx4_ib_create_qp()
1607 qp->ibqp.qp_num = init_attr->qp_type == IB_QPT_SMI ? 0 : in _mlx4_ib_create_qp()
1618 int mlx4_ib_create_qp(struct ib_qp *ibqp, struct ib_qp_init_attr *init_attr, in mlx4_ib_create_qp() argument
1621 struct ib_device *device = ibqp->device; in mlx4_ib_create_qp()
1623 struct mlx4_ib_qp *qp = to_mqp(ibqp); in mlx4_ib_create_qp()
1624 struct ib_pd *pd = ibqp->pd; in mlx4_ib_create_qp()
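
In the create path (lines 1576-1624) the struct ib_qp is allocated by the RDMA core inside the driver's mlx4_ib_qp, so mlx4_ib_create_qp() only converts pointers with to_mqp() and later publishes the QP number through ibqp.qp_num (the well-known 0 and 1 for SMI/GSI). A hedged sketch of that convention; hw_alloc_qp() is a hypothetical placeholder for the driver's actual allocation work:

#include "mlx4_ib.h"            /* struct mlx4_ib_qp, to_mqp(), to_mdev(), to_mpd() */

int create_qp_sketch(struct ib_qp *ibqp, struct ib_qp_init_attr *init_attr,
                     struct ib_udata *udata)
{
        struct mlx4_ib_qp *qp = to_mqp(ibqp);           /* container_of(ibqp, ..., ibqp) */
        struct mlx4_ib_dev *dev = to_mdev(ibqp->device);
        int err;

        /* hw_alloc_qp() is hypothetical: stands in for buffer, doorbell and
         * firmware QP allocation done by _mlx4_ib_create_qp(). */
        err = hw_alloc_qp(dev, to_mpd(ibqp->pd), init_attr, udata, qp);
        if (err)
                return err;

        /* Regular QPs expose the firmware QPN; SMI/GSI use the well-known 0 and 1. */
        if (init_attr->qp_type == IB_QPT_SMI || init_attr->qp_type == IB_QPT_GSI)
                qp->ibqp.qp_num = init_attr->qp_type == IB_QPT_SMI ? 0 : 1;
        else
                qp->ibqp.qp_num = qp->mqp.qpn;

        return 0;
}
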
2138 struct ib_qp *ibqp; in __mlx4_ib_modify_qp() local
2140 ibqp = (struct ib_qp *)src; in __mlx4_ib_modify_qp()
2141 ibsrq = ibqp->srq; in __mlx4_ib_modify_qp()
2142 rwq_ind_tbl = ibqp->rwq_ind_tbl; in __mlx4_ib_modify_qp()
2143 qp_type = ibqp->qp_type; in __mlx4_ib_modify_qp()
2144 qp = to_mqp(ibqp); in __mlx4_ib_modify_qp()
2145 dev = to_mdev(ibqp->device); in __mlx4_ib_modify_qp()
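
__mlx4_ib_modify_qp() (lines 2138-2145) is shared between regular QPs and RSS work queues, so it takes an untyped source pointer plus a tag and casts to struct ib_qp * only in the QP case (the MLX4_IB_QP_SRC tag also shows up at line 2805). A rough sketch of that dual-source entry; the WQ-side tag name and the elided branch are assumptions:

/* Sketch: one modify routine serving both QPs and RSS WQs. */
static int modify_qp_src_sketch(void *src, enum mlx4_ib_source_type src_type,
                                const struct ib_qp_attr *attr, int attr_mask,
                                enum ib_qp_state cur_state,
                                enum ib_qp_state new_state)
{
        struct ib_qp *ibqp;
        struct mlx4_ib_qp *qp;
        struct mlx4_ib_dev *dev;

        if (src_type == MLX4_IB_QP_SRC) {
                ibqp    = (struct ib_qp *)src;
                qp      = to_mqp(ibqp);
                dev     = to_mdev(ibqp->device);
        } else {
                /* WQ source: src is a struct ib_wq *, handled analogously;
                 * elided in this sketch. */
                return -EOPNOTSUPP;
        }

        /* ... program the QP context transition cur_state -> new_state ... */
        return 0;
}
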
2691 static int _mlx4_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, in _mlx4_ib_modify_qp() argument
2694 struct mlx4_ib_dev *dev = to_mdev(ibqp->device); in _mlx4_ib_modify_qp()
2695 struct mlx4_ib_qp *qp = to_mqp(ibqp); in _mlx4_ib_modify_qp()
2703 if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type, in _mlx4_ib_modify_qp()
2708 ibqp->qp_num, cur_state, new_state, in _mlx4_ib_modify_qp()
2709 ibqp->qp_type, attr_mask); in _mlx4_ib_modify_qp()
2713 if (ibqp->rwq_ind_tbl) { in _mlx4_ib_modify_qp()
2719 ibqp->qp_num, cur_state, new_state); in _mlx4_ib_modify_qp()
2727 ibqp->qp_num, attr_mask, cur_state, new_state); in _mlx4_ib_modify_qp()
2736 if ((ibqp->qp_type == IB_QPT_RC) || in _mlx4_ib_modify_qp()
2737 (ibqp->qp_type == IB_QPT_UD) || in _mlx4_ib_modify_qp()
2738 (ibqp->qp_type == IB_QPT_UC) || in _mlx4_ib_modify_qp()
2739 (ibqp->qp_type == IB_QPT_RAW_PACKET) || in _mlx4_ib_modify_qp()
2740 (ibqp->qp_type == IB_QPT_XRC_INI)) { in _mlx4_ib_modify_qp()
2754 ibqp->qp_num, attr->port_num, cur_state, in _mlx4_ib_modify_qp()
2755 new_state, ibqp->qp_type); in _mlx4_ib_modify_qp()
2759 if ((attr_mask & IB_QP_PORT) && (ibqp->qp_type == IB_QPT_RAW_PACKET) && in _mlx4_ib_modify_qp()
2769 ibqp->qp_num, attr->pkey_index, cur_state, in _mlx4_ib_modify_qp()
2770 new_state, ibqp->qp_type); in _mlx4_ib_modify_qp()
2779 ibqp->qp_num, attr->max_rd_atomic, cur_state, in _mlx4_ib_modify_qp()
2780 new_state, ibqp->qp_type); in _mlx4_ib_modify_qp()
2788 ibqp->qp_num, attr->max_dest_rd_atomic, cur_state, in _mlx4_ib_modify_qp()
2789 new_state, ibqp->qp_type); in _mlx4_ib_modify_qp()
2798 if (ibqp->rwq_ind_tbl && (new_state == IB_QPS_INIT)) { in _mlx4_ib_modify_qp()
2799 err = bringup_rss_rwqs(ibqp->rwq_ind_tbl, attr->port_num, in _mlx4_ib_modify_qp()
2805 err = __mlx4_ib_modify_qp(ibqp, MLX4_IB_QP_SRC, attr, attr_mask, in _mlx4_ib_modify_qp()
2808 if (ibqp->rwq_ind_tbl && err) in _mlx4_ib_modify_qp()
2809 bring_down_rss_rwqs(ibqp->rwq_ind_tbl, udata); in _mlx4_ib_modify_qp()
2819 int mlx4_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, in mlx4_ib_modify_qp() argument
2822 struct mlx4_ib_qp *mqp = to_mqp(ibqp); in mlx4_ib_modify_qp()
2828 ret = _mlx4_ib_modify_qp(ibqp, attr, attr_mask, udata); in mlx4_ib_modify_qp()
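
_mlx4_ib_modify_qp() (lines 2691-2809) is mostly validation against the embedded ibqp: ib_modify_qp_is_ok() for the state machine, then port, P_Key and RDMA-atomic range checks, with RSS rings brought up on the transition to INIT and torn back down if the modify fails. A trimmed sketch of that shape, assuming the four-argument ib_modify_qp_is_ok() of recent kernels and that the call at line 2805 continues with cur_state, new_state and udata:

static int modify_qp_sketch(struct ib_qp *ibqp, struct ib_qp_attr *attr,
                            int attr_mask, struct ib_udata *udata)
{
        struct mlx4_ib_qp *qp = to_mqp(ibqp);
        enum ib_qp_state cur_state, new_state;
        int err;

        mutex_lock(&qp->mutex);

        cur_state = attr_mask & IB_QP_CUR_STATE ? attr->cur_qp_state : qp->state;
        new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state;

        if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type, attr_mask)) {
                err = -EINVAL;
                goto out;
        }

        /* Per-attribute range checks (port_num, pkey_index, max_rd_atomic, ...)
         * go here; the listing above logs each rejection with ibqp->qp_num. */

        if (ibqp->rwq_ind_tbl && new_state == IB_QPS_INIT) {
                err = bringup_rss_rwqs(ibqp->rwq_ind_tbl, attr->port_num, udata);
                if (err)
                        goto out;
        }

        err = __mlx4_ib_modify_qp(ibqp, MLX4_IB_QP_SRC, attr, attr_mask,
                                  cur_state, new_state, udata);

        if (ibqp->rwq_ind_tbl && err)
                bring_down_rss_rwqs(ibqp->rwq_ind_tbl, udata);
out:
        mutex_unlock(&qp->mutex);
        return err;
}
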
2860 struct mlx4_ib_dev *mdev = to_mdev(qp->ibqp.device); in build_sriov_qp0_header()
2862 struct ib_device *ib_dev = qp->ibqp.device; in build_sriov_qp0_header()
3013 struct ib_device *ib_dev = qp->ibqp.device; in build_mlx_header()
3037 is_eth = rdma_port_get_link_layer(qp->ibqp.device, qp->port) == IB_LINK_LAYER_ETHERNET; in build_mlx_header()
3142 cpu_to_be32((!qp->ibqp.qp_num ? MLX4_WQE_MLX_VL15 : 0) | in build_mlx_header()
3191 !qp->ibqp.qp_num ? in build_mlx_header()
3196 if (qp->ibqp.qp_num && sqp->ud_header.lrh.virtual_lane == 15) in build_mlx_header()
3202 if (!qp->ibqp.qp_num) in build_mlx_header()
3216 sqp->ud_header.deth.source_qpn = cpu_to_be32(qp->ibqp.qp_num); in build_mlx_header()
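
build_sriov_qp0_header() and build_mlx_header() (lines 2860-3216) use ibqp.qp_num to tell QP0 (SMI) from QP1 (GSI) while hand-building the MAD headers: QP0 traffic is pinned to VL15, any other QP mapped to VL15 is rejected, and the DETH source QPN comes straight from the embedded ibqp. A compressed sketch of just those decisions, reusing the ud_header fields visible in the matches:

#include "mlx4_ib.h"
#include <linux/mlx4/qp.h>      /* struct mlx4_wqe_mlx_seg, MLX4_WQE_MLX_VL15 */

static int mlx_header_sketch(struct mlx4_ib_qp *qp, struct mlx4_ib_sqp *sqp,
                             struct mlx4_wqe_mlx_seg *mlx)
{
        /* QP0 (SMI) traffic is pinned to the management lane, VL15. */
        mlx->flags |= cpu_to_be32(!qp->ibqp.qp_num ? MLX4_WQE_MLX_VL15 : 0);
        if (!qp->ibqp.qp_num)
                sqp->ud_header.lrh.virtual_lane = 15;

        /* Any other QP that ends up mapped to VL15 is rejected (line 3196). */
        if (qp->ibqp.qp_num && sqp->ud_header.lrh.virtual_lane == 15)
                return -EINVAL;

        /* The DETH advertises the real source QPN of the special QP. */
        sqp->ud_header.deth.source_qpn = cpu_to_be32(qp->ibqp.qp_num);
        return 0;
}
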
3518 static int _mlx4_ib_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr, in _mlx4_ib_post_send() argument
3521 struct mlx4_ib_qp *qp = to_mqp(ibqp); in _mlx4_ib_post_send()
3536 struct mlx4_ib_dev *mdev = to_mdev(ibqp->device); in _mlx4_ib_post_send()
3572 if (mlx4_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)) { in _mlx4_ib_post_send()
3725 set_tunnel_datagram_seg(to_mdev(ibqp->device), wqe, in _mlx4_ib_post_send()
3821 to_mdev(ibqp->device)->uar_map + MLX4_SEND_DOORBELL); in _mlx4_ib_post_send()
3833 int mlx4_ib_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr, in mlx4_ib_post_send() argument
3836 return _mlx4_ib_post_send(ibqp, wr, bad_wr, false); in mlx4_ib_post_send()
3839 static int _mlx4_ib_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr, in _mlx4_ib_post_recv() argument
3842 struct mlx4_ib_qp *qp = to_mqp(ibqp); in _mlx4_ib_post_recv()
3850 struct mlx4_ib_dev *mdev = to_mdev(ibqp->device); in _mlx4_ib_post_recv()
3866 if (mlx4_wq_overflow(&qp->rq, nreq, qp->ibqp.recv_cq)) { in _mlx4_ib_post_recv()
3882 ib_dma_sync_single_for_device(ibqp->device, in _mlx4_ib_post_recv()
3927 int mlx4_ib_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr, in mlx4_ib_post_recv() argument
3930 return _mlx4_ib_post_recv(ibqp, wr, bad_wr, false); in mlx4_ib_post_recv()
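
Both post paths (lines 3518-3930) test the ring for overflow against the QP's own CQ before writing a WQE: if the queue looks full, the producer/consumer indices are re-read under the CQ lock so completions polled concurrently are taken into account. A hedged reconstruction of the mlx4_wq_overflow() helper named at lines 3572 and 3866:

#include "mlx4_ib.h"            /* struct mlx4_ib_wq, struct mlx4_ib_cq, to_mcq() */

static int wq_overflow_sketch(struct mlx4_ib_wq *wq, int nreq, struct ib_cq *ib_cq)
{
        struct mlx4_ib_cq *cq;
        unsigned int cur;

        cur = wq->head - wq->tail;
        if (likely(cur + nreq < wq->max_post))
                return 0;

        /* Ring looks full: recheck under the CQ lock, since CQ polling
         * (which advances wq->tail) runs under that same lock. */
        cq = to_mcq(ib_cq);
        spin_lock(&cq->lock);
        cur = wq->head - wq->tail;
        spin_unlock(&cq->lock);

        return cur + nreq >= wq->max_post;
}
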
4006 int mlx4_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, int qp_attr_mask, in mlx4_ib_query_qp() argument
4009 struct mlx4_ib_dev *dev = to_mdev(ibqp->device); in mlx4_ib_query_qp()
4010 struct mlx4_ib_qp *qp = to_mqp(ibqp); in mlx4_ib_query_qp()
4015 if (ibqp->rwq_ind_tbl) in mlx4_ib_query_qp()
4045 if (qp->ibqp.qp_type == IB_QPT_RC || qp->ibqp.qp_type == IB_QPT_UC || in mlx4_ib_query_qp()
4046 qp->ibqp.qp_type == IB_QPT_XRC_INI || in mlx4_ib_query_qp()
4047 qp->ibqp.qp_type == IB_QPT_XRC_TGT) { in mlx4_ib_query_qp()
4080 if (!ibqp->uobject) { in mlx4_ib_query_qp()
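
mlx4_ib_query_qp() (lines 4006-4080) refuses RSS QPs (those with an rwq_ind_tbl), reads connected-QP attributes back from the hardware context, and reports the software-tracked capabilities only for kernel QPs (!ibqp->uobject). All of the entry points in this listing are reached through the plain verbs API; a minimal kernel-side usage sketch, assuming an already-created struct ib_qp *qp:

#include <rdma/ib_verbs.h>

/* Query the current state and path MTU of a kernel-owned QP. */
static int query_qp_state_sketch(struct ib_qp *qp, enum ib_qp_state *state,
                                 enum ib_mtu *mtu)
{
        struct ib_qp_attr attr;
        struct ib_qp_init_attr init_attr;
        int err;

        /* Dispatches to mlx4_ib_query_qp() when qp->device is an mlx4 HCA. */
        err = ib_query_qp(qp, &attr, IB_QP_STATE | IB_QP_PATH_MTU, &init_attr);
        if (err)
                return err;

        *state = attr.qp_state;
        *mtu = attr.path_mtu;
        return 0;
}
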