Lines matching refs: qp

107 	struct mlx4_qp *qp;  member
113 static int is_tunnel_qp(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp) in is_tunnel_qp() argument
118 return qp->mqp.qpn >= dev->dev->phys_caps.base_tunnel_sqpn && in is_tunnel_qp()
119 qp->mqp.qpn < dev->dev->phys_caps.base_tunnel_sqpn + in is_tunnel_qp()
123 static int is_sqp(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp) in is_sqp() argument
130 qp->mqp.qpn >= dev->dev->phys_caps.base_sqpn && in is_sqp()
131 qp->mqp.qpn <= dev->dev->phys_caps.base_sqpn + 3); in is_sqp()
137 if (qp->mqp.qpn == dev->dev->caps.spec_qps[i].qp0_proxy || in is_sqp()
138 qp->mqp.qpn == dev->dev->caps.spec_qps[i].qp1_proxy) { in is_sqp()
147 return !!(qp->flags & MLX4_IB_ROCE_V2_GSI_QP); in is_sqp()
151 static int is_qp0(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp) in is_qp0() argument
158 qp->mqp.qpn >= dev->dev->phys_caps.base_sqpn && in is_qp0()
159 qp->mqp.qpn <= dev->dev->phys_caps.base_sqpn + 1); in is_qp0()
165 if (qp->mqp.qpn == dev->dev->caps.spec_qps[i].qp0_proxy) { in is_qp0()
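The range tests above classify a QP by comparing its raw QP number against the firmware-assigned special-QP base. A stand-alone sketch of the same comparisons follows; the base value here is invented, whereas the driver takes it from dev->dev->phys_caps.base_sqpn.

    /* Illustrative only: mirrors the range tests in is_sqp()/is_qp0().
     * The base_sqpn value is made up; the driver reads it from firmware caps. */
    #include <stdbool.h>
    #include <stdio.h>

    static bool in_sqp_range(unsigned int qpn, unsigned int base_sqpn)
    {
        /* QP0 and QP1 for both ports: base_sqpn .. base_sqpn + 3 */
        return qpn >= base_sqpn && qpn <= base_sqpn + 3;
    }

    static bool in_qp0_range(unsigned int qpn, unsigned int base_sqpn)
    {
        /* QP0 for both ports: base_sqpn .. base_sqpn + 1 */
        return qpn >= base_sqpn && qpn <= base_sqpn + 1;
    }

    int main(void)
    {
        unsigned int base = 0x400;  /* assumed base_sqpn */

        printf("sqp=%d qp0=%d\n", in_sqp_range(base + 2, base),
               in_qp0_range(base + 2, base));
        return 0;
    }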
174 static void *get_wqe(struct mlx4_ib_qp *qp, int offset) in get_wqe() argument
176 return mlx4_buf_offset(&qp->buf, offset); in get_wqe()
179 static void *get_recv_wqe(struct mlx4_ib_qp *qp, int n) in get_recv_wqe() argument
181 return get_wqe(qp, qp->rq.offset + (n << qp->rq.wqe_shift)); in get_recv_wqe()
184 static void *get_send_wqe(struct mlx4_ib_qp *qp, int n) in get_send_wqe() argument
186 return get_wqe(qp, qp->sq.offset + (n << qp->sq.wqe_shift)); in get_send_wqe()
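get_recv_wqe() and get_send_wqe() locate a WQE as ring base offset plus the index shifted by the per-ring stride. A minimal stand-alone sketch of that address arithmetic, with invented offsets and strides:

    /* Sketch of the WQE addressing used by get_recv_wqe()/get_send_wqe():
     * wqe = buf + ring_offset + (index << wqe_shift). All sizes invented. */
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint8_t buf[8192];
        unsigned int rq_offset = 0, sq_offset = 4096;
        unsigned int rq_wqe_shift = 5, sq_wqe_shift = 6;  /* 32B and 64B strides */

        uint8_t *rwqe = buf + rq_offset + (3u << rq_wqe_shift);  /* RQ entry 3 */
        uint8_t *swqe = buf + sq_offset + (3u << sq_wqe_shift);  /* SQ entry 3 */

        printf("rwqe at +%td, swqe at +%td\n", rwqe - buf, swqe - buf);
        return 0;
    }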
194 static void stamp_send_wqe(struct mlx4_ib_qp *qp, int n) in stamp_send_wqe() argument
202 buf = get_send_wqe(qp, n & (qp->sq.wqe_cnt - 1)); in stamp_send_wqe()
215 struct ib_qp *ibqp = &to_mibqp(qpe_work->qp)->ibqp; in mlx4_ib_handle_qp_event()
219 event.element.qp = ibqp; in mlx4_ib_handle_qp_event()
248 qpe_work->type, qpe_work->qp->qpn); in mlx4_ib_handle_qp_event()
255 mlx4_put_qp(qpe_work->qp); in mlx4_ib_handle_qp_event()
259 static void mlx4_ib_qp_event(struct mlx4_qp *qp, enum mlx4_event type) in mlx4_ib_qp_event() argument
261 struct ib_qp *ibqp = &to_mibqp(qp)->ibqp; in mlx4_ib_qp_event()
265 to_mibqp(qp)->port = to_mibqp(qp)->alt_port; in mlx4_ib_qp_event()
274 qpe_work->qp = qp; in mlx4_ib_qp_event()
281 mlx4_put_qp(qp); in mlx4_ib_qp_event()
284 static void mlx4_ib_wq_event(struct mlx4_qp *qp, enum mlx4_event type) in mlx4_ib_wq_event() argument
287 type, qp->qpn); in mlx4_ib_wq_event()
337 bool is_user, bool has_rq, struct mlx4_ib_qp *qp, in set_rq_size() argument
349 qp->rq.wqe_cnt = qp->rq.max_gs = 0; in set_rq_size()
360 qp->rq.wqe_cnt = roundup_pow_of_two(max(1U, cap->max_recv_wr)); in set_rq_size()
361 qp->rq.max_gs = roundup_pow_of_two(max(1U, cap->max_recv_sge)); in set_rq_size()
362 wqe_size = qp->rq.max_gs * sizeof(struct mlx4_wqe_data_seg); in set_rq_size()
363 qp->rq.wqe_shift = ilog2(max_t(u32, wqe_size, inl_recv_sz)); in set_rq_size()
368 cap->max_recv_wr = qp->rq.max_post = qp->rq.wqe_cnt; in set_rq_size()
369 cap->max_recv_sge = qp->rq.max_gs; in set_rq_size()
371 cap->max_recv_wr = qp->rq.max_post = in set_rq_size()
372 min(dev->dev->caps.max_wqes - MLX4_IB_SQ_MAX_SPARE, qp->rq.wqe_cnt); in set_rq_size()
373 cap->max_recv_sge = min(qp->rq.max_gs, in set_rq_size()
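set_rq_size() rounds both the requested WR count and SGE count up to powers of two and derives the RQ stride from the larger of the scatter-list size and the inline-receive size. A hedged stand-alone rendition of that sizing math; the 16-byte data-segment size is an assumption standing in for sizeof(struct mlx4_wqe_data_seg), and the helpers mimic the kernel's roundup_pow_of_two/ilog2.

    /* Sketch of the RQ sizing arithmetic in set_rq_size(). */
    #include <stdio.h>

    static unsigned int roundup_pow2(unsigned int v)
    {
        unsigned int r = 1;
        while (r < v)
            r <<= 1;
        return r;
    }

    static unsigned int ilog2_u(unsigned int v)
    {
        unsigned int l = 0;
        while (v >>= 1)
            l++;
        return l;
    }

    int main(void)
    {
        unsigned int max_recv_wr = 100, max_recv_sge = 3, inl_recv_sz = 0;
        unsigned int data_seg_sz = 16;  /* assumed segment size */

        unsigned int wqe_cnt = roundup_pow2(max_recv_wr > 1 ? max_recv_wr : 1);
        unsigned int max_gs = roundup_pow2(max_recv_sge > 1 ? max_recv_sge : 1);
        unsigned int wqe_size = max_gs * data_seg_sz;
        unsigned int wqe_shift = ilog2_u(wqe_size > inl_recv_sz ? wqe_size : inl_recv_sz);

        printf("wqe_cnt=%u max_gs=%u wqe_shift=%u (stride %u bytes)\n",
               wqe_cnt, max_gs, wqe_shift, 1u << wqe_shift);
        return 0;
    }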
382 enum mlx4_ib_qp_type type, struct mlx4_ib_qp *qp) in set_kernel_sq_size() argument
389 cap->max_inline_data + send_wqe_overhead(type, qp->flags) + in set_kernel_sq_size()
404 send_wqe_overhead(type, qp->flags); in set_kernel_sq_size()
409 qp->sq.wqe_shift = ilog2(roundup_pow_of_two(s)); in set_kernel_sq_size()
415 qp->sq_spare_wqes = MLX4_IB_SQ_HEADROOM(qp->sq.wqe_shift); in set_kernel_sq_size()
416 qp->sq.wqe_cnt = roundup_pow_of_two(cap->max_send_wr + in set_kernel_sq_size()
417 qp->sq_spare_wqes); in set_kernel_sq_size()
419 qp->sq.max_gs = in set_kernel_sq_size()
421 (1 << qp->sq.wqe_shift)) - in set_kernel_sq_size()
422 send_wqe_overhead(type, qp->flags)) / in set_kernel_sq_size()
425 qp->buf_size = (qp->rq.wqe_cnt << qp->rq.wqe_shift) + in set_kernel_sq_size()
426 (qp->sq.wqe_cnt << qp->sq.wqe_shift); in set_kernel_sq_size()
427 if (qp->rq.wqe_shift > qp->sq.wqe_shift) { in set_kernel_sq_size()
428 qp->rq.offset = 0; in set_kernel_sq_size()
429 qp->sq.offset = qp->rq.wqe_cnt << qp->rq.wqe_shift; in set_kernel_sq_size()
431 qp->rq.offset = qp->sq.wqe_cnt << qp->sq.wqe_shift; in set_kernel_sq_size()
432 qp->sq.offset = 0; in set_kernel_sq_size()
435 cap->max_send_wr = qp->sq.max_post = in set_kernel_sq_size()
436 qp->sq.wqe_cnt - qp->sq_spare_wqes; in set_kernel_sq_size()
437 cap->max_send_sge = min(qp->sq.max_gs, in set_kernel_sq_size()
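After sizing the SQ (including the spare headroom WQEs), set_kernel_sq_size() lays both rings out in a single buffer, placing whichever ring has the larger WQE stride at offset 0. A stand-alone sketch of that layout decision with made-up counts and shifts:

    /* Sketch of the buffer layout computed at the end of set_kernel_sq_size():
     * the ring with the larger stride goes first. Values invented. */
    #include <stdio.h>

    int main(void)
    {
        unsigned int rq_wqe_cnt = 256, rq_wqe_shift = 5;  /* 32B RQ stride */
        unsigned int sq_wqe_cnt = 512, sq_wqe_shift = 6;  /* 64B SQ stride */
        unsigned int rq_offset, sq_offset;

        unsigned int buf_size = (rq_wqe_cnt << rq_wqe_shift) +
                                (sq_wqe_cnt << sq_wqe_shift);

        if (rq_wqe_shift > sq_wqe_shift) {
            rq_offset = 0;
            sq_offset = rq_wqe_cnt << rq_wqe_shift;
        } else {
            rq_offset = sq_wqe_cnt << sq_wqe_shift;
            sq_offset = 0;
        }

        printf("buf_size=%u rq at %u, sq at %u\n", buf_size, rq_offset, sq_offset);
        return 0;
    }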
447 struct mlx4_ib_qp *qp, in set_user_sq_size() argument
457 qp->sq.wqe_cnt = 1 << ucmd->log_sq_bb_count; in set_user_sq_size()
458 qp->sq.wqe_shift = ucmd->log_sq_stride; in set_user_sq_size()
460 qp->buf_size = (qp->rq.wqe_cnt << qp->rq.wqe_shift) + in set_user_sq_size()
461 (qp->sq.wqe_cnt << qp->sq.wqe_shift); in set_user_sq_size()
466 static int alloc_proxy_bufs(struct ib_device *dev, struct mlx4_ib_qp *qp) in alloc_proxy_bufs() argument
470 qp->sqp_proxy_rcv = in alloc_proxy_bufs()
471 kmalloc_array(qp->rq.wqe_cnt, sizeof(struct mlx4_ib_buf), in alloc_proxy_bufs()
473 if (!qp->sqp_proxy_rcv) in alloc_proxy_bufs()
475 for (i = 0; i < qp->rq.wqe_cnt; i++) { in alloc_proxy_bufs()
476 qp->sqp_proxy_rcv[i].addr = in alloc_proxy_bufs()
479 if (!qp->sqp_proxy_rcv[i].addr) in alloc_proxy_bufs()
481 qp->sqp_proxy_rcv[i].map = in alloc_proxy_bufs()
482 ib_dma_map_single(dev, qp->sqp_proxy_rcv[i].addr, in alloc_proxy_bufs()
485 if (ib_dma_mapping_error(dev, qp->sqp_proxy_rcv[i].map)) { in alloc_proxy_bufs()
486 kfree(qp->sqp_proxy_rcv[i].addr); in alloc_proxy_bufs()
495 ib_dma_unmap_single(dev, qp->sqp_proxy_rcv[i].map, in alloc_proxy_bufs()
498 kfree(qp->sqp_proxy_rcv[i].addr); in alloc_proxy_bufs()
500 kfree(qp->sqp_proxy_rcv); in alloc_proxy_bufs()
501 qp->sqp_proxy_rcv = NULL; in alloc_proxy_bufs()
505 static void free_proxy_bufs(struct ib_device *dev, struct mlx4_ib_qp *qp) in free_proxy_bufs() argument
509 for (i = 0; i < qp->rq.wqe_cnt; i++) { in free_proxy_bufs()
510 ib_dma_unmap_single(dev, qp->sqp_proxy_rcv[i].map, in free_proxy_bufs()
513 kfree(qp->sqp_proxy_rcv[i].addr); in free_proxy_bufs()
515 kfree(qp->sqp_proxy_rcv); in free_proxy_bufs()
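alloc_proxy_bufs() allocates one receive buffer per RQ entry and, if any step fails, unwinds every allocation made so far; free_proxy_bufs() performs the same teardown over the full ring. A generic userspace sketch of that allocate-or-unwind loop follows; plain malloc stands in for the kmalloc_array + ib_dma_map_single pair.

    /* Sketch of the per-entry allocate/unwind pattern used by alloc_proxy_bufs(). */
    #include <stdlib.h>

    struct buf { void *addr; };

    static struct buf *alloc_bufs(int n, size_t sz)
    {
        struct buf *v = calloc(n, sizeof(*v));
        int i;

        if (!v)
            return NULL;
        for (i = 0; i < n; i++) {
            v[i].addr = malloc(sz);
            if (!v[i].addr)
                goto err;
        }
        return v;

    err:
        while (i > 0)
            free(v[--i].addr);  /* undo everything done before the failure */
        free(v);
        return NULL;
    }

    static void free_bufs(struct buf *v, int n)
    {
        for (int i = 0; i < n; i++)
            free(v[i].addr);
        free(v);
    }

    int main(void)
    {
        struct buf *v = alloc_bufs(16, 512);

        if (v)
            free_bufs(v, 16);
        return 0;
    }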
537 struct mlx4_ib_qp *qp) in mlx4_ib_free_qp_counter() argument
539 mutex_lock(&dev->counters_table[qp->port - 1].mutex); in mlx4_ib_free_qp_counter()
540 mlx4_counter_free(dev->dev, qp->counter_index->index); in mlx4_ib_free_qp_counter()
541 list_del(&qp->counter_index->list); in mlx4_ib_free_qp_counter()
542 mutex_unlock(&dev->counters_table[qp->port - 1].mutex); in mlx4_ib_free_qp_counter()
544 kfree(qp->counter_index); in mlx4_ib_free_qp_counter()
545 qp->counter_index = NULL; in mlx4_ib_free_qp_counter()
653 struct mlx4_ib_qp *qp) in create_qp_rss() argument
658 qp->mqp.usage = MLX4_RES_USAGE_USER_VERBS; in create_qp_rss()
660 err = mlx4_qp_reserve_range(dev->dev, 1, 1, &qpn, 0, qp->mqp.usage); in create_qp_rss()
664 err = mlx4_qp_alloc(dev->dev, qpn, &qp->mqp); in create_qp_rss()
668 INIT_LIST_HEAD(&qp->gid_list); in create_qp_rss()
669 INIT_LIST_HEAD(&qp->steering_rules); in create_qp_rss()
671 qp->mlx4_ib_qp_type = MLX4_IB_QPT_RAW_PACKET; in create_qp_rss()
672 qp->state = IB_QPS_RESET; in create_qp_rss()
675 qp->sq_no_prefetch = 1; in create_qp_rss()
676 qp->sq.wqe_cnt = 1; in create_qp_rss()
677 qp->sq.wqe_shift = MLX4_IB_MIN_SQ_STRIDE; in create_qp_rss()
678 qp->buf_size = qp->sq.wqe_cnt << MLX4_IB_MIN_SQ_STRIDE; in create_qp_rss()
679 qp->mtt = (to_mqp( in create_qp_rss()
682 qp->rss_ctx = kzalloc(sizeof(*qp->rss_ctx), GFP_KERNEL); in create_qp_rss()
683 if (!qp->rss_ctx) { in create_qp_rss()
688 err = set_qp_rss(dev, qp->rss_ctx, init_attr, ucmd); in create_qp_rss()
695 kfree(qp->rss_ctx); in create_qp_rss()
698 mlx4_qp_remove(dev->dev, &qp->mqp); in create_qp_rss()
699 mlx4_qp_free(dev->dev, &qp->mqp); in create_qp_rss()
706 static int _mlx4_ib_create_qp_rss(struct ib_pd *pd, struct mlx4_ib_qp *qp, in _mlx4_ib_create_qp_rss() argument
763 qp->pri.vid = 0xFFFF; in _mlx4_ib_create_qp_rss()
764 qp->alt.vid = 0xFFFF; in _mlx4_ib_create_qp_rss()
766 err = create_qp_rss(to_mdev(pd->device), init_attr, &ucmd, qp); in _mlx4_ib_create_qp_rss()
770 qp->ibqp.qp_num = qp->mqp.qpn; in _mlx4_ib_create_qp_rss()
780 struct mlx4_ib_qp *qp, int range_size, int *wqn) in mlx4_ib_alloc_wqn() argument
800 qp->mqp.usage); in mlx4_ib_alloc_wqn()
817 qp->wqn_range = range; in mlx4_ib_alloc_wqn()
830 struct mlx4_ib_qp *qp, bool dirty_release) in mlx4_ib_release_wqn() argument
837 range = qp->wqn_range; in mlx4_ib_release_wqn()
858 struct ib_udata *udata, struct mlx4_ib_qp *qp) in create_rq() argument
873 qp->mlx4_ib_qp_type = MLX4_IB_QPT_RAW_PACKET; in create_rq()
875 spin_lock_init(&qp->sq.lock); in create_rq()
876 spin_lock_init(&qp->rq.lock); in create_rq()
877 INIT_LIST_HEAD(&qp->gid_list); in create_rq()
878 INIT_LIST_HEAD(&qp->steering_rules); in create_rq()
880 qp->state = IB_QPS_RESET; in create_rq()
905 qp->flags |= MLX4_IB_QP_SCATTER_FCS; in create_rq()
907 err = set_rq_size(dev, &init_attr->cap, true, true, qp, qp->inl_recv_sz); in create_rq()
911 qp->sq_no_prefetch = 1; in create_rq()
912 qp->sq.wqe_cnt = 1; in create_rq()
913 qp->sq.wqe_shift = MLX4_IB_MIN_SQ_STRIDE; in create_rq()
914 qp->buf_size = (qp->rq.wqe_cnt << qp->rq.wqe_shift) + in create_rq()
915 (qp->sq.wqe_cnt << qp->sq.wqe_shift); in create_rq()
917 qp->umem = ib_umem_get(pd->device, wq.buf_addr, qp->buf_size, 0); in create_rq()
918 if (IS_ERR(qp->umem)) { in create_rq()
919 err = PTR_ERR(qp->umem); in create_rq()
923 shift = mlx4_ib_umem_calc_optimal_mtt_size(qp->umem, 0, &n); in create_rq()
924 err = mlx4_mtt_init(dev->dev, n, shift, &qp->mtt); in create_rq()
929 err = mlx4_ib_umem_write_mtt(dev, &qp->mtt, qp->umem); in create_rq()
933 err = mlx4_ib_db_map_user(udata, wq.db_addr, &qp->db); in create_rq()
936 qp->mqp.usage = MLX4_RES_USAGE_USER_VERBS; in create_rq()
938 err = mlx4_ib_alloc_wqn(context, qp, range_size, &qpn); in create_rq()
942 err = mlx4_qp_alloc(dev->dev, qpn, &qp->mqp); in create_rq()
951 qp->doorbell_qpn = swab32(qp->mqp.qpn << 8); in create_rq()
953 qp->mqp.event = mlx4_ib_wq_event; in create_rq()
961 list_add_tail(&qp->qps_list, &dev->qp_list); in create_rq()
966 list_add_tail(&qp->cq_send_list, &mcq->send_qp_list); in create_rq()
968 list_add_tail(&qp->cq_recv_list, &mcq->recv_qp_list); in create_rq()
975 mlx4_ib_release_wqn(context, qp, 0); in create_rq()
977 mlx4_ib_db_unmap_user(context, &qp->db); in create_rq()
980 mlx4_mtt_cleanup(dev->dev, &qp->mtt); in create_rq()
982 ib_umem_release(qp->umem); in create_rq()
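create_rq() (and create_qp_common() below) precompute the doorbell word as swab32(qpn << 8): shifting the 24-bit QP number up by one byte and byte-swapping means the later little-endian register write presents the QPN in the big-endian order the hardware doorbell expects, so the hot post path needs no further conversion. A small sketch of that computation; the QP number is invented and __builtin_bswap32 stands in for swab32.

    /* Sketch of the doorbell_qpn precomputation: swab32(qpn << 8). */
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t qpn = 0x012345;  /* assumed 24-bit QP number */
        uint32_t doorbell_qpn = __builtin_bswap32(qpn << 8);

        /* On a little-endian host the stored bytes now read as the QPN
         * most-significant-byte first, i.e. big-endian on the bus. */
        printf("qpn=0x%06x doorbell word=0x%08x\n", qpn, doorbell_qpn);
        return 0;
    }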
989 struct mlx4_ib_qp *qp) in create_qp_common() argument
1046 qp->sqp = kzalloc(sizeof(struct mlx4_ib_sqp), GFP_KERNEL); in create_qp_common()
1047 if (!qp->sqp) in create_qp_common()
1051 qp->mlx4_ib_qp_type = qp_type; in create_qp_common()
1053 spin_lock_init(&qp->sq.lock); in create_qp_common()
1054 spin_lock_init(&qp->rq.lock); in create_qp_common()
1055 INIT_LIST_HEAD(&qp->gid_list); in create_qp_common()
1056 INIT_LIST_HEAD(&qp->steering_rules); in create_qp_common()
1058 qp->state = IB_QPS_RESET; in create_qp_common()
1060 qp->sq_signal_bits = cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE); in create_qp_common()
1075 qp->inl_recv_sz = ucmd.inl_recv_sz; in create_qp_common()
1085 qp->flags |= MLX4_IB_QP_SCATTER_FCS; in create_qp_common()
1089 qp_has_rq(init_attr), qp, qp->inl_recv_sz); in create_qp_common()
1093 qp->sq_no_prefetch = ucmd.sq_no_prefetch; in create_qp_common()
1095 err = set_user_sq_size(dev, qp, &ucmd); in create_qp_common()
1099 qp->umem = in create_qp_common()
1100 ib_umem_get(pd->device, ucmd.buf_addr, qp->buf_size, 0); in create_qp_common()
1101 if (IS_ERR(qp->umem)) { in create_qp_common()
1102 err = PTR_ERR(qp->umem); in create_qp_common()
1106 shift = mlx4_ib_umem_calc_optimal_mtt_size(qp->umem, 0, &n); in create_qp_common()
1107 err = mlx4_mtt_init(dev->dev, n, shift, &qp->mtt); in create_qp_common()
1112 err = mlx4_ib_umem_write_mtt(dev, &qp->mtt, qp->umem); in create_qp_common()
1117 err = mlx4_ib_db_map_user(udata, ucmd.db_addr, &qp->db); in create_qp_common()
1121 qp->mqp.usage = MLX4_RES_USAGE_USER_VERBS; in create_qp_common()
1124 qp_has_rq(init_attr), qp, 0); in create_qp_common()
1128 qp->sq_no_prefetch = 0; in create_qp_common()
1131 qp->flags |= MLX4_IB_QP_LSO; in create_qp_common()
1136 qp->flags |= MLX4_IB_QP_NETIF; in create_qp_common()
1143 err = set_kernel_sq_size(dev, &init_attr->cap, qp_type, qp); in create_qp_common()
1148 err = mlx4_db_alloc(dev->dev, &qp->db, 0); in create_qp_common()
1152 *qp->db.db = 0; in create_qp_common()
1155 if (mlx4_buf_alloc(dev->dev, qp->buf_size, PAGE_SIZE * 2, in create_qp_common()
1156 &qp->buf)) { in create_qp_common()
1161 err = mlx4_mtt_init(dev->dev, qp->buf.npages, qp->buf.page_shift, in create_qp_common()
1162 &qp->mtt); in create_qp_common()
1166 err = mlx4_buf_write_mtt(dev->dev, &qp->mtt, &qp->buf); in create_qp_common()
1170 qp->sq.wrid = kvmalloc_array(qp->sq.wqe_cnt, in create_qp_common()
1172 qp->rq.wrid = kvmalloc_array(qp->rq.wqe_cnt, in create_qp_common()
1174 if (!qp->sq.wrid || !qp->rq.wrid) { in create_qp_common()
1178 qp->mqp.usage = MLX4_RES_USAGE_DRIVER; in create_qp_common()
1182 if (qp->mlx4_ib_qp_type & (MLX4_IB_QPT_PROXY_SMI_OWNER | in create_qp_common()
1184 if (alloc_proxy_bufs(pd->device, qp)) { in create_qp_common()
1199 qp->mqp.usage); in create_qp_common()
1201 if (qp->flags & MLX4_IB_QP_NETIF) in create_qp_common()
1205 &qpn, 0, qp->mqp.usage); in create_qp_common()
1211 qp->flags |= MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK; in create_qp_common()
1213 err = mlx4_qp_alloc(dev->dev, qpn, &qp->mqp); in create_qp_common()
1218 qp->mqp.qpn |= (1 << 23); in create_qp_common()
1225 qp->doorbell_qpn = swab32(qp->mqp.qpn << 8); in create_qp_common()
1227 qp->mqp.event = mlx4_ib_qp_event; in create_qp_common()
1235 list_add_tail(&qp->qps_list, &dev->qp_list); in create_qp_common()
1240 list_add_tail(&qp->cq_send_list, &mcq->send_qp_list); in create_qp_common()
1242 list_add_tail(&qp->cq_recv_list, &mcq->recv_qp_list); in create_qp_common()
1250 if (qp->flags & MLX4_IB_QP_NETIF) in create_qp_common()
1256 if (qp->mlx4_ib_qp_type == MLX4_IB_QPT_PROXY_GSI) in create_qp_common()
1257 free_proxy_bufs(pd->device, qp); in create_qp_common()
1261 mlx4_ib_db_unmap_user(context, &qp->db); in create_qp_common()
1263 kvfree(qp->sq.wrid); in create_qp_common()
1264 kvfree(qp->rq.wrid); in create_qp_common()
1268 mlx4_mtt_cleanup(dev->dev, &qp->mtt); in create_qp_common()
1271 if (!qp->umem) in create_qp_common()
1272 mlx4_buf_free(dev->dev, qp->buf_size, &qp->buf); in create_qp_common()
1273 ib_umem_release(qp->umem); in create_qp_common()
1277 mlx4_db_free(dev->dev, &qp->db); in create_qp_common()
1280 kfree(qp->sqp); in create_qp_common()
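The error lines listed above for create_qp_common() form the usual layered goto unwind: each label releases exactly the resources acquired before the failing step, in reverse order (proxy buffers, doorbell mapping, wrid arrays, MTTs, buffer, doorbell, sqp). A minimal stand-alone illustration of the idiom, with three mallocs standing in for the driver's resources:

    /* Minimal sketch of the layered goto-unwind used in create_qp_common(). */
    #include <stdlib.h>

    static int setup(void **pa, void **pb, void **pc)
    {
        *pa = malloc(64);
        if (!*pa)
            goto err;
        *pb = malloc(64);
        if (!*pb)
            goto err_a;
        *pc = malloc(64);
        if (!*pc)
            goto err_b;
        return 0;   /* success: caller owns all three */

    err_b:
        free(*pb);  /* undo step 2 ... */
    err_a:
        free(*pa);  /* ... then step 1 */
    err:
        return -1;
    }

    int main(void)
    {
        void *a, *b, *c;

        if (setup(&a, &b, &c))
            return 1;
        free(c);
        free(b);
        free(a);
        return 0;
    }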
1328 static void del_gid_entries(struct mlx4_ib_qp *qp) in del_gid_entries() argument
1332 list_for_each_entry_safe(ge, tmp, &qp->gid_list, list) { in del_gid_entries()
1338 static struct mlx4_ib_pd *get_pd(struct mlx4_ib_qp *qp) in get_pd() argument
1340 if (qp->ibqp.qp_type == IB_QPT_XRC_TGT) in get_pd()
1341 return to_mpd(to_mxrcd(qp->ibqp.xrcd)->pd); in get_pd()
1343 return to_mpd(qp->ibqp.pd); in get_pd()
1346 static void get_cqs(struct mlx4_ib_qp *qp, enum mlx4_ib_source_type src, in get_cqs() argument
1349 switch (qp->ibqp.qp_type) { in get_cqs()
1351 *send_cq = to_mcq(to_mxrcd(qp->ibqp.xrcd)->cq); in get_cqs()
1355 *send_cq = to_mcq(qp->ibqp.send_cq); in get_cqs()
1359 *recv_cq = (src == MLX4_IB_QP_SRC) ? to_mcq(qp->ibqp.recv_cq) : in get_cqs()
1360 to_mcq(qp->ibwq.cq); in get_cqs()
1361 *send_cq = (src == MLX4_IB_QP_SRC) ? to_mcq(qp->ibqp.send_cq) : in get_cqs()
1367 static void destroy_qp_rss(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp) in destroy_qp_rss() argument
1369 if (qp->state != IB_QPS_RESET) { in destroy_qp_rss()
1372 for (i = 0; i < (1 << qp->ibqp.rwq_ind_tbl->log_ind_tbl_size); in destroy_qp_rss()
1374 struct ib_wq *ibwq = qp->ibqp.rwq_ind_tbl->ind_tbl[i]; in destroy_qp_rss()
1384 if (mlx4_qp_modify(dev->dev, NULL, to_mlx4_state(qp->state), in destroy_qp_rss()
1385 MLX4_QP_STATE_RST, NULL, 0, 0, &qp->mqp)) in destroy_qp_rss()
1387 qp->mqp.qpn); in destroy_qp_rss()
1390 mlx4_qp_remove(dev->dev, &qp->mqp); in destroy_qp_rss()
1391 mlx4_qp_free(dev->dev, &qp->mqp); in destroy_qp_rss()
1392 mlx4_qp_release_range(dev->dev, qp->mqp.qpn, 1); in destroy_qp_rss()
1393 del_gid_entries(qp); in destroy_qp_rss()
1396 static void destroy_qp_common(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp, in destroy_qp_common() argument
1403 if (qp->state != IB_QPS_RESET) { in destroy_qp_common()
1404 if (mlx4_qp_modify(dev->dev, NULL, to_mlx4_state(qp->state), in destroy_qp_common()
1405 MLX4_QP_STATE_RST, NULL, 0, 0, &qp->mqp)) in destroy_qp_common()
1407 qp->mqp.qpn); in destroy_qp_common()
1408 if (qp->pri.smac || (!qp->pri.smac && qp->pri.smac_port)) { in destroy_qp_common()
1409 mlx4_unregister_mac(dev->dev, qp->pri.smac_port, qp->pri.smac); in destroy_qp_common()
1410 qp->pri.smac = 0; in destroy_qp_common()
1411 qp->pri.smac_port = 0; in destroy_qp_common()
1413 if (qp->alt.smac) { in destroy_qp_common()
1414 mlx4_unregister_mac(dev->dev, qp->alt.smac_port, qp->alt.smac); in destroy_qp_common()
1415 qp->alt.smac = 0; in destroy_qp_common()
1417 if (qp->pri.vid < 0x1000) { in destroy_qp_common()
1418 mlx4_unregister_vlan(dev->dev, qp->pri.vlan_port, qp->pri.vid); in destroy_qp_common()
1419 qp->pri.vid = 0xFFFF; in destroy_qp_common()
1420 qp->pri.candidate_vid = 0xFFFF; in destroy_qp_common()
1421 qp->pri.update_vid = 0; in destroy_qp_common()
1423 if (qp->alt.vid < 0x1000) { in destroy_qp_common()
1424 mlx4_unregister_vlan(dev->dev, qp->alt.vlan_port, qp->alt.vid); in destroy_qp_common()
1425 qp->alt.vid = 0xFFFF; in destroy_qp_common()
1426 qp->alt.candidate_vid = 0xFFFF; in destroy_qp_common()
1427 qp->alt.update_vid = 0; in destroy_qp_common()
1431 get_cqs(qp, src, &send_cq, &recv_cq); in destroy_qp_common()
1437 list_del(&qp->qps_list); in destroy_qp_common()
1438 list_del(&qp->cq_send_list); in destroy_qp_common()
1439 list_del(&qp->cq_recv_list); in destroy_qp_common()
1441 __mlx4_ib_cq_clean(recv_cq, qp->mqp.qpn, in destroy_qp_common()
1442 qp->ibqp.srq ? to_msrq(qp->ibqp.srq): NULL); in destroy_qp_common()
1444 __mlx4_ib_cq_clean(send_cq, qp->mqp.qpn, NULL); in destroy_qp_common()
1447 mlx4_qp_remove(dev->dev, &qp->mqp); in destroy_qp_common()
1452 mlx4_qp_free(dev->dev, &qp->mqp); in destroy_qp_common()
1454 if (!is_sqp(dev, qp) && !is_tunnel_qp(dev, qp)) { in destroy_qp_common()
1455 if (qp->flags & MLX4_IB_QP_NETIF) in destroy_qp_common()
1456 mlx4_ib_steer_qp_free(dev, qp->mqp.qpn, 1); in destroy_qp_common()
1463 qp, 1); in destroy_qp_common()
1465 mlx4_qp_release_range(dev->dev, qp->mqp.qpn, 1); in destroy_qp_common()
1468 mlx4_mtt_cleanup(dev->dev, &qp->mtt); in destroy_qp_common()
1471 if (qp->rq.wqe_cnt) { in destroy_qp_common()
1478 mlx4_ib_db_unmap_user(mcontext, &qp->db); in destroy_qp_common()
1481 kvfree(qp->sq.wrid); in destroy_qp_common()
1482 kvfree(qp->rq.wrid); in destroy_qp_common()
1483 if (qp->mlx4_ib_qp_type & (MLX4_IB_QPT_PROXY_SMI_OWNER | in destroy_qp_common()
1485 free_proxy_bufs(&dev->ib_dev, qp); in destroy_qp_common()
1486 mlx4_buf_free(dev->dev, qp->buf_size, &qp->buf); in destroy_qp_common()
1487 if (qp->rq.wqe_cnt) in destroy_qp_common()
1488 mlx4_db_free(dev->dev, &qp->db); in destroy_qp_common()
1490 ib_umem_release(qp->umem); in destroy_qp_common()
1492 del_gid_entries(qp); in destroy_qp_common()
1512 static int _mlx4_ib_create_qp(struct ib_pd *pd, struct mlx4_ib_qp *qp, in _mlx4_ib_create_qp() argument
1521 return _mlx4_ib_create_qp_rss(pd, qp, init_attr, udata); in _mlx4_ib_create_qp()
1570 qp->pri.vid = 0xFFFF; in _mlx4_ib_create_qp()
1571 qp->alt.vid = 0xFFFF; in _mlx4_ib_create_qp()
1572 err = create_qp_common(pd, init_attr, udata, 0, qp); in _mlx4_ib_create_qp()
1576 qp->ibqp.qp_num = qp->mqp.qpn; in _mlx4_ib_create_qp()
1577 qp->xrcdn = xrcdn; in _mlx4_ib_create_qp()
1595 qp->pri.vid = 0xFFFF; in _mlx4_ib_create_qp()
1596 qp->alt.vid = 0xFFFF; in _mlx4_ib_create_qp()
1597 err = create_qp_common(pd, init_attr, udata, sqpn, qp); in _mlx4_ib_create_qp()
1604 rdma_restrack_no_track(&qp->ibqp.res); in _mlx4_ib_create_qp()
1606 qp->port = init_attr->port_num; in _mlx4_ib_create_qp()
1607 qp->ibqp.qp_num = init_attr->qp_type == IB_QPT_SMI ? 0 : in _mlx4_ib_create_qp()
1623 struct mlx4_ib_qp *qp = to_mqp(ibqp); in mlx4_ib_create_qp() local
1627 mutex_init(&qp->mutex); in mlx4_ib_create_qp()
1628 ret = _mlx4_ib_create_qp(pd, qp, init_attr, udata); in mlx4_ib_create_qp()
1634 struct mlx4_ib_sqp *sqp = qp->sqp; in mlx4_ib_create_qp()
1656 static int _mlx4_ib_destroy_qp(struct ib_qp *qp, struct ib_udata *udata) in _mlx4_ib_destroy_qp() argument
1658 struct mlx4_ib_dev *dev = to_mdev(qp->device); in _mlx4_ib_destroy_qp()
1659 struct mlx4_ib_qp *mqp = to_mqp(qp); in _mlx4_ib_destroy_qp()
1674 if (qp->rwq_ind_tbl) { in _mlx4_ib_destroy_qp()
1684 int mlx4_ib_destroy_qp(struct ib_qp *qp, struct ib_udata *udata) in mlx4_ib_destroy_qp() argument
1686 struct mlx4_ib_qp *mqp = to_mqp(qp); in mlx4_ib_destroy_qp()
1695 return _mlx4_ib_destroy_qp(qp, udata); in mlx4_ib_destroy_qp()
1722 static __be32 to_mlx4_access_flags(struct mlx4_ib_qp *qp, const struct ib_qp_attr *attr, in to_mlx4_access_flags() argument
1732 dest_rd_atomic = qp->resp_depth; in to_mlx4_access_flags()
1737 access_flags = qp->atomic_rd_en; in to_mlx4_access_flags()
1886 static int mlx4_set_path(struct mlx4_ib_dev *dev, const struct ib_qp_attr *qp, in mlx4_set_path() argument
1892 return _mlx4_set_path(dev, &qp->ah_attr, in mlx4_set_path()
1899 const struct ib_qp_attr *qp, in mlx4_set_alt_path() argument
1904 return _mlx4_set_path(dev, &qp->alt_ah_attr, in mlx4_set_alt_path()
1910 static void update_mcg_macs(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp) in update_mcg_macs() argument
1914 list_for_each_entry_safe(ge, tmp, &qp->gid_list, list) { in update_mcg_macs()
1915 if (!ge->added && mlx4_ib_add_mc(dev, qp, &ge->gid)) { in update_mcg_macs()
1917 ge->port = qp->port; in update_mcg_macs()
1923 struct mlx4_ib_qp *qp, in handle_eth_ud_smac_index() argument
1929 u64_mac = atomic64_read(&dev->iboe.mac[qp->port - 1]); in handle_eth_ud_smac_index()
1931 context->pri_path.sched_queue = MLX4_IB_DEFAULT_SCHED_QUEUE | ((qp->port - 1) << 6); in handle_eth_ud_smac_index()
1932 if (!qp->pri.smac && !qp->pri.smac_port) { in handle_eth_ud_smac_index()
1933 smac_index = mlx4_register_mac(dev->dev, qp->port, u64_mac); in handle_eth_ud_smac_index()
1935 qp->pri.candidate_smac_index = smac_index; in handle_eth_ud_smac_index()
1936 qp->pri.candidate_smac = u64_mac; in handle_eth_ud_smac_index()
1937 qp->pri.candidate_smac_port = qp->port; in handle_eth_ud_smac_index()
1946 static int create_qp_lb_counter(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp) in create_qp_lb_counter() argument
1952 if (rdma_port_get_link_layer(&dev->ib_dev, qp->port) != in create_qp_lb_counter()
1954 !(qp->flags & MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK) || in create_qp_lb_counter()
1970 qp->counter_index = new_counter_index; in create_qp_lb_counter()
1972 mutex_lock(&dev->counters_table[qp->port - 1].mutex); in create_qp_lb_counter()
1974 &dev->counters_table[qp->port - 1].counters_list); in create_qp_lb_counter()
1975 mutex_unlock(&dev->counters_table[qp->port - 1].mutex); in create_qp_lb_counter()
2084 struct mlx4_ib_qp *qp) in fill_qp_rss_context() argument
2091 rss_context->base_qpn = cpu_to_be32(qp->rss_ctx->base_qpn_tbl_sz); in fill_qp_rss_context()
2093 cpu_to_be32(qp->rss_ctx->base_qpn_tbl_sz & 0xffffff); in fill_qp_rss_context()
2094 if (qp->rss_ctx->flags & (MLX4_RSS_UDP_IPV4 | MLX4_RSS_UDP_IPV6)) in fill_qp_rss_context()
2096 rss_context->flags = qp->rss_ctx->flags; in fill_qp_rss_context()
2100 memcpy(rss_context->rss_key, qp->rss_ctx->rss_key, in fill_qp_rss_context()
2115 struct mlx4_ib_qp *qp; in __mlx4_ib_modify_qp() local
2134 qp = to_mqp((struct ib_qp *)ibwq); in __mlx4_ib_modify_qp()
2144 qp = to_mqp(ibqp); in __mlx4_ib_modify_qp()
2146 pd = get_pd(qp); in __mlx4_ib_modify_qp()
2151 rdma_port_get_link_layer(&dev->ib_dev, qp->port) == in __mlx4_ib_modify_qp()
2160 (to_mlx4_st(dev, qp->mlx4_ib_qp_type) << 16)); in __mlx4_ib_modify_qp()
2179 if (qp->inl_recv_sz) in __mlx4_ib_modify_qp()
2182 if (qp->flags & MLX4_IB_QP_SCATTER_FCS) in __mlx4_ib_modify_qp()
2190 if (qp->flags & MLX4_IB_QP_LSO) in __mlx4_ib_modify_qp()
2206 if (qp->rq.wqe_cnt) in __mlx4_ib_modify_qp()
2207 context->rq_size_stride = ilog2(qp->rq.wqe_cnt) << 3; in __mlx4_ib_modify_qp()
2208 context->rq_size_stride |= qp->rq.wqe_shift - 4; in __mlx4_ib_modify_qp()
2211 if (qp->sq.wqe_cnt) in __mlx4_ib_modify_qp()
2212 context->sq_size_stride = ilog2(qp->sq.wqe_cnt) << 3; in __mlx4_ib_modify_qp()
2213 context->sq_size_stride |= qp->sq.wqe_shift - 4; in __mlx4_ib_modify_qp()
2215 if (new_state == IB_QPS_RESET && qp->counter_index) in __mlx4_ib_modify_qp()
2216 mlx4_ib_free_qp_counter(dev, qp); in __mlx4_ib_modify_qp()
2219 context->sq_size_stride |= !!qp->sq_no_prefetch << 7; in __mlx4_ib_modify_qp()
2220 context->xrcd = cpu_to_be32((u32) qp->xrcdn); in __mlx4_ib_modify_qp()
2244 err = create_qp_lb_counter(dev, qp); in __mlx4_ib_modify_qp()
2249 dev->counters_table[qp->port - 1].default_counter; in __mlx4_ib_modify_qp()
2250 if (qp->counter_index) in __mlx4_ib_modify_qp()
2251 counter_index = qp->counter_index->index; in __mlx4_ib_modify_qp()
2256 if (qp->counter_index) { in __mlx4_ib_modify_qp()
2266 if (qp->flags & MLX4_IB_QP_NETIF) { in __mlx4_ib_modify_qp()
2267 mlx4_ib_steer_qp_reg(dev, qp, 1); in __mlx4_ib_modify_qp()
2272 enum ib_gid_type gid_type = qp->flags & MLX4_IB_ROCE_V2_GSI_QP ? in __mlx4_ib_modify_qp()
2281 if (qp->mlx4_ib_qp_type & MLX4_IB_QPT_ANY_SRIOV) in __mlx4_ib_modify_qp()
2289 attr_mask & IB_QP_PORT ? attr->port_num : qp->port; in __mlx4_ib_modify_qp()
2304 if (mlx4_set_path(dev, attr, attr_mask, qp, &context->pri_path, in __mlx4_ib_modify_qp()
2338 if (mlx4_set_alt_path(dev, attr, attr_mask, qp, in __mlx4_ib_modify_qp()
2352 get_cqs(qp, src_type, &send_cq, &recv_cq); in __mlx4_ib_modify_qp()
2392 context->params2 |= to_mlx4_access_flags(qp, attr, attr_mask); in __mlx4_ib_modify_qp()
2408 if (qp->mlx4_ib_qp_type & in __mlx4_ib_modify_qp()
2413 !(qp->mlx4_ib_qp_type & MLX4_IB_QPT_ANY_SRIOV) && in __mlx4_ib_modify_qp()
2431 if (qp->rq.wqe_cnt && in __mlx4_ib_modify_qp()
2434 context->db_rec_addr = cpu_to_be64(qp->db.dma); in __mlx4_ib_modify_qp()
2440 context->pri_path.sched_queue = (qp->port - 1) << 6; in __mlx4_ib_modify_qp()
2441 if (qp->mlx4_ib_qp_type == MLX4_IB_QPT_SMI || in __mlx4_ib_modify_qp()
2442 qp->mlx4_ib_qp_type & in __mlx4_ib_modify_qp()
2445 if (qp->mlx4_ib_qp_type != MLX4_IB_QPT_SMI) in __mlx4_ib_modify_qp()
2448 if (qp->mlx4_ib_qp_type & MLX4_IB_QPT_ANY_SRIOV) in __mlx4_ib_modify_qp()
2452 if (rdma_port_get_link_layer(&dev->ib_dev, qp->port) == in __mlx4_ib_modify_qp()
2454 if (qp->mlx4_ib_qp_type == MLX4_IB_QPT_TUN_GSI || in __mlx4_ib_modify_qp()
2455 qp->mlx4_ib_qp_type == MLX4_IB_QPT_GSI) in __mlx4_ib_modify_qp()
2458 if (qp->mlx4_ib_qp_type == MLX4_IB_QPT_UD || in __mlx4_ib_modify_qp()
2459 qp->mlx4_ib_qp_type == MLX4_IB_QPT_PROXY_GSI || in __mlx4_ib_modify_qp()
2460 qp->mlx4_ib_qp_type == MLX4_IB_QPT_TUN_GSI) { in __mlx4_ib_modify_qp()
2461 err = handle_eth_ud_smac_index(dev, qp, context); in __mlx4_ib_modify_qp()
2466 if (qp->mlx4_ib_qp_type == MLX4_IB_QPT_PROXY_GSI) in __mlx4_ib_modify_qp()
2467 dev->qp1_proxy[qp->port - 1] = qp; in __mlx4_ib_modify_qp()
2484 &dev->ib_dev, qp->port) == in __mlx4_ib_modify_qp()
2515 for (i = 0; i < qp->sq.wqe_cnt; ++i) { in __mlx4_ib_modify_qp()
2516 ctrl = get_send_wqe(qp, i); in __mlx4_ib_modify_qp()
2519 1 << (qp->sq.wqe_shift - 4); in __mlx4_ib_modify_qp()
2520 stamp_send_wqe(qp, i); in __mlx4_ib_modify_qp()
2527 fill_qp_rss_context(context, qp); in __mlx4_ib_modify_qp()
2531 err = mlx4_qp_modify(dev->dev, &qp->mtt, to_mlx4_state(cur_state), in __mlx4_ib_modify_qp()
2533 sqd_event, &qp->mqp); in __mlx4_ib_modify_qp()
2537 qp->state = new_state; in __mlx4_ib_modify_qp()
2540 qp->atomic_rd_en = attr->qp_access_flags; in __mlx4_ib_modify_qp()
2542 qp->resp_depth = attr->max_dest_rd_atomic; in __mlx4_ib_modify_qp()
2544 qp->port = attr->port_num; in __mlx4_ib_modify_qp()
2545 update_mcg_macs(dev, qp); in __mlx4_ib_modify_qp()
2548 qp->alt_port = attr->alt_port_num; in __mlx4_ib_modify_qp()
2550 if (is_sqp(dev, qp)) in __mlx4_ib_modify_qp()
2551 store_sqp_attrs(qp->sqp, attr, attr_mask); in __mlx4_ib_modify_qp()
2557 if (is_qp0(dev, qp)) { in __mlx4_ib_modify_qp()
2559 if (mlx4_INIT_PORT(dev->dev, qp->port)) in __mlx4_ib_modify_qp()
2561 qp->port); in __mlx4_ib_modify_qp()
2565 mlx4_CLOSE_PORT(dev->dev, qp->port); in __mlx4_ib_modify_qp()
2574 mlx4_ib_cq_clean(recv_cq, qp->mqp.qpn, in __mlx4_ib_modify_qp()
2577 mlx4_ib_cq_clean(send_cq, qp->mqp.qpn, NULL); in __mlx4_ib_modify_qp()
2579 qp->rq.head = 0; in __mlx4_ib_modify_qp()
2580 qp->rq.tail = 0; in __mlx4_ib_modify_qp()
2581 qp->sq.head = 0; in __mlx4_ib_modify_qp()
2582 qp->sq.tail = 0; in __mlx4_ib_modify_qp()
2583 qp->sq_next_wqe = 0; in __mlx4_ib_modify_qp()
2584 if (qp->rq.wqe_cnt) in __mlx4_ib_modify_qp()
2585 *qp->db.db = 0; in __mlx4_ib_modify_qp()
2587 if (qp->flags & MLX4_IB_QP_NETIF) in __mlx4_ib_modify_qp()
2588 mlx4_ib_steer_qp_reg(dev, qp, 0); in __mlx4_ib_modify_qp()
2590 if (qp->pri.smac || (!qp->pri.smac && qp->pri.smac_port)) { in __mlx4_ib_modify_qp()
2591 mlx4_unregister_mac(dev->dev, qp->pri.smac_port, qp->pri.smac); in __mlx4_ib_modify_qp()
2592 qp->pri.smac = 0; in __mlx4_ib_modify_qp()
2593 qp->pri.smac_port = 0; in __mlx4_ib_modify_qp()
2595 if (qp->alt.smac) { in __mlx4_ib_modify_qp()
2596 mlx4_unregister_mac(dev->dev, qp->alt.smac_port, qp->alt.smac); in __mlx4_ib_modify_qp()
2597 qp->alt.smac = 0; in __mlx4_ib_modify_qp()
2599 if (qp->pri.vid < 0x1000) { in __mlx4_ib_modify_qp()
2600 mlx4_unregister_vlan(dev->dev, qp->pri.vlan_port, qp->pri.vid); in __mlx4_ib_modify_qp()
2601 qp->pri.vid = 0xFFFF; in __mlx4_ib_modify_qp()
2602 qp->pri.candidate_vid = 0xFFFF; in __mlx4_ib_modify_qp()
2603 qp->pri.update_vid = 0; in __mlx4_ib_modify_qp()
2606 if (qp->alt.vid < 0x1000) { in __mlx4_ib_modify_qp()
2607 mlx4_unregister_vlan(dev->dev, qp->alt.vlan_port, qp->alt.vid); in __mlx4_ib_modify_qp()
2608 qp->alt.vid = 0xFFFF; in __mlx4_ib_modify_qp()
2609 qp->alt.candidate_vid = 0xFFFF; in __mlx4_ib_modify_qp()
2610 qp->alt.update_vid = 0; in __mlx4_ib_modify_qp()
2614 if (err && qp->counter_index) in __mlx4_ib_modify_qp()
2615 mlx4_ib_free_qp_counter(dev, qp); in __mlx4_ib_modify_qp()
2617 mlx4_ib_steer_qp_reg(dev, qp, 0); in __mlx4_ib_modify_qp()
2619 if (qp->pri.candidate_smac || in __mlx4_ib_modify_qp()
2620 (!qp->pri.candidate_smac && qp->pri.candidate_smac_port)) { in __mlx4_ib_modify_qp()
2622 mlx4_unregister_mac(dev->dev, qp->pri.candidate_smac_port, qp->pri.candidate_smac); in __mlx4_ib_modify_qp()
2624 if (qp->pri.smac || (!qp->pri.smac && qp->pri.smac_port)) in __mlx4_ib_modify_qp()
2625 mlx4_unregister_mac(dev->dev, qp->pri.smac_port, qp->pri.smac); in __mlx4_ib_modify_qp()
2626 qp->pri.smac = qp->pri.candidate_smac; in __mlx4_ib_modify_qp()
2627 qp->pri.smac_index = qp->pri.candidate_smac_index; in __mlx4_ib_modify_qp()
2628 qp->pri.smac_port = qp->pri.candidate_smac_port; in __mlx4_ib_modify_qp()
2630 qp->pri.candidate_smac = 0; in __mlx4_ib_modify_qp()
2631 qp->pri.candidate_smac_index = 0; in __mlx4_ib_modify_qp()
2632 qp->pri.candidate_smac_port = 0; in __mlx4_ib_modify_qp()
2634 if (qp->alt.candidate_smac) { in __mlx4_ib_modify_qp()
2636 mlx4_unregister_mac(dev->dev, qp->alt.candidate_smac_port, qp->alt.candidate_smac); in __mlx4_ib_modify_qp()
2638 if (qp->alt.smac) in __mlx4_ib_modify_qp()
2639 mlx4_unregister_mac(dev->dev, qp->alt.smac_port, qp->alt.smac); in __mlx4_ib_modify_qp()
2640 qp->alt.smac = qp->alt.candidate_smac; in __mlx4_ib_modify_qp()
2641 qp->alt.smac_index = qp->alt.candidate_smac_index; in __mlx4_ib_modify_qp()
2642 qp->alt.smac_port = qp->alt.candidate_smac_port; in __mlx4_ib_modify_qp()
2644 qp->alt.candidate_smac = 0; in __mlx4_ib_modify_qp()
2645 qp->alt.candidate_smac_index = 0; in __mlx4_ib_modify_qp()
2646 qp->alt.candidate_smac_port = 0; in __mlx4_ib_modify_qp()
2649 if (qp->pri.update_vid) { in __mlx4_ib_modify_qp()
2651 if (qp->pri.candidate_vid < 0x1000) in __mlx4_ib_modify_qp()
2652 mlx4_unregister_vlan(dev->dev, qp->pri.candidate_vlan_port, in __mlx4_ib_modify_qp()
2653 qp->pri.candidate_vid); in __mlx4_ib_modify_qp()
2655 if (qp->pri.vid < 0x1000) in __mlx4_ib_modify_qp()
2656 mlx4_unregister_vlan(dev->dev, qp->pri.vlan_port, in __mlx4_ib_modify_qp()
2657 qp->pri.vid); in __mlx4_ib_modify_qp()
2658 qp->pri.vid = qp->pri.candidate_vid; in __mlx4_ib_modify_qp()
2659 qp->pri.vlan_port = qp->pri.candidate_vlan_port; in __mlx4_ib_modify_qp()
2660 qp->pri.vlan_index = qp->pri.candidate_vlan_index; in __mlx4_ib_modify_qp()
2662 qp->pri.candidate_vid = 0xFFFF; in __mlx4_ib_modify_qp()
2663 qp->pri.update_vid = 0; in __mlx4_ib_modify_qp()
2666 if (qp->alt.update_vid) { in __mlx4_ib_modify_qp()
2668 if (qp->alt.candidate_vid < 0x1000) in __mlx4_ib_modify_qp()
2669 mlx4_unregister_vlan(dev->dev, qp->alt.candidate_vlan_port, in __mlx4_ib_modify_qp()
2670 qp->alt.candidate_vid); in __mlx4_ib_modify_qp()
2672 if (qp->alt.vid < 0x1000) in __mlx4_ib_modify_qp()
2673 mlx4_unregister_vlan(dev->dev, qp->alt.vlan_port, in __mlx4_ib_modify_qp()
2674 qp->alt.vid); in __mlx4_ib_modify_qp()
2675 qp->alt.vid = qp->alt.candidate_vid; in __mlx4_ib_modify_qp()
2676 qp->alt.vlan_port = qp->alt.candidate_vlan_port; in __mlx4_ib_modify_qp()
2677 qp->alt.vlan_index = qp->alt.candidate_vlan_index; in __mlx4_ib_modify_qp()
2679 qp->alt.candidate_vid = 0xFFFF; in __mlx4_ib_modify_qp()
2680 qp->alt.update_vid = 0; in __mlx4_ib_modify_qp()
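Among the __mlx4_ib_modify_qp() lines above, the rq_size_stride/sq_size_stride assignments pack each ring's geometry into a single byte: log2 of the WQE count shifted left by 3, OR'd with the stride exponent minus 4. A stand-alone sketch of that packing with an invented ring geometry:

    /* Sketch of the size/stride byte packing from __mlx4_ib_modify_qp(). */
    #include <stdio.h>

    static unsigned int ilog2_u(unsigned int v)
    {
        unsigned int l = 0;
        while (v >>= 1)
            l++;
        return l;
    }

    int main(void)
    {
        unsigned int rq_wqe_cnt = 256, rq_wqe_shift = 6;  /* invented geometry */
        unsigned char rq_size_stride = 0;

        if (rq_wqe_cnt)
            rq_size_stride = ilog2_u(rq_wqe_cnt) << 3;
        rq_size_stride |= rq_wqe_shift - 4;

        printf("rq_size_stride=0x%02x (log2 cnt=%u, stride=%u bytes)\n",
               rq_size_stride, ilog2_u(rq_wqe_cnt), 1u << rq_wqe_shift);
        return 0;
    }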
2695 struct mlx4_ib_qp *qp = to_mqp(ibqp); in _mlx4_ib_modify_qp() local
2698 mutex_lock(&qp->mutex); in _mlx4_ib_modify_qp()
2700 cur_state = attr_mask & IB_QP_CUR_STATE ? attr->cur_qp_state : qp->state; in _mlx4_ib_modify_qp()
2765 int p = attr_mask & IB_QP_PORT ? attr->port_num : qp->port; in _mlx4_ib_modify_qp()
2815 mutex_unlock(&qp->mutex); in _mlx4_ib_modify_qp()
2856 static int build_sriov_qp0_header(struct mlx4_ib_qp *qp, in build_sriov_qp0_header() argument
2860 struct mlx4_ib_dev *mdev = to_mdev(qp->ibqp.device); in build_sriov_qp0_header()
2861 struct mlx4_ib_sqp *sqp = qp->sqp; in build_sriov_qp0_header()
2862 struct ib_device *ib_dev = qp->ibqp.device; in build_sriov_qp0_header()
2884 if (qp->mlx4_ib_qp_type == MLX4_IB_QPT_PROXY_SMI_OWNER) in build_sriov_qp0_header()
2889 if (qp->mlx4_ib_qp_type == MLX4_IB_QPT_PROXY_SMI_OWNER) { in build_sriov_qp0_header()
2906 err = ib_get_cached_pkey(ib_dev, qp->port, 0, &pkey); in build_sriov_qp0_header()
2910 if (qp->mlx4_ib_qp_type == MLX4_IB_QPT_TUN_SMI_OWNER) in build_sriov_qp0_header()
2914 cpu_to_be32(mdev->dev->caps.spec_qps[qp->port - 1].qp0_tunnel); in build_sriov_qp0_header()
2918 if (mlx4_get_parav_qkey(mdev->dev, qp->mqp.qpn, &qkey)) in build_sriov_qp0_header()
2921 if (vf_get_qp0_qkey(mdev->dev, qp->mqp.qpn, &qkey)) in build_sriov_qp0_header()
2925 sqp->ud_header.deth.source_qpn = cpu_to_be32(qp->mqp.qpn); in build_sriov_qp0_header()
3009 static int build_mlx_header(struct mlx4_ib_qp *qp, const struct ib_ud_wr *wr, in build_mlx_header() argument
3012 struct mlx4_ib_sqp *sqp = qp->sqp; in build_mlx_header()
3013 struct ib_device *ib_dev = qp->ibqp.device; in build_mlx_header()
3037 is_eth = rdma_port_get_link_layer(qp->ibqp.device, qp->port) == IB_LINK_LAYER_ETHERNET; in build_mlx_header()
3051 err = fill_gid_by_hw_index(ibdev, qp->port, in build_mlx_header()
3103 .demux[qp->port - 1] in build_mlx_header()
3108 ->sriov.demux[qp->port - 1] in build_mlx_header()
3142 cpu_to_be32((!qp->ibqp.qp_num ? MLX4_WQE_MLX_VL15 : 0) | in build_mlx_header()
3191 !qp->ibqp.qp_num ? in build_mlx_header()
3195 qp->port); in build_mlx_header()
3196 if (qp->ibqp.qp_num && sqp->ud_header.lrh.virtual_lane == 15) in build_mlx_header()
3202 if (!qp->ibqp.qp_num) in build_mlx_header()
3203 err = ib_get_cached_pkey(ib_dev, qp->port, sqp->pkey_index, in build_mlx_header()
3206 err = ib_get_cached_pkey(ib_dev, qp->port, wr->pkey_index, in build_mlx_header()
3216 sqp->ud_header.deth.source_qpn = cpu_to_be32(qp->ibqp.qp_num); in build_mlx_header()
3477 const struct ib_ud_wr *wr, struct mlx4_ib_qp *qp, in build_lso_seg() argument
3485 if (unlikely(!(qp->flags & MLX4_IB_QP_LSO) && in build_lso_seg()
3486 wr->wr.num_sge > qp->sq.max_gs - (halign >> 4))) in build_lso_seg()
3521 struct mlx4_ib_qp *qp = to_mqp(ibqp); in _mlx4_ib_post_send() local
3538 if (qp->mlx4_ib_qp_type == MLX4_IB_QPT_GSI) { in _mlx4_ib_post_send()
3539 struct mlx4_ib_sqp *sqp = qp->sqp; in _mlx4_ib_post_send()
3546 if (!fill_gid_by_hw_index(mdev, qp->port, in _mlx4_ib_post_send()
3549 qp = (gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP) ? in _mlx4_ib_post_send()
3550 to_mqp(sqp->roce_v2_gsi) : qp; in _mlx4_ib_post_send()
3557 spin_lock_irqsave(&qp->sq.lock, flags); in _mlx4_ib_post_send()
3566 ind = qp->sq_next_wqe; in _mlx4_ib_post_send()
3572 if (mlx4_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)) { in _mlx4_ib_post_send()
3578 if (unlikely(wr->num_sge > qp->sq.max_gs)) { in _mlx4_ib_post_send()
3584 ctrl = wqe = get_send_wqe(qp, ind & (qp->sq.wqe_cnt - 1)); in _mlx4_ib_post_send()
3585 qp->sq.wrid[(qp->sq.head + nreq) & (qp->sq.wqe_cnt - 1)] = wr->wr_id; in _mlx4_ib_post_send()
3595 qp->sq_signal_bits; in _mlx4_ib_post_send()
3602 switch (qp->mlx4_ib_qp_type) { in _mlx4_ib_post_send()
3666 err = build_sriov_qp0_header(qp, ud_wr(wr), ctrl, in _mlx4_ib_post_send()
3690 err = build_lso_seg(wqe, ud_wr(wr), qp, &seglen, in _mlx4_ib_post_send()
3703 err = build_sriov_qp0_header(qp, ud_wr(wr), ctrl, in _mlx4_ib_post_send()
3727 qp->mlx4_ib_qp_type); in _mlx4_ib_post_send()
3737 err = build_mlx_header(qp, ud_wr(wr), ctrl, &seglen); in _mlx4_ib_post_send()
3762 if (unlikely(qp->mlx4_ib_qp_type == MLX4_IB_QPT_SMI || in _mlx4_ib_post_send()
3763 qp->mlx4_ib_qp_type == MLX4_IB_QPT_GSI || in _mlx4_ib_post_send()
3764 qp->mlx4_ib_qp_type & in _mlx4_ib_post_send()
3798 (ind & qp->sq.wqe_cnt ? cpu_to_be32(1 << 31) : 0) | blh; in _mlx4_ib_post_send()
3806 stamp_send_wqe(qp, ind + qp->sq_spare_wqes); in _mlx4_ib_post_send()
3812 qp->sq.head += nreq; in _mlx4_ib_post_send()
3820 writel_relaxed(qp->doorbell_qpn, in _mlx4_ib_post_send()
3823 stamp_send_wqe(qp, ind + qp->sq_spare_wqes - 1); in _mlx4_ib_post_send()
3825 qp->sq_next_wqe = ind; in _mlx4_ib_post_send()
3828 spin_unlock_irqrestore(&qp->sq.lock, flags); in _mlx4_ib_post_send()
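The post-send lines above rely on the SQ being a power-of-two ring: (ind & (wqe_cnt - 1)) wraps the slot index, and (ind & wqe_cnt) flips once per lap, which is where the high ownership bit written into the WQE control word comes from. A small sketch of that indexing over two laps of an invented 8-entry ring:

    /* Sketch of the ring indexing in _mlx4_ib_post_send(). */
    #include <stdio.h>

    int main(void)
    {
        unsigned int wqe_cnt = 8;  /* invented ring size (power of two) */

        for (unsigned int ind = 0; ind < 2 * wqe_cnt; ind++) {
            unsigned int slot  = ind & (wqe_cnt - 1);
            unsigned int owner = (ind & wqe_cnt) ? 1 : 0;

            printf("ind=%2u slot=%u owner=%u\n", ind, slot, owner);
        }
        return 0;
    }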
3842 struct mlx4_ib_qp *qp = to_mqp(ibqp); in _mlx4_ib_post_recv() local
3852 max_gs = qp->rq.max_gs; in _mlx4_ib_post_recv()
3853 spin_lock_irqsave(&qp->rq.lock, flags); in _mlx4_ib_post_recv()
3863 ind = qp->rq.head & (qp->rq.wqe_cnt - 1); in _mlx4_ib_post_recv()
3866 if (mlx4_wq_overflow(&qp->rq, nreq, qp->ibqp.recv_cq)) { in _mlx4_ib_post_recv()
3872 if (unlikely(wr->num_sge > qp->rq.max_gs)) { in _mlx4_ib_post_recv()
3878 scat = get_recv_wqe(qp, ind); in _mlx4_ib_post_recv()
3880 if (qp->mlx4_ib_qp_type & (MLX4_IB_QPT_PROXY_SMI_OWNER | in _mlx4_ib_post_recv()
3883 qp->sqp_proxy_rcv[ind].map, in _mlx4_ib_post_recv()
3890 scat->addr = cpu_to_be64(qp->sqp_proxy_rcv[ind].map); in _mlx4_ib_post_recv()
3904 qp->rq.wrid[ind] = wr->wr_id; in _mlx4_ib_post_recv()
3906 ind = (ind + 1) & (qp->rq.wqe_cnt - 1); in _mlx4_ib_post_recv()
3911 qp->rq.head += nreq; in _mlx4_ib_post_recv()
3919 *qp->db.db = cpu_to_be32(qp->rq.head & 0xffff); in _mlx4_ib_post_recv()
3922 spin_unlock_irqrestore(&qp->rq.lock, flags); in _mlx4_ib_post_recv()
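At the end of _mlx4_ib_post_recv(), only the low 16 bits of the RQ producer head are published to the in-memory doorbell record, converted to big-endian with cpu_to_be32. A stand-alone sketch of that update; the head value is invented and __builtin_bswap32 models cpu_to_be32 on a little-endian host.

    /* Sketch of the RQ doorbell-record update in _mlx4_ib_post_recv(). */
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t rq_head = 0x12345;          /* invented producer count */
        volatile uint32_t db_rec;            /* stands in for *qp->db.db */

        db_rec = __builtin_bswap32(rq_head & 0xffff);  /* cpu_to_be32 on LE */

        printf("head=%u published=0x%08x\n", rq_head, (uint32_t)db_rec);
        return 0;
    }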
4010 struct mlx4_ib_qp *qp = to_mqp(ibqp); in mlx4_ib_query_qp() local
4018 mutex_lock(&qp->mutex); in mlx4_ib_query_qp()
4020 if (qp->state == IB_QPS_RESET) { in mlx4_ib_query_qp()
4025 err = mlx4_qp_query(dev->dev, &qp->mqp, &context); in mlx4_ib_query_qp()
4033 qp->state = to_ib_qp_state(mlx4_state); in mlx4_ib_query_qp()
4034 qp_attr->qp_state = qp->state; in mlx4_ib_query_qp()
4045 if (qp->ibqp.qp_type == IB_QPT_RC || qp->ibqp.qp_type == IB_QPT_UC || in mlx4_ib_query_qp()
4046 qp->ibqp.qp_type == IB_QPT_XRC_INI || in mlx4_ib_query_qp()
4047 qp->ibqp.qp_type == IB_QPT_XRC_TGT) { in mlx4_ib_query_qp()
4057 qp_attr->port_num = qp->port; in mlx4_ib_query_qp()
4077 qp_attr->cap.max_recv_wr = qp->rq.wqe_cnt; in mlx4_ib_query_qp()
4078 qp_attr->cap.max_recv_sge = qp->rq.max_gs; in mlx4_ib_query_qp()
4081 qp_attr->cap.max_send_wr = qp->sq.wqe_cnt; in mlx4_ib_query_qp()
4082 qp_attr->cap.max_send_sge = qp->sq.max_gs; in mlx4_ib_query_qp()
4097 if (qp->flags & MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK) in mlx4_ib_query_qp()
4100 if (qp->flags & MLX4_IB_QP_LSO) in mlx4_ib_query_qp()
4103 if (qp->flags & MLX4_IB_QP_NETIF) in mlx4_ib_query_qp()
4107 qp->sq_signal_bits == cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE) ? in mlx4_ib_query_qp()
4111 mutex_unlock(&qp->mutex); in mlx4_ib_query_qp()
4121 struct mlx4_ib_qp *qp; in mlx4_ib_create_wq() local
4157 qp = kzalloc(sizeof(*qp), GFP_KERNEL); in mlx4_ib_create_wq()
4158 if (!qp) in mlx4_ib_create_wq()
4161 mutex_init(&qp->mutex); in mlx4_ib_create_wq()
4162 qp->pri.vid = 0xFFFF; in mlx4_ib_create_wq()
4163 qp->alt.vid = 0xFFFF; in mlx4_ib_create_wq()
4175 err = create_rq(pd, &ib_qp_init_attr, udata, qp); in mlx4_ib_create_wq()
4177 kfree(qp); in mlx4_ib_create_wq()
4181 qp->ibwq.event_handler = init_attr->event_handler; in mlx4_ib_create_wq()
4182 qp->ibwq.wq_num = qp->mqp.qpn; in mlx4_ib_create_wq()
4183 qp->ibwq.state = IB_WQS_RESET; in mlx4_ib_create_wq()
4185 return &qp->ibwq; in mlx4_ib_create_wq()
4203 struct mlx4_ib_qp *qp = to_mqp((struct ib_qp *)ibwq); in _mlx4_ib_modify_wq() local
4212 qp_cur_state = qp->state; in _mlx4_ib_modify_wq()
4221 attr.port_num = qp->port; in _mlx4_ib_modify_wq()
4251 qp->state = qp_new_state; in _mlx4_ib_modify_wq()
4259 struct mlx4_ib_qp *qp = to_mqp((struct ib_qp *)ibwq); in mlx4_ib_modify_wq() local
4296 mutex_lock(&qp->mutex); in mlx4_ib_modify_wq()
4301 if (qp->rss_usecnt) in mlx4_ib_modify_wq()
4307 mutex_unlock(&qp->mutex); in mlx4_ib_modify_wq()
4315 struct mlx4_ib_qp *qp = to_mqp((struct ib_qp *)ibwq); in mlx4_ib_destroy_wq() local
4317 if (qp->counter_index) in mlx4_ib_destroy_wq()
4318 mlx4_ib_free_qp_counter(dev, qp); in mlx4_ib_destroy_wq()
4320 destroy_qp_common(dev, qp, MLX4_IB_RWQ_SRC, udata); in mlx4_ib_destroy_wq()
4322 kfree(qp); in mlx4_ib_destroy_wq()
4442 void mlx4_ib_drain_sq(struct ib_qp *qp) in mlx4_ib_drain_sq() argument
4444 struct ib_cq *cq = qp->send_cq; in mlx4_ib_drain_sq()
4456 struct mlx4_ib_dev *dev = to_mdev(qp->device); in mlx4_ib_drain_sq()
4459 ret = ib_modify_qp(qp, &attr, IB_QP_STATE); in mlx4_ib_drain_sq()
4468 ret = _mlx4_ib_post_send(qp, &swr.wr, &bad_swr, true); in mlx4_ib_drain_sq()
4477 void mlx4_ib_drain_rq(struct ib_qp *qp) in mlx4_ib_drain_rq() argument
4479 struct ib_cq *cq = qp->recv_cq; in mlx4_ib_drain_rq()
4485 struct mlx4_ib_dev *dev = to_mdev(qp->device); in mlx4_ib_drain_rq()
4488 ret = ib_modify_qp(qp, &attr, IB_QP_STATE); in mlx4_ib_drain_rq()
4498 ret = _mlx4_ib_post_recv(qp, &rwr, &bad_rwr, true); in mlx4_ib_drain_rq()