Lines Matching refs:wr (cross-reference hits for the wr work-request parameter in the mlx5 InfiniBand driver's send/receive posting path)

104 static void set_eth_seg(const struct ib_send_wr *wr, struct mlx5_ib_qp *qp,  in set_eth_seg()  argument
111 if (wr->send_flags & IB_SEND_IP_CSUM) in set_eth_seg()
115 if (wr->opcode == IB_WR_LSO) { in set_eth_seg()
116 struct ib_ud_wr *ud_wr = container_of(wr, struct ib_ud_wr, wr); in set_eth_seg()
153 const struct ib_send_wr *wr) in set_datagram_seg() argument
155 memcpy(&dseg->av, &to_mah(ud_wr(wr)->ah)->av, sizeof(struct mlx5_av)); in set_datagram_seg()
157 cpu_to_be32(ud_wr(wr)->remote_qpn | MLX5_EXTENDED_UD_AV); in set_datagram_seg()
158 dseg->av.key.qkey.qkey = cpu_to_be32(ud_wr(wr)->remote_qkey); in set_datagram_seg()
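
The container_of() hit at line 116 and the ud_wr() accessors at lines 155-158 both rely on the extended work-request pattern used throughout the RDMA core: the opcode-specific request embeds a plain struct ib_send_wr as a member, and the provider recovers the outer structure from the embedded pointer. A minimal sketch of that pattern follows; the helper name example_ud_wr is ours, the real accessor lives in include/rdma/ib_verbs.h.

#include <rdma/ib_verbs.h>

/*
 * Sketch only: mirrors the upstream ud_wr() accessor. A UD send is
 * posted as a struct ib_ud_wr whose "wr" member is the generic
 * struct ib_send_wr; the driver walks the generic pointer and uses
 * container_of() to reach the UD-specific fields (ah, remote_qpn,
 * remote_qkey) that set_datagram_seg() copies into the WQE.
 */
static inline const struct ib_ud_wr *example_ud_wr(const struct ib_send_wr *wr)
{
        return container_of(wr, struct ib_ud_wr, wr);
}
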
321 const struct ib_send_wr *wr) in set_reg_umr_segment() argument
323 const struct mlx5_umr_wr *umrwr = umr_wr(wr); in set_reg_umr_segment()
328 if (wr->send_flags & MLX5_IB_SEND_UMR_FAIL_IF_FREE) in set_reg_umr_segment()
337 if (wr->send_flags & MLX5_IB_SEND_UMR_UPDATE_XLT) { in set_reg_umr_segment()
344 if (wr->send_flags & MLX5_IB_SEND_UMR_UPDATE_TRANSLATION) in set_reg_umr_segment()
346 if (wr->send_flags & MLX5_IB_SEND_UMR_UPDATE_PD_ACCESS) { in set_reg_umr_segment()
353 if (wr->send_flags & MLX5_IB_SEND_UMR_ENABLE_MR) in set_reg_umr_segment()
355 if (wr->send_flags & MLX5_IB_SEND_UMR_DISABLE_MR) in set_reg_umr_segment()
358 if (!wr->num_sge) in set_reg_umr_segment()
403 const struct ib_send_wr *wr) in set_reg_mkey_segment() argument
405 const struct mlx5_umr_wr *umrwr = umr_wr(wr); in set_reg_mkey_segment()
408 if (wr->send_flags & MLX5_IB_SEND_UMR_DISABLE_MR) in set_reg_mkey_segment()
427 if (wr->send_flags & MLX5_IB_SEND_UMR_UPDATE_TRANSLATION && in set_reg_mkey_segment()
449 static __be32 send_ieth(const struct ib_send_wr *wr) in send_ieth() argument
451 switch (wr->opcode) { in send_ieth()
454 return wr->ex.imm_data; in send_ieth()
457 return cpu_to_be32(wr->ex.invalidate_rkey); in send_ieth()
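
The listing elides the case labels of send_ieth(); given the two ex.* fields it returns, a plausible reconstruction (matching the IB verbs opcodes that carry an immediate or invalidate header) looks like this. It is a sketch, not a verbatim copy of the file.

#include <rdma/ib_verbs.h>

/*
 * Plausible reconstruction: opcodes carrying immediate data return it
 * as-is (ex.imm_data is already big-endian in the WR), while
 * SEND-with-invalidate places the rkey to invalidate in the same dword.
 */
static __be32 example_send_ieth(const struct ib_send_wr *wr)
{
        switch (wr->opcode) {
        case IB_WR_SEND_WITH_IMM:
        case IB_WR_RDMA_WRITE_WITH_IMM:
                return wr->ex.imm_data;
        case IB_WR_SEND_WITH_INV:
                return cpu_to_be32(wr->ex.invalidate_rkey);
        default:
                return 0;
        }
}
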
481 static int set_data_inl_seg(struct mlx5_ib_qp *qp, const struct ib_send_wr *wr, in set_data_inl_seg() argument
493 for (i = 0; i < wr->num_sge; i++) { in set_data_inl_seg()
494 size_t len = wr->sg_list[i].length; in set_data_inl_seg()
495 void *addr = (void *)(unsigned long)(wr->sg_list[i].addr); in set_data_inl_seg()
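
The loop at lines 493-495 treats the sg_list addresses as CPU pointers and copies the payload directly into the WQE, which is what IB_SEND_INLINE asks for. A hedged caller-side sketch (my_qp, buf and len are placeholders, not taken from the listing):

#include <rdma/ib_verbs.h>

/*
 * Sketch: post a small SEND with inline payload. The provider copies
 * the bytes into the WQE (see set_data_inl_seg() above), so no DMA
 * mapping of "buf" is required and the lkey is not read for the copy.
 */
static int example_post_inline_send(struct ib_qp *my_qp, void *buf, u32 len)
{
        struct ib_sge sge = {
                .addr   = (uintptr_t)buf,               /* CPU address */
                .length = len,
                .lkey   = my_qp->pd->local_dma_lkey,    /* not read for inline data */
        };
        struct ib_send_wr wr = {
                .wr_id      = 1,
                .opcode     = IB_WR_SEND,
                .send_flags = IB_SEND_INLINE | IB_SEND_SIGNALED,
                .sg_list    = &sge,
                .num_sge    = 1,
        };
        const struct ib_send_wr *bad_wr;

        return ib_post_send(my_qp, &wr, &bad_wr);
}

Note that the inline path at line 1398 is only taken when num_sge is non-zero.
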
782 const struct ib_reg_wr *wr = reg_wr(send_wr); in set_pi_umr_wr() local
783 struct mlx5_ib_mr *sig_mr = to_mmr(wr->mr); in set_pi_umr_wr()
791 unlikely(wr->access & IB_ACCESS_REMOTE_ATOMIC) || in set_pi_umr_wr()
814 set_sig_mkey_segment(*seg, wr->mr, wr->access, xlt_size, region_len, in set_pi_umr_wr()
820 ret = set_sig_data_segment(send_wr, wr->mr, sig_attrs, qp, seg, size, in set_pi_umr_wr()
857 const struct ib_reg_wr *wr, in set_reg_wr() argument
861 struct mlx5_ib_mr *mr = to_mmr(wr->mr); in set_reg_wr()
866 bool atomic = wr->access & IB_ACCESS_REMOTE_ATOMIC; in set_reg_wr()
873 if (!mlx5_ib_can_reconfig_with_umr(dev, 0, wr->access)) { in set_reg_wr()
880 if (unlikely(wr->wr.send_flags & IB_SEND_INLINE)) { in set_reg_wr()
896 set_reg_mkey_seg(*seg, mr, wr->key, wr->access); in set_reg_wr()
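
set_reg_wr() consumes an IB_WR_REG_MR request and, per line 880, rejects IB_SEND_INLINE on it. A caller-side sketch of posting such a fast-registration WR (mr, my_qp and the access mask are placeholders):

#include <rdma/ib_verbs.h>

/*
 * Sketch: register a previously allocated/mapped MR via the send queue.
 * In a real ULP the MR is first mapped with ib_map_mr_sg() and its key
 * is bumped with ib_update_fast_reg_key()/ib_inc_rkey() before reuse.
 */
static int example_post_reg_mr(struct ib_qp *my_qp, struct ib_mr *mr)
{
        struct ib_reg_wr reg = {
                .wr = {
                        .wr_id      = 7,
                        .opcode     = IB_WR_REG_MR,
                        .send_flags = IB_SEND_SIGNALED,
                },
                .mr     = mr,
                .key    = mr->rkey,
                .access = IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_READ,
        };
        const struct ib_send_wr *bad_wr;

        return ib_post_send(my_qp, &reg.wr, &bad_wr);
}
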
947 const struct ib_send_wr *wr, unsigned int *idx, in __begin_wqe() argument
958 (*ctrl)->imm = send_ieth(wr); in __begin_wqe()
972 const struct ib_send_wr *wr, unsigned int *idx, int *size, in begin_wqe() argument
975 return __begin_wqe(qp, seg, ctrl, wr, idx, size, cur_edge, nreq, in begin_wqe()
976 wr->send_flags & IB_SEND_SIGNALED, in begin_wqe()
977 wr->send_flags & IB_SEND_SOLICITED); in begin_wqe()
1011 static void handle_rdma_op(const struct ib_send_wr *wr, void **seg, int *size) in handle_rdma_op() argument
1013 set_raddr_seg(*seg, rdma_wr(wr)->remote_addr, rdma_wr(wr)->rkey); in handle_rdma_op()
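
handle_rdma_op() only needs the remote address and rkey, which live in the extended struct ib_rdma_wr around the generic WR. A caller-side sketch (all values are placeholders):

#include <rdma/ib_verbs.h>

/*
 * Sketch: an RDMA WRITE names its target with remote_addr/rkey in
 * struct ib_rdma_wr; set_raddr_seg() above copies exactly those two
 * fields into the remote-address segment of the WQE.
 */
static int example_post_rdma_write(struct ib_qp *my_qp, struct ib_sge *sge,
                                   u64 remote_addr, u32 rkey)
{
        struct ib_rdma_wr rdma = {
                .wr = {
                        .wr_id      = 3,
                        .opcode     = IB_WR_RDMA_WRITE,
                        .send_flags = IB_SEND_SIGNALED,
                        .sg_list    = sge,
                        .num_sge    = 1,
                },
                .remote_addr = remote_addr,
                .rkey        = rkey,
        };
        const struct ib_send_wr *bad_wr;

        return ib_post_send(my_qp, &rdma.wr, &bad_wr);
}
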
1018 static void handle_local_inv(struct mlx5_ib_qp *qp, const struct ib_send_wr *wr, in handle_local_inv() argument
1023 (*ctrl)->imm = cpu_to_be32(wr->ex.invalidate_rkey); in handle_local_inv()
1027 static int handle_reg_mr(struct mlx5_ib_qp *qp, const struct ib_send_wr *wr, in handle_reg_mr() argument
1032 (*ctrl)->imm = cpu_to_be32(reg_wr(wr)->key); in handle_reg_mr()
1033 return set_reg_wr(qp, reg_wr(wr), seg, size, cur_edge, true); in handle_reg_mr()
1037 const struct ib_send_wr *wr, in handle_psv() argument
1048 err = __begin_wqe(qp, seg, ctrl, wr, idx, size, cur_edge, nreq, in handle_psv()
1060 finish_wqe(qp, *ctrl, *seg, *size, *cur_edge, *idx, wr->wr_id, nreq, in handle_psv()
1069 const struct ib_send_wr *wr, in handle_reg_mr_integrity() argument
1084 mr = to_mmr(reg_wr(wr)->mr); in handle_reg_mr_integrity()
1092 reg_pi_wr.access = reg_wr(wr)->access; in handle_reg_mr_integrity()
1101 finish_wqe(qp, *ctrl, *seg, *size, *cur_edge, *idx, wr->wr_id, in handle_reg_mr_integrity()
1104 err = begin_wqe(qp, seg, ctrl, wr, idx, size, cur_edge, nreq); in handle_reg_mr_integrity()
1128 err = set_pi_umr_wr(wr, qp, seg, size, cur_edge); in handle_reg_mr_integrity()
1133 finish_wqe(qp, *ctrl, *seg, *size, *cur_edge, *idx, wr->wr_id, nreq, in handle_reg_mr_integrity()
1137 err = handle_psv(dev, qp, wr, ctrl, seg, size, cur_edge, idx, nreq, in handle_reg_mr_integrity()
1143 err = handle_psv(dev, qp, wr, ctrl, seg, size, cur_edge, idx, nreq, in handle_reg_mr_integrity()
1156 const struct ib_send_wr *wr, in handle_qpt_rc() argument
1163 switch (wr->opcode) { in handle_qpt_rc()
1167 handle_rdma_op(wr, seg, size); in handle_qpt_rc()
1178 handle_local_inv(qp, wr, ctrl, seg, size, cur_edge, *idx); in handle_qpt_rc()
1183 err = handle_reg_mr(qp, wr, ctrl, seg, size, cur_edge, *idx); in handle_qpt_rc()
1190 err = handle_reg_mr_integrity(dev, qp, wr, ctrl, seg, size, in handle_qpt_rc()
1206 static void handle_qpt_uc(const struct ib_send_wr *wr, void **seg, int *size) in handle_qpt_uc() argument
1208 switch (wr->opcode) { in handle_qpt_uc()
1211 handle_rdma_op(wr, seg, size); in handle_qpt_uc()
1219 const struct ib_send_wr *wr, void **seg, in handle_qpt_hw_gsi() argument
1222 set_datagram_seg(*seg, wr); in handle_qpt_hw_gsi()
1228 static void handle_qpt_ud(struct mlx5_ib_qp *qp, const struct ib_send_wr *wr, in handle_qpt_ud() argument
1231 set_datagram_seg(*seg, wr); in handle_qpt_ud()
1244 set_eth_seg(wr, qp, seg, size, cur_edge); in handle_qpt_ud()
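
handle_qpt_ud() fills the datagram (address-vector) segment from struct ib_ud_wr and, per line 1244, may also fill an ethernet segment. A caller-side sketch of a UD send (ah, the remote QPN/QKEY and the SGE are placeholders):

#include <rdma/ib_verbs.h>

/*
 * Sketch: a UD SEND carries its destination in the WR itself via an
 * address handle plus remote QPN/QKEY, which set_datagram_seg() above
 * copies into the WQE. All values here are placeholders.
 */
static int example_post_ud_send(struct ib_qp *my_qp, struct ib_ah *ah,
                                u32 remote_qpn, u32 remote_qkey,
                                struct ib_sge *sge)
{
        struct ib_ud_wr ud = {
                .wr = {
                        .wr_id      = 5,
                        .opcode     = IB_WR_SEND,
                        .send_flags = IB_SEND_SIGNALED,
                        .sg_list    = sge,
                        .num_sge    = 1,
                },
                .ah          = ah,
                .remote_qpn  = remote_qpn,
                .remote_qkey = remote_qkey,
        };
        const struct ib_send_wr *bad_wr;

        return ib_post_send(my_qp, &ud.wr, &bad_wr);
}
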
1250 const struct ib_send_wr *wr, in handle_qpt_reg_umr() argument
1256 if (unlikely(wr->opcode != MLX5_IB_WR_UMR)) { in handle_qpt_reg_umr()
1258 mlx5_ib_warn(dev, "bad opcode %d\n", wr->opcode); in handle_qpt_reg_umr()
1263 (*ctrl)->imm = cpu_to_be32(umr_wr(wr)->mkey); in handle_qpt_reg_umr()
1264 err = set_reg_umr_segment(dev, *seg, wr); in handle_qpt_reg_umr()
1270 set_reg_mkey_segment(dev, *seg, wr); in handle_qpt_reg_umr()
1278 int mlx5_ib_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr, in mlx5_ib_post_send() argument
1301 *bad_wr = wr; in mlx5_ib_post_send()
1306 return mlx5_ib_gsi_post_send(ibqp, wr, bad_wr); in mlx5_ib_post_send()
1312 for (nreq = 0; wr; nreq++, wr = wr->next) { in mlx5_ib_post_send()
1313 if (unlikely(wr->opcode >= ARRAY_SIZE(mlx5_ib_opcode))) { in mlx5_ib_post_send()
1316 *bad_wr = wr; in mlx5_ib_post_send()
1320 num_sge = wr->num_sge; in mlx5_ib_post_send()
1324 *bad_wr = wr; in mlx5_ib_post_send()
1328 err = begin_wqe(qp, &seg, &ctrl, wr, &idx, &size, &cur_edge, in mlx5_ib_post_send()
1333 *bad_wr = wr; in mlx5_ib_post_send()
1337 if (wr->opcode == IB_WR_REG_MR || in mlx5_ib_post_send()
1338 wr->opcode == IB_WR_REG_MR_INTEGRITY) { in mlx5_ib_post_send()
1342 if (wr->send_flags & IB_SEND_FENCE) { in mlx5_ib_post_send()
1359 err = handle_qpt_rc(dev, qp, wr, &ctrl, &seg, &size, in mlx5_ib_post_send()
1363 *bad_wr = wr; in mlx5_ib_post_send()
1365 } else if (wr->opcode == IB_WR_REG_MR_INTEGRITY) { in mlx5_ib_post_send()
1371 handle_qpt_uc(wr, &seg, &size); in mlx5_ib_post_send()
1377 *bad_wr = wr; in mlx5_ib_post_send()
1382 handle_qpt_hw_gsi(qp, wr, &seg, &size, &cur_edge); in mlx5_ib_post_send()
1385 handle_qpt_ud(qp, wr, &seg, &size, &cur_edge); in mlx5_ib_post_send()
1388 err = handle_qpt_reg_umr(dev, qp, wr, &ctrl, &seg, in mlx5_ib_post_send()
1398 if (wr->send_flags & IB_SEND_INLINE && num_sge) { in mlx5_ib_post_send()
1399 err = set_data_inl_seg(qp, wr, &seg, &size, &cur_edge); in mlx5_ib_post_send()
1402 *bad_wr = wr; in mlx5_ib_post_send()
1409 if (unlikely(!wr->sg_list[i].length)) in mlx5_ib_post_send()
1414 wr->sg_list + i); in mlx5_ib_post_send()
1421 finish_wqe(qp, ctrl, seg, size, cur_edge, idx, wr->wr_id, nreq, in mlx5_ib_post_send()
1422 fence, mlx5_ib_opcode[wr->opcode]); in mlx5_ib_post_send()
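
The posting loop at line 1312 walks wr->next, so a caller can chain several work requests into a single ib_post_send(); on failure *bad_wr is set to the first request that was not consumed. A minimal caller-side sketch (my_qp and the SGEs are placeholders):

#include <linux/printk.h>
#include <rdma/ib_verbs.h>

/*
 * Sketch: chain two SENDs and post them in one call. Only the last WR
 * asks for a completion, which is fine on a QP created with
 * IB_SIGNAL_REQ_WR. On error, bad_wr identifies where posting stopped.
 */
static int example_post_chain(struct ib_qp *my_qp, struct ib_sge *sge0,
                              struct ib_sge *sge1)
{
        struct ib_send_wr second = {
                .wr_id      = 2,
                .opcode     = IB_WR_SEND,
                .send_flags = IB_SEND_SIGNALED,
                .sg_list    = sge1,
                .num_sge    = 1,
        };
        struct ib_send_wr first = {
                .wr_id   = 1,
                .next    = &second,
                .opcode  = IB_WR_SEND,
                .sg_list = sge0,
                .num_sge = 1,
        };
        const struct ib_send_wr *bad_wr;
        int err;

        err = ib_post_send(my_qp, &first, &bad_wr);
        if (err)
                pr_err("post_send failed at wr_id %llu: %d\n",
                       bad_wr->wr_id, err);
        return err;
}
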
1461 int mlx5_ib_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr, in mlx5_ib_post_recv() argument
1477 *bad_wr = wr; in mlx5_ib_post_recv()
1482 return mlx5_ib_gsi_post_recv(ibqp, wr, bad_wr); in mlx5_ib_post_recv()
1488 for (nreq = 0; wr; nreq++, wr = wr->next) { in mlx5_ib_post_recv()
1491 *bad_wr = wr; in mlx5_ib_post_recv()
1495 if (unlikely(wr->num_sge > qp->rq.max_gs)) { in mlx5_ib_post_recv()
1497 *bad_wr = wr; in mlx5_ib_post_recv()
1505 for (i = 0; i < wr->num_sge; i++) in mlx5_ib_post_recv()
1506 set_data_ptr_seg(scat + i, wr->sg_list + i); in mlx5_ib_post_recv()
1519 qp->rq.wrid[ind] = wr->wr_id; in mlx5_ib_post_recv()
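
The receive side mirrors this: ib_post_recv() takes a chain of struct ib_recv_wr, each request's wr_id is stashed in qp->rq.wrid[] (line 1519) and handed back in the completion. A small caller-side sketch (dma_addr, len and lkey are placeholders for an already DMA-mapped, registered buffer):

#include <rdma/ib_verbs.h>

/*
 * Sketch: post one receive buffer. Unlike the inline send case, receive
 * SGEs must reference DMA-mapped memory covered by a valid lkey, since
 * the HCA scatters incoming data into them (set_data_ptr_seg() above).
 */
static int example_post_recv(struct ib_qp *my_qp, u64 dma_addr, u32 len,
                             u32 lkey)
{
        struct ib_sge sge = {
                .addr   = dma_addr,
                .length = len,
                .lkey   = lkey,
        };
        struct ib_recv_wr wr = {
                .wr_id   = 0x1234,
                .sg_list = &sge,
                .num_sge = 1,
        };
        const struct ib_recv_wr *bad_wr;

        return ib_post_recv(my_qp, &wr, &bad_wr);
}
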