/linux-6.3-rc2/drivers/net/ethernet/huawei/hinic/ |
A D | hinic_hw_qp.h |
    181  void hinic_sq_write_db(struct hinic_sq *sq, u16 prod_idx, unsigned int wqe_size,
    185  unsigned int wqe_size, u16 *prod_idx);
    187  void hinic_sq_return_wqe(struct hinic_sq *sq, unsigned int wqe_size);
    191  unsigned int wqe_size);
    195  unsigned int wqe_size, u16 *cons_idx);
    199  unsigned int *wqe_size, u16 *cons_idx);
    201  void hinic_sq_put_wqe(struct hinic_sq *sq, unsigned int wqe_size);
    207  unsigned int wqe_size, u16 *prod_idx);
    213  unsigned int wqe_size,
    217  unsigned int wqe_size,
    [all …]
|
A D | hinic_hw_qp.c |
    676  hinic_return_wqe(sq->wq, wqe_size); in hinic_sq_return_wqe()
    696  hinic_cpu_to_be32(sq_wqe, wqe_size); in hinic_sq_write_wqe()
    698  hinic_write_wqe(sq->wq, hw_wqe, wqe_size); in hinic_sq_write_wqe()
    735  *wqe_size = ALIGN(*wqe_size, sq->wq->wqebb_size); in hinic_sq_read_wqebb()
    768  hinic_put_wqe(sq->wq, wqe_size); in hinic_sq_put_wqe()
    838  unsigned int wqe_size, in hinic_rq_read_wqe() argument
    873  unsigned int wqe_size, in hinic_rq_read_next_wqe() argument
    881  wqe_size = ALIGN(wqe_size, wq->wqebb_size); in hinic_rq_read_next_wqe()
    882  num_wqebbs = wqe_size / wq->wqebb_size; in hinic_rq_read_next_wqe()
    900  unsigned int wqe_size) in hinic_rq_put_wqe() argument
    [all …]
|
A D | hinic_tx.c |
    498  unsigned int wqe_size; in hinic_lb_xmit_frame() local
    510  wqe_size = HINIC_SQ_WQE_SIZE(nr_sges); in hinic_lb_xmit_frame()
    528  wqe_size = 0; in hinic_lb_xmit_frame()
    559  unsigned int wqe_size; in hinic_xmit_frame() local
    591  wqe_size = HINIC_SQ_WQE_SIZE(nr_sges); in hinic_xmit_frame()
    612  wqe_size = 0; in hinic_xmit_frame()
    670  unsigned int wqe_size; in free_all_tx_skbs() local
    684  hinic_sq_put_wqe(sq, wqe_size); in free_all_tx_skbs()
    706  unsigned int wqe_size; in free_tx_poll() local
    726  if (wqe_size > wq->wqebb_size) { in free_tx_poll()
    [all …]
|
A D | hinic_hw_wq.h |
    96  struct hinic_hw_wqe *hinic_get_wqe(struct hinic_wq *wq, unsigned int wqe_size,
    99  void hinic_return_wqe(struct hinic_wq *wq, unsigned int wqe_size);
    101  void hinic_put_wqe(struct hinic_wq *wq, unsigned int wqe_size);
    103  struct hinic_hw_wqe *hinic_read_wqe(struct hinic_wq *wq, unsigned int wqe_size,
    109  unsigned int wqe_size);
|
A D | hinic_hw_wq.c |
    740  struct hinic_hw_wqe *hinic_get_wqe(struct hinic_wq *wq, unsigned int wqe_size, in hinic_get_wqe() argument
    748  num_wqebbs = ALIGN(wqe_size, wq->wqebb_size) >> wq->wqebb_size_shift; in hinic_get_wqe()
    789  void hinic_return_wqe(struct hinic_wq *wq, unsigned int wqe_size) in hinic_return_wqe() argument
    791  int num_wqebbs = ALIGN(wqe_size, wq->wqebb_size) / wq->wqebb_size; in hinic_return_wqe()
    803  void hinic_put_wqe(struct hinic_wq *wq, unsigned int wqe_size) in hinic_put_wqe() argument
    805  int num_wqebbs = ALIGN(wqe_size, wq->wqebb_size) in hinic_put_wqe()
    821  struct hinic_hw_wqe *hinic_read_wqe(struct hinic_wq *wq, unsigned int wqe_size, in hinic_read_wqe() argument
    824  int num_wqebbs = ALIGN(wqe_size, wq->wqebb_size) in hinic_read_wqe()
    889  unsigned int wqe_size) in hinic_write_wqe() argument
    899  num_wqebbs = ALIGN(wqe_size, wq->wqebb_size) / wq->wqebb_size; in hinic_write_wqe()
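
The hinic_hw_wq.c hits above all reduce a byte-sized WQE to a count of fixed-size work-queue basic blocks (WQEBBs) by rounding up to the WQEBB size. A minimal userspace sketch of that arithmetic, assuming a power-of-two WQEBB size; ALIGN_UP and wqe_size_to_wqebbs are illustrative stand-ins, not the driver's own helpers:

#include <assert.h>
#include <stdio.h>

/* Userspace stand-in for the kernel's ALIGN(): round x up to a
 * multiple of a, where a is a power of two. */
#define ALIGN_UP(x, a) (((x) + (a) - 1) & ~((a) - 1))

/* Hypothetical helper mirroring the pattern in hinic_get_wqe() and
 * hinic_put_wqe(): a WQE of wqe_size bytes occupies this many WQEBBs. */
static unsigned int wqe_size_to_wqebbs(unsigned int wqe_size,
				       unsigned int wqebb_size)
{
	assert(wqebb_size && !(wqebb_size & (wqebb_size - 1)));
	return ALIGN_UP(wqe_size, wqebb_size) / wqebb_size;
}

int main(void)
{
	/* Example: with 64-byte WQEBBs, a 200-byte WQE needs 4 WQEBBs. */
	printf("%u\n", wqe_size_to_wqebbs(200, 64));
	return 0;
}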
|
A D | hinic_hw_cmdq.c |
    155  unsigned int wqe_size = 0; in cmdq_wqe_size_from_bdlen() local
    159  wqe_size = WQE_LCMD_SIZE; in cmdq_wqe_size_from_bdlen()
    162  wqe_size = WQE_SCMD_SIZE; in cmdq_wqe_size_from_bdlen()
    166  return wqe_size; in cmdq_wqe_size_from_bdlen()
    532  unsigned int bufdesc_len, wqe_size; in clear_wqe_complete_bit() local
    536  wqe_size = cmdq_wqe_size_from_bdlen(bufdesc_len); in clear_wqe_complete_bit()
    537  if (wqe_size == WQE_LCMD_SIZE) { in clear_wqe_complete_bit()
|
/linux-6.3-rc2/drivers/infiniband/hw/qedr/ |
A D | qedr_hsi_rdma.h |
    310  u8 wqe_size; member
    338  u8 wqe_size; member
    374  u8 wqe_size; member
    420  u8 wqe_size; member
    475  u8 wqe_size; member
    498  u8 wqe_size; member
    548  u8 wqe_size; member
    602  u8 wqe_size; member
    628  u8 wqe_size; member
    663  u8 wqe_size; member
    [all …]
|
A D | verbs.c |
    3318  (*wqe_size)++; in qedr_prepare_sq_inline_data()
    3381  if (wqe_size) in qedr_prepare_sq_sges()
    3586  qp->wqe_wr_id[qp->sq.prod].wqe_size = swqe->wqe_size; in __qedr_post_send()
    3599  qp->wqe_wr_id[qp->sq.prod].wqe_size = swqe->wqe_size; in __qedr_post_send()
    3612  qp->wqe_wr_id[qp->sq.prod].wqe_size = swqe->wqe_size; in __qedr_post_send()
    3632  qp->wqe_wr_id[qp->sq.prod].wqe_size = rwqe->wqe_size; in __qedr_post_send()
    3645  qp->wqe_wr_id[qp->sq.prod].wqe_size = rwqe->wqe_size; in __qedr_post_send()
    3662  qp->wqe_wr_id[qp->sq.prod].wqe_size = rwqe->wqe_size; in __qedr_post_send()
    3692  qp->wqe_wr_id[qp->sq.prod].wqe_size = awqe1->wqe_size; in __qedr_post_send()
    3702  qp->wqe_wr_id[qp->sq.prod].wqe_size = iwqe->wqe_size; in __qedr_post_send()
    [all …]
|
A D | qedr.h |
    434  u8 wqe_size; member
    445  u8 wqe_size; member
|
/linux-6.3-rc2/drivers/infiniband/hw/erdma/ |
A D | erdma_qp.c |
    341  wqe_size = sizeof(struct erdma_write_sqe); in erdma_push_one_sqe()
    342  sgl_offset = wqe_size; in erdma_push_one_sqe()
    370  wqe_size = sizeof(struct erdma_readreq_sqe) + in erdma_push_one_sqe()
    389  wqe_size = sizeof(struct erdma_send_sqe); in erdma_push_one_sqe()
    390  sgl_offset = wqe_size; in erdma_push_one_sqe()
    414  wqe_size = sizeof(struct erdma_reg_mr_sqe) + in erdma_push_one_sqe()
    418  wqe_size = sizeof(struct erdma_reg_mr_sqe); in erdma_push_one_sqe()
    428  wqe_size = sizeof(struct erdma_reg_mr_sqe); in erdma_push_one_sqe()
    457  wqe_size = sizeof(*atomic_sqe); in erdma_push_one_sqe()
    468  wqe_size += ret; in erdma_push_one_sqe()
    [all …]
|
/linux-6.3-rc2/drivers/net/ethernet/mellanox/mlx5/core/en/ |
A D | txrx.h |
    98  static inline void *mlx5e_fetch_wqe(struct mlx5_wq_cyc *wq, u16 pi, size_t wqe_size) in mlx5e_fetch_wqe() argument
    103  memset(wqe, 0, wqe_size); in mlx5e_fetch_wqe()
    452  static inline u16 mlx5e_stop_room_for_wqe(struct mlx5_core_dev *mdev, u16 wqe_size) in mlx5e_stop_room_for_wqe() argument
    464  WARN_ONCE(wqe_size > mlx5e_get_max_sq_wqebbs(mdev), in mlx5e_stop_room_for_wqe()
    466  wqe_size, mlx5e_get_max_sq_wqebbs(mdev)); in mlx5e_stop_room_for_wqe()
    468  return MLX5E_STOP_ROOM(wqe_size); in mlx5e_stop_room_for_wqe()
    483  static inline bool mlx5e_icosq_can_post_wqe(struct mlx5e_icosq *sq, u16 wqe_size) in mlx5e_icosq_can_post_wqe() argument
    485  u16 room = sq->reserved_room + MLX5E_STOP_ROOM(wqe_size); in mlx5e_icosq_can_post_wqe()
|
A D | params.c |
    822  int wqe_size = BIT(log_stride_sz) * num_strides; in mlx5e_shampo_get_log_cq_size() local
    827  return order_base_2((wqe_size / rsrv_size) * wq_size * (pkt_per_rsrv + 1)); in mlx5e_shampo_get_log_cq_size()
    1034  int wqe_size = BIT(log_stride_sz) * num_strides; in mlx5e_shampo_hd_per_wqe() local
    1038  hd_per_wqe = (wqe_size / resv_size) * pkt_per_resv; in mlx5e_shampo_hd_per_wqe()
    1040  __func__, hd_per_wqe, resv_size, wqe_size, pkt_per_resv); in mlx5e_shampo_hd_per_wqe()
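
The params.c hits derive the multi-packet WQE size from a stride size and stride count and then split it into reservations. A small standalone sketch of that arithmetic with made-up example values (the constants are illustrative, not taken from the driver):

#include <stdio.h>

/* Userspace stand-in for the kernel's BIT() macro. */
#define BIT(n) (1u << (n))

int main(void)
{
	/* Hypothetical example values; the real ones come from device
	 * capabilities and the configured RQ parameters. */
	unsigned int log_stride_sz = 6;   /* 64-byte strides */
	unsigned int num_strides = 512;
	unsigned int resv_size = 4096;    /* bytes per reservation */
	unsigned int pkt_per_resv = 2;

	/* Mirrors the mlx5e_shampo_hd_per_wqe() pattern: total WQE bytes,
	 * then headers per WQE = reservations * packets per reservation. */
	unsigned int wqe_size = BIT(log_stride_sz) * num_strides;
	unsigned int hd_per_wqe = (wqe_size / resv_size) * pkt_per_resv;

	printf("wqe_size=%u bytes, hd_per_wqe=%u\n", wqe_size, hd_per_wqe);
	return 0;
}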
|
/linux-6.3-rc2/drivers/infiniband/sw/rxe/ |
A D | rxe_qp.c |
    188  int wqe_size; in rxe_qp_init_req() local
    207  wqe_size = max_t(int, init->cap.max_send_sge * sizeof(struct ib_sge), in rxe_qp_init_req()
    210  wqe_size / sizeof(struct ib_sge); in rxe_qp_init_req()
    211  qp->sq.max_inline = init->cap.max_inline_data = wqe_size; in rxe_qp_init_req()
    212  wqe_size += sizeof(struct rxe_send_wqe); in rxe_qp_init_req()
    216  wqe_size, type); in rxe_qp_init_req()
    258  int wqe_size; in rxe_qp_init_resp() local
    265  wqe_size = rcv_wqe_size(qp->rq.max_sge); in rxe_qp_init_resp()
    269  wqe_size, type); in rxe_qp_init_resp()
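
In the rxe hits, the send-queue WQE size is the larger of the SGE array and the inline-data area, plus a fixed WQE header. A simplified sketch under assumed structure sizes (SGE_SIZE and SEND_WQE_HDR are placeholders for the real sizeof values, not the driver's constants):

#include <stdio.h>

#define SGE_SIZE      16u   /* assumed sizeof(struct ib_sge) */
#define SEND_WQE_HDR  64u   /* assumed fixed rxe_send_wqe header size */

static unsigned int max_u(unsigned int a, unsigned int b)
{
	return a > b ? a : b;
}

int main(void)
{
	unsigned int max_send_sge = 4;
	unsigned int max_inline_data = 128;

	/* Mirrors the rxe_qp_init_req() pattern: the per-WQE buffer must
	 * hold either the SGE list or the inline data, whichever is
	 * larger, plus the fixed WQE header. */
	unsigned int payload = max_u(max_send_sge * SGE_SIZE, max_inline_data);
	unsigned int wqe_size = payload + SEND_WQE_HDR;

	printf("payload=%u, wqe_size=%u\n", payload, wqe_size);
	return 0;
}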
|
/linux-6.3-rc2/drivers/infiniband/hw/bnxt_re/ |
A D | qplib_fp.h |
    95  u16 wqe_size; member
    254  u16 wqe_size; member
    569  return (que->wqe_size * que->max_wqe) / sizeof(struct sq_sge); in bnxt_qplib_get_depth()
    584  static inline u32 bnxt_qplib_set_rq_max_slot(u32 wqe_size) in bnxt_qplib_set_rq_max_slot() argument
    586  return (wqe_size / sizeof(struct sq_sge)); in bnxt_qplib_set_rq_max_slot()
|
A D | ib_verbs.c |
    860  u16 wqe_size, calc_ils; in bnxt_re_get_wqe_size() local
    862  wqe_size = bnxt_re_get_swqe_size(nsge); in bnxt_re_get_wqe_size()
    865  wqe_size = max_t(u16, calc_ils, wqe_size); in bnxt_re_get_wqe_size()
    866  wqe_size = ALIGN(wqe_size, sizeof(struct sq_send_hdr)); in bnxt_re_get_wqe_size()
    868  return wqe_size; in bnxt_re_get_wqe_size()
    899  qplqp->max_inline_data = sq->wqe_size - in bnxt_re_setup_swqe_size()
    926  bytes = (qplib_qp->sq.max_wqe * qplib_qp->sq.wqe_size); in bnxt_re_init_user_qp()
    934  ((qplib_qp->sq.max_wqe * qplib_qp->sq.wqe_size) / in bnxt_re_init_user_qp()
    1048  qp->qplib_qp.sq.wqe_size = bnxt_re_get_wqe_size(0, 6); in bnxt_re_create_shadow_qp()
    1059  qp->qplib_qp.rq.wqe_size = bnxt_re_get_rwqe_size(6); in bnxt_re_create_shadow_qp()
    [all …]
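
The bnxt_re_get_wqe_size() hits follow a common pattern: size the WQE for the requested number of SGEs, grow it if the inline payload needs more room, then align it to the send-header size. A hedged sketch with assumed sizes (SEND_HDR_SIZE and SGE_SLOT_SIZE stand in for the real sq_send_hdr/sq_sge structures):

#include <stdio.h>

#define SEND_HDR_SIZE  16u   /* assumed sizeof(struct sq_send_hdr) */
#define SGE_SLOT_SIZE  16u   /* assumed sizeof(struct sq_sge) */

/* Round x up to a multiple of a (works for any non-zero a). */
#define ALIGN_UP(x, a) (((x) + (a) - 1) / (a) * (a))

/* Mirrors the shape of bnxt_re_get_wqe_size(): room for the header and
 * nsge SGEs, grown to fit the requested inline size, then aligned. */
static unsigned int get_wqe_size(unsigned int nsge, unsigned int inline_size)
{
	unsigned int wqe_size = SEND_HDR_SIZE + nsge * SGE_SLOT_SIZE;
	unsigned int calc_ils = SEND_HDR_SIZE + inline_size;

	if (calc_ils > wqe_size)
		wqe_size = calc_ils;
	return ALIGN_UP(wqe_size, SEND_HDR_SIZE);
}

int main(void)
{
	printf("%u\n", get_wqe_size(6, 96));  /* e.g. 6 SGEs, 96B inline */
	return 0;
}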
|
A D | roce_hsi.h |
    194  u8 wqe_size; member
    217  u8 wqe_size; member
    237  u8 wqe_size; member
    286  u8 wqe_size; member
    309  u8 wqe_size; member
    324  u8 wqe_size; member
    521  u8 wqe_size; member
    537  u8 wqe_size; member
|
/linux-6.3-rc2/drivers/infiniband/hw/ocrdma/ |
A D | ocrdma_verbs.c |
    368  dev->attr.wqe_size) : 0; in _ocrdma_alloc_pd()
    500  resp.wqe_size = dev->attr.wqe_size; in ocrdma_alloc_ucontext()
    502  resp.dpp_wqe_size = dev->attr.wqe_size; in ocrdma_alloc_ucontext()
    1946  wqe_size += sizeof(struct ocrdma_sge); in ocrdma_build_inline_sges()
    1953  wqe_size += sizeof(struct ocrdma_sge); in ocrdma_build_inline_sges()
    1964  u32 wqe_size = sizeof(*hdr); in ocrdma_build_send() local
    2036  wqe_size = roundup(wqe_size, OCRDMA_WQE_ALIGN_BYTES); in ocrdma_build_reg()
    2202  u32 wqe_size = 0; in ocrdma_build_rqe() local
    2207  wqe_size = sizeof(*sge) + sizeof(*rqe); in ocrdma_build_rqe()
    2209  rqe->cw = ((wqe_size / OCRDMA_WQE_STRIDE) << in ocrdma_build_rqe()
    [all …]
|
/linux-6.3-rc2/include/uapi/rdma/ |
A D | ocrdma-abi.h |
    55  __u32 wqe_size; member
|
/linux-6.3-rc2/drivers/infiniband/hw/mlx5/ |
A D | qp.c |
    270  size_t wqe_size = 1 << wq->wqe_shift; in mlx5_ib_read_wqe_rq() local
    272  if (buflen < wqe_size) in mlx5_ib_read_wqe_rq()
    304  if (buflen < wqe_size) in mlx5_ib_read_wqe_srq()
    432  int wqe_size; in set_rq_size() local
    463  wqe_size = in set_rq_size()
    467  wqe_size = roundup_pow_of_two(wqe_size); in set_rq_size()
    473  wqe_size, in set_rq_size()
    590  int wqe_size; in calc_sq_size() local
    596  wqe_size = calc_send_wqe(attr); in calc_sq_size()
    598  if (wqe_size < 0) in calc_sq_size()
    [all …]
|
A D | odp.c |
    1118  int wqe_size = 1 << srq->msrq.wqe_shift; in mlx5_ib_mr_responder_pfault_handler_srq() local
    1120  if (wqe_size > wqe_length) { in mlx5_ib_mr_responder_pfault_handler_srq()
    1125  *wqe_end = *wqe + wqe_size; in mlx5_ib_mr_responder_pfault_handler_srq()
    1137  int wqe_size = 1 << wq->wqe_shift; in mlx5_ib_mr_responder_pfault_handler_rq() local
    1144  if (wqe_size > wqe_length) { in mlx5_ib_mr_responder_pfault_handler_rq()
    1149  *wqe_end = wqe + wqe_size; in mlx5_ib_mr_responder_pfault_handler_rq()
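
Both the qp.c and odp.c hits treat the WQE size as a power of two stored as a shift, recovering it with 1 << wqe_shift and bounds-checking it against the bytes actually available. A small sketch of that round-up-and-shift bookkeeping (roundup_pow2 is a userspace stand-in for the kernel's roundup_pow_of_two()):

#include <stdio.h>

/* Round v up to the next power of two (v must be non-zero). */
static unsigned int roundup_pow2(unsigned int v)
{
	unsigned int p = 1;

	while (p < v)
		p <<= 1;
	return p;
}

int main(void)
{
	/* mlx5 keeps the RQ/SRQ stride as a shift; the WQE size is then
	 * recovered as 1 << wqe_shift, as in the hits above. */
	unsigned int raw_size = 200;                    /* hypothetical */
	unsigned int wqe_size = roundup_pow2(raw_size); /* 256 */
	unsigned int wqe_shift = 0;

	while ((1u << wqe_shift) < wqe_size)
		wqe_shift++;

	printf("wqe_size=%u, wqe_shift=%u, 1<<shift=%u\n",
	       wqe_size, wqe_shift, 1u << wqe_shift);
	return 0;
}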
|
/linux-6.3-rc2/drivers/infiniband/hw/vmw_pvrdma/ |
A D | pvrdma_qp.c |
    147  qp->rq.wqe_size = roundup_pow_of_two(sizeof(struct pvrdma_rq_wqe_hdr) + in pvrdma_set_rq_size()
    150  qp->npages_recv = (qp->rq.wqe_cnt * qp->rq.wqe_size + PAGE_SIZE - 1) / in pvrdma_set_rq_size()
    172  qp->sq.wqe_size = roundup_pow_of_two(sizeof(struct pvrdma_sq_wqe_hdr) + in pvrdma_set_sq_size()
    177  (qp->sq.wqe_cnt * qp->sq.wqe_size + PAGE_SIZE - 1) / in pvrdma_set_sq_size()
    637  qp->sq.offset + n * qp->sq.wqe_size); in get_sq_wqe()
    643  qp->rq.offset + n * qp->rq.wqe_size); in get_rq_wqe()
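
The pvrdma hits round each WQE up to a power of two, size the backing ring in pages, and locate WQE n at a fixed byte offset. A standalone sketch of the page and offset arithmetic, assuming a 4 KiB page and example ring dimensions:

#include <stdio.h>

#define PAGE_SZ 4096u  /* assumed page size for the example */

/* Ceiling division, like the kernel's DIV_ROUND_UP(). */
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	/* Hypothetical ring: 256 WQEs of 128 bytes each (already rounded
	 * to a power of two, as pvrdma_set_rq_size() does). */
	unsigned int wqe_cnt = 256, wqe_size = 128;

	/* Pages backing the ring, as in the npages_recv computation. */
	unsigned int npages = DIV_ROUND_UP(wqe_cnt * wqe_size, PAGE_SZ);

	/* WQE n lives at a fixed offset, as in get_rq_wqe()/get_sq_wqe(). */
	unsigned int n = 17;
	unsigned int offset_of_n = n * wqe_size;

	printf("npages=%u, wqe %u at byte offset %u\n", npages, n, offset_of_n);
	return 0;
}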
|
A D | pvrdma.h |
    155  int wqe_size; member
    170  int wqe_size; member
|
/linux-6.3-rc2/drivers/scsi/elx/libefc_sli/ |
A D | sli4.c |
    750  size = sli4->wqe_size; in sli_get_queue_entry_size()
    1377  memset(buf, 0, sli->wqe_size); in sli_abort_wqe()
    1426  memset(buf, 0, sli->wqe_size); in sli_els_request64_wqe()
    1571  memset(buf, 0, sli->wqe_size); in sli_fcp_icmnd64_wqe()
    1638  memset(buf, 0, sli->wqe_size); in sli_fcp_iread64_wqe()
    1741  memset(buf, 0, sli->wqe_size); in sli_fcp_iwrite64_wqe()
    1832  memset(buf, 0, sli->wqe_size); in sli_fcp_treceive64_wqe()
    1966  memset(buf, 0, sli4->wqe_size); in sli_fcp_trsp64_wqe()
    2231  memset(buf, 0, sli->wqe_size); in sli_send_frame_wqe()
    2291  memset(buf, 0, sli->wqe_size); in sli_xmit_bls_rsp64_wqe()
    [all …]
|
/linux-6.3-rc2/drivers/net/ethernet/microsoft/mana/ |
A D | gdma_main.c |
    1067  u32 wqe_size; in mana_gd_post_work_request() local
    1089  wqe_size = ALIGN(sizeof(struct gdma_wqe) + client_oob_size + in mana_gd_post_work_request()
    1091  if (wqe_size > max_wqe_size) in mana_gd_post_work_request()
    1094  if (wq->monitor_avl_buf && wqe_size > mana_gd_wq_avail_space(wq)) { in mana_gd_post_work_request()
    1101  wqe_info->wqe_size_in_bu = wqe_size / GDMA_WQE_BU_SIZE; in mana_gd_post_work_request()
    1111  wq->head += wqe_size / GDMA_WQE_BU_SIZE; in mana_gd_post_work_request()
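
mana_gd_post_work_request() builds the WQE size from the header, out-of-band area and scatter/gather list, aligns it to the queue's basic unit, and advances the head by that many units. A sketch of the same arithmetic with an assumed basic-unit size (WQE_BU_SIZE here is illustrative, not the driver's constant):

#include <stdio.h>

#define WQE_BU_SIZE 32u   /* assumed basic-unit size for the example */
#define ALIGN_UP(x, a) (((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	/* Hypothetical request: fixed WQE header plus out-of-band data
	 * plus the SGL, rounded up to whole basic units, echoing the
	 * mana_gd_post_work_request() pattern above. */
	unsigned int hdr = 8, oob = 24, sgl = 3 * 16;
	unsigned int wqe_size = ALIGN_UP(hdr + oob + sgl, WQE_BU_SIZE);
	unsigned int size_in_bu = wqe_size / WQE_BU_SIZE;

	printf("wqe_size=%u bytes (%u basic units)\n", wqe_size, size_in_bu);
	return 0;
}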
|
/linux-6.3-rc2/drivers/net/ethernet/mellanox/mlx5/core/ |
A D | en.h |
    159  #define MLX5E_KLM_MAX_ENTRIES_PER_WQE(wqe_size)\ argument
    160  (((wqe_size) - sizeof(struct mlx5e_umr_wqe)) / sizeof(struct mlx5_klm))
    162  #define MLX5E_KLM_ENTRIES_PER_WQE(wqe_size)\ argument
    163  ALIGN_DOWN(MLX5E_KLM_MAX_ENTRIES_PER_WQE(wqe_size), MLX5_UMR_KLM_NUM_ENTRIES_ALIGNMENT)
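
The two en.h macros compute how many KLM entries fit in a UMR WQE after the fixed header, rounded down to an alignment. A worked example with assumed sizes (the header, entry and alignment constants below are placeholders for the driver's real values):

#include <stdio.h>

#define UMR_WQE_HDR_SIZE  48u   /* assumed sizeof(struct mlx5e_umr_wqe) */
#define KLM_ENTRY_SIZE    16u   /* assumed sizeof(struct mlx5_klm) */
#define KLM_ALIGNMENT      4u   /* assumed entry-count alignment */

#define ALIGN_DOWN(x, a) ((x) / (a) * (a))

int main(void)
{
	unsigned int wqe_size = 256;  /* hypothetical UMR WQE size */

	/* Same shape as the MLX5E_KLM_*_PER_WQE macros above: entries
	 * that fit after the fixed header, rounded down to the required
	 * alignment. */
	unsigned int max_entries = (wqe_size - UMR_WQE_HDR_SIZE) / KLM_ENTRY_SIZE;
	unsigned int entries = ALIGN_DOWN(max_entries, KLM_ALIGNMENT);

	printf("max=%u, aligned=%u\n", max_entries, entries);
	return 0;
}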
|