Searched refs:cqe (Results 1 – 25 of 196) sorted by relevance

/drivers/infiniband/hw/mthca/
mthca_cq.c
176 return MTHCA_CQ_ENTRY_OWNER_HW & cqe->owner ? NULL : cqe; in cqe_sw()
195 be32_to_cpu(cqe[0]), be32_to_cpu(cqe[1]), be32_to_cpu(cqe[2]), in dump_cqe()
196 be32_to_cpu(cqe[3]), be32_to_cpu(cqe[4]), be32_to_cpu(cqe[5]), in dump_cqe()
197 be32_to_cpu(cqe[6]), be32_to_cpu(cqe[7])); in dump_cqe()
304 cqe = get_cqe(cq, prod_index & cq->ibcq.cqe); in mthca_cq_clean()
336 cq->ibcq.cqe < cq->resize_buf->cqe) { in mthca_cq_resize_copy_cqes()
383 be32_to_cpu(cqe->my_qpn), be32_to_cpu(cqe->wqe), in handle_error_cqe()
494 if (!cqe) in mthca_poll_one()
512 is_send = is_error ? cqe->opcode & 0x01 : cqe->is_send & 0x80; in mthca_poll_one()
646 set_cqe_hw(cqe); in mthca_poll_one()
[all …]
/drivers/infiniband/hw/mlx4/
cq.c
81 struct mlx4_cqe *cqe = get_cqe(cq, n & cq->ibcq.cqe); in get_sw_cqe() local
85 !!(n & (cq->ibcq.cqe + 1))) ? NULL : cqe; in get_sw_cqe()
367 cqe = get_cqe(cq, i & cq->ibcq.cqe); in mlx4_ib_cq_resize_copy_cqes()
378 cqe = get_cqe(cq, ++i & cq->ibcq.cqe); in mlx4_ib_cq_resize_copy_cqes()
435 cq->ibcq.cqe = cq->resize_buf->cqe; in mlx4_ib_resize_cq()
452 cq->ibcq.cqe = cq->resize_buf->cqe; in mlx4_ib_resize_cq()
679 if (!cqe) in mlx4_ib_poll_one()
683 cqe++; in mlx4_ib_poll_one()
704 cq->ibcq.cqe = cq->resize_buf->cqe; in mlx4_ib_poll_one()
944 cqe = get_cqe(cq, prod_index & cq->ibcq.cqe); in __mlx4_ib_cq_clean()
[all …]
/drivers/infiniband/hw/vmw_pvrdma/
pvrdma_cq.c
106 int entries = attr->cqe; in pvrdma_create_cq()
133 cq->ibcq.cqe = entries; in pvrdma_create_cq()
187 cmd->cqe = entries; in pvrdma_create_cq()
196 cq->ibcq.cqe = resp->cqe; in pvrdma_create_cq()
294 cq->ibcq.cqe); in _pvrdma_flush_cqe()
310 *cqe = *curr_cqe; in _pvrdma_flush_cqe()
316 cq->ibcq.cqe); in _pvrdma_flush_cqe()
330 struct pvrdma_cqe *cqe; in pvrdma_poll_one() local
359 wc->wr_id = cqe->wr_id; in pvrdma_poll_one()
366 wc->slid = cqe->slid; in pvrdma_poll_one()
[all …]
/drivers/infiniband/sw/rxe/
rxe_cq.c
12 int cqe, int comp_vector) in rxe_cq_chk_attr() argument
16 if (cqe <= 0) { in rxe_cq_chk_attr()
17 rxe_dbg_dev(rxe, "cqe(%d) <= 0\n", cqe); in rxe_cq_chk_attr()
21 if (cqe > rxe->attr.max_cqe) { in rxe_cq_chk_attr()
23 cqe, rxe->attr.max_cqe); in rxe_cq_chk_attr()
29 if (cqe < count) { in rxe_cq_chk_attr()
31 cqe, count); in rxe_cq_chk_attr()
50 cq->queue = rxe_queue_init(rxe, &cqe, in rxe_cq_from_init()
65 cq->ibcq.cqe = cqe; in rxe_cq_from_init()
79 cq->ibcq.cqe = cqe; in rxe_cq_resize_queue()
[all …]
/drivers/infiniband/sw/siw/
siw_cq.c
50 struct siw_cqe *cqe; in siw_reap_cqe() local
55 cqe = &cq->queue[cq->cq_get % cq->num_cqe]; in siw_reap_cqe()
58 wc->wr_id = cqe->id; in siw_reap_cqe()
59 wc->byte_len = cqe->bytes; in siw_reap_cqe()
67 if (cqe->flags & SIW_WQE_REM_INVAL) { in siw_reap_cqe()
71 wc->qp = cqe->base_qp; in siw_reap_cqe()
72 wc->opcode = map_wc_opcode[cqe->opcode]; in siw_reap_cqe()
77 cqe->flags, (void *)(uintptr_t)cqe->id); in siw_reap_cqe()
86 u8 opcode = cqe->opcode; in siw_reap_cqe()
87 u16 status = cqe->status; in siw_reap_cqe()
[all …]
/drivers/net/ethernet/mellanox/mlxsw/
pci_hw.h
121 return mlxsw_pci_cqe##v0##_##name##_get(cqe); \
123 return mlxsw_pci_cqe##v1##_##name##_get(cqe); \
125 return mlxsw_pci_cqe##v2##_##name##_get(cqe); \
129 char *cqe, u32 val) \
134 mlxsw_pci_cqe##v0##_##name##_set(cqe, val); \
137 mlxsw_pci_cqe##v1##_##name##_set(cqe, val); \
140 mlxsw_pci_cqe##v2##_##name##_set(cqe, val); \
158 MLXSW_ITEM32(pci, cqe, system_port, 0x00, 0, 16);
169 MLXSW_ITEM32(pci, cqe, wqe_counter, 0x04, 16, 16);
176 MLXSW_ITEM32(pci, cqe, byte_count, 0x04, 0, 14);
[all …]
/drivers/net/ethernet/mellanox/mlx5/core/
en_rx.c
140 cqe->op_own = op_own; in mlx5e_cqes_update_owner()
148 cqe->op_own = op_own; in mlx5e_cqes_update_owner()
999 struct mlx5_cqe64 *cqe; in mlx5e_poll_ico_cq() local
1007 if (likely(!cqe)) in mlx5e_poll_ico_cq()
1671 mxbuf->cqe = cqe; in mlx5e_fill_mxbuf()
2304 u16 data_bcnt = mpwrq_get_cqe_byte_cnt(cqe) - cqe->shampo.header_size; in mlx5e_handle_rx_cqe_mpwrq_shampo()
2477 if (!cqe) in mlx5e_rx_cq_process_enhanced_cqe_comp()
2498 title_cqe = cqe; in mlx5e_rx_cq_process_enhanced_cqe_comp()
2503 rq, cqe); in mlx5e_rx_cq_process_enhanced_cqe_comp()
2521 struct mlx5_cqe64 *cqe; in mlx5e_rx_cq_process_basic_cqe_comp() local
[all …]
wq.h
202 struct mlx5_cqe64 *cqe = mlx5_frag_buf_get_wqe(&wq->fbc, ix); in mlx5_cqwq_get_wqe() local
205 cqe += wq->fbc.log_stride == 7; in mlx5_cqwq_get_wqe()
207 return cqe; in mlx5_cqwq_get_wqe()
233 struct mlx5_cqe64 *cqe = mlx5_cqwq_get_wqe(wq, ci); in mlx5_cqwq_get_cqe() local
234 u8 cqe_ownership_bit = cqe->op_own & MLX5_CQE_OWNER_MASK; in mlx5_cqwq_get_cqe()
243 return cqe; in mlx5_cqwq_get_cqe()
251 struct mlx5_cqe64 *cqe; in mlx5_cqwq_get_cqe_enhanced_comp() local
253 cqe = mlx5_cqwq_get_wqe(wq, ci); in mlx5_cqwq_get_cqe_enhanced_comp()
254 if (cqe->validity_iteration_count != sw_validity_iteration_count) in mlx5_cqwq_get_cqe_enhanced_comp()
260 return cqe; in mlx5_cqwq_get_cqe_enhanced_comp()
/drivers/infiniband/hw/cxgb4/
cq.c
190 memset(&cqe, 0, sizeof(cqe)); in insert_recv_cqe()
224 memset(&cqe, 0, sizeof(cqe)); in insert_sq_cqe()
432 if ((CQE_OPCODE(cqe) == FW_RI_RDMA_WRITE) && RQ_TYPE(cqe)) in cqe_completes_wr()
435 if ((CQE_OPCODE(cqe) == FW_RI_READ_RESP) && SQ_TYPE(cqe)) in cqe_completes_wr()
438 if (CQE_SEND_OPCODE(cqe) && RQ_TYPE(cqe) && t4_rq_empty(wq)) in cqe_completes_wr()
783 CQE_TYPE(&cqe), CQE_OPCODE(&cqe), in __c4iw_poll_cq_one()
784 CQE_STATUS(&cqe), CQE_LEN(&cqe), in __c4iw_poll_cq_one()
785 CQE_WRID_HI(&cqe), CQE_WRID_LOW(&cqe), in __c4iw_poll_cq_one()
812 CQE_OPCODE(&cqe), CQE_QPID(&cqe)); in __c4iw_poll_cq_one()
849 CQE_OPCODE(&cqe), CQE_QPID(&cqe)); in __c4iw_poll_cq_one()
[all …]
/drivers/infiniband/hw/erdma/
erdma_cq.c
14 be32_to_cpu(READ_ONCE(*cqe))); in get_next_valid_cqe()
131 struct erdma_cqe *cqe; in erdma_poll_one_cqe() local
138 cqe = get_next_valid_cqe(cq); in erdma_poll_one_cqe()
139 if (!cqe) in erdma_poll_one_cqe()
147 qpn = be32_to_cpu(cqe->qpn); in erdma_poll_one_cqe()
149 cqe_hdr = be32_to_cpu(cqe->hdr); in erdma_poll_one_cqe()
189 erdma_process_ud_cqe(cqe, wc); in erdma_poll_one_cqe()
228 struct erdma_cqe *cqe, *dst_cqe; in erdma_remove_cqes_of_qp() local
256 cqe->hdr = cpu_to_be32( in erdma_remove_cqes_of_qp()
257 (be32_to_cpu(cqe->hdr) & in erdma_remove_cqes_of_qp()
[all …]
/drivers/infiniband/hw/mlx5/
cq.c
84 void *cqe = get_cqe(cq, n & cq->ibcq.cqe); in get_sw_cqe() local
87 cqe64 = (cq->mcq.cqe_sz == 64) ? cqe : cqe + 64; in get_sw_cqe()
91 return cqe; in get_sw_cqe()
279 cqe, sizeof(*cqe), false); in dump_cqe()
462 void *cqe; in mlx5_poll_one() local
467 if (!cqe) in mlx5_poll_one()
470 cqe64 = (cq->mcq.cqe_sz == 64) ? cqe : cqe + 64; in mlx5_poll_one()
877 void *cqe; in init_cq_frag_buf() local
882 cqe64 = buf->cqe_size == 64 ? cqe : cqe + 64; in init_cq_frag_buf()
1116 cqe = get_cqe(cq, prod_index & cq->ibcq.cqe); in __mlx5_ib_cq_clean()
[all …]
/drivers/infiniband/hw/mana/
cq.c
40 if ((!is_rnic_cq && attr->cqe > mdev->adapter_caps.max_qp_wr) || in mana_ib_create_cq()
41 attr->cqe > U32_MAX / COMP_ENTRY_SIZE) { in mana_ib_create_cq()
42 ibdev_dbg(ibdev, "CQE %d exceeding limit\n", attr->cqe); in mana_ib_create_cq()
46 cq->cqe = attr->cqe; in mana_ib_create_cq()
60 cq->cqe = buf_size / COMP_ENTRY_SIZE; in mana_ib_create_cq()
189 struct mana_rdma_cqe *rdma_cqe = (struct mana_rdma_cqe *)cqe->cqe_data; in handle_ud_sq_cqe()
205 struct mana_rdma_cqe *rdma_cqe = (struct mana_rdma_cqe *)cqe->cqe_data; in handle_ud_rq_cqe()
223 struct mana_ib_qp *qp = mana_get_qp_ref(mdev, cqe->wq_num, cqe->is_sq); in mana_handle_cqe()
229 if (cqe->is_sq) in mana_handle_cqe()
230 handle_ud_sq_cqe(qp, cqe); in mana_handle_cqe()
[all …]
/drivers/infiniband/sw/rdmavt/
cq.c
56 head = cq->ibcq.cqe; in rvt_cq_enter()
251 cq->ibcq.cqe = entries; in rvt_create_cq()
352 if (cqe < 1 || cqe > rdi->dparms.props.max_cqe) in rvt_resize_cq()
395 if (head > (u32)cq->ibcq.cqe) in rvt_resize_cq()
396 head = (u32)cq->ibcq.cqe; in rvt_resize_cq()
397 if (tail > (u32)cq->ibcq.cqe) in rvt_resize_cq()
398 tail = (u32)cq->ibcq.cqe; in rvt_resize_cq()
403 if (unlikely((u32)cqe < n)) { in rvt_resize_cq()
417 cq->ibcq.cqe = cqe; in rvt_resize_cq()
495 tail = (u32)cq->ibcq.cqe; in rvt_poll_cq()
[all …]
/drivers/infiniband/hw/bnxt_re/
qplib_fp.c
2400 memset(cqe, 0, sizeof(*cqe)); in __flush_sq()
2407 cqe++; in __flush_sq()
2450 memset(cqe, 0, sizeof(*cqe)); in __flush_rq()
2456 cqe++; in __flush_rq()
2661 memset(cqe, 0, sizeof(*cqe)); in bnxt_qplib_cq_process_req()
2679 sq->swq_last, cqe->wr_id, cqe->status); in bnxt_qplib_cq_process_req()
2780 cqe++; in bnxt_qplib_cq_process_res_rc()
2797 cqe++; in bnxt_qplib_cq_process_res_rc()
2866 cqe++; in bnxt_qplib_cq_process_res_ud()
2884 cqe++; in bnxt_qplib_cq_process_res_ud()
[all …]
/drivers/net/ethernet/qlogic/qede/
qede_fp.c
661 cqe->header_len; in qede_set_gro_params()
871 qede_get_rxhash(tpa_info->skb, cqe->bitfields, cqe->rss_hash); in qede_tpa_start()
877 if (likely(cqe->bw_ext_bd_len_list[0])) in qede_tpa_start()
963 for (i = 0; cqe->len_list[i]; i++) in qede_tpa_cont()
965 le16_to_cpu(cqe->len_list[i])); in qede_tpa_cont()
988 for (i = 0; cqe->len_list[i]; i++) in qede_tpa_end()
990 le16_to_cpu(cqe->len_list[i])); in qede_tpa_end()
1226 union eth_rx_cqe *cqe, in qede_rx_process_tpa_cqe() argument
1251 union eth_rx_cqe *cqe; in qede_rx_process_cqe() local
1280 fp_cqe = &cqe->fast_path_regular; in qede_rx_process_cqe()
[all …]
/drivers/net/ethernet/marvell/octeontx2/nic/
otx2_txrx.c
241 hash = cqe->hdr.flow_tag; in otx2_set_rxhash()
325 if (cqe->sg.segs) in otx2_check_rcv_errors()
398 struct nix_cqe_rx_s *cqe; in otx2_rx_napi_handler() local
411 !cqe->sg.seg_addr) { in otx2_rx_napi_handler()
422 cqe->sg.seg_addr = 0x00; in otx2_rx_napi_handler()
469 struct nix_cqe_tx_s *cqe; in otx2_tx_napi_handler() local
491 if (unlikely(!cqe)) { in otx2_tx_napi_handler()
1278 struct nix_cqe_rx_s *cqe; in otx2_cleanup_rx_cqes() local
1302 if (!cqe) in otx2_cleanup_rx_cqes()
1304 if (cqe->sg.segs > 1) { in otx2_cleanup_rx_cqes()
[all …]
/drivers/infiniband/ulp/iser/
iscsi_iser.h
244 struct ib_cqe cqe; member
271 struct ib_cqe cqe; member
291 struct ib_cqe cqe; member
552 iser_rx(struct ib_cqe *cqe) in iser_rx() argument
554 return container_of(cqe, struct iser_rx_desc, cqe); in iser_rx()
558 iser_tx(struct ib_cqe *cqe) in iser_tx() argument
560 return container_of(cqe, struct iser_tx_desc, cqe); in iser_tx()
564 iser_login(struct ib_cqe *cqe) in iser_login() argument
566 return container_of(cqe, struct iser_login_desc, cqe); in iser_login()
/drivers/net/ethernet/huawei/hinic/
hinic_hw_qp.c
326 if (!rq->cqe) in alloc_rq_cqe()
336 sizeof(*rq->cqe[i]), in alloc_rq_cqe()
338 if (!rq->cqe[i]) in alloc_rq_cqe()
346 dma_free_coherent(&pdev->dev, sizeof(*rq->cqe[j]), rq->cqe[j], in alloc_rq_cqe()
352 vfree(rq->cqe); in alloc_rq_cqe()
368 dma_free_coherent(&pdev->dev, sizeof(*rq->cqe[i]), rq->cqe[i], in free_rq_cqe()
372 vfree(rq->cqe); in free_rq_cqe()
850 cqe = rq->cqe[*cons_idx]; in hinic_rq_read_wqe()
902 struct hinic_rq_cqe *cqe = rq->cqe[cons_idx]; in hinic_rq_put_wqe() local
925 struct hinic_rq_cqe *cqe = rq->cqe[cons_idx]; in hinic_rq_get_sge() local
[all …]
/drivers/scsi/qedi/
qedi_fw.c
31 union iscsi_cqe *cqe, in qedi_process_logout_resp() argument
82 union iscsi_cqe *cqe, in qedi_process_text_resp() argument
178 union iscsi_cqe *cqe, in qedi_process_tmf_resp() argument
331 idx = cqe->rqe_opaque; in qedi_get_rq_bdq_buf()
366 idx = cqe->rqe_opaque; in qedi_put_rq_bdq_buf()
409 union iscsi_cqe *cqe, in qedi_process_nopin_mesg() argument
576 union iscsi_cqe *cqe, in qedi_scsi_completion() argument
674 union iscsi_cqe *cqe, in qedi_mtask_completion() argument
734 u32 proto_itt = cqe->itid; in qedi_process_cmd_cleanup_resp()
743 iscsi_cid = cqe->conn_id; in qedi_process_cmd_cleanup_resp()
[all …]
/drivers/infiniband/hw/ocrdma/
ocrdma_verbs.c
974 int entries = attr->cqe; in ocrdma_create_cq()
1028 ibcq->cqe = new_cnt; in ocrdma_resize_cq()
1041 cqe = cq->va; in ocrdma_flush_cq()
1051 cqe++; in ocrdma_flush_cq()
1598 struct ocrdma_cqe *cqe; in ocrdma_discard_cqes() local
1617 cqe = cq->va + cur_getp; in ocrdma_discard_cqes()
1648 cqe->cmn.qpn = 0; in ocrdma_discard_cqes()
2445 if (is_cqe_for_sq(cqe)) { in ocrdma_set_cqe_status_flushed()
2699 if (is_cqe_imm(cqe)) { in ocrdma_poll_success_rcqe()
2765 struct ocrdma_cqe *cqe; in ocrdma_poll_hwcq() local
[all …]
/drivers/net/ethernet/mellanox/mlx5/core/en_accel/
macsec.h
32 static inline bool mlx5e_macsec_is_rx_flow(struct mlx5_cqe64 *cqe) in mlx5e_macsec_is_rx_flow() argument
34 return MLX5_MACSEC_METADATA_MARKER(be32_to_cpu(cqe->ft_metadata)); in mlx5e_macsec_is_rx_flow()
38 struct mlx5_cqe64 *cqe);
46 static inline bool mlx5e_macsec_is_rx_flow(struct mlx5_cqe64 *cqe) { return false; } in mlx5e_macsec_is_rx_flow() argument
49 struct mlx5_cqe64 *cqe) in mlx5e_macsec_offload_handle_rx_skb() argument
/drivers/net/ethernet/mellanox/mlx4/
en_rx.c
635 struct mlx4_cqe *cqe; member
649 mlx4_en_get_cqe_ts(_ctx->cqe)); in mlx4_en_xdp_rx_timestamp()
657 struct mlx4_cqe *cqe = _ctx->cqe; in mlx4_en_xdp_rx_hash() local
665 status = cqe->status; in mlx4_en_xdp_rx_hash()
674 if (cqe->ipv6_ext_mask) in mlx4_en_xdp_rx_hash()
692 struct mlx4_cqe *cqe; in mlx4_en_process_rx_cq() local
782 length = be32_to_cpu(cqe->byte_cnt); in mlx4_en_process_rx_cq()
802 mxbuf.cqe = cqe; in mlx4_en_process_rx_cq()
908 if ((cqe->vlan_my_qpn & in mlx4_en_process_rx_cq()
912 be16_to_cpu(cqe->sl_vid)); in mlx4_en_process_rx_cq()
[all …]
/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/
send.c
351 u8 wqe_opcode = cqe ? be32_to_cpu(cqe->sop_drop_qpn) >> 24 : 0; in hws_send_engine_dump_error_cqe()
353 u32 opcode = cqe ? get_cqe_opcode(cqe) : 0; in hws_send_engine_dump_error_cqe()
396 if (!cqe) { in hws_send_engine_dump_error_cqe()
461 struct mlx5_cqe64 *cqe) in hws_send_engine_update_rule() argument
529 if (!cqe || (likely(be32_to_cpu(cqe->byte_cnt) >> 31 == 0) && in hws_send_engine_update()
539 &status, cqe); in hws_send_engine_update()
606 struct mlx5_cqe64 *cqe; in hws_send_engine_poll_cq() local
611 if (!cqe) in hws_send_engine_poll_cq()
890 struct mlx5_cqe64 *cqe; in hws_send_ring_alloc_cq() local
908 cqe->op_own = 0xf1; in hws_send_ring_alloc_cq()
[all …]
/drivers/net/ethernet/mellanox/mlx5/core/en/
xdp.c
241 const struct mlx5_cqe64 *cqe = _ctx->cqe; in mlx5e_xdp_rx_hash() local
249 hash_type = cqe->rss_hash_type; in mlx5e_xdp_rx_hash()
263 const struct mlx5_cqe64 *cqe = _ctx->cqe; in mlx5e_xdp_rx_vlan_tag() local
265 if (!cqe_has_vlan(cqe)) in mlx5e_xdp_rx_vlan_tag()
280 struct mlx5_cqe64 *cqe; member
289 ts = get_cqe_ts(priv->cqe); in mlx5e_xsk_fill_timestamp()
658 struct mlx5_cqe64 *cqe) in mlx5e_free_xdpsq_desc() argument
723 .cqe = cqe, in mlx5e_free_xdpsq_desc()
747 struct mlx5_cqe64 *cqe; in mlx5e_poll_xdpsq_cq() local
760 if (!cqe) in mlx5e_poll_xdpsq_cq()
[all …]
/drivers/scsi/qedf/
qedf.h
259 struct fcoe_cqe cqe; member
496 extern void qedf_scsi_completion(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
499 struct fcoe_cqe *cqe, struct qedf_ioreq *io_req);
501 struct fcoe_cqe *cqe, struct qedf_ioreq *io_req);
506 extern void qedf_process_abts_compl(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
519 extern void qedf_process_els_compl(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
526 struct fcoe_cqe *cqe, struct qedf_ioreq *io_req);
528 extern void qedf_process_tmf_compl(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
530 extern void qedf_process_cqe(struct qedf_ctx *qedf, struct fcoe_cqe *cqe);
539 struct fcoe_cqe *cqe);
[all …]
