/linux-6.3-rc2/drivers/net/ethernet/mellanox/mlx5/core/

transobj.c
   118  *sqn = MLX5_GET(create_sq_out, out, sqn);  in mlx5_core_create_sq()
   125  MLX5_SET(modify_sq_in, in, sqn, sqn);  in mlx5_core_modify_sq()
   136  MLX5_SET(destroy_sq_in, in, sqn, sqn);  in mlx5_core_destroy_sq()
   145  MLX5_SET(query_sq_in, in, sqn, sqn);  in mlx5_core_query_sq()
   162  err = mlx5_core_query_sq(dev, sqn, out);  in mlx5_core_query_sq_state()
   294  struct mlx5_hairpin_params *params, u32 *sqn)  in mlx5_hairpin_create_sq() argument
   332  mlx5_core_destroy_sq(hp->peer_mdev, hp->sqn[j]);  in mlx5_hairpin_create_queues()
   347  mlx5_core_destroy_sq(hp->peer_mdev, hp->sqn[i]);  in mlx5_hairpin_destroy_queues()
   388  return mlx5_core_modify_sq(peer_mdev, sqn, in);  in mlx5_hairpin_modify_sq()
   467  hp->sqn = hp->rqn + params->num_channels;  in mlx5_core_hairpin_create()
   [all …]
en.h
   446  u32 sqn;  member
   556  u32 sqn;  member
   588  u32 sqn;  member
  1113  int mlx5e_modify_sq(struct mlx5_core_dev *mdev, u32 sqn,
  1130  u32 *sqn);
en_rep.c
   425  rep_sq->sqn = sqns_array[i];  in mlx5e_sqs2vport_start()
   486  sqs[num_sqs++] = c->sq[tc].sqn;  in mlx5e_add_sqs_fwd_rules()
   490  sqs[num_sqs++] = c->rq_xdpsq.sqn;  in mlx5e_add_sqs_fwd_rules()
   492  sqs[num_sqs++] = c->xdpsq.sqn;  in mlx5e_add_sqs_fwd_rules()
   499  sqs[num_sqs++] = ptp_ch->ptpsq[tc].txqsq.sqn;  in mlx5e_add_sqs_fwd_rules()
  1501  flow_rule = mlx5_eswitch_add_send_to_vport_rule(peer_esw, esw, rep, rep_sq->sqn);  in mlx5e_vport_rep_event_pair()
en_main.c
  1518  u32 *sqn)  in mlx5e_create_sq() argument
  1597  err = mlx5_core_modify_sq(mdev, sqn, in);  in mlx5e_modify_sq()
  1606  mlx5_core_destroy_sq(mdev, sqn);  in mlx5e_destroy_sq()
  1613  u32 *sqn)  in mlx5e_create_sq_rdy() argument
  1628  err = mlx5e_modify_sq(mdev, *sqn, &msp);  in mlx5e_create_sq_rdy()
  1630  mlx5e_destroy_sq(mdev, *sqn);  in mlx5e_create_sq_rdy()
  1722  mlx5e_destroy_sq(mdev, sq->sqn);  in mlx5e_close_txqsq()
  1767  mlx5e_destroy_sq(c->mdev, sq->sqn);  in mlx5e_open_icosq()
  1791  mlx5e_destroy_sq(c->mdev, sq->sqn);  in mlx5e_close_icosq()
  1873  mlx5e_destroy_sq(c->mdev, sq->sqn);  in mlx5e_close_xdpsq()
  [all …]
en_rep.h
   230  u32 sqn;  member
en_tx.c
   366  wqe = mlx5e_post_nop(&sq->wq, sq->sqn, &sq->pc);  in mlx5e_tx_flush()
   389  cseg->qpn_ds = cpu_to_be32((sq->sqn << 8) | wqe_attr->ds_cnt);  in mlx5e_txwqe_complete()
   567  cseg->qpn_ds = cpu_to_be32((sq->sqn << 8) | ds_count);  in mlx5e_tx_mpwqe_session_complete()
   842  mlx5e_dump_error_cqe(&sq->cq, sq->sqn,  in mlx5e_poll_tx_cq()
en_txrx.c
    84  nopwqe = mlx5e_post_nop(wq, sq->sqn, &sq->pc);  in mlx5e_trigger_irq()
/linux-6.3-rc2/include/linux/mlx5/

transobj.h
    46  u32 *sqn);
    47  int mlx5_core_modify_sq(struct mlx5_core_dev *dev, u32 sqn, u32 *in);
    48  void mlx5_core_destroy_sq(struct mlx5_core_dev *dev, u32 sqn);
    49  int mlx5_core_query_sq(struct mlx5_core_dev *dev, u32 sqn, u32 *out);
    50  int mlx5_core_query_sq_state(struct mlx5_core_dev *dev, u32 sqn, u8 *state);
    77  u32 *sqn;  member
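The transobj.h declarations above are the low-level firmware-object API for a send queue: mlx5_core_create_sq() hands back the hardware-assigned SQ number through its *sqn out-parameter (see transobj.c line 118), mlx5_core_modify_sq() moves the SQ between states, mlx5_core_query_sq()/mlx5_core_query_sq_state() read it back, and mlx5_core_destroy_sq() releases it. A minimal, hypothetical sketch of the usual RST-to-RDY transition, assuming the MLX5_SQC_STATE_* constants and the modify_sq_in/sqc layout from include/linux/mlx5/mlx5_ifc.h (not part of this listing):

/* Sketch only: move an already-created SQ from RST to RDY and confirm it.
 * Assumes <linux/mlx5/driver.h>, <linux/mlx5/mlx5_ifc.h>, <linux/mlx5/transobj.h>.
 */
static int example_sq_rst_to_rdy(struct mlx5_core_dev *mdev, u32 sqn)
{
	u32 in[MLX5_ST_SZ_DW(modify_sq_in)] = {};
	void *sqc = MLX5_ADDR_OF(modify_sq_in, in, ctx);
	u8 state;
	int err;

	MLX5_SET(modify_sq_in, in, sq_state, MLX5_SQC_STATE_RST);	/* current state */
	MLX5_SET(sqc, sqc, state, MLX5_SQC_STATE_RDY);			/* requested state */

	err = mlx5_core_modify_sq(mdev, sqn, in);
	if (err)
		return err;

	/* Optionally confirm the transition took effect. */
	err = mlx5_core_query_sq_state(mdev, sqn, &state);
	if (err)
		return err;

	return state == MLX5_SQC_STATE_RDY ? 0 : -EINVAL;
}

The hits below in aso.c (mlx5_aso_set_sq_rdy) and en_main.c (mlx5e_modify_sq) show this same modify-to-RDY step inside the driver itself.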
eswitch.h
    75  struct mlx5_eswitch_rep *rep, u32 sqn);
/linux-6.3-rc2/drivers/net/ethernet/mellanox/mlx5/core/en/

reporter_tx.c
    25  sq->sqn, sq->cc, sq->pc);  in mlx5e_wait_for_sq_flush()
    34  sq->sqn, sq->cc, sq->pc);  in mlx5e_reset_txqsq_cc_pc()
    55  err = mlx5_core_query_sq_state(mdev, sq->sqn, &state);  in mlx5e_tx_reporter_err_cqe_recover()
    58  sq->sqn, err);  in mlx5e_tx_reporter_err_cqe_recover()
    76  err = mlx5e_health_sq_to_ready(mdev, dev, sq->sqn);  in mlx5e_tx_reporter_err_cqe_recover()
   161  err = mlx5_core_query_sq_state(priv->mdev, sq->sqn, &state);  in mlx5e_tx_reporter_build_diagnose_output_sq_common()
   173  err = devlink_fmsg_u32_pair_put(fmsg, "sqn", sq->sqn);  in mlx5e_tx_reporter_build_diagnose_output_sq_common()
   445  key.index1 = sq->sqn;  in mlx5e_tx_reporter_dump_sq()
   515  err = mlx5e_health_queue_dump(priv, fmsg, sq->sqn, "SQ");  in mlx5e_tx_reporter_dump_all_sqs()
   525  err = mlx5e_health_queue_dump(priv, fmsg, sq->sqn, "PTP SQ");  in mlx5e_tx_reporter_dump_all_sqs()
   [all …]
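The mlx5e_tx_reporter_err_cqe_recover() hits trace the error-CQE recovery flow: query the SQ's hardware state by sqn, and only if it really is in the error state hand the sqn to mlx5e_health_sq_to_ready(). A hypothetical outline of that decision, using only the two helpers whose prototypes appear elsewhere in this listing (the real recover routine also flushes the SQ and resets its cc/pc counters, which is omitted here):

/* Sketch only: the query-then-recover decision implied by the hits above. */
static int example_err_cqe_recover(struct mlx5_core_dev *mdev,
				   struct net_device *dev, u32 sqn)
{
	u8 state;
	int err;

	err = mlx5_core_query_sq_state(mdev, sqn, &state);
	if (err)
		return err;

	if (state != MLX5_SQC_STATE_ERR)	/* nothing to recover */
		return 0;

	return mlx5e_health_sq_to_ready(mdev, dev, sqn);
}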
reporter_rx.c
    50  icosq->sqn, icosq->cc, icosq->pc);  in mlx5e_wait_for_icosq_flush()
    58  icosq->sqn, icosq->cc, icosq->pc);  in mlx5e_reset_icosq_cc_pc()
    83  err = mlx5_core_query_sq_state(mdev, icosq->sqn, &state);  in mlx5e_rx_reporter_err_icosq_cqe_recover()
    86  icosq->sqn, err);  in mlx5e_rx_reporter_err_icosq_cqe_recover()
   105  err = mlx5e_health_sq_to_ready(mdev, dev, icosq->sqn);  in mlx5e_rx_reporter_err_icosq_cqe_recover()
   198  err = devlink_fmsg_u32_pair_put(fmsg, "sqn", icosq->sqn);  in mlx5e_reporter_icosq_diagnose()
   296  err = mlx5_core_query_sq_state(rq->mdev, icosq->sqn, &icosq_hw_state);  in mlx5e_rx_reporter_build_diagnose_output_rq_common()
   516  key.index1 = icosq->sqn;  in mlx5e_rx_reporter_dump_icosq()
   682  snprintf(icosq_str, sizeof(icosq_str), "ICOSQ: 0x%x, ", icosq->sqn);  in mlx5e_reporter_rx_timeout()
   713  snprintf(err_str, sizeof(err_str), "ERR CQE on ICOSQ: 0x%x", icosq->sqn);  in mlx5e_reporter_icosq_cqe_err()
health.c
   160  int mlx5e_health_sq_to_ready(struct mlx5_core_dev *mdev, struct net_device *dev, u32 sqn)  in mlx5e_health_sq_to_ready() argument
   168  err = mlx5e_modify_sq(mdev, sqn, &msp);  in mlx5e_health_sq_to_ready()
   170  netdev_err(dev, "Failed to move sq 0x%x to reset\n", sqn);  in mlx5e_health_sq_to_ready()
   178  err = mlx5e_modify_sq(mdev, sqn, &msp);  in mlx5e_health_sq_to_ready()
   180  netdev_err(dev, "Failed to move sq 0x%x to ready\n", sqn);  in mlx5e_health_sq_to_ready()
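These hits cover the core of mlx5e_health_sq_to_ready(): two mlx5e_modify_sq() calls, one taking the SQ to reset and one taking it to ready. An illustrative reconstruction, assuming the mlx5e_modify_sq_param fields (curr_state/next_state) declared in en.h and the MLX5_SQC_STATE_* constants from mlx5_ifc.h; the error strings are the ones shown in the hits:

/* Sketch only: drive an errored SQ back to RDY via an intermediate RST. */
static int example_sq_to_ready(struct mlx5_core_dev *mdev,
			       struct net_device *dev, u32 sqn)
{
	struct mlx5e_modify_sq_param msp = {};
	int err;

	msp.curr_state = MLX5_SQC_STATE_ERR;
	msp.next_state = MLX5_SQC_STATE_RST;
	err = mlx5e_modify_sq(mdev, sqn, &msp);
	if (err) {
		netdev_err(dev, "Failed to move sq 0x%x to reset\n", sqn);
		return err;
	}

	memset(&msp, 0, sizeof(msp));
	msp.curr_state = MLX5_SQC_STATE_RST;
	msp.next_state = MLX5_SQC_STATE_RDY;
	err = mlx5e_modify_sq(mdev, sqn, &msp);
	if (err) {
		netdev_err(dev, "Failed to move sq 0x%x to ready\n", sqn);
		return err;
	}

	return 0;
}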
txrx.h
   112  mlx5e_post_nop(struct mlx5_wq_cyc *wq, u32 sqn, u16 *pc)  in mlx5e_post_nop() argument
   121  cseg->qpn_ds = cpu_to_be32((sqn << 8) | 0x01);  in mlx5e_post_nop()
   129  mlx5e_post_nop_fence(struct mlx5_wq_cyc *wq, u32 sqn, u16 *pc)  in mlx5e_post_nop_fence() argument
   138  cseg->qpn_ds = cpu_to_be32((sqn << 8) | 0x01);  in mlx5e_post_nop_fence()
   175  mlx5e_post_nop(wq, sq->sqn, &sq->pc);  in mlx5e_txqsq_get_next_pi()
   236  mlx5e_post_nop(wq, sq->sqn, &sq->pc);  in mlx5e_icosq_get_next_pi()
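Every `(sqn << 8) | ...` hit in this listing is the same idiom: the WQE control segment's qpn_ds word carries the SQ number in its upper 24 bits and the WQE size, counted in 16-byte data segments, in the low bits; a NOP is a single segment, hence the `| 0x01` above. The kTLS and ASO hits spell the shift out as MLX5_WQE_CTRL_QPN_SHIFT. A small hedged helper showing the packing, assuming that constant carries the literal value 8 used here:

/* Sketch only: pack an SQ number and data-segment count into qpn_ds. */
static inline __be32 example_qpn_ds(u32 sqn, u8 ds_cnt)
{
	return cpu_to_be32((sqn << MLX5_WQE_CTRL_QPN_SHIFT) | ds_cnt);
}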
health.h
    44  int mlx5e_health_sq_to_ready(struct mlx5_core_dev *mdev, struct net_device *dev, u32 sqn);
xdp.c
   248  mlx5e_post_nop(wq, sq->sqn, &sq->pc);  in mlx5e_xdpsq_get_next_pi()
   291  cseg->qpn_ds = cpu_to_be32((sq->sqn << 8) | ds_count);  in mlx5e_xdp_mpwqe_complete()
   487  cseg->qpn_ds = cpu_to_be32((sq->sqn << 8) | ds_cnt);  in mlx5e_xmit_xdp_frame()
   589  mlx5e_dump_error_cqe(&sq->cq, sq->sqn,  in mlx5e_poll_xdpsq_cq()
ptp.c
   285  static void mlx5e_ptp_destroy_sq(struct mlx5_core_dev *mdev, u32 sqn)  in mlx5e_ptp_destroy_sq() argument
   287  mlx5_core_destroy_sq(mdev, sqn);  in mlx5e_ptp_destroy_sq()
   345  err = mlx5e_create_sq_rdy(c->mdev, sqp, &csp, 0, &txqsq->sqn);  in mlx5e_ptp_open_txqsq()
   369  mlx5e_ptp_destroy_sq(mdev, sq->sqn);  in mlx5e_ptp_close_txqsq()
/linux-6.3-rc2/drivers/net/ethernet/mellanox/mlx5/core/lib/

aso.c
    33  u32 sqn;  member
   214  err = mlx5_core_create_sq(mdev, in, inlen, &sq->sqn);  in create_aso_sq()
   221  static int mlx5_aso_set_sq_rdy(struct mlx5_core_dev *mdev, u32 sqn)  in mlx5_aso_set_sq_rdy() argument
   235  err = mlx5_core_modify_sq(mdev, sqn, in);  in mlx5_aso_set_sq_rdy()
   251  err = mlx5_aso_set_sq_rdy(mdev, sq->sqn);  in mlx5_aso_create_sq_rdy()
   253  mlx5_core_destroy_sq(mdev, sq->sqn);  in mlx5_aso_create_sq_rdy()
   265  mlx5_core_destroy_sq(sq->cq.mdev, sq->sqn);  in mlx5_aso_destroy_sq()
   296  mlx5_core_dbg(mdev, "aso sq->sqn = 0x%x\n", sq->sqn);  in mlx5_aso_create_sq()
   351  cseg->qpn_ds = cpu_to_be32((aso->sqn << MLX5_WQE_CTRL_QPN_SHIFT) | ds_cnt);  in mlx5_aso_build_wqe()
/linux-6.3-rc2/drivers/net/ethernet/mellanox/mlx5/core/en_accel/

ktls_txrx.c
    75  u16 pc, u32 sqn,  in mlx5e_ktls_build_static_params() argument
    89  cseg->qpn_ds = cpu_to_be32((sqn << MLX5_WQE_CTRL_QPN_SHIFT) |  in mlx5e_ktls_build_static_params()
   118  u16 pc, u32 sqn,  in mlx5e_ktls_build_progress_params() argument
   132  cseg->qpn_ds = cpu_to_be32((sqn << MLX5_WQE_CTRL_QPN_SHIFT) |  in mlx5e_ktls_build_progress_params()
ktls_utils.h
    80  u16 pc, u32 sqn,
    86  u16 pc, u32 sqn,
ktls_tx.c
   557  mlx5e_ktls_build_static_params(wqe, sq->pc, sq->sqn, &priv_tx->crypto_info,  in post_static_params()
   576  mlx5e_ktls_build_progress_params(wqe, sq->pc, sq->sqn, priv_tx->tisn, fence, 0,  in post_progress_params()
   589  mlx5e_post_nop_fence(wq, sq->sqn, &sq->pc);  in tx_post_fence_nop()
   733  cseg->qpn_ds = cpu_to_be32((sq->sqn << 8) | ds_cnt);  in tx_post_resync_dump()
ktls_rx.c
   149  mlx5e_ktls_build_static_params(wqe, sq->pc, sq->sqn, &priv_rx->crypto_info,  in post_static_params()
   180  mlx5e_ktls_build_progress_params(wqe, sq->pc, sq->sqn,  in post_progress_params()
   298  cpu_to_be32((sq->sqn << MLX5_WQE_CTRL_QPN_SHIFT) | GET_PSV_DS_CNT);  in resync_post_get_progress_params()
/linux-6.3-rc2/drivers/infiniband/hw/mlx5/

qpc.c
   596  static void destroy_sq_tracked(struct mlx5_ib_dev *dev, u32 sqn, u16 uid)  in destroy_sq_tracked() argument
   601  MLX5_SET(destroy_sq_in, in, sqn, sqn);  in destroy_sq_tracked()
   617  sq->qpn = MLX5_GET(create_sq_out, out, sqn);  in mlx5_core_create_sq_tracked()
/linux-6.3-rc2/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/

tx.c
    56  nopwqe = mlx5e_post_nop(&sq->wq, sq->sqn, &sq->pc);  in mlx5e_xsk_tx_post_err()
/linux-6.3-rc2/include/uapi/rdma/

mlx5-abi.h
   399  __u32 sqn;  member
/linux-6.3-rc2/drivers/scsi/bnx2i/

bnx2i_hwi.c
   156  if (cq_db->sqn[0] && cq_db->sqn[0] != 0xFFFF)  in bnx2i_arm_cq_event_coalescing()
   176  cq_db->sqn[0] = cq_index;  in bnx2i_arm_cq_event_coalescing()
  1127  memset(cq_db->sqn, 0xFF, sizeof(cq_db->sqn[0]) * BNX2X_MAX_CQS);  in bnx2i_alloc_qp_resc()