/drivers/net/ethernet/mellanox/mlx5/core/

wc.c
    90  memcpy(cqc, cqc_data, MLX5_ST_SZ_BYTES(cqc));  in create_wc_cq()
    96  MLX5_SET(cqc, cqc, c_eqn_or_apu_element, eqn);  in create_wc_cq()
   100  MLX5_SET64(cqc, cqc, dbr_addr, cq->wq_ctrl.db.dma);  in create_wc_cq()
   111  void *cqc;  in mlx5_wc_create_cq() local
   114  cqc = kvzalloc(MLX5_ST_SZ_BYTES(cqc), GFP_KERNEL);  in mlx5_wc_create_cq()
   115  if (!cqc)  in mlx5_wc_create_cq()
   118  MLX5_SET(cqc, cqc, log_cq_size, TEST_WC_LOG_CQ_SZ);  in mlx5_wc_create_cq()
   119  MLX5_SET(cqc, cqc, uar_page, mdev->priv.uar->index);  in mlx5_wc_create_cq()
   121  MLX5_SET(cqc, cqc, cqe_sz, CQE_STRIDE_128_PAD);  in mlx5_wc_create_cq()
   135  kvfree(cqc);  in mlx5_wc_create_cq()
   [all …]
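
Across these mlx5 hits the CQ-creation pattern is the same: allocate a create_cq_in command buffer with room for the page-address (PAS) array, point cqc at its cq_context field with MLX5_ADDR_OF(), program the context with MLX5_SET()/MLX5_SET64(), and hand the buffer to firmware. The sketch below mirrors the wc.c and en_main.c excerpts rather than reproducing either exactly: the function name is hypothetical, the EQ number eqn is assumed to have been obtained elsewhere, mlx5_fill_page_frag_array() is the PAS-filling helper I believe the driver uses here, and error handling is trimmed.

static int example_create_cq(struct mlx5_core_dev *mdev,
                             struct mlx5_wq_ctrl *wq_ctrl,
                             struct mlx5_core_cq *mcq,
                             u8 log_cq_size, int eqn)
{
        u32 out[MLX5_ST_SZ_DW(create_cq_out)] = {};
        void *in, *cqc;
        int inlen, err;

        /* Command layout: fixed create_cq_in header plus one 64-bit PAS
         * entry per page of the already-allocated CQ buffer. */
        inlen = MLX5_ST_SZ_BYTES(create_cq_in) +
                sizeof(u64) * wq_ctrl->buf.npages;
        in = kvzalloc(inlen, GFP_KERNEL);
        if (!in)
                return -ENOMEM;

        cqc = MLX5_ADDR_OF(create_cq_in, in, cq_context);
        MLX5_SET(cqc, cqc, log_cq_size, log_cq_size);
        MLX5_SET(cqc, cqc, uar_page, mdev->priv.uar->index);
        MLX5_SET(cqc, cqc, c_eqn_or_apu_element, eqn);
        MLX5_SET(cqc, cqc, log_page_size,
                 wq_ctrl->buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT);
        MLX5_SET64(cqc, cqc, dbr_addr, wq_ctrl->db.dma);

        /* Fill the PAS array from the fragmented CQ buffer. */
        mlx5_fill_page_frag_array(&wq_ctrl->buf,
                                  (__be64 *)MLX5_ADDR_OF(create_cq_in, in, pas));

        err = mlx5_core_create_cq(mdev, mcq, in, inlen, out, sizeof(out));
        kvfree(in);
        return err;
}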

cq.c
   103  int eqn = MLX5_GET(cqc, MLX5_ADDR_OF(create_cq_in, in, cq_context),  in mlx5_create_cq()
   227  void *cqc;  in mlx5_core_modify_cq_moderation() local
   230  cqc = MLX5_ADDR_OF(modify_cq_in, in, cq_context);  in mlx5_core_modify_cq_moderation()
   231  MLX5_SET(cqc, cqc, cq_period, cq_period);  in mlx5_core_modify_cq_moderation()
   232  MLX5_SET(cqc, cqc, cq_max_count, cq_max_count);  in mlx5_core_modify_cq_moderation()
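
The mlx5_core_modify_cq_moderation() hits show the runtime-tuning side of the same context: a MODIFY_CQ command that rewrites only cq_period and cq_max_count on a live CQ. A sketch of a caller follows; only the three cqc lines come from the excerpt, while the cqn and field-select lines and the final mlx5_core_modify_cq() call are filled in from my reading of the driver and should be treated as assumptions.

static int example_set_cq_moderation(struct mlx5_core_dev *mdev,
                                     struct mlx5_core_cq *mcq,
                                     u16 usec, u16 max_count)
{
        u32 in[MLX5_ST_SZ_DW(modify_cq_in)] = {};
        void *cqc;

        MLX5_SET(modify_cq_in, in, cqn, mcq->cqn);
        cqc = MLX5_ADDR_OF(modify_cq_in, in, cq_context);
        MLX5_SET(cqc, cqc, cq_period, usec);           /* moderation period, usec */
        MLX5_SET(cqc, cqc, cq_max_count, max_count);   /* completions per event */

        /* Tell firmware which cqc fields are being modified. */
        MLX5_SET(modify_cq_in, in,
                 modify_field_select_resize_field_select.modify_field_select.modify_field_select,
                 MLX5_CQ_MODIFY_PERIOD | MLX5_CQ_MODIFY_COUNT);

        return mlx5_core_modify_cq(mdev, mcq, in, sizeof(in));
}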

wq.c
   160  void *cqc, struct mlx5_cqwq *wq,  in mlx5_cqwq_create() argument
   164  u8 log_wq_stride = MLX5_GET(cqc, cqc, cqe_sz) == CQE_STRIDE_64 ? 6 : 7;  in mlx5_cqwq_create()
   165  u8 log_wq_sz = MLX5_GET(cqc, cqc, log_cq_size);  in mlx5_cqwq_create()

en_main.c
  2255  void *cqc;  in mlx5e_create_cq() local
  2272  memcpy(cqc, param->cqc, sizeof(param->cqc));  in mlx5e_create_cq()
  2279  MLX5_SET(cqc, cqc, c_eqn_or_apu_element, eqn);  in mlx5e_create_cq()
  2280  MLX5_SET(cqc, cqc, uar_page, mdev->priv.uar->index);  in mlx5e_create_cq()
  2281  MLX5_SET(cqc, cqc, log_page_size, cq->wq_ctrl.buf.page_shift -  in mlx5e_create_cq()
  2283  MLX5_SET64(cqc, cqc, dbr_addr, cq->wq_ctrl.db.dma);  in mlx5e_create_cq()
  2338  void *cqc;  in mlx5e_modify_cq_period_mode() local
  2354  void *cqc;  in mlx5e_modify_cq_moderation() local
  2358  MLX5_SET(cqc, cqc, cq_period, cq_period);  in mlx5e_modify_cq_moderation()
  2359  MLX5_SET(cqc, cqc, cq_max_count, cq_max_count);  in mlx5e_modify_cq_moderation()
  [all …]

debugfs.c
   448  param = 1 << MLX5_GET(cqc, ctx, log_cq_size);  in cq_read_field()
   451  param = MLX5_GET(cqc, ctx, log_page_size);  in cq_read_field()

wq.h
    90  void *cqc, struct mlx5_cqwq *wq,

/drivers/net/ethernet/mellanox/mlx5/core/lib/

aso.c
    80  void *in, *cqc;  in create_aso_cq() local
    94  cqc = MLX5_ADDR_OF(create_cq_in, in, cq_context);  in create_aso_cq()
    96  memcpy(cqc, cqc_data, MLX5_ST_SZ_BYTES(cqc));  in create_aso_cq()
   101  MLX5_SET(cqc, cqc, cq_period_mode, MLX5_CQ_PERIOD_MODE_START_FROM_EQE);  in create_aso_cq()
   102  MLX5_SET(cqc, cqc, c_eqn_or_apu_element, eqn);  in create_aso_cq()
   103  MLX5_SET(cqc, cqc, uar_page, mdev->priv.uar->index);  in create_aso_cq()
   104  MLX5_SET(cqc, cqc, log_page_size, cq->wq_ctrl.buf.page_shift -  in create_aso_cq()
   106  MLX5_SET64(cqc, cqc, dbr_addr, cq->wq_ctrl.db.dma);  in create_aso_cq()
   127  cqc_data = kvzalloc(MLX5_ST_SZ_BYTES(cqc), GFP_KERNEL);  in mlx5_aso_create_cq()
   131  MLX5_SET(cqc, cqc_data, log_cq_size, 1);  in mlx5_aso_create_cq()
   [all …]

/drivers/infiniband/hw/mlx5/

cq.c
   791  MLX5_SET(cqc, cqc, log_page_size,  in create_cq_user()
   831  MLX5_SET(cqc, cqc, cqe_comp_en, 1);  in create_cq_user()
   922  MLX5_SET(cqc, cqc, log_page_size,  in create_cq_kernel()
  1012  MLX5_SET(cqc, cqc, cqe_sz,  in mlx5_ib_create_cq()
  1016  MLX5_SET(cqc, cqc, log_cq_size, ilog2(entries));  in mlx5_ib_create_cq()
  1017  MLX5_SET(cqc, cqc, uar_page, index);  in mlx5_ib_create_cq()
  1018  MLX5_SET(cqc, cqc, c_eqn_or_apu_element, eqn);  in mlx5_ib_create_cq()
  1019  MLX5_SET64(cqc, cqc, dbr_addr, cq->db.dma);  in mlx5_ib_create_cq()
  1021  MLX5_SET(cqc, cqc, oi, 1);  in mlx5_ib_create_cq()
  1371  MLX5_SET(cqc, cqc, log_page_size,  in mlx5_ib_resize_cq()
  [all …]

devx.c
   742  void *cqc;  in devx_set_umem_valid() local
   745  cqc = MLX5_ADDR_OF(create_cq_in, in, cq_context);  in devx_set_umem_valid()
   746  MLX5_SET(cqc, cqc, dbr_umem_valid, 1);  in devx_set_umem_valid()
  1503  !MLX5_GET(cqc, MLX5_ADDR_OF(create_cq_in, in, cq_context), apu_cq))  in is_apu_cq()

/drivers/net/ethernet/mellanox/mlx5/core/en/

params.c
   811  void *cqc = param->cqc;  in mlx5e_build_common_cq_param() local
   813  MLX5_SET(cqc, cqc, uar_page, mdev->priv.uar->index);  in mlx5e_build_common_cq_param()
   815  MLX5_SET(cqc, cqc, cqe_sz, CQE_STRIDE_128_PAD);  in mlx5e_build_common_cq_param()
   841  void *cqc = param->cqc;  in mlx5e_build_rx_cq_param() local
   857  MLX5_SET(cqc, cqc, log_cq_size, log_cq_size);  in mlx5e_build_rx_cq_param()
   859  MLX5_SET(cqc, cqc, mini_cqe_res_format, hw_stridx ?  in mlx5e_build_rx_cq_param()
   861  MLX5_SET(cqc, cqc, cqe_compression_layout,  in mlx5e_build_rx_cq_param()
   865  MLX5_SET(cqc, cqc, cqe_comp_en, 1);  in mlx5e_build_rx_cq_param()
   977  void *cqc = param->cqc;  in mlx5e_build_tx_cq_param() local
  1019  void *cqc = param->cqc;  in mlx5e_build_ico_cq_param() local
  [all …]
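
mlx5e_build_rx_cq_param() is where the Ethernet driver decides the receive CQE layout before the CQ is created: the common helper sets uar_page and the 128-byte padded stride, and the RX-specific one sets log_cq_size, the mini-CQE result format and cqe_comp_en when CQE compression is enabled. A hedged sketch of that shape is below; the function name, the param_cqc argument and the mini_cqe_fmt parameter are illustrative stand-ins, not the driver's actual signature.

static void example_build_rx_cqc(struct mlx5_core_dev *mdev, void *param_cqc,
                                 u8 log_cq_size, bool cqe_compress,
                                 u8 mini_cqe_fmt)
{
        /* Common part, as in mlx5e_build_common_cq_param(). */
        MLX5_SET(cqc, param_cqc, uar_page, mdev->priv.uar->index);

        /* RX-specific part, as in mlx5e_build_rx_cq_param(). */
        MLX5_SET(cqc, param_cqc, log_cq_size, log_cq_size);
        if (cqe_compress) {
                /* mini_cqe_fmt would be one of the mini-CQE result formats
                 * (checksum only vs. checksum plus stride index). */
                MLX5_SET(cqc, param_cqc, mini_cqe_res_format, mini_cqe_fmt);
                MLX5_SET(cqc, param_cqc, cqe_comp_en, 1);
        }
}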

health.c
    24  void *cqc;  in mlx5e_health_cq_diag_fmsg() local
    27  cqc = MLX5_ADDR_OF(query_cq_out, out, cq_context);  in mlx5e_health_cq_diag_fmsg()
    28  hw_status = MLX5_GET(cqc, cqc, status);  in mlx5e_health_cq_diag_fmsg()
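
health.c and debugfs.c read the context back instead of writing it: the CQ is queried and fields are pulled straight out of the cq_context of the QUERY_CQ output with MLX5_GET(). A small sketch of that read path follows, assuming the three-argument mlx5_core_query_cq() of current kernels; the helper name and out-parameters are made up for illustration.

static int example_read_cq_state(struct mlx5_core_dev *mdev,
                                 struct mlx5_core_cq *mcq,
                                 u8 *hw_status, u32 *num_entries)
{
        u32 out[MLX5_ST_SZ_DW(query_cq_out)] = {};
        void *cqc;
        int err;

        err = mlx5_core_query_cq(mdev, mcq, out);
        if (err)
                return err;

        /* The returned cq_context uses the same cqc layout as CREATE_CQ. */
        cqc = MLX5_ADDR_OF(query_cq_out, out, cq_context);
        *hw_status = MLX5_GET(cqc, cqc, status);
        *num_entries = 1 << MLX5_GET(cqc, cqc, log_cq_size);

        return 0;
}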

params.h
    16  u32 cqc[MLX5_ST_SZ_DW(cqc)];  member

/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/

send.c
   923  void *in, *cqc;  in hws_send_ring_create_cq() local
   937  cqc = MLX5_ADDR_OF(create_cq_in, in, cq_context);  in hws_send_ring_create_cq()
   938  memcpy(cqc, cqc_data, MLX5_ST_SZ_BYTES(cqc));  in hws_send_ring_create_cq()
   942  MLX5_SET(cqc, cqc, c_eqn_or_apu_element, eqn);  in hws_send_ring_create_cq()
   943  MLX5_SET(cqc, cqc, uar_page, mdev->priv.uar->index);  in hws_send_ring_create_cq()
   944  MLX5_SET(cqc, cqc, log_page_size, cq->wq_ctrl.buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT);  in hws_send_ring_create_cq()
   945  MLX5_SET64(cqc, cqc, dbr_addr, cq->wq_ctrl.db.dma);  in hws_send_ring_create_cq()
   962  cqc_data = kvzalloc(MLX5_ST_SZ_BYTES(cqc), GFP_KERNEL);  in hws_send_ring_open_cq()
   966  MLX5_SET(cqc, cqc_data, uar_page, mdev->priv.uar->index);  in hws_send_ring_open_cq()
   967  MLX5_SET(cqc, cqc_data, cqe_sz, queue->num_entries);  in hws_send_ring_open_cq()
   [all …]

/drivers/net/ethernet/mellanox/mlx5/core/fpga/

conn.c
   415  u32 temp_cqc[MLX5_ST_SZ_DW(cqc)] = {0};  in mlx5_fpga_conn_create_cq()
   420  void *cqc, *in;  in mlx5_fpga_conn_create_cq() local
   425  MLX5_SET(cqc, temp_cqc, log_cq_size, ilog2(cq_size));  in mlx5_fpga_conn_create_cq()
   454  cqc = MLX5_ADDR_OF(create_cq_in, in, cq_context);  in mlx5_fpga_conn_create_cq()
   455  MLX5_SET(cqc, cqc, log_cq_size, ilog2(cq_size));  in mlx5_fpga_conn_create_cq()
   456  MLX5_SET(cqc, cqc, c_eqn_or_apu_element, eqn);  in mlx5_fpga_conn_create_cq()
   457  MLX5_SET(cqc, cqc, uar_page, fdev->conn_res.uar->index);  in mlx5_fpga_conn_create_cq()
   458  MLX5_SET(cqc, cqc, log_page_size, conn->cq.wq_ctrl.buf.page_shift -  in mlx5_fpga_conn_create_cq()
   460  MLX5_SET64(cqc, cqc, dbr_addr, conn->cq.wq_ctrl.db.dma);  in mlx5_fpga_conn_create_cq()

/drivers/crypto/hisilicon/

debugfs.c
   208  struct qm_cqc cqc;  in qm_cqc_dump() local
   221  ret = qm_set_and_get_xqc(qm, QM_MB_CMD_CQC, &cqc, qp_id, 1);  in qm_cqc_dump()
   223  cqc.base_h = cpu_to_le32(QM_XQC_ADDR_MASK);  in qm_cqc_dump()
   224  cqc.base_l = cpu_to_le32(QM_XQC_ADDR_MASK);  in qm_cqc_dump()
   225  dump_show(qm, &cqc, sizeof(struct qm_cqc), name);  in qm_cqc_dump()
   231  if (qm->cqc) {  in qm_cqc_dump()
   232  memcpy(&cqc, qm->cqc + qp_id, sizeof(struct qm_cqc));  in qm_cqc_dump()
   233  cqc.base_h = cpu_to_le32(QM_XQC_ADDR_MASK);  in qm_cqc_dump()
   234  cqc.base_l = cpu_to_le32(QM_XQC_ADDR_MASK);  in qm_cqc_dump()
   235  dump_show(qm, &cqc, sizeof(struct qm_cqc), "SOFT CQC");  in qm_cqc_dump()

qm.c
    59  #define QM_CQ_TAIL_IDX(cqc) ((le16_to_cpu((cqc).w11) >> 6) & 0x1)  argument
   688  tmp_xqc = qm->xqc_buf.cqc;  in qm_set_and_get_xqc()
  2111  struct qm_cqc cqc = {0};  in qm_cq_ctx_cfg() local
  2115  cqc.w8 = cpu_to_le16(qp->cq_depth - 1);  in qm_cq_ctx_cfg()
  2118  cqc.w8 = 0; /* rand_qc */  in qm_cq_ctx_cfg()
  2128  cqc.pasid = cpu_to_le16(pasid);  in qm_cq_ctx_cfg()
  2131  cqc.w11 = cpu_to_le16(QM_QC_PASID_ENABLE);  in qm_cq_ctx_cfg()
  2220  struct qm_cqc cqc;  in qm_wait_qp_empty() local
  2238  if ((sqc.tail == cqc.tail) &&  in qm_wait_qp_empty()
  5518  QM_XQC_BUF_INIT(xqc_buf, cqc);  in hisi_qm_alloc_rsv_buf()
  [all …]
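
The HiSilicon QM uses cqc for something different: struct qm_cqc is a plain little-endian hardware context, and macros like QM_CQ_TAIL_IDX() above pick individual bits out of its words. Two illustrative accessors in the same style, using only fields visible in the excerpts (bit 6 of w11 as the tail toggle, w8 as depth minus one when randomisation is off); these are sketches, not helpers the driver actually provides.

static inline int example_qm_cq_tail_toggle(const struct qm_cqc *cqc)
{
        /* Same extraction as QM_CQ_TAIL_IDX(): bit 6 of the LE w11 word. */
        return (le16_to_cpu(cqc->w11) >> 6) & 0x1;
}

static inline u16 example_qm_cq_depth(const struct qm_cqc *cqc)
{
        /* qm_cq_ctx_cfg() stores cq_depth - 1 in w8 unless rand_qc is used. */
        return le16_to_cpu(cqc->w8) + 1;
}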

/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/

dr_send.c
  1062  u32 temp_cqc[MLX5_ST_SZ_DW(cqc)] = {};  in dr_create_cq()
  1068  void *cqc, *in;  in dr_create_cq() local
  1077  MLX5_SET(cqc, temp_cqc, log_cq_size, ilog2(ncqe));  in dr_create_cq()
  1104  cqc = MLX5_ADDR_OF(create_cq_in, in, cq_context);  in dr_create_cq()
  1105  MLX5_SET(cqc, cqc, log_cq_size, ilog2(ncqe));  in dr_create_cq()
  1106  MLX5_SET(cqc, cqc, c_eqn_or_apu_element, eqn);  in dr_create_cq()
  1107  MLX5_SET(cqc, cqc, uar_page, uar->index);  in dr_create_cq()
  1108  MLX5_SET(cqc, cqc, log_page_size, cq->wq_ctrl.buf.page_shift -  in dr_create_cq()
  1110  MLX5_SET64(cqc, cqc, dbr_addr, cq->wq_ctrl.db.dma);  in dr_create_cq()

/drivers/vfio/pci/mlx5/

cmd.c
  1161  void *cqc, *in;  in mlx5vf_create_cq() local
  1194  cqc = MLX5_ADDR_OF(create_cq_in, in, cq_context);  in mlx5vf_create_cq()
  1195  MLX5_SET(cqc, cqc, log_cq_size, ilog2(ncqe));  in mlx5vf_create_cq()
  1196  MLX5_SET(cqc, cqc, c_eqn_or_apu_element, eqn);  in mlx5vf_create_cq()
  1197  MLX5_SET(cqc, cqc, uar_page, tracker->uar->index);  in mlx5vf_create_cq()
  1198  MLX5_SET(cqc, cqc, log_page_size, cq->buf.frag_buf.page_shift -  in mlx5vf_create_cq()
  1200  MLX5_SET64(cqc, cqc, dbr_addr, cq->db.dma);  in mlx5vf_create_cq()

/drivers/vdpa/mlx5/net/

mlx5_vnet.c
   564  void *cqc;  in cq_create() local
   595  cqc = MLX5_ADDR_OF(create_cq_in, in, cq_context);  in cq_create()
   596  MLX5_SET(cqc, cqc, log_page_size, vcq->buf.frag_buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT);  in cq_create()
   605  cqc = MLX5_ADDR_OF(create_cq_in, in, cq_context);  in cq_create()
   606  MLX5_SET(cqc, cqc, log_cq_size, ilog2(num_ent));  in cq_create()
   607  MLX5_SET(cqc, cqc, uar_page, ndev->mvdev.res.uar->index);  in cq_create()
   608  MLX5_SET(cqc, cqc, c_eqn_or_apu_element, eqn);  in cq_create()
   609  MLX5_SET64(cqc, cqc, dbr_addr, vcq->db.dma);  in cq_create()

/drivers/net/ethernet/mellanox/mlx4/

resource_tracker.c
  3077  static int cq_get_mtt_addr(struct mlx4_cq_context *cqc)  in cq_get_mtt_addr() argument
  3079  return be32_to_cpu(cqc->mtt_base_addr_l) & 0xfffffff8;  in cq_get_mtt_addr()
  3082  static int cq_get_mtt_size(struct mlx4_cq_context *cqc)  in cq_get_mtt_size() argument
  3084  int log_cq_size = (be32_to_cpu(cqc->logsize_usrpage) >> 24) & 0x1f;  in cq_get_mtt_size()
  3085  int page_shift = (cqc->log_page_size & 0x3f) + 12;  in cq_get_mtt_size()
  3442  struct mlx4_cq_context *cqc = inbox->buf;  in mlx4_SW2HW_CQ_wrapper() local
  3443  int mtt_base = cq_get_mtt_addr(cqc) / dev->caps.mtt_entry_sz;  in mlx4_SW2HW_CQ_wrapper()
  3453  err = check_mtt_range(dev, slave, mtt_base, cq_get_mtt_size(cqc), mtt);  in mlx4_SW2HW_CQ_wrapper()
  3531  struct mlx4_cq_context *cqc = inbox->buf;  in handle_resize() local
  3532  int mtt_base = cq_get_mtt_addr(cqc) / dev->caps.mtt_entry_sz;  in handle_resize()
  [all …]
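
The mlx4 resource tracker recovers a CQ's MTT footprint purely from its context: log_cq_size sits in bits 24..28 of the big-endian logsize_usrpage word, the page shift is the low six bits of log_page_size plus 12, and the number of pages follows from the buffer size. A worked sketch of that arithmetic is below; the 32-byte CQE size (the "+ 5") is my assumption about mlx4, not something the excerpt states, and the function name is illustrative.

static int example_cq_mtt_entries(struct mlx4_cq_context *cqc)
{
        int log_cq_size = (be32_to_cpu(cqc->logsize_usrpage) >> 24) & 0x1f;
        int page_shift = (cqc->log_page_size & 0x3f) + 12;
        int log_buf_size = log_cq_size + 5;     /* entries * 32-byte CQEs */

        if (log_buf_size <= page_shift)
                return 1;                       /* buffer fits in one page */

        return 1 << (log_buf_size - page_shift);
}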