| /linux/drivers/net/ethernet/cavium/thunder/ |
| nicvf_queues.c |
|   994   for (qidx = 0; qidx < qs->rbdr_cnt; qidx++)   in nicvf_free_resources()
|   998   for (qidx = 0; qidx < qs->cq_cnt; qidx++)   in nicvf_free_resources()
|  1002   for (qidx = 0; qidx < qs->sq_cnt; qidx++)   in nicvf_free_resources()
|  1019   for (qidx = 0; qidx < qs->sq_cnt; qidx++) {   in nicvf_alloc_resources()
|  1025   for (qidx = 0; qidx < qs->cq_cnt; qidx++) {   in nicvf_alloc_resources()
|  1086   for (qidx = 0; qidx < qs->sq_cnt; qidx++)   in nicvf_config_data_transfer()
|  1088   for (qidx = 0; qidx < qs->cq_cnt; qidx++)   in nicvf_config_data_transfer()
|  1092   for (qidx = 0; qidx < qs->rq_cnt; qidx++)   in nicvf_config_data_transfer()
|  1095   for (qidx = 0; qidx < qs->rq_cnt; qidx++)   in nicvf_config_data_transfer()
|  1099   for (qidx = 0; qidx < qs->sq_cnt; qidx++)   in nicvf_config_data_transfer()
|  [all …]
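The hits above share one shape: a separate `qidx` loop per queue type (RBDR, CQ, SQ, RQ), each bounded by that type's own count in the queue set. A minimal userspace sketch of that pattern follows; `mock_qset`, `mock_free_queue()` and the counts are invented stand-ins for illustration, not the driver's real definitions.

```c
#include <stdio.h>

/* Stand-in for the driver's queue set: independent counts per queue type
 * (receive-buffer descriptor rings, completion queues, send queues). */
struct mock_qset {
	int rbdr_cnt;
	int cq_cnt;
	int sq_cnt;
};

static void mock_free_queue(const char *kind, int qidx)
{
	printf("freeing %s %d\n", kind, qidx);
}

/* Mirrors the shape of nicvf_free_resources(): one qidx loop per queue
 * type, each bounded by that type's own count. */
static void mock_free_resources(const struct mock_qset *qs)
{
	int qidx;

	for (qidx = 0; qidx < qs->rbdr_cnt; qidx++)
		mock_free_queue("rbdr", qidx);
	for (qidx = 0; qidx < qs->cq_cnt; qidx++)
		mock_free_queue("cq", qidx);
	for (qidx = 0; qidx < qs->sq_cnt; qidx++)
		mock_free_queue("sq", qidx);
}

int main(void)
{
	struct mock_qset qs = { .rbdr_cnt = 2, .cq_cnt = 8, .sq_cnt = 8 };

	mock_free_resources(&qs);
	return 0;
}
```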
|
| nicvf_main.c |
|   995   for (qidx = 0; qidx < qs->cq_cnt; qidx++) {   in nicvf_handle_qs_err()
|  1315   for (qidx = 0; qidx < nic->qs->cq_cnt; qidx++) {   in nicvf_free_cq_poll()
|  1347   for (qidx = 0; qidx < nic->sqs_count; qidx++) {   in nicvf_stop()
|  1356   for (qidx = 0; qidx < qs->rbdr_cnt; qidx++) {   in nicvf_stop()
|  1372   for (qidx = 0; qidx < nic->qs->cq_cnt; qidx++) {   in nicvf_stop()
|  1467   for (qidx = 0; qidx < qs->cq_cnt; qidx++) {   in nicvf_open()
|  1544   for (qidx = 0; qidx < qs->cq_cnt; qidx++)   in nicvf_open()
|  1548   for (qidx = 0; qidx < qs->rbdr_cnt; qidx++)   in nicvf_open()
|  1568   for (qidx = 0; qidx < qs->cq_cnt; qidx++) {   in nicvf_open()
|  1714   for (qidx = 0; qidx < qs->rq_cnt; qidx++)   in nicvf_update_stats()
|  [all …]
|
| nicvf_ethtool.c |
|   214   int stats, qidx;   in nicvf_get_qset_strings() local
|   217   for (qidx = 0; qidx < nic->qs->rq_cnt; qidx++) {   in nicvf_get_qset_strings()
|   219   sprintf(*data, "rxq%d: %s", qidx + start_qidx,   in nicvf_get_qset_strings()
|   225   for (qidx = 0; qidx < nic->qs->sq_cnt; qidx++) {   in nicvf_get_qset_strings()
|   227   sprintf(*data, "txq%d: %s", qidx + start_qidx,   in nicvf_get_qset_strings()
|   301   int stat, qidx;   in nicvf_get_qset_stats() local
|   306   for (qidx = 0; qidx < nic->qs->rq_cnt; qidx++) {   in nicvf_get_qset_stats()
|   307   nicvf_update_rq_stats(nic, qidx);   in nicvf_get_qset_stats()
|   309   *((*data)++) = ((u64 *)&nic->qs->rq[qidx].stats)   in nicvf_get_qset_stats()
|   313   for (qidx = 0; qidx < nic->qs->sq_cnt; qidx++) {   in nicvf_get_qset_stats()
|  [all …]
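nicvf_get_qset_strings() flattens per-queue statistics into "rxqN: name" / "txqN: name" strings, offsetting the queue number by `start_qidx` for secondary queue sets. A compilable sketch of that naming scheme follows; the stat names and queue counts are invented for illustration.

```c
#include <stdio.h>

#define RX_QUEUES 2
#define TX_QUEUES 2

/* Stand-in per-queue statistic names; the real driver walks tables of
 * queue-stat descriptors instead. */
static const char *const mock_queue_stats[] = { "bytes", "frames" };
#define NUM_QUEUE_STATS 2

/* Mirrors nicvf_get_qset_strings(): every queue index and stat name is
 * flattened into "rxqN: stat" / "txqN: stat" strings, with start_qidx
 * offsetting the numbering for secondary queue sets. */
static void mock_get_qset_strings(char (*data)[32], int start_qidx)
{
	int qidx, stats;

	for (qidx = 0; qidx < RX_QUEUES; qidx++)
		for (stats = 0; stats < NUM_QUEUE_STATS; stats++)
			sprintf(*data++, "rxq%d: %s", qidx + start_qidx,
				mock_queue_stats[stats]);

	for (qidx = 0; qidx < TX_QUEUES; qidx++)
		for (stats = 0; stats < NUM_QUEUE_STATS; stats++)
			sprintf(*data++, "txq%d: %s", qidx + start_qidx,
				mock_queue_stats[stats]);
}

int main(void)
{
	char names[(RX_QUEUES + TX_QUEUES) * NUM_QUEUE_STATS][32];
	size_t i;

	mock_get_qset_strings(names, 0);
	for (i = 0; i < sizeof(names) / sizeof(names[0]); i++)
		printf("%s\n", names[i]);
	return 0;
}
```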
|
| nicvf_queues.h |
|   336   int qidx, bool enable);
|   338   void nicvf_sq_enable(struct nicvf *nic, struct snd_queue *sq, int qidx);
|   339   void nicvf_sq_disable(struct nicvf *nic, int qidx);
|   342   struct snd_queue *sq, int qidx);
|   365   u64 qidx, u64 val);
|   367   u64 offset, u64 qidx);
|
| /linux/drivers/net/ethernet/marvell/octeontx2/nic/ |
| otx2_common.c |
|   719   for (qidx = 0; qidx < pfvf->hw.tot_tx_queues; qidx++) {   in otx2_sqb_flush()
|   775   aq->qidx = qidx;   in otx2_rq_init()
|   813   aq->qidx = qidx;   in otx2_sq_aq_init()
|   942   aq->qidx = qidx;   in otx2_cq_init()
|   992   for (qidx = 0; qidx < pfvf->hw.rx_queues; qidx++) {   in otx2_config_nix_queues()
|  1001   for (qidx = 0; qidx < pfvf->hw.tot_tx_queues; qidx++) {   in otx2_config_nix_queues()
|  1010   for (qidx = 0; qidx < pfvf->qset.cq_cnt; qidx++) {   in otx2_config_nix_queues()
|  1025   for (qidx = 0; qidx < pfvf->qset.cq_cnt; qidx++) {   in otx2_config_nix_queues()
|  1086   for (qidx = 0; qidx < hw->tot_tx_queues; qidx++) {   in otx2_sq_free_sqbs()
|  1298   for (qidx = 0; qidx < hw->tot_tx_queues; qidx++) {   in otx2_sq_aura_pool_init()
|  [all …]
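A recurring pattern in otx2_common.c is filling one admin-queue (AQ) context request per queue, with `aq->qidx` naming the target queue. The sketch below mirrors that shape under invented types (`mock_aq_req`, `mock_submit_aq()`); the ctype/op values are illustrative, not the mailbox's real encoding.

```c
#include <stdio.h>
#include <string.h>

/* Stand-in for an NIX admin-queue (AQ) context request: the hardware is
 * told which queue a context operation targets via the qidx field. */
struct mock_aq_req {
	unsigned short qidx;
	unsigned char ctype;	/* 0 = RQ, 1 = SQ, 2 = CQ (illustrative values) */
	unsigned char op;	/* 0 = INIT (illustrative value) */
};

static int mock_submit_aq(const struct mock_aq_req *aq)
{
	printf("AQ: ctype %u op %u qidx %u\n", aq->ctype, aq->op, aq->qidx);
	return 0;
}

/* Mirrors the shape of otx2_config_nix_queues(): one INIT request per
 * receive-queue index, each carrying its own qidx. */
static int mock_config_rx_queues(int rx_queues)
{
	int qidx;

	for (qidx = 0; qidx < rx_queues; qidx++) {
		struct mock_aq_req aq;

		memset(&aq, 0, sizeof(aq));
		aq.ctype = 0;		/* RQ context */
		aq.op = 0;		/* INIT */
		aq.qidx = qidx;		/* which receive queue this context is for */
		if (mock_submit_aq(&aq))
			return -1;
	}
	return 0;
}

int main(void)
{
	return mock_config_rx_queues(4);
}
```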
|
| cn10k.c |
|    75   int cn10k_sq_aq_init(void *dev, u16 qidx, u16 sqb_aura)   in cn10k_sq_aq_init() argument
|    85   aq->sq.cq = pfvf->hw.rx_queues + qidx;   in cn10k_sq_aq_init()
|   103   aq->qidx = qidx;   in cn10k_sq_aq_init()
|   320   aq->qidx = rq_idx;   in cn10k_map_unmap_rq_policer()
|   344   int qidx, rc;   in cn10k_free_matchall_ipolicer() local
|   349   for (qidx = 0; qidx < hw->rx_queues; qidx++)   in cn10k_free_matchall_ipolicer()
|   350   cn10k_map_unmap_rq_policer(pfvf, qidx,   in cn10k_free_matchall_ipolicer()
|   453   aq->qidx = profile;   in cn10k_set_ipolicer_rate()
|   464   int qidx, rc;   in cn10k_set_matchall_ipolicer_rate() local
|   473   for (qidx = 0; qidx < hw->rx_queues; qidx++) {   in cn10k_set_matchall_ipolicer_rate()
|  [all …]
|
| otx2_pf.c |
|  1166   for (qidx = 0; qidx < pf->qset.cq_cnt; qidx++) {   in otx2_q_intr_handler()
|  1191   for (qidx = 0; qidx < pf->hw.tot_tx_queues; qidx++) {   in otx2_q_intr_handler()
|  1261   int qidx;   in otx2_disable_napi() local
|  1263   for (qidx = 0; qidx < pf->hw.cint_cnt; qidx++) {   in otx2_disable_napi()
|  1278   for (qidx = 0; qidx < qset->cq_cnt; qidx++) {   in otx2_free_cq_res()
|  1294   for (qidx = 0; qidx < pf->hw.tot_tx_queues; qidx++) {   in otx2_free_sq_res()
|  1458   for (qidx = 0; qidx < qset->cq_cnt; qidx++) {   in otx2_free_hw_resources()
|  1586   for (qidx = 0; qidx < pf->hw.cint_cnt; qidx++) {   in otx2_open()
|  1645   for (qidx = 0; qidx < pf->hw.cint_cnt; qidx++) {   in otx2_open()
|  1765   for (qidx = 0; qidx < pf->hw.cint_cnt; qidx++) {   in otx2_stop()
|  [all …]
|
| otx2_txrx.c |
|   186   int qidx)   in otx2_skb_add_frag() argument
|   248   int qidx)   in otx2_free_rcv_seg() argument
|   327   otx2_free_rcv_seg(pfvf, cqe, qidx);   in otx2_check_rcv_errors()
|   530   int size, int qidx)   in otx2_sqe_flush() argument
|   693   sqe_hdr->sq = qidx;   in otx2_sqe_add_hdr()
|   946   struct sk_buff *skb, u16 qidx)   in otx2_sq_append_skb() argument
|  1129   sq = &pfvf->qset.sq[qidx];   in otx2_xdp_sq_append_pkt()
|  1141   sqe_hdr->sq = qidx;   in otx2_xdp_sq_append_pkt()
|  1162   int qidx = cq->cq_idx;   in otx2_xdp_rcv_pkt_handler() local
|  1186   qidx += pfvf->hw.tx_queues;   in otx2_xdp_rcv_pkt_handler()
|  [all …]
|
| otx2_ethtool.c |
|    86   int qidx, stats;   in otx2_get_qset_strings() local
|    88   for (qidx = 0; qidx < pfvf->hw.rx_queues; qidx++) {   in otx2_get_qset_strings()
|    95   for (qidx = 0; qidx < pfvf->hw.tx_queues; qidx++) {   in otx2_get_qset_strings()
|   147   int stat, qidx;   in otx2_get_qset_stats() local
|   151   for (qidx = 0; qidx < pfvf->hw.rx_queues; qidx++) {   in otx2_get_qset_stats()
|   152   if (!otx2_update_rq_stats(pfvf, qidx)) {   in otx2_get_qset_stats()
|   162   for (qidx = 0; qidx < pfvf->hw.tx_queues; qidx++) {   in otx2_get_qset_stats()
|   163   if (!otx2_update_sq_stats(pfvf, qidx)) {   in otx2_get_qset_stats()
|   441   int qidx;   in otx2_set_coalesce() local
|   485   for (qidx = 0; qidx < pfvf->hw.cint_cnt; qidx++)   in otx2_set_coalesce()
|  [all …]
|
| otx2_txrx.h |
|   162   struct sk_buff *skb, u16 qidx);
|   164   int size, int qidx);
|   166   int size, int qidx);
|
| cn10k.h |
|    28   void cn10k_sqe_flush(void *dev, struct otx2_snd_queue *sq, int size, int qidx);
|    29   int cn10k_sq_aq_init(void *dev, u16 qidx, u16 sqb_aura);
|
| otx2_common.h |
|   317   int (*sq_aq_init)(void *dev, u16 qidx, u16 sqb_aura);
|   319   int size, int qidx);
|   771   void otx2_config_irq_coalescing(struct otx2_nic *pfvf, int qidx);
|   797   int otx2_sq_aq_init(void *dev, u16 qidx, u16 sqb_aura);
|   798   int cn10k_sq_aq_init(void *dev, u16 qidx, u16 sqb_aura);
|   831   int otx2_update_rq_stats(struct otx2_nic *pfvf, int qidx);
|   832   int otx2_update_sq_stats(struct otx2_nic *pfvf, int qidx);
|   863   bool otx2_xdp_sq_append_pkt(struct otx2_nic *pfvf, u64 iova, int len, u16 qidx);
|
| otx2_vf.c |
|   381   int qidx = skb_get_queue_mapping(skb);   in otx2vf_xmit() local
|   385   sq = &vf->qset.sq[qidx];   in otx2vf_xmit()
|   386   txq = netdev_get_tx_queue(netdev, qidx);   in otx2vf_xmit()
|   388   if (!otx2_sq_append_skb(netdev, sq, skb, qidx)) {   in otx2vf_xmit()
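otx2vf_xmit() uses the skb's `queue_mapping` as `qidx` to pick both the driver send queue and the matching netdev TX queue, and stops that TX queue when the ring is full. A self-contained sketch of that flow follows; all of the structs, helper names and the queue count are stand-ins, not the driver's real API.

```c
#include <stdio.h>
#include <stdbool.h>

#define NUM_TX_QUEUES 4

/* Stand-ins for the skb, the driver's send queues and the stack's TX queues. */
struct mock_skb { unsigned short queue_mapping; };
struct mock_sq  { int pending; };
struct mock_txq { bool stopped; };

static struct mock_sq  sqs[NUM_TX_QUEUES];
static struct mock_txq txqs[NUM_TX_QUEUES];

static bool mock_sq_append_skb(struct mock_sq *sq, struct mock_skb *skb, int qidx)
{
	(void)skb;
	sq->pending++;
	printf("queued packet on sq %d (pending %d)\n", qidx, sq->pending);
	return true;		/* false would mean "ring full" */
}

/* Mirrors the shape of otx2vf_xmit(): the stack's queue_mapping selects both
 * the driver send queue and the matching netdev TX queue; a full ring stops
 * that TX queue so the stack retries later. */
static int mock_xmit(struct mock_skb *skb)
{
	int qidx = skb->queue_mapping % NUM_TX_QUEUES;
	struct mock_sq *sq = &sqs[qidx];
	struct mock_txq *txq = &txqs[qidx];

	if (!mock_sq_append_skb(sq, skb, qidx)) {
		txq->stopped = true;	/* netif_tx_stop_queue() in the real driver */
		return 1;
	}
	return 0;
}

int main(void)
{
	struct mock_skb skb = { .queue_mapping = 2 };

	return mock_xmit(&skb);
}
```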
|
| /linux/drivers/net/ethernet/chelsio/inline_crypto/ch_ipsec/ |
| chcr_ipsec.c |
|   418   u32 qidx;   in copy_esn_pktxt() local
|   426   qidx = skb->queue_mapping;   in copy_esn_pktxt()
|   427   q = &adap->sge.ethtxq[qidx + pi->first_qset];   in copy_esn_pktxt()
|   468   u32 ctrl0, qidx;   in copy_cpltx_pktxt() local
|   474   qidx = skb->queue_mapping;   in copy_cpltx_pktxt()
|   475   q = &adap->sge.ethtxq[qidx + pi->first_qset];   in copy_cpltx_pktxt()
|   513   unsigned int qidx;   in copy_key_cpltx_pktxt() local
|   517   qidx = skb->queue_mapping;   in copy_key_cpltx_pktxt()
|   573   int qidx = skb_get_queue_mapping(skb);   in ch_ipsec_crypto_wreq() local
|   711   int qidx, left, credits;   in ch_ipsec_xmit() local
|  [all …]
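Each of these transmit-path helpers resolves the skb's `queue_mapping` into an adapter-wide TX queue by adding the port's `first_qset` offset. Below is a small sketch of that indexing with invented types; the modulo on `nqsets` is an assumption added only so the example stays in range.

```c
#include <stdio.h>

#define ETHTXQ_TOTAL 16

/* Stand-ins: the adapter owns one flat array of TX queues and each port is
 * given a contiguous slice of it, starting at first_qset. */
struct mock_txq     { int id; };
struct mock_adapter { struct mock_txq ethtxq[ETHTXQ_TOTAL]; };
struct mock_port    { int first_qset; int nqsets; };

/* Mirrors the indexing in copy_esn_pktxt()/copy_cpltx_pktxt(): the skb's
 * queue_mapping is relative to the port, so the port's first_qset must be
 * added to land on the right slot of the adapter-wide array. */
static struct mock_txq *mock_pick_txq(struct mock_adapter *adap,
				      const struct mock_port *pi,
				      unsigned int queue_mapping)
{
	unsigned int qidx = queue_mapping % pi->nqsets;	/* assumption: keep in range */

	return &adap->ethtxq[qidx + pi->first_qset];
}

int main(void)
{
	struct mock_adapter adap;
	struct mock_port port = { .first_qset = 8, .nqsets = 4 };
	int i;

	for (i = 0; i < ETHTXQ_TOTAL; i++)
		adap.ethtxq[i].id = i;

	printf("queue_mapping 2 -> adapter txq %d\n",
	       mock_pick_txq(&adap, &port, 2)->id);	/* prints 10 */
	return 0;
}
```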
|
| /linux/drivers/dma/ptdma/ |
| ptdma-dev.c |
|    72   u8 *q_desc = (u8 *)&cmd_q->qbase[cmd_q->qidx];   in pt_core_execute_cmd()
|    83   cmd_q->qidx = (cmd_q->qidx + 1) % CMD_Q_LEN;   in pt_core_execute_cmd()
|    89   tail = lower_32_bits(cmd_q->qdma_tail + cmd_q->qidx * Q_DESC_SIZE);   in pt_core_execute_cmd()
|   139   tail = lower_32_bits(cmd_q->qdma_tail + cmd_q->qidx * Q_DESC_SIZE);   in pt_do_cmd_complete()
|   213   cmd_q->qidx = 0;   in pt_core_init()
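The ptdma hits show a classic command ring: `qidx` is the next descriptor slot, advanced modulo `CMD_Q_LEN`, and the hardware tail is recomputed from `qdma_tail + qidx * Q_DESC_SIZE`. A compilable sketch with stand-in sizes and a plain u64 in place of the real descriptor type:

```c
#include <stdio.h>
#include <stdint.h>

#define CMD_Q_LEN   32			/* stand-in ring depth */
#define Q_DESC_SIZE sizeof(uint64_t)	/* stand-in descriptor size */

/* Stand-in for the command queue: qidx is the next free descriptor slot,
 * qdma_tail the DMA address of slot 0. */
struct mock_cmd_q {
	uint64_t qbase[CMD_Q_LEN];
	unsigned int qidx;
	uint64_t qdma_tail;
};

/* Mirrors pt_core_execute_cmd(): write the descriptor at qidx, advance qidx
 * with a modulo wrap, then derive the new hardware tail pointer from it. */
static uint32_t mock_queue_cmd(struct mock_cmd_q *cmd_q, uint64_t desc)
{
	cmd_q->qbase[cmd_q->qidx] = desc;
	cmd_q->qidx = (cmd_q->qidx + 1) % CMD_Q_LEN;

	/* lower_32_bits(...) in the real driver */
	return (uint32_t)(cmd_q->qdma_tail + cmd_q->qidx * Q_DESC_SIZE);
}

int main(void)
{
	struct mock_cmd_q q = { .qidx = CMD_Q_LEN - 1, .qdma_tail = 0x1000 };
	uint32_t tail = mock_queue_cmd(&q, 0xabc);

	/* qidx wraps from 31 back to 0, so the tail returns to qdma_tail */
	printf("new tail 0x%x, qidx %u\n", tail, q.qidx);
	return 0;
}
```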
|
| /linux/drivers/net/ethernet/broadcom/bnxt/ |
| bnxt_dcb.c |
|    55   u8 qidx;   in bnxt_hwrm_queue_pri2cos_cfg() local
|    60   qidx = bp->tc_to_qidx[ets->prio_tc[i]];   in bnxt_hwrm_queue_pri2cos_cfg()
|    61   pri2cos[i] = bp->q_info[qidx].queue_id;   in bnxt_hwrm_queue_pri2cos_cfg()
|   109   u8 qidx = bp->tc_to_qidx[i];   in bnxt_hwrm_queue_cos2bw_cfg() local
|   113   qidx);   in bnxt_hwrm_queue_cos2bw_cfg()
|   116   cos2bw.queue_id = bp->q_info[qidx].queue_id;   in bnxt_hwrm_queue_cos2bw_cfg()
|   132   data = &req->unused_0 + qidx * (sizeof(cos2bw) - 4);   in bnxt_hwrm_queue_cos2bw_cfg()
|   134   if (qidx == 0) {   in bnxt_hwrm_queue_cos2bw_cfg()
|   269   u8 qidx = bp->tc_to_qidx[i];   in bnxt_hwrm_queue_pfc_cfg() local
|   271   if (!BNXT_LLQ(bp->q_info[qidx].queue_profile)) {   in bnxt_hwrm_queue_pfc_cfg()
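bnxt_dcb.c resolves an IEEE priority through two tables: `prio_tc[]` gives the traffic class, `tc_to_qidx[]` gives the queue index, and `q_info[qidx].queue_id` gives the hardware queue id that goes into the firmware request. The sketch below walks that chain; every table's contents here are made up for illustration.

```c
#include <stdio.h>

#define MAX_TC   8
#define MAX_PRIO 8

/* Stand-ins for the two lookup tables visible in the hits. */
struct mock_q_info { unsigned char queue_id; };

static const unsigned char prio_tc[MAX_PRIO]  = { 0, 0, 1, 1, 2, 2, 3, 3 };
static const unsigned char tc_to_qidx[MAX_TC] = { 0, 2, 4, 6, 0, 0, 0, 0 };
static const struct mock_q_info q_info[MAX_TC * 2] = {
	{ 10 }, { 11 }, { 12 }, { 13 }, { 14 }, { 15 }, { 16 }, { 17 },
};

/* Mirrors bnxt_hwrm_queue_pri2cos_cfg(): resolve each priority down to the
 * hardware queue id that the firmware request carries. */
static void mock_build_pri2cos(unsigned char pri2cos[MAX_PRIO])
{
	int i;

	for (i = 0; i < MAX_PRIO; i++) {
		unsigned char qidx = tc_to_qidx[prio_tc[i]];

		pri2cos[i] = q_info[qidx].queue_id;
	}
}

int main(void)
{
	unsigned char pri2cos[MAX_PRIO];
	int i;

	mock_build_pri2cos(pri2cos);
	for (i = 0; i < MAX_PRIO; i++)
		printf("prio %d -> hw queue %u\n", i, pri2cos[i]);
	return 0;
}
```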
|
| /linux/drivers/scsi/csiostor/ |
| csio_wr.c |
|   745   csio_wr_cleanup_eq_stpg(struct csio_hw *hw, int qidx)   in csio_wr_cleanup_eq_stpg() argument
|   747   struct csio_q *q = csio_hw_to_wrm(hw)->q_arr[qidx];   in csio_wr_cleanup_eq_stpg()
|   762   csio_wr_cleanup_iq_ftr(struct csio_hw *hw, int qidx)   in csio_wr_cleanup_iq_ftr() argument
|   765   struct csio_q *q = wrm->q_arr[qidx];   in csio_wr_cleanup_iq_ftr()
|   867   struct csio_q *q = wrm->q_arr[qidx];   in csio_wr_get()
|   878   CSIO_DB_ASSERT((qidx >= 0) && (qidx < wrm->free_qidx));   in csio_wr_get()
|   982   csio_wr_issue(struct csio_hw *hw, int qidx, bool prio)   in csio_wr_issue() argument
|   985   struct csio_q *q = wrm->q_arr[qidx];   in csio_wr_issue()
|   987   CSIO_DB_ASSERT((qidx >= 0) && (qidx < wrm->free_qidx));   in csio_wr_issue()
|  1263   csio_wr_process_iq_idx(struct csio_hw *hw, int qidx,   in csio_wr_process_iq_idx() argument
|  [all …]
|
| /linux/drivers/scsi/qla2xxx/ |
| qla_nvme.c |
|    90   unsigned int qidx, u16 qsize, void **handle)   in qla_nvme_alloc_queue() argument
|    97   if (qidx)   in qla_nvme_alloc_queue()
|    98   qidx--;   in qla_nvme_alloc_queue()
|   105   __func__, handle, qidx, qsize);   in qla_nvme_alloc_queue()
|   107   if (qidx > qla_nvme_fc_transport.max_hw_queues) {   in qla_nvme_alloc_queue()
|   110   __func__, qidx, qla_nvme_fc_transport.max_hw_queues);   in qla_nvme_alloc_queue()
|   118   if (ha->queue_pair_map[qidx]) {   in qla_nvme_alloc_queue()
|   119   *handle = ha->queue_pair_map[qidx];   in qla_nvme_alloc_queue()
|   122   *handle, qidx);   in qla_nvme_alloc_queue()
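qla_nvme_alloc_queue() treats qidx 0 as the admin queue: a non-zero index is decremented first, then range-checked against the transport's queue limit, and an already-created queue pair in `queue_pair_map[]` is simply reused. A userspace sketch of that logic follows, with an invented limit and a `>=` bound rather than the driver's exact comparison.

```c
#include <stdio.h>

#define MAX_HW_QUEUES 8		/* stand-in for the transport's limit */

/* Stand-in queue-pair table; slot i caches the queue pair already created
 * for hardware queue i. */
static void *queue_pair_map[MAX_HW_QUEUES];

/* Mirrors qla_nvme_alloc_queue(): the NVMe-FC transport numbers I/O queues
 * from 1 (0 is the admin queue), so a non-zero qidx is shifted down before
 * it is range-checked and used to look up / populate the cached queue pair. */
static int mock_alloc_queue(unsigned int qidx, void **handle)
{
	static int dummy_qpair;		/* stands in for a newly created queue pair */

	if (qidx)
		qidx--;

	if (qidx >= MAX_HW_QUEUES) {
		fprintf(stderr, "qidx %u out of range (max %d)\n", qidx, MAX_HW_QUEUES);
		return -1;
	}

	if (queue_pair_map[qidx]) {
		*handle = queue_pair_map[qidx];		/* reuse the existing pair */
		return 0;
	}

	queue_pair_map[qidx] = &dummy_qpair;		/* create + cache it */
	*handle = queue_pair_map[qidx];
	return 0;
}

int main(void)
{
	void *h1, *h2;

	mock_alloc_queue(1, &h1);
	mock_alloc_queue(1, &h2);
	printf("same queue pair reused: %s\n", h1 == h2 ? "yes" : "no");
	return 0;
}
```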
|
| /linux/drivers/gpu/drm/amd/amdgpu/ |
| amdgpu_amdkfd_gfx_v9.c |
|   810   int qidx;   in kgd_gfx_v9_get_cu_occupancy() local
|   851   for (qidx = 0; qidx < max_queue_cnt; qidx++) {   in kgd_gfx_v9_get_cu_occupancy()
|   856   if (!test_bit(qidx, cp_queue_bitmap))   in kgd_gfx_v9_get_cu_occupancy()
|   859   if (!(queue_map & (1 << qidx)))   in kgd_gfx_v9_get_cu_occupancy()
|   863   get_wave_count(adev, qidx, &wave_cnt, &vmid);   in kgd_gfx_v9_get_cu_occupancy()
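kgd_gfx_v9_get_cu_occupancy() walks queue slots and skips any index that is absent from `cp_queue_bitmap` or not set in the hardware's `queue_map` before reading its wave count. A minimal sketch of that double filter, using plain integers as bitmaps and an array in place of get_wave_count():

```c
#include <stdio.h>

#define MAX_QUEUES 32

/* Stand-ins: cp_queue_bitmap says which queue slots exist at all, queue_map
 * says which of them are currently active on the hardware pipe. */
static int test_bit(int nr, unsigned int bitmap)
{
	return (bitmap >> nr) & 1;
}

/* Mirrors the filtering in kgd_gfx_v9_get_cu_occupancy(): only queue indices
 * that are both present in the bitmap and set in the hardware's queue_map
 * contribute their wave count. */
static int mock_count_waves(unsigned int cp_queue_bitmap, unsigned int queue_map,
			    const int wave_cnt_per_queue[MAX_QUEUES])
{
	int qidx, total = 0;

	for (qidx = 0; qidx < MAX_QUEUES; qidx++) {
		if (!test_bit(qidx, cp_queue_bitmap))
			continue;			/* slot not usable at all */
		if (!(queue_map & (1u << qidx)))
			continue;			/* slot usable but idle */
		total += wave_cnt_per_queue[qidx];	/* get_wave_count() stand-in */
	}
	return total;
}

int main(void)
{
	int waves[MAX_QUEUES] = { [0] = 4, [1] = 8, [2] = 2 };

	/* queues 0..2 exist, but only 0 and 2 are mapped: expect 4 + 2 = 6 */
	printf("active waves: %d\n", mock_count_waves(0x7, 0x5, waves));
	return 0;
}
```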
|
| /linux/drivers/net/ethernet/marvell/octeontx2/af/ |
| rvu_nix.c |
|   924   inst.cindex = req->qidx;   in rvu_nix_blk_aq_enq_inst()
|  1177   int qidx, q_cnt = 0;   in nix_lf_hwctx_disable() local
|  1210   for (qidx = 0; qidx < q_cnt; qidx++) {   in nix_lf_hwctx_disable()
|  1211   if (!test_bit(qidx, bmap))   in nix_lf_hwctx_disable()
|  1213   aq_req.qidx = qidx;   in nix_lf_hwctx_disable()
|  1242   lock_ctx_req.qidx = req->qidx;   in nix_lf_hwctx_lockdown()
|  2752   aq_req.qidx = mce;   in nix_blk_setup_mce()
|  4994   layer = (req->qidx >> 14) & 0x03;   in nix_verify_bandprof()
|  4995   prof_idx = req->qidx & 0x3FFF;   in nix_verify_bandprof()
|  5171   aq_req->qidx = qidx;   in nix_aq_context_read()
|  [all …]
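The nix_verify_bandprof() lines show that for bandwidth-profile contexts `qidx` is a packed value rather than a plain queue number: bits 15:14 carry the policer layer and bits 13:0 the profile index. A tiny decoder sketch, with the field widths taken only from the masks above:

```c
#include <stdio.h>
#include <stdint.h>

/* Mirrors nix_verify_bandprof(): the 16-bit qidx packs the policer layer in
 * bits 15:14 and the profile index in bits 13:0. */
static void mock_decode_bandprof_qidx(uint16_t qidx)
{
	unsigned int layer    = (qidx >> 14) & 0x03;
	unsigned int prof_idx = qidx & 0x3FFF;

	printf("qidx 0x%04x -> layer %u, profile %u\n", qidx, layer, prof_idx);
}

int main(void)
{
	mock_decode_bandprof_qidx(0x8005);	/* layer 2, profile 5 */
	mock_decode_bandprof_qidx(0x0123);	/* layer 0, profile 291 */
	return 0;
}
```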
|
| /linux/drivers/scsi/lpfc/ |
| lpfc_nvme.c |
|    83   unsigned int qidx, u16 qsize,   in lpfc_nvme_create_queue() argument
|   101   qhandle->qidx = qidx;   in lpfc_nvme_create_queue()
|   107   if (qidx) {   in lpfc_nvme_create_queue()
|   109   qhandle->index = ((qidx - 1) %   in lpfc_nvme_create_queue()
|   113   qhandle->index = qidx;   in lpfc_nvme_create_queue()
|   119   qidx, qhandle->cpu_id, qhandle->index, qhandle);   in lpfc_nvme_create_queue()
|   140   unsigned int qidx,   in lpfc_nvme_delete_queue() argument
|   154   lport, qidx, handle);   in lpfc_nvme_delete_queue()
|  1576   if (!lpfc_queue_info->qidx && !pnvme_fcreq->sg_cnt) {   in lpfc_nvme_fcp_io_submit()
|  1651   lpfc_ncmd->qidx = lpfc_queue_info->qidx;   in lpfc_nvme_fcp_io_submit()
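lpfc_nvme_create_queue() maps the NVMe-FC queue number onto a hardware queue index: qidx 0 (the admin queue) passes through unchanged, while I/O queues use `(qidx - 1)` modulo the number of hardware queues. A sketch of that mapping with an invented hardware-queue count and a simplified handle struct:

```c
#include <stdio.h>
#include <stdlib.h>

#define NUM_HW_QUEUES 4		/* stand-in for the driver's hardware-queue count */

/* Stand-in for the driver's per-queue handle. */
struct mock_qhandle {
	unsigned int qidx;	/* queue number handed in by the NVMe-FC layer */
	unsigned int index;	/* hardware queue actually used */
};

/* Mirrors lpfc_nvme_create_queue(): qidx 0 is the admin queue and maps
 * straight through, while I/O queues (qidx >= 1) are spread round-robin
 * over the available hardware queues. */
static struct mock_qhandle *mock_create_queue(unsigned int qidx)
{
	struct mock_qhandle *qhandle = malloc(sizeof(*qhandle));

	if (!qhandle)
		return NULL;

	qhandle->qidx = qidx;
	if (qidx)
		qhandle->index = (qidx - 1) % NUM_HW_QUEUES;
	else
		qhandle->index = qidx;		/* admin queue */

	return qhandle;
}

int main(void)
{
	unsigned int qidx;

	for (qidx = 0; qidx <= 6; qidx++) {
		struct mock_qhandle *qh = mock_create_queue(qidx);

		printf("qidx %u -> hw index %u\n", qidx, qh->index);
		free(qh);
	}
	return 0;
}
```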
|
| lpfc_debugfs.h |
|   522   lpfc_debug_dump_hba_eq(struct lpfc_hba *phba, int qidx)   in lpfc_debug_dump_hba_eq() argument
|   526   qp = phba->sli4_hba.hdwq[qidx].hba_eq;   in lpfc_debug_dump_hba_eq()
|   528   pr_err("EQ[Idx:%d|Qid:%d]\n", qidx, qp->queue_id);   in lpfc_debug_dump_hba_eq()
|
| /linux/include/linux/ |
| nvme-fc-driver.h |
|   480   unsigned int qidx, u16 qsize,
|   483   unsigned int qidx, void *handle);
|
| /linux/drivers/scsi/mpi3mr/ |
| mpi3mr.h |
|   174   #define REPLY_QUEUE_IDX_TO_MSIX_IDX(qidx, offset) (qidx + offset)   argument
|   894   u64 *reply_dma, u16 qidx);
|
| mpi3mr_fw.c |
|  1357   reply_qid = mrioc->op_reply_qinfo[qidx].qid;   in mpi3mr_delete_op_reply_q()
|  1359   midx = REPLY_QUEUE_IDX_TO_MSIX_IDX(qidx, mrioc->op_reply_q_offset);   in mpi3mr_delete_op_reply_q()
|  1413   mpi3mr_free_op_reply_q_segments(mrioc, qidx);   in mpi3mr_delete_op_reply_q()
|  1434   struct op_reply_qinfo *op_reply_q = mrioc->op_reply_qinfo + qidx;   in mpi3mr_alloc_op_reply_q_segments()
|  1491   struct op_req_qinfo *op_req_q = mrioc->req_qinfo + qidx;   in mpi3mr_alloc_op_req_q_segments()
|  1550   struct op_reply_qinfo *op_reply_q = mrioc->op_reply_qinfo + qidx;   in mpi3mr_create_op_reply_q()
|  1566   reply_qid = qidx + 1;   in mpi3mr_create_op_reply_q()
|  1575   retval = mpi3mr_alloc_op_reply_q_segments(mrioc, qidx);   in mpi3mr_create_op_reply_q()
|  1577   mpi3mr_free_op_reply_q_segments(mrioc, qidx);   in mpi3mr_create_op_reply_q()
|  3441   struct op_reply_qinfo *op_reply_q = mrioc->op_reply_qinfo + qidx;   in mpi3mr_memset_op_reply_q_buffers()
|  [all …]
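mpi3mr derives two values from an operational reply queue's index: the firmware queue id is `qidx + 1`, and the MSI-X vector index is `qidx` plus the controller's reply-queue offset, which is what the `REPLY_QUEUE_IDX_TO_MSIX_IDX()` macro above expresses. The sketch below reproduces that arithmetic; the offset value is illustrative and the macro is a re-statement of the one in mpi3mr.h, with extra parentheses added for safety.

```c
#include <stdio.h>

/* Shape of the macro in mpi3mr.h: a reply queue's MSI-X vector is simply
 * its index plus the controller's vector offset. */
#define REPLY_QUEUE_IDX_TO_MSIX_IDX(qidx, offset) ((qidx) + (offset))

/* Mirrors mpi3mr_create_op_reply_q()/mpi3mr_delete_op_reply_q(): queue ids
 * reported to firmware are 1-based (qidx + 1), while the MSI-X index adds
 * the operational reply queue offset. */
int main(void)
{
	unsigned int op_reply_q_offset = 1;	/* illustrative: one default vector */
	unsigned int qidx;

	for (qidx = 0; qidx < 4; qidx++) {
		unsigned int reply_qid = qidx + 1;
		unsigned int midx = REPLY_QUEUE_IDX_TO_MSIX_IDX(qidx, op_reply_q_offset);

		printf("reply queue idx %u -> qid %u, msix %u\n", qidx, reply_qid, midx);
	}
	return 0;
}
```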
|