/linux-6.3-rc2/drivers/ufs/core/

ufs-mcq.c
  196  struct ufs_hw_queue *hwq;    (in ufshcd_mcq_memory_alloc(), local)
  201  hwq = &hba->uhq[i];    (in ufshcd_mcq_memory_alloc())
  204  hwq->max_entries;    (in ufshcd_mcq_memory_alloc())
  206  &hwq->sqe_dma_addr,    (in ufshcd_mcq_memory_alloc())
  208  if (!hwq->sqe_dma_addr) {    (in ufshcd_mcq_memory_alloc())
  304  spin_lock(&hwq->cq_lock);    (in ufshcd_mcq_poll_cqe_lock())
  313  struct ufs_hw_queue *hwq;    (in ufshcd_mcq_make_queues_operational(), local)
  318  hwq = &hba->uhq[i];    (in ufshcd_mcq_make_queues_operational())
  319  hwq->id = i;    (in ufshcd_mcq_make_queues_operational())
  355  hwq->sq_tail_slot = hwq->cq_tail_slot = hwq->cq_head_slot = 0;    (in ufshcd_mcq_make_queues_operational())
  [all …]
ufshcd-priv.h
  75  struct ufs_hw_queue *hwq);
  79  struct ufs_hw_queue *hwq);
ufshcd.c
  2202  struct ufs_hw_queue *hwq)    (in ufshcd_send_command(), argument)
  2219  spin_lock(&hwq->sq_lock);    (in ufshcd_send_command())
  2220  memcpy(hwq->sqe_base_addr + (hwq->sq_tail_slot * utrd_size),    (in ufshcd_send_command())
  2222  ufshcd_inc_sq_tail(hwq);    (in ufshcd_send_command())
  2223  spin_unlock(&hwq->sq_lock);    (in ufshcd_send_command())
  2863  struct ufs_hw_queue *hwq = NULL;    (in ufshcd_queuecommand(), local)
  2951  ufshcd_send_command(hba, tag, hwq);    (in ufshcd_queuecommand())
  5483  struct ufs_hw_queue *hwq;    (in ufshcd_poll(), local)
  6730  struct ufs_hw_queue *hwq;    (in ufshcd_handle_mcq_cq_events(), local)
  6743  hwq = &hba->uhq[i];    (in ufshcd_handle_mcq_cq_events())
  [all …]
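The ufs-mcq.c and ufshcd.c hits above outline the MCQ submission path: ufshcd_send_command() takes hwq->sq_lock, copies the transfer request descriptor into the submission-queue entry at sq_tail_slot, advances the tail via ufshcd_inc_sq_tail(), and drops the lock. The following is a minimal, self-contained userspace model of that copy-at-tail-and-wrap pattern, not the kernel code: the names mcq_ring and mcq_submit, the fixed 64-byte entry size, and the pthread mutex standing in for the spinlock are all illustrative assumptions.

/* Minimal userspace model of an MCQ-style submission queue (illustrative only). Compile with -pthread. */
#include <pthread.h>
#include <stdio.h>
#include <string.h>

#define SQE_SIZE   64   /* assumed entry size; real UTP transfer request descriptors differ */
#define SQ_ENTRIES 32   /* assumed queue depth */

struct mcq_ring {
	unsigned char   sqe[SQ_ENTRIES][SQE_SIZE]; /* stands in for sqe_base_addr */
	unsigned int    sq_tail_slot;              /* next slot the producer will fill */
	pthread_mutex_t sq_lock;                   /* stands in for the driver's spinlock */
};

/* Copy one entry into the current tail slot and advance the tail, wrapping at the end. */
static void mcq_submit(struct mcq_ring *q, const void *entry)
{
	pthread_mutex_lock(&q->sq_lock);
	memcpy(q->sqe[q->sq_tail_slot], entry, SQE_SIZE);
	q->sq_tail_slot = (q->sq_tail_slot + 1) % SQ_ENTRIES;
	/* a real driver would also write the new tail to the queue's doorbell register here */
	pthread_mutex_unlock(&q->sq_lock);
}

int main(void)
{
	struct mcq_ring q = { .sq_tail_slot = 0, .sq_lock = PTHREAD_MUTEX_INITIALIZER };
	unsigned char entry[SQE_SIZE] = { 0 };

	for (int i = 0; i < 40; i++)   /* more than SQ_ENTRIES, to show the wrap */
		mcq_submit(&q, entry);
	printf("tail slot after 40 submissions: %u\n", q.sq_tail_slot); /* 40 % 32 = 8 */
	return 0;
}

Keeping the copy and the tail update under a single lock is what lets several submitters share one hardware queue without reserving slots in advance; the model only tracks the software bookkeeping, not the device-visible doorbell.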
/linux-6.3-rc2/drivers/infiniband/hw/bnxt_re/

qplib_res.h
  66   #define HWQ_CMP(idx, hwq) ((idx) & ((hwq)->max_elements - 1))    (argument)
  68   #define HWQ_FREE_SLOTS(hwq) (hwq->max_elements - \    (argument)
  69   ((HWQ_CMP(hwq->prod, hwq)\
  70   - HWQ_CMP(hwq->cons, hwq))\
  321  return (void *)(hwq->pbl_ptr[pg_num] + hwq->element_size * pg_idx);    (in bnxt_qplib_get_qe())
  326  idx += hwq->prod;    (in bnxt_qplib_get_prod_qe())
  328  idx -= hwq->depth;    (in bnxt_qplib_get_prod_qe())
  368  hwq->prod = (hwq->prod + cnt) % hwq->depth;    (in bnxt_qplib_hwq_incr_prod())
  374  hwq->cons = (hwq->cons + cnt) % hwq->depth;    (in bnxt_qplib_hwq_incr_cons())
  382  key = info->hwq->cons & (info->hwq->max_elements - 1);    (in bnxt_qplib_ring_db32())
  [all …]
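HWQ_CMP() in the qplib_res.h hits folds a free-running producer or consumer index into a ring slot by masking with max_elements - 1, which assumes max_elements is a power of two, and HWQ_FREE_SLOTS() (its body is truncated in the listing) derives the remaining room from the masked distance between prod and cons. Below is a standalone sketch of that arithmetic; the helper names hwq_cmp()/hwq_free_slots(), the final mask in the free-slots calculation, and the example ring size are assumptions for illustration, not the driver's definitions.

/* Standalone sketch of power-of-two ring index math (illustrative only). */
#include <assert.h>
#include <stdio.h>

struct hwq {
	unsigned int max_elements; /* must be a power of two for the mask to work */
	unsigned int prod;         /* free-running producer index */
	unsigned int cons;         /* free-running consumer index */
};

/* Counterpart of HWQ_CMP(): fold an index into [0, max_elements). */
static unsigned int hwq_cmp(unsigned int idx, const struct hwq *q)
{
	return idx & (q->max_elements - 1);
}

/* Counterpart of HWQ_FREE_SLOTS(): capacity minus the masked prod/cons distance. */
static unsigned int hwq_free_slots(const struct hwq *q)
{
	return q->max_elements -
	       ((hwq_cmp(q->prod, q) - hwq_cmp(q->cons, q)) & (q->max_elements - 1));
}

int main(void)
{
	struct hwq q = { .max_elements = 8, .prod = 10, .cons = 6 };

	assert((q.max_elements & (q.max_elements - 1)) == 0); /* power of two */
	printf("slot for prod=10: %u\n", hwq_cmp(q.prod, &q)); /* 10 & 7 = 2 */
	printf("free slots: %u\n", hwq_free_slots(&q));        /* 8 - 4 = 4 */
	return 0;
}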
qplib_fp.c
  660   srq->dbinfo.hwq = &srq->hwq;    (in bnxt_qplib_create_srq())
  902   sq->dbinfo.hwq = &sq->hwq;    (in bnxt_qplib_create_qp1())
  907   rq->dbinfo.hwq = &rq->hwq;    (in bnxt_qplib_create_qp1())
  940   hwq = &sq->hwq;    (in bnxt_qplib_init_psn_ptr())
  1111  sq->dbinfo.hwq = &sq->hwq;    (in bnxt_qplib_create_qp())
  1116  rq->dbinfo.hwq = &rq->hwq;    (in bnxt_qplib_create_qp())
  1597  hwq = &qp->sq.hwq;    (in bnxt_qplib_put_inline())
  1683  hwq = &sq->hwq;    (in bnxt_qplib_pull_psn_buff())
  1718  hwq = &sq->hwq;    (in bnxt_qplib_post_send())
  1732  hwq->prod, hwq->cons, hwq->depth, sq->q_full_delta);    (in bnxt_qplib_post_send())
  [all …]
qplib_res.c
  167  if (i == hwq->level)    (in bnxt_qplib_free_hwq())
  168  __free_pbl(res, &hwq->pbl[i], hwq->is_user);    (in bnxt_qplib_free_hwq())
  176  hwq->prod = 0;    (in bnxt_qplib_free_hwq())
  177  hwq->cons = 0;    (in bnxt_qplib_free_hwq())
  178  hwq->cp_bit = 0;    (in bnxt_qplib_free_hwq())
  350  hwq->prod = 0;    (in bnxt_qplib_alloc_init_hwq())
  351  hwq->cons = 0;    (in bnxt_qplib_alloc_init_hwq())
  352  hwq->pdev = pdev;    (in bnxt_qplib_alloc_init_hwq())
  358  lvl = hwq->level;    (in bnxt_qplib_alloc_init_hwq())
  361  hwq->pbl_ptr = hwq->pbl[lvl].pg_arr;    (in bnxt_qplib_alloc_init_hwq())
  [all …]
qplib_rcfw.c
  92   struct bnxt_qplib_hwq *hwq = &cmdq->hwq;    (in __send_message(), local)
  168  sw_prod = HWQ_CMP(hwq->prod, hwq);    (in __send_message())
  180  hwq->prod++;    (in __send_message())
  184  cmdq_prod = hwq->prod;    (in __send_message())
  305  struct bnxt_qplib_hwq *hwq = &rcfw->cmdq.hwq;    (in bnxt_qplib_process_qp_event(), local)
  382  struct bnxt_qplib_hwq *hwq = &creq->hwq;    (in bnxt_qplib_service_creq(), local)
  389  raw_cons = hwq->cons;    (in bnxt_qplib_service_creq())
  427  hwq->cons = raw_cons;    (in bnxt_qplib_service_creq())
  442  hwq = &creq->hwq;    (in bnxt_qplib_creq_irq())
  444  sw_cons = HWQ_CMP(hwq->cons, hwq);    (in bnxt_qplib_creq_irq())
  [all …]
qplib_fp.h
  101  struct bnxt_qplib_hwq hwq;    (member)
  249  struct bnxt_qplib_hwq hwq;    (member)
  358  struct bnxt_qplib_hwq *hwq;    (in bnxt_qplib_queue_full(), local)
  361  hwq = &que->hwq;    (in bnxt_qplib_queue_full())
  363  avail = hwq->cons - hwq->prod;    (in bnxt_qplib_queue_full())
  364  if (hwq->cons <= hwq->prod)    (in bnxt_qplib_queue_full())
  365  avail += hwq->depth;    (in bnxt_qplib_queue_full())
  402  struct bnxt_qplib_hwq hwq;    (member)
  475  struct bnxt_qplib_hwq hwq;    (member)
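bnxt_qplib_queue_full() in the qplib_fp.h excerpt computes how much room is left on a ring whose prod and cons indices stay within [0, depth) (the qplib_res.h hits advance them modulo depth): avail = cons - prod, plus depth when the consumer sits at or behind the producer. The sketch below is a standalone model of that check; the names ring_state, ring_avail and ring_full are illustrative, and folding a q_full_delta reserve into the comparison mirrors the shape suggested by the post_send debug print rather than the exact kernel callers.

/* Standalone sketch of a modulo-depth ring occupancy check (illustrative only). */
#include <stdbool.h>
#include <stdio.h>

struct ring_state {
	unsigned int depth; /* number of slots */
	unsigned int prod;  /* producer index, kept in [0, depth) */
	unsigned int cons;  /* consumer index, kept in [0, depth) */
};

/* Slots still free for the producer (unsigned wrap-around does the right thing). */
static unsigned int ring_avail(const struct ring_state *r)
{
	unsigned int avail = r->cons - r->prod;

	if (r->cons <= r->prod) /* consumer not ahead: add one full lap */
		avail += r->depth;
	return avail;
}

/* Full when fewer than 'slots' entries plus a reserved delta remain. */
static bool ring_full(const struct ring_state *r, unsigned int slots,
		      unsigned int q_full_delta)
{
	return ring_avail(r) <= slots + q_full_delta;
}

/* Advance the producer the way bnxt_qplib_hwq_incr_prod() advances hwq->prod. */
static void ring_incr_prod(struct ring_state *r, unsigned int cnt)
{
	r->prod = (r->prod + cnt) % r->depth;
}

int main(void)
{
	struct ring_state r = { .depth = 16, .prod = 14, .cons = 3 };

	printf("avail: %u\n", ring_avail(&r));      /* 16 - 11 in flight = 5 */
	printf("full for 4 slots, delta 1? %s\n",
	       ring_full(&r, 4, 1) ? "yes" : "no"); /* 5 <= 5: yes */
	ring_incr_prod(&r, 3);
	printf("prod after +3: %u\n", r.prod);      /* (14 + 3) mod 16 = 1 */
	return 0;
}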
qplib_sp.c
  499  if (mrw->hwq.max_elements)    (in bnxt_qplib_free_mrw())
  500  bnxt_qplib_free_hwq(res, &mrw->hwq);    (in bnxt_qplib_free_mrw())
  558  if (mrw->hwq.max_elements) {    (in bnxt_qplib_dereg_mrw())
  561  bnxt_qplib_free_hwq(res, &mrw->hwq);    (in bnxt_qplib_dereg_mrw())
  585  if (mr->hwq.max_elements)    (in bnxt_qplib_reg_mr())
  586  bnxt_qplib_free_hwq(res, &mr->hwq);    (in bnxt_qplib_reg_mr())
  608  if (mr->hwq.level == PBL_LVL_MAX) {    (in bnxt_qplib_reg_mr())
  614  level = mr->hwq.level;    (in bnxt_qplib_reg_mr())
  638  if (mr->hwq.max_elements)    (in bnxt_qplib_reg_mr())
  639  bnxt_qplib_free_hwq(res, &mr->hwq);    (in bnxt_qplib_reg_mr())
  [all …]
qplib_rcfw.h
  150  struct bnxt_qplib_hwq hwq;    (member)
  169  struct bnxt_qplib_hwq hwq;    (member)
qplib_sp.h
  118  struct bnxt_qplib_hwq hwq;    (member)
  123  struct bnxt_qplib_hwq hwq;    (member)
main.c
  861   nq->hwq.max_elements = BNXT_QPLIB_NQE_MAX_CNT;    (in bnxt_re_alloc_res())
  869   rattr.dma_arr = nq->hwq.pbl[PBL_LVL_0].pg_map_arr;    (in bnxt_re_alloc_res())
  870   rattr.pages = nq->hwq.pbl[rdev->nq[i].hwq.level].pg_count;    (in bnxt_re_alloc_res())
  1266  rattr.dma_arr = creq->hwq.pbl[PBL_LVL_0].pg_map_arr;    (in bnxt_re_dev_init())
  1267  rattr.pages = creq->hwq.pbl[creq->hwq.level].pg_count;    (in bnxt_re_dev_init())
ib_verbs.c
  2457  wqe->frmr.pbl_ptr = (__le64 *)qplib_frpl->hwq.pbl_ptr[0];    (in bnxt_re_build_reg_wqe())
  2458  wqe->frmr.pbl_dma_ptr = qplib_frpl->hwq.pbl_dma_ptr[0];    (in bnxt_re_build_reg_wqe())
  2461  wqe->frmr.levels = qplib_frpl->hwq.level;    (in bnxt_re_build_reg_wqe())
  2895  resp.tail = cq->qplib_cq.hwq.cons;    (in bnxt_re_create_cq())
  3407  lib_qp->id, lib_qp->sq.hwq.prod,    (in send_phantom_wqe())
  3408  HWQ_CMP(lib_qp->sq.hwq.prod, &lib_qp->sq.hwq),    (in send_phantom_wqe())
  3595  mr->qplib_mr.hwq.level = PBL_LVL_MAX;    (in bnxt_re_get_dma_mr())
/linux-6.3-rc2/drivers/scsi/cxlflash/

main.c
  693   struct hwq *hwq;    (in stop_afu(), local)
  732   struct hwq *hwq;    (in term_intr(), local)
  778   struct hwq *hwq;    (in term_mc(), local)
  1334  struct hwq *hwq = (struct hwq *)data;    (in cxlflash_sync_err_irq(), local)
  1472  struct hwq *hwq = (struct hwq *)data;    (in cxlflash_rrq_irq(), local)
  1540  struct hwq *hwq = (struct hwq *)data;    (in cxlflash_async_err_irq(), local)
  1711  struct hwq *hwq;    (in init_pcr(), local)
  1746  struct hwq *hwq;    (in init_global(), local)
  1872  struct hwq *hwq;    (in start_afu(), local)
  2097  struct hwq *hwq;    (in init_afu(), local)
  [all …]
common.h
  196  struct hwq {    (struct)
  231  struct hwq hwqs[CXLFLASH_MAX_HWQS];    (argument)
  233  int (*context_reset)(struct hwq *hwq);
  255  static inline struct hwq *get_hwq(struct afu *afu, u32 index)    (in get_hwq())
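common.h above declares the per-AFU array of hardware-queue contexts (hwqs[CXLFLASH_MAX_HWQS]) together with a get_hwq() accessor that hands back one slot by index. A minimal sketch of that container-plus-accessor pattern follows; the field layout, the MAX_HWQS value and the bounds assert are illustrative assumptions, not the cxlflash definitions.

/* Sketch of a fixed per-device array of hardware-queue contexts (illustrative only). */
#include <assert.h>
#include <stdio.h>

#define MAX_HWQS 8   /* assumed limit, standing in for CXLFLASH_MAX_HWQS */

struct hwq {
	unsigned int index;      /* position within the owning device */
	unsigned int ctx_hndl;   /* placeholder for per-queue hardware state */
};

struct afu {
	struct hwq   hwqs[MAX_HWQS]; /* one context per hardware queue */
	unsigned int num_hwqs;       /* how many are actually in use */
};

/* Hand back the per-queue context for 'index'. */
static inline struct hwq *get_hwq(struct afu *afu, unsigned int index)
{
	assert(index < afu->num_hwqs); /* illustrative guard, not in the excerpt */
	return &afu->hwqs[index];
}

int main(void)
{
	struct afu afu = { .num_hwqs = 4 };

	for (unsigned int i = 0; i < afu.num_hwqs; i++)
		get_hwq(&afu, i)->index = i;
	printf("hwq 2 index: %u\n", get_hwq(&afu, 2)->index);
	return 0;
}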
superpipe.c
  267   struct hwq *hwq = get_hwq(afu, PRIMARY_HWQ);    (in afu_attach(), local)
  291   val = hwq->ctx_hndl;    (in afu_attach())
  298   val = SISL_RHT_CNT_ID((u64)MAX_RHT_PER_CONTEXT, (u64)(hwq->ctx_hndl));    (in afu_attach())
  1660  struct hwq *hwq = get_hwq(afu, PRIMARY_HWQ);    (in cxlflash_afu_recover(), local)
  1737  reg = readq_be(&hwq->ctrl_map->mbox_r);    (in cxlflash_afu_recover())
/linux-6.3-rc2/include/scsi/

scsi_tcq.h
  26  u16 hwq;    (in scsi_host_find_tag(), local)
  31  hwq = blk_mq_unique_tag_to_hwq(tag);    (in scsi_host_find_tag())
  32  if (hwq < shost->tag_set.nr_hw_queues) {    (in scsi_host_find_tag())
  33  req = blk_mq_tag_to_rq(shost->tag_set.tags[hwq],    (in scsi_host_find_tag())
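scsi_host_find_tag() above (like nbd_handle_reply() in nbd.c and virtscsi_pick_vq_mq() in virtio_scsi.c further down) first recovers the hardware-queue index from a blk-mq unique tag, bounds-checks it against nr_hw_queues, and only then resolves the per-queue tag to a request. To the best of my understanding the unique tag packs the hwq index in the upper 16 bits and the per-queue tag in the lower 16; the standalone sketch below models that split with its own helper names (make_unique_tag, unique_tag_to_hwq, unique_tag_to_tag), so treat the exact bit layout as an assumption to verify against include/linux/blk-mq.h.

/* Standalone model of blk-mq-style unique-tag packing (assumed 16/16 split). */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define UNIQUE_TAG_BITS 16
#define UNIQUE_TAG_MASK ((1u << UNIQUE_TAG_BITS) - 1)

/* Combine a hardware-queue index and a per-queue tag into one 32-bit value. */
static uint32_t make_unique_tag(uint16_t hwq, uint16_t tag)
{
	return ((uint32_t)hwq << UNIQUE_TAG_BITS) | tag;
}

static uint16_t unique_tag_to_hwq(uint32_t unique_tag)
{
	return unique_tag >> UNIQUE_TAG_BITS;
}

static uint16_t unique_tag_to_tag(uint32_t unique_tag)
{
	return unique_tag & UNIQUE_TAG_MASK;
}

int main(void)
{
	uint32_t ut = make_unique_tag(3, 42);

	/* A driver still bounds-checks hwq against nr_hw_queues, as scsi_host_find_tag() does above. */
	assert(unique_tag_to_hwq(ut) == 3 && unique_tag_to_tag(ut) == 42);
	printf("unique tag 0x%08x -> hwq %u, tag %u\n", (unsigned)ut,
	       (unsigned)unique_tag_to_hwq(ut), (unsigned)unique_tag_to_tag(ut));
	return 0;
}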
/linux-6.3-rc2/drivers/net/wireless/ti/wlcore/

tx.c
  1199  int hwq = wlcore_tx_get_mac80211_queue(wlvif, queue);    (in wlcore_stop_queue_locked(), local)
  1200  bool stopped = !!wl->queue_stop_reasons[hwq];    (in wlcore_stop_queue_locked())
  1203  WARN_ON_ONCE(test_and_set_bit(reason, &wl->queue_stop_reasons[hwq]));    (in wlcore_stop_queue_locked())
  1208  ieee80211_stop_queue(wl->hw, hwq);    (in wlcore_stop_queue_locked())
  1225  int hwq = wlcore_tx_get_mac80211_queue(wlvif, queue);    (in wlcore_wake_queue(), local)
  1232  if (wl->queue_stop_reasons[hwq])    (in wlcore_wake_queue())
  1235  ieee80211_wake_queue(wl->hw, hwq);    (in wlcore_wake_queue())
  1302  int hwq = wlcore_tx_get_mac80211_queue(wlvif, queue);    (in wlcore_is_queue_stopped_by_reason_locked(), local)
  1305  return test_bit(reason, &wl->queue_stop_reasons[hwq]);    (in wlcore_is_queue_stopped_by_reason_locked())
  1311  int hwq = wlcore_tx_get_mac80211_queue(wlvif, queue);    (in wlcore_is_queue_stopped_locked(), local)
  [all …]
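The wlcore tx.c hits show one queue_stop_reasons bitmask per mac80211 hardware queue: stopping records a reason bit and only stops the queue if no reason was set before, while waking clears the bit and wakes the queue only once the mask is empty again. The plain-C sketch below models that reasons-as-refcount idea; the reason names, stop_queue()/wake_queue() and the printf stand-ins for ieee80211_stop_queue()/ieee80211_wake_queue() are illustrative, and the kernel version additionally relies on atomic bitops and locking that the model omits.

/* Plain-C model of per-queue stop reasons tracked as a bitmask (illustrative only). */
#include <stdbool.h>
#include <stdio.h>

#define NUM_HW_QUEUES 4

enum stop_reason { REASON_WATERMARK, REASON_FW_RESTART, REASON_FLUSH };

static unsigned long queue_stop_reasons[NUM_HW_QUEUES];

/* Set a reason bit; only the first reason actually stops the hardware queue. */
static void stop_queue(int hwq, enum stop_reason reason)
{
	bool was_stopped = queue_stop_reasons[hwq] != 0;

	queue_stop_reasons[hwq] |= 1ul << reason;
	if (!was_stopped)
		printf("queue %d stopped\n", hwq);   /* ieee80211_stop_queue() stand-in */
}

/* Clear a reason bit; the queue wakes only when no reason remains. */
static void wake_queue(int hwq, enum stop_reason reason)
{
	queue_stop_reasons[hwq] &= ~(1ul << reason);
	if (queue_stop_reasons[hwq] == 0)
		printf("queue %d woken\n", hwq);     /* ieee80211_wake_queue() stand-in */
}

int main(void)
{
	stop_queue(1, REASON_WATERMARK);   /* stops: first reason */
	stop_queue(1, REASON_FLUSH);       /* already stopped: only records the reason */
	wake_queue(1, REASON_WATERMARK);   /* still stopped: FLUSH is pending */
	wake_queue(1, REASON_FLUSH);       /* wakes: last reason cleared */
	return 0;
}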
/linux-6.3-rc2/drivers/net/wireless/mediatek/mt76/

tx.c
  393  struct mt76_queue *hwq = phy->q_tx[MT_TXQ_PSD];    (in mt76_release_buffered_frames(), local)
  396  spin_lock_bh(&hwq->lock);    (in mt76_release_buffered_frames())
  420  dev->queue_ops->kick(dev, hwq);    (in mt76_release_buffered_frames())
  425  spin_unlock_bh(&hwq->lock);    (in mt76_release_buffered_frames())
  626  struct mt76_queue *hwq;    (in mt76_stop_tx_queues(), local)
  632  hwq = phy->q_tx[mt76_txq_get_qid(txq)];    (in mt76_stop_tx_queues())
  635  spin_lock_bh(&hwq->lock);    (in mt76_stop_tx_queues())
  637  spin_unlock_bh(&hwq->lock);    (in mt76_stop_tx_queues())
mac80211.c
  1666  struct mt76_queue *hwq;    (in mt76_init_queue(), local)
  1669  hwq = devm_kzalloc(dev->dev, sizeof(*hwq), GFP_KERNEL);    (in mt76_init_queue())
  1670  if (!hwq)    (in mt76_init_queue())
  1673  hwq->flags = flags;    (in mt76_init_queue())
  1675  err = dev->queue_ops->alloc(dev, hwq, idx, n_desc, 0, ring_base);    (in mt76_init_queue())
  1679  return hwq;    (in mt76_init_queue())
/linux-6.3-rc2/drivers/scsi/

virtio_scsi.c
  558  u16 hwq = blk_mq_unique_tag_to_hwq(tag);    (in virtscsi_pick_vq_mq(), local)
  560  return &vscsi->req_vqs[hwq];    (in virtscsi_pick_vq_mq())
  732  static void virtscsi_commit_rqs(struct Scsi_Host *shost, u16 hwq)    (in virtscsi_commit_rqs(), argument)
  736  virtscsi_kick_vq(&vscsi->req_vqs[hwq]);    (in virtscsi_commit_rqs())
/linux-6.3-rc2/drivers/net/wireless/intel/iwlegacy/

common.h
  2246  il_set_swq_id(struct il_tx_queue *txq, u8 ac, u8 hwq)    (in il_set_swq_id(), argument)
  2249  BUG_ON(hwq > 31); /* only use 5 bits */    (in il_set_swq_id())
  2251  txq->swq_id = (hwq << 2) | ac;    (in il_set_swq_id())
  2272  u8 hwq = (queue >> 2) & 0x1f;    (in il_wake_queue(), local)
  2274  if (test_and_clear_bit(hwq, il->queue_stopped))    (in il_wake_queue())
  2283  u8 hwq = (queue >> 2) & 0x1f;    (in il_stop_queue(), local)
  2285  if (!test_and_set_bit(hwq, il->queue_stopped))    (in il_stop_queue())
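il_set_swq_id() in the iwlegacy excerpt packs a software queue id from the access class and the hardware queue number: hwq (at most 5 bits, hence the BUG_ON(hwq > 31)) occupies bits 2 and up, the AC sits in the low two bits (implied by the shift), and il_wake_queue()/il_stop_queue() recover hwq with (queue >> 2) & 0x1f. The standalone pack/unpack pair below is built from those lines; the helper names set_swq_id()/swq_id_to_hwq()/swq_id_to_ac() and the AC range check are illustrative.

/* Standalone pack/unpack of an iwlegacy-style swq_id (illustrative only). */
#include <assert.h>
#include <stdio.h>

/* Bits 0-1: access class (AC); bits 2-6: hardware queue number (0..31). */
static unsigned int set_swq_id(unsigned int ac, unsigned int hwq)
{
	assert(ac <= 3);    /* two bits of AC (assumed bound) */
	assert(hwq <= 31);  /* mirrors the BUG_ON(hwq > 31) in the excerpt */
	return (hwq << 2) | ac;
}

static unsigned int swq_id_to_hwq(unsigned int swq_id)
{
	return (swq_id >> 2) & 0x1f;
}

static unsigned int swq_id_to_ac(unsigned int swq_id)
{
	return swq_id & 0x3;
}

int main(void)
{
	unsigned int id = set_swq_id(2 /* AC */, 19 /* hwq */);

	printf("swq_id 0x%02x -> hwq %u, ac %u\n", id, swq_id_to_hwq(id), swq_id_to_ac(id));
	assert(swq_id_to_hwq(id) == 19 && swq_id_to_ac(id) == 2);
	return 0;
}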
/linux-6.3-rc2/drivers/block/

nbd.c
  731  u16 hwq;    (in nbd_handle_reply(), local)
  737  hwq = blk_mq_unique_tag_to_hwq(tag);    (in nbd_handle_reply())
  738  if (hwq < nbd->tag_set.nr_hw_queues)    (in nbd_handle_reply())
  739  req = blk_mq_tag_to_rq(nbd->tag_set.tags[hwq],    (in nbd_handle_reply())
/linux-6.3-rc2/drivers/ufs/host/

ufs-qcom.c
  1556  struct ufs_hw_queue *hwq = &hba->uhq[id];    (in ufs_qcom_mcq_esi_handler(), local)
  1559  ufshcd_mcq_poll_cqe_nolock(hba, hwq);    (in ufs_qcom_mcq_esi_handler())
/linux-6.3-rc2/drivers/scsi/ibmvscsi/

ibmvfc.h
  760  u16 hwq;    (member)