| /linux/drivers/net/ethernet/microsoft/mana/ |
| hw_channel.c |
    371  hwc_cq->queue_depth = q_depth;  in mana_hwc_create_cq()
    402  dma_buf->num_reqs = q_depth;  in mana_hwc_alloc_dma_buf()
    404  buf_size = PAGE_ALIGN(q_depth * max_msg_size);  in mana_hwc_alloc_dma_buf()
    416  for (i = 0; i < q_depth; i++) {  in mana_hwc_alloc_dma_buf()
    485  hwc_wq->queue_depth = q_depth;  in mana_hwc_create_wq()
    570  for (i = 0; i < q_depth; i++) {  in mana_hwc_test_channel()
    581  for (i = 0; i < q_depth; ++i)  in mana_hwc_test_channel()
    614  *q_depth = hwc->hwc_init_q_depth_max;  in mana_hwc_establish_channel()
    636  err = mana_hwc_init_inflight_msg(hwc, q_depth);  in mana_hwc_init_queues()
    643  err = mana_hwc_create_cq(hwc, q_depth * 2,  in mana_hwc_init_queues()
    [all …]
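The hw_channel.c hits show the MANA hardware channel sizing everything from q_depth: one DMA message slot per outstanding request (num_reqs = q_depth), a buffer of PAGE_ALIGN(q_depth * max_msg_size) bytes to hold them, and a completion queue created at q_depth * 2, presumably because it absorbs completions for both directions. A minimal user-space sketch of that slot layout; the struct and function names are hypothetical and PAGE_ALIGN is re-implemented locally as a stand-in for the kernel macro:

```c
#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE	4096u
/* stand-in for the kernel's PAGE_ALIGN(): round up to a page boundary */
#define PAGE_ALIGN(x)	(((x) + PAGE_SIZE - 1) & ~(uint64_t)(PAGE_SIZE - 1))

/* hypothetical per-request slot bookkeeping, mirroring "num_reqs = q_depth" */
struct msg_buf {
	uint32_t num_reqs;	/* one fixed-size slot per in-flight request */
	uint32_t slot_size;	/* max_msg_size                              */
	uint64_t total_size;	/* PAGE_ALIGN(q_depth * max_msg_size)        */
};

static void msg_buf_layout(struct msg_buf *b, uint32_t q_depth, uint32_t max_msg_size)
{
	b->num_reqs = q_depth;
	b->slot_size = max_msg_size;
	b->total_size = PAGE_ALIGN((uint64_t)q_depth * max_msg_size);
}

int main(void)
{
	struct msg_buf b;

	msg_buf_layout(&b, 128, 2048);
	printf("%u slots of %u bytes -> %llu bytes allocated\n",
	       b.num_reqs, b.slot_size, (unsigned long long)b.total_size);
	return 0;
}
```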
|
| /linux/drivers/net/ethernet/huawei/hinic/ |
| hinic_hw_wq.c |
    507  u16 wqebb_size, u32 wq_page_size, u16 q_depth,  in hinic_wq_allocate() argument
    526  if (q_depth & (q_depth - 1)) {  in hinic_wq_allocate()
    550  wq->q_depth = q_depth;  in hinic_wq_allocate()
    567  atomic_set(&wq->delta, q_depth);  in hinic_wq_allocate()
    568  wq->mask = q_depth - 1;  in hinic_wq_allocate()
    605  u16 q_depth, u16 max_wqe_size)  in hinic_wqs_cmdq_alloc() argument
    623  if (q_depth & (q_depth - 1)) {  in hinic_wqs_cmdq_alloc()
    653  wq[i].q_depth = q_depth;  in hinic_wqs_cmdq_alloc()
    671  atomic_set(&wq[i].delta, q_depth);  in hinic_wqs_cmdq_alloc()
    672  wq[i].mask = q_depth - 1;  in hinic_wqs_cmdq_alloc()
    [all …]
|
| hinic_hw_wq.h |
    30   u16 q_depth;  member
    80   u16 q_depth, u16 max_wqe_size);
    91   u16 wqebb_size, u32 wq_page_size, u16 q_depth,
|
| hinic_hw_cmdq.c |
    368  if (next_prod_idx >= wq->q_depth) {  in cmdq_sync_cmd_direct_resp()
    370  next_prod_idx -= wq->q_depth;  in cmdq_sync_cmd_direct_resp()
    447  if (next_prod_idx >= wq->q_depth) {  in cmdq_set_arm_bit()
    449  next_prod_idx -= wq->q_depth;  in cmdq_set_arm_bit()
    754  cmdq->done = vzalloc(array_size(sizeof(*cmdq->done), wq->q_depth));  in init_cmdq()
    759  wq->q_depth));  in init_cmdq()
|
| hinic_hw_qp.c |
    224  skb_arr_size = wq->q_depth * sizeof(*sq->saved_skb);  in alloc_sq_skb_arr()
    252  skb_arr_size = wq->q_depth * sizeof(*rq->saved_skb);  in alloc_rq_skb_arr()
    324  cqe_size = wq->q_depth * sizeof(*rq->cqe);  in alloc_rq_cqe()
    329  cqe_dma_size = wq->q_depth * sizeof(*rq->cqe_dma);  in alloc_rq_cqe()
    334  for (i = 0; i < wq->q_depth; i++) {  in alloc_rq_cqe()
    367  for (i = 0; i < wq->q_depth; i++)  in free_rq_cqe()
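Two wrap-around styles are visible in the hinic hits above: hinic_hw_wq.c rejects any q_depth that is not a power of two (`q_depth & (q_depth - 1)` must be zero) and stores `mask = q_depth - 1` so ring indices wrap with a single AND, while hinic_hw_cmdq.c wraps the producer index by comparing it against q_depth and subtracting, which also works for arbitrary depths. A stand-alone sketch of both styles; the type and function names are illustrative, not the driver's:

```c
#include <stdint.h>
#include <errno.h>

struct wq_ring {
	uint16_t q_depth;	/* number of entries; must be a power of two here */
	uint16_t mask;		/* q_depth - 1, wraps indices with an AND         */
	uint16_t prod_idx;
};

/* mirrors the checks in hinic_wq_allocate(): reject non-power-of-two depths,
 * then derive the index mask once */
int wq_ring_init(struct wq_ring *wq, uint16_t q_depth)
{
	if (!q_depth || (q_depth & (q_depth - 1)))
		return -EINVAL;

	wq->q_depth = q_depth;
	wq->mask = q_depth - 1;
	wq->prod_idx = 0;
	return 0;
}

/* mask-based wrap: "% q_depth" becomes "& mask" */
uint16_t wq_ring_advance(struct wq_ring *wq, uint16_t n)
{
	wq->prod_idx = (wq->prod_idx + n) & wq->mask;
	return wq->prod_idx;
}

/* the cmdq style: wrap by subtraction, no power-of-two requirement, as in
 * cmdq_sync_cmd_direct_resp() / cmdq_set_arm_bit() */
uint16_t cmdq_next_prod_idx(uint16_t prod_idx, uint16_t num_wqebbs, uint16_t q_depth)
{
	uint32_t next = (uint32_t)prod_idx + num_wqebbs;

	if (next >= q_depth)
		next -= q_depth;
	return (uint16_t)next;
}
```

The per-entry allocations in hinic_hw_qp.c follow directly from the same value: every shadow array (saved_skb, cqe, cqe_dma) is simply q_depth elements long.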
|
| /linux/drivers/net/ethernet/amazon/ena/ |
| ena_eth_com.h |
    80   return io_sq->q_depth - 1 - cnt;  in ena_com_free_q_entries()
    179  need_update = unreported_comp > (io_cq->q_depth / ENA_COMP_HEAD_THRESH);  in ena_com_update_dev_comp_head()
    217  if (unlikely((io_cq->head & (io_cq->q_depth - 1)) == 0))  in ena_com_cq_inc_head()
    228  masked_head = io_cq->head & (io_cq->q_depth - 1);  in ena_com_tx_comp_req_id_get()
    246  if (unlikely(*req_id >= io_cq->q_depth)) {  in ena_com_tx_comp_req_id_get()
|
| ena_eth_com.c |
    15   head_masked = io_cq->head & (io_cq->q_depth - 1);  in ena_com_get_next_rx_cdesc()
    41   tail_masked = io_sq->tail & (io_sq->q_depth - 1);  in get_sq_desc_regular_queue()
    56   dst_tail_mask = io_sq->tail & (io_sq->q_depth - 1);  in ena_com_write_bounce_buffer_to_dev()
    84   if (unlikely((io_sq->tail & (io_sq->q_depth - 1)) == 0))  in ena_com_write_bounce_buffer_to_dev()
    221  if (unlikely((io_sq->tail & (io_sq->q_depth - 1)) == 0))  in ena_com_sq_update_tail()
    230  idx &= (io_cq->q_depth - 1);  in ena_com_rx_cdesc_idx_to_ptr()
    259  head_masked = io_cq->head & (io_cq->q_depth - 1);  in ena_com_cdesc_rx_pkt_get()
    548  u16 q_depth = io_cq->q_depth;  in ena_com_rx_pkt() local
    578  if (unlikely(ena_buf[i].req_id >= q_depth))  in ena_com_rx_pkt()
|
| ena_com.c |
    145   aenq->head = aenq->q_depth;  in ena_com_admin_init_aenq()
    185   command_id, admin_queue->q_depth);  in get_comp_ctxt()
    226   if (cnt >= admin_queue->q_depth) {  in __ena_com_submit_admin_cmd()
    1254  create_cmd.sq_depth = io_sq->q_depth;  in ena_com_create_io_sq()
    1293  io_sq->idx, io_sq->q_depth);  in ena_com_create_io_sq()
    1404  create_cmd.cq_depth = io_cq->q_depth;  in ena_com_create_io_cq()
    1441  io_cq->idx, io_cq->q_depth);  in ena_com_create_io_cq()
    1542  u16 depth = ena_dev->aenq.q_depth;  in ena_com_admin_aenq_enable()
    1704  size = ADMIN_AENQ_SIZE(aenq->q_depth);  in ena_com_admin_destroy()
    1891  io_cq->q_depth = ctx->queue_size;  in ena_com_create_io_queue()
    [all …]
|
| ena_com.h |
    130  u16 q_depth;  member
    174  u16 q_depth;  member
    223  u16 q_depth;  member
    253  u16 q_depth;  member
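In the ENA I/O path (ena_eth_com.h/.c above), head and tail are free-running counters that are masked with `q_depth - 1` only when a slot is accessed, the number of free submission entries is computed as `q_depth - 1 - cnt` (effectively keeping one slot in reserve), and every req_id coming back in a completion is validated against q_depth before it is used to index driver state. A compact sketch of those three idioms; the struct and field names are placeholders, not ENA's:

```c
#include <stdint.h>
#include <stdbool.h>

struct io_ring {
	uint16_t q_depth;	/* power of two                        */
	uint16_t tail;		/* free-running producer counter       */
	uint16_t next_to_comp;	/* free-running consumer counter       */
};

/* entries still available; one slot stays in reserve (q_depth - 1 - cnt) */
uint16_t ring_free_entries(const struct io_ring *r)
{
	uint16_t cnt = r->tail - r->next_to_comp;	/* wraps naturally on u16 */

	return r->q_depth - 1 - cnt;
}

/* descriptor slot for the current tail: the counter is masked, never reset */
uint16_t ring_tail_slot(const struct io_ring *r)
{
	return r->tail & (r->q_depth - 1);
}

/* completions carry a req_id; reject anything outside the ring, as
 * ena_com_tx_comp_req_id_get() and ena_com_rx_pkt() do */
bool ring_req_id_valid(const struct io_ring *r, uint16_t req_id)
{
	return req_id < r->q_depth;
}
```

The `(counter & (q_depth - 1)) == 0` checks at lines 84, 217 and 221 detect the moment the masked index wraps back to slot zero, which is where the driver presumably flips its phase/direction state for the ring.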
|
| /linux/drivers/net/ethernet/brocade/bna/ |
| bnad.c |
    91    u32 q_depth, u32 index)  in bnad_tx_buff_unmap() argument
    114   BNA_QE_INDX_INC(index, q_depth);  in bnad_tx_buff_unmap()
    126   BNA_QE_INDX_INC(index, q_depth);  in bnad_tx_buff_unmap()
    174   q_depth = tcb->q_depth;  in bnad_txcmpl_process()
    341   u32 alloced, prod, q_depth;  in bnad_rxq_refill_page() local
    350   q_depth = rcb->q_depth;  in bnad_rxq_refill_page()
    397   BNA_QE_INDX_INC(prod, q_depth);  in bnad_rxq_refill_page()
    424   q_depth = rcb->q_depth;  in bnad_rxq_refill_skb()
    453   BNA_QE_INDX_INC(prod, q_depth);  in bnad_rxq_refill_skb()
    2920  u32 prod, q_depth, vect_id;  in bnad_start_xmit() local
    [all …]
|
| bna_types.h |
    422  u32 q_depth;  member
    550  u32 q_depth;  member
    566  int q_depth;  member
    614  u32 q_depth;  member
|
| bfa_msgq.c |
    518  msgq_cfg->cmdq.q_depth = htons(msgq->cmdq.depth);  in bfa_msgq_init()
    520  msgq_cfg->rspq.q_depth = htons(msgq->rspq.depth);  in bfa_msgq_init()
|
| bfi.h |
    413  u16 q_depth; /* Total num of entries in the queue */  member
|
| bna_tx_rx.c |
    2385  q0->rcb->q_depth = rx_cfg->q0_depth;  in bna_rx_create()
    2386  q0->q_depth = rx_cfg->q0_depth;  in bna_rx_create()
    2412  q1->rcb->q_depth = rx_cfg->q1_depth;  in bna_rx_create()
    2413  q1->q_depth = rx_cfg->q1_depth;  in bna_rx_create()
    2444  rxp->cq.ccb->q_depth = cq_depth;  in bna_rx_create()
    3466  txq->tcb->q_depth = tx_cfg->txq_depth;  in bna_tx_create()
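In the Brocade driver, bnad.c steps through queue entries with BNA_QE_INDX_INC(index, q_depth), one increment per fragment unmapped or buffer refilled, while bna_tx_rx.c just copies the configured depths (q0_depth, q1_depth, txq_depth) into the rcb/ccb/tcb bookkeeping structures. The macro's definition is not visible in these hits; assuming the depths are powers of two, a mask-based stand-in could look like this (names hypothetical):

```c
#include <stdint.h>

/* hypothetical stand-in for BNA_QE_INDX_INC(): advance an index by one and
 * wrap it at q_depth. Assumes q_depth is a power of two; a modulo would
 * work for arbitrary depths. */
#define QE_INDX_INC(_idx, _q_depth)	\
	((_idx) = ((_idx) + 1) & ((_q_depth) - 1))

/* walk all fragments of one completed transmit, one queue entry each */
uint32_t unmap_walk(uint32_t index, uint32_t nr_frags, uint32_t q_depth)
{
	uint32_t i;

	for (i = 0; i < nr_frags; i++)
		QE_INDX_INC(index, q_depth);	/* one entry per fragment */
	return index;
}
```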
|
| /linux/drivers/nvme/host/ |
| pci.c |
    127   u32 q_depth;  member
    199   u32 q_depth;  member
    1470  int q_depth = dev->q_depth;  in nvme_cmb_qdepth() local
    1485  if (q_depth < 64)  in nvme_cmb_qdepth()
    1489  return q_depth;  in nvme_cmb_qdepth()
    1526  nvmeq->q_depth = depth;  in nvme_alloc_queue()
    2312  dev->q_depth = result;  in nvme_setup_io_queues()
    2549  dev->q_depth = 2;  in nvme_pci_enable()
    2552  dev->q_depth);  in nvme_pci_enable()
    2556  dev->q_depth = 64;  in nvme_pci_enable()
    [all …]
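The pci.c hits show q_depth being negotiated downwards: nvme_cmb_qdepth() starts from dev->q_depth and, if the depth that fits ends up below 64 (line 1485), gives up on placing queues in the controller memory buffer, while nvme_pci_enable() later forces dev->q_depth to 2 or 64, apparently as device-specific workarounds. A hedged sketch of the "shrink until it fits, but not below a floor" idea; the formula, names and constants here are illustrative, not the driver's exact computation:

```c
#include <stdint.h>

#define PAGE_SIZE	4096u
#define MIN_CMB_QDEPTH	64u	/* below this, fall back to host memory */

/* Illustrative only: shrink the per-queue depth until all I/O queues fit in a
 * fixed-size controller memory buffer; refuse depths below a floor, as the
 * "if (q_depth < 64)" check in nvme_cmb_qdepth() does. */
int cmb_qdepth(uint32_t want_depth, uint32_t nr_queues, uint32_t entry_size,
	       uint64_t cmb_size)
{
	uint64_t per_q = ((uint64_t)want_depth * entry_size + PAGE_SIZE - 1)
			 & ~(uint64_t)(PAGE_SIZE - 1);

	if (per_q * nr_queues > cmb_size) {
		/* divide the CMB evenly and see how many entries still fit */
		per_q = (cmb_size / nr_queues) & ~(uint64_t)(PAGE_SIZE - 1);
		want_depth = (uint32_t)(per_q / entry_size);
		if (want_depth < MIN_CMB_QDEPTH)
			return -1;	/* caller keeps queues in host RAM */
	}
	return (int)want_depth;
}
```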
|
| /linux/drivers/block/rsxx/ |
| cregs.c |
    133  card->creg_ctrl.q_depth--;  in creg_kick_queue()
    185  card->creg_ctrl.q_depth++;  in creg_queue_cmd()
    322  card->creg_ctrl.q_depth--;  in creg_reset()
    399  card->creg_ctrl.q_depth + 20000);  in __issue_creg_rw()
    706  card->creg_ctrl.q_depth++;  in rsxx_eeh_save_issued_creg()
|
| rsxx_priv.h |
    127  unsigned int q_depth;  member
|
| dma.c |
    190  u32 q_depth = 0;  in dma_intr_coal_auto_tune() local
    198  q_depth += atomic_read(&card->ctrl[i].stats.hw_q_depth);  in dma_intr_coal_auto_tune()
    201  q_depth / 2,  in dma_intr_coal_auto_tune()
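In rsxx, creg_ctrl.q_depth is a plain counter of queued control-register commands (incremented on queue, decremented on kick/reset), while dma_intr_coal_auto_tune() sums the hardware queue depth of every DMA channel and passes half of that total on to the coalescing configuration. A tiny sketch of that averaging step; names are hypothetical:

```c
#include <stdint.h>

/* Sum the per-channel hardware queue depths and return half of the total,
 * mirroring the "q_depth / 2" auto-tune target in dma_intr_coal_auto_tune().
 * Illustrative only: the real driver reads atomics from each ctrl[i]. */
uint32_t intr_coal_target(const uint32_t *hw_q_depth, unsigned int n_channels)
{
	uint32_t q_depth = 0;
	unsigned int i;

	for (i = 0; i < n_channels; i++)
		q_depth += hw_q_depth[i];

	return q_depth / 2;
}
```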
|
| /linux/drivers/scsi/bfa/ |
| bfi.h |
    538  #define BFI_MSGQ_FULL(_q) (((_q->pi + 1) % _q->q_depth) == _q->ci)
    540  #define BFI_MSGQ_UPDATE_CI(_q) (_q->ci = (_q->ci + 1) % _q->q_depth)
    541  #define BFI_MSGQ_UPDATE_PI(_q) (_q->pi = (_q->pi + 1) % _q->q_depth)
    544  #define BFI_MSGQ_FREE_CNT(_q) ((_q->ci - _q->pi - 1) & (_q->q_depth - 1))
    585  u16 q_depth; /* Total num of entries in the queue */  member
|
| bfa_fcpim.h |
    119  u16 q_depth;  member
|
| bfa_defs_svc.h |
    958  u16 q_depth; /* SCSI Queue depth */  member
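The bfa bfi.h macros above define the firmware message queue directly in terms of q_depth: the queue is full when advancing the producer index would land on the consumer index, and both indices wrap with a modulo. A short usage sketch of that convention, with a local queue type standing in for the firmware structure:

```c
#include <stdbool.h>
#include <stdint.h>

struct msgq {
	uint16_t pi;		/* producer index                 */
	uint16_t ci;		/* consumer index                 */
	uint16_t q_depth;	/* total number of entries        */
};

/* same convention as BFI_MSGQ_FULL(): the slot just before the consumer
 * index is deliberately left unused */
bool msgq_full(const struct msgq *q)
{
	return ((q->pi + 1) % q->q_depth) == q->ci;
}

/* returns false if the queue was full, otherwise claims one entry,
 * as BFI_MSGQ_UPDATE_PI() would */
bool msgq_produce(struct msgq *q)
{
	if (msgq_full(q))
		return false;
	q->pi = (q->pi + 1) % q->q_depth;
	return true;
}
```

Note that BFI_MSGQ_FREE_CNT() masks with `q_depth - 1` while the full/update macros use a modulo; the two only agree when q_depth is a power of two.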
|
| /linux/net/mac80211/ |
| debugfs_sta.c |
    257  u32 q_depth[IEEE80211_NUM_ACS];  in sta_aql_read() local
    270  q_depth[ac] = atomic_read(&sta->airtime[ac].aql_tx_pending);  in sta_aql_read()
    276  q_depth[0], q_depth[1], q_depth[2], q_depth[3],  in sta_aql_read()
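Here q_depth is not a ring size at all: sta_aql_read() snapshots the pending AQL airtime of each access category with atomic_read() and then formats all four values in one debugfs line. A minimal sketch of that snapshot-then-format pattern; the labels and units are illustrative:

```c
#include <stdint.h>
#include <stdio.h>

#define NUM_ACS	4	/* stand-in for IEEE80211_NUM_ACS */

/* illustrative: copy per-AC pending counters into a local array (the driver
 * uses atomic_read() for each), then print them side by side */
void print_ac_depths(const uint32_t pending[NUM_ACS])
{
	uint32_t q_depth[NUM_ACS];
	int ac;

	for (ac = 0; ac < NUM_ACS; ac++)
		q_depth[ac] = pending[ac];

	printf("Q depth: AC0 %u AC1 %u AC2 %u AC3 %u\n",
	       q_depth[0], q_depth[1], q_depth[2], q_depth[3]);
}
```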
|
| /linux/drivers/scsi/mpi3mr/ |
| mpi3mr_os.c |
    704   int q_depth)  in mpi3mr_change_queue_depth() argument
    711   q_depth = 1;  in mpi3mr_change_queue_depth()
    712   if (q_depth > shost->can_queue)  in mpi3mr_change_queue_depth()
    713   q_depth = shost->can_queue;  in mpi3mr_change_queue_depth()
    714   else if (!q_depth)  in mpi3mr_change_queue_depth()
    715   q_depth = MPI3MR_DEFAULT_SDEV_QD;  in mpi3mr_change_queue_depth()
    716   retval = scsi_change_queue_depth(sdev, q_depth);  in mpi3mr_change_queue_depth()
    741   mpi3mr_change_queue_depth(sdev, tgtdev->q_depth);  in mpi3mr_update_sdev()
    813   tgtdev->q_depth = le16_to_cpu(dev_pg0->queue_depth);  in mpi3mr_update_tgtdev()
    3188  mpi3mr_change_queue_depth(sdev, tgt_dev->q_depth);  in mpi3mr_slave_configure()
|
| mpi3mr.h |
    450  u16 q_depth;  member
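The mpi3mr hits show most of the per-device queue-depth policy: the per-target depth comes from the device page (dev_pg0->queue_depth), and mpi3mr_change_queue_depth() clamps any request to shost->can_queue and maps a request of 0 to MPI3MR_DEFAULT_SDEV_QD before calling scsi_change_queue_depth(). A sketch of that clamping order; the default constant here is a placeholder, not the driver's value:

```c
#define DEFAULT_SDEV_QD	32	/* placeholder for MPI3MR_DEFAULT_SDEV_QD */

/* clamp a requested per-device queue depth the way
 * mpi3mr_change_queue_depth() does: never above the host limit, and a
 * request of 0 means "use the default" */
int clamp_sdev_qdepth(int q_depth, int host_can_queue)
{
	if (q_depth > host_can_queue)
		q_depth = host_can_queue;
	else if (!q_depth)
		q_depth = DEFAULT_SDEV_QD;
	return q_depth;
}
```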
|
| /linux/drivers/s390/crypto/ |
| ap_bus.c |
    317  int *q_depth, int *q_ml, bool *q_decfg)  in ap_queue_info() argument
    357  *q_depth = tapq_info.tapq_gr2.qd;  in ap_queue_info()
|