
Identifier search: refs:rq_depth in linux-6.3-rc2 (results 1 – 25 of 26, sorted by relevance)


/linux-6.3-rc2/block/
blk-wbt.c
     94  struct rq_depth rq_depth;  member
    316  struct rq_depth *rqd = &rwb->rq_depth;  in latency_exceeded()
    369  struct rq_depth *rqd = &rwb->rq_depth;  in rwb_trace_step()
    379  } else if (rwb->rq_depth.max_depth <= 2) {  in calc_wb_limits()
    380  rwb->wb_normal = rwb->rq_depth.max_depth;  in calc_wb_limits()
    390  if (!rq_depth_scale_up(&rwb->rq_depth))  in scale_up()
    409  struct rq_depth *rqd = &rwb->rq_depth;  in rwb_arm_timer()
    434  struct rq_depth *rqd = &rwb->rq_depth;  in wb_timer_fn()
    491  struct rq_depth *rqd = &rwb->rq_depth;  in wbt_update_limits()
    567  limit = rwb->rq_depth.max_depth;  in get_limit()
    [all …]

blk-rq-qos.h
     51  struct rq_depth {  struct
     99  bool rq_depth_scale_up(struct rq_depth *rqd);
    100  bool rq_depth_scale_down(struct rq_depth *rqd, bool hard_throttle);
    101  bool rq_depth_calc_max_depth(struct rq_depth *rqd);

blk-rq-qos.c
    110  bool rq_depth_calc_max_depth(struct rq_depth *rqd)  in rq_depth_calc_max_depth()
    158  bool rq_depth_scale_up(struct rq_depth *rqd)  in rq_depth_scale_up()
    177  bool rq_depth_scale_down(struct rq_depth *rqd, bool hard_throttle)  in rq_depth_scale_down()
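The blk-rq-qos.h and blk-rq-qos.c hits show the shared struct rq_depth state that writeback throttling (blk-wbt.c) scales up and down through the rq_depth_scale_*() helpers. The standalone C sketch below models that idea; the field names follow the hits above, but the scaling arithmetic is a deliberate simplification for illustration, not the kernel's exact algorithm in rq_depth_calc_max_depth().

    #include <stdbool.h>
    #include <stdio.h>

    /* Field names mirror struct rq_depth in block/blk-rq-qos.h. */
    struct rq_depth {
            unsigned int max_depth;   /* currently allowed queue depth */
            int scale_step;           /* > 0 throttles, < 0 loosens */
            bool scaled_max;
            unsigned int queue_depth; /* device queue depth */
            unsigned int default_depth;
    };

    /*
     * Simplified stand-in for rq_depth_calc_max_depth(): derive
     * max_depth from scale_step by shifting the device depth and
     * clamping to at least 1. The real helper is more involved.
     */
    static bool rq_depth_calc_max_depth(struct rq_depth *rqd)
    {
            unsigned int depth = rqd->queue_depth;

            if (rqd->scale_step > 0)
                    depth >>= rqd->scale_step;  /* throttle harder */
            else if (rqd->scale_step < 0)
                    depth <<= -rqd->scale_step; /* loosen */

            rqd->max_depth = depth ? depth : 1;
            return true;
    }

    int main(void)
    {
            struct rq_depth rqd = { .queue_depth = 64, .scale_step = 2 };

            rq_depth_calc_max_depth(&rqd);
            printf("max_depth=%u\n", rqd.max_depth); /* prints 16 */
            return 0;
    }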
/linux-6.3-rc2/drivers/net/ethernet/fungible/funcore/
fun_queue.c
    175  for (i = 0; i < funq->rq_depth; i++) {  in fun_clean_rq()
    192  for (i = 0; i < funq->rq_depth; i++) {  in fun_fill_rq()
    209  funq->rq_tail = funq->rq_depth - 1;  in fun_fill_rq()
    222  if (++funq->rq_buf_idx == funq->rq_depth)  in fun_rq_update_pos()
    344  funq->rq_depth;  in fun_process_cq()
    396  fun_free_ring_mem(dev, funq->rq_depth, sizeof(*funq->rqes),  in fun_free_queue()
    419  if (req->rq_depth) {  in fun_alloc_queue()
    458  if (req->rq_depth) {  in fun_alloc_queue()
    460  funq->rq_depth = req->rq_depth;  in fun_alloc_queue()
    529  funq->rq_depth, funq->rq_dma_addr, 0, 0,  in fun_create_rq()
    [all …]

fun_queue.h
     47  u32 rq_depth;  member
    124  u32 rq_depth;  member

fun_dev.c
    233  .rq_depth = areq->rq_depth,  in fun_enable_admin_queue()
    281  if (areq->rq_depth) {  in fun_enable_admin_queue()
    583  if (cq_count < 2 || sq_count < 2 + !!fdev->admin_q->rq_depth)  in fun_get_dev_limits()

fun_dev.h
     94  u16 rq_depth;  member
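The fun_queue.c hits outline a standard ring cursor over rq_depth slots: fun_fill_rq() parks rq_tail at rq_depth - 1, and fun_rq_update_pos() wraps rq_buf_idx back to zero once it reaches rq_depth. A standalone model of that wraparound, with illustrative names, might look like this:

    #include <stdio.h>

    /* Minimal receive-queue ring cursor, as suggested by the hits
     * above; struct and field names are illustrative. */
    struct rq_ring {
            unsigned int rq_depth;   /* number of slots in the ring */
            unsigned int rq_buf_idx; /* next buffer slot to use */
            unsigned int rq_tail;    /* last slot posted to hardware */
    };

    /* Mirrors the wraparound in fun_rq_update_pos(). */
    static void rq_advance(struct rq_ring *rq)
    {
            if (++rq->rq_buf_idx == rq->rq_depth)
                    rq->rq_buf_idx = 0;
    }

    int main(void)
    {
            struct rq_ring rq = { .rq_depth = 4 };

            rq.rq_tail = rq.rq_depth - 1; /* as in fun_fill_rq() */
            for (int i = 0; i < 6; i++) {
                    rq_advance(&rq);
                    printf("buf_idx=%u\n", rq.rq_buf_idx);
            }
            return 0;
    }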
/linux-6.3-rc2/net/9p/
trans_rdma.c
     85  int rq_depth;  member
    126  int rq_depth;  member
    158  if (rdma->rq_depth != P9_RDMA_RQ_DEPTH)  in p9_rdma_show_options()
    159  seq_printf(m, ",rq=%u", rdma->rq_depth);  in p9_rdma_show_options()
    183  opts->rq_depth = P9_RDMA_RQ_DEPTH;  in parse_opts()
    220  opts->rq_depth = option;  in parse_opts()
    233  opts->rq_depth = max(opts->rq_depth, opts->sq_depth);  in parse_opts()
    582  rdma->rq_depth = opts->rq_depth;  in alloc_rdma()
    587  sema_init(&rdma->rq_sem, rdma->rq_depth);  in alloc_rdma()
    698  opts.sq_depth + opts.rq_depth + 1,  in rdma_create_trans()
    [all …]
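In the 9p RDMA transport, parse_opts() defaults rq_depth to P9_RDMA_RQ_DEPTH and, at line 233 above, raises it to at least sq_depth, presumably so every in-flight request can have a posted receive for its reply; alloc_rdma() then initializes rq_sem to that depth. A small sketch of the invariant, with illustrative default values:

    #include <stdio.h>

    /* Illustrative defaults; the kernel's actual values live in
     * net/9p/trans_rdma.c. */
    #define P9_RDMA_SQ_DEPTH 32
    #define P9_RDMA_RQ_DEPTH 32

    struct p9_rdma_opts {
            int sq_depth;
            int rq_depth;
    };

    /* Models the max(rq_depth, sq_depth) step at the end of
     * parse_opts(). */
    static void finalize_opts(struct p9_rdma_opts *opts)
    {
            if (opts->rq_depth < opts->sq_depth)
                    opts->rq_depth = opts->sq_depth;
    }

    int main(void)
    {
            struct p9_rdma_opts opts = {
                    .sq_depth = 2 * P9_RDMA_SQ_DEPTH, /* user asked for sq=64 */
                    .rq_depth = P9_RDMA_RQ_DEPTH,
            };

            finalize_opts(&opts);
            printf("rq_depth=%d\n", opts.rq_depth); /* raised to 64 */
            return 0;
    }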
/linux-6.3-rc2/net/sunrpc/xprtrdma/
svc_rdma_transport.c
    371  unsigned int ctxts, rq_depth;  in svc_rdma_accept() local
    406  rq_depth = newxprt->sc_max_requests + newxprt->sc_max_bc_requests +  in svc_rdma_accept()
    408  if (rq_depth > dev->attrs.max_qp_wr) {  in svc_rdma_accept()
    411  rq_depth = dev->attrs.max_qp_wr;  in svc_rdma_accept()
    413  newxprt->sc_max_requests = rq_depth - 2;  in svc_rdma_accept()
    419  newxprt->sc_sq_depth = rq_depth + ctxts;  in svc_rdma_accept()
    437  ib_alloc_cq_any(dev, newxprt, rq_depth, IB_POLL_WORKQUEUE);  in svc_rdma_accept()
    447  qp_attr.cap.max_recv_wr = rq_depth;  in svc_rdma_accept()
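svc_rdma_accept() sizes the receive queue from the regular and backchannel request counts, clamps it to the device's max_qp_wr, and recomputes sc_max_requests from the clamped depth. The sketch below models that clamping; the listing truncates the exact term added at line 406, so the "+ 2" here is only inferred from the "rq_depth - 2" recomputation and should be read as an assumption.

    #include <stdio.h>

    struct dev_attrs {
            unsigned int max_qp_wr; /* device limit on work requests */
    };

    /*
     * Approximation of the sizing logic in svc_rdma_accept(). The
     * "+ 2" extra slots are an assumption inferred from the
     * "sc_max_requests = rq_depth - 2" hit; the kernel's exact term
     * is truncated in the listing above.
     */
    static unsigned int size_rq(unsigned int max_requests,
                                unsigned int max_bc_requests,
                                const struct dev_attrs *attrs,
                                unsigned int *adj_max_requests)
    {
            unsigned int rq_depth = max_requests + max_bc_requests + 2;

            *adj_max_requests = max_requests;
            if (rq_depth > attrs->max_qp_wr) {
                    rq_depth = attrs->max_qp_wr; /* shrink to device limit */
                    *adj_max_requests = rq_depth - 2;
            }
            return rq_depth;
    }

    int main(void)
    {
            struct dev_attrs attrs = { .max_qp_wr = 128 };
            unsigned int max_requests;
            unsigned int rq_depth = size_rq(200, 8, &attrs, &max_requests);

            printf("rq_depth=%u max_requests=%u\n", rq_depth, max_requests);
            return 0;
    }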
/linux-6.3-rc2/drivers/net/ethernet/fungible/funeth/
funeth.h
     76  unsigned int rq_depth;  member
    116  unsigned int rq_depth;  member

funeth_ethtool.c
    566  ring->rx_pending = fp->rq_depth;  in fun_get_ringparam()
    594  fp->rq_depth == ring->rx_pending)  in fun_set_ringparam()
    600  .rq_depth = ring->rx_pending,  in fun_set_ringparam()
    610  fp->rq_depth = ring->rx_pending;  in fun_set_ringparam()
    611  fp->cq_depth = 2 * fp->rq_depth;  in fun_set_ringparam()

funeth_main.c
    509  qset->rq_depth, qset->rxq_start, qset->state);  in fun_alloc_rings()
    842  .rq_depth = fp->rq_depth,  in funeth_open()
   1643  .rq_depth = fp->rq_depth,  in fun_change_num_queues()
   1783  fp->rq_depth = min_t(unsigned int, RQ_DEPTH, fdev->q_depth);  in fun_create_netdev()
   2003  .rq_depth = ADMIN_RQ_DEPTH,  in funeth_probe()
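In funeth, ethtool's rx_pending maps one-to-one onto rq_depth, and fun_set_ringparam() sizes the completion queue at twice the receive queue (line 611 above). A minimal model of that mapping:

    #include <stdio.h>

    /* Illustrative stand-in for the funeth private params struct. */
    struct fun_params {
            unsigned int rq_depth;
            unsigned int cq_depth;
    };

    /* Mirrors the depth updates in fun_set_ringparam(). */
    static void fun_set_rx_ring(struct fun_params *fp, unsigned int rx_pending)
    {
            fp->rq_depth = rx_pending;
            fp->cq_depth = 2 * fp->rq_depth;
    }

    int main(void)
    {
            struct fun_params fp;

            fun_set_rx_ring(&fp, 512);
            printf("rq_depth=%u cq_depth=%u\n", fp.rq_depth, fp.cq_depth);
            return 0;
    }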
/linux-6.3-rc2/drivers/net/ethernet/huawei/hinic/
hinic_hw_dev.c
    271  unsigned int rq_depth)  in set_hw_ioctxt() argument
    285  hw_ioctxt.rq_depth = ilog2(rq_depth);  in set_hw_ioctxt()
    438  int hinic_hwdev_ifup(struct hinic_hwdev *hwdev, u16 sq_depth, u16 rq_depth)  in hinic_hwdev_ifup() argument
    462  func_to_io->rq_depth = rq_depth;  in hinic_hwdev_ifup()
    489  err = set_hw_ioctxt(hwdev, sq_depth, rq_depth);  in hinic_hwdev_ifup()

hinic_hw_io.h
     74  u16 rq_depth;  member

hinic_dev.h
    100  u16 rq_depth;  member

hinic_hw_dev.h
    286  u16 rq_depth;  member
    627  int hinic_hwdev_ifup(struct hinic_hwdev *hwdev, u16 sq_depth, u16 rq_depth);

hinic_ethtool.c
    557  ring->rx_pending = nic_dev->rq_depth;  in hinic_get_ringparam()
    600  new_rq_depth == nic_dev->rq_depth)  in hinic_set_ringparam()
    605  nic_dev->sq_depth, nic_dev->rq_depth,  in hinic_set_ringparam()
    609  nic_dev->rq_depth = new_rq_depth;  in hinic_set_ringparam()

hinic_hw_mbox.c
   1289  ((hw_ctxt)->rq_depth >= HINIC_QUEUE_MIN_DEPTH && \
   1290  (hw_ctxt)->rq_depth <= HINIC_QUEUE_MAX_DEPTH && \
   1300  if (!hw_ctxt->rq_depth && !hw_ctxt->sq_depth &&  in hw_ctxt_qps_param_valid()

hinic_hw_io.c
    293  func_to_io->rq_depth, HINIC_RQ_WQE_SIZE);  in init_qp()

hinic_main.c
    423  nic_dev->rq_depth);  in hinic_open()
   1213  nic_dev->rq_depth = HINIC_RQ_DEPTH;  in nic_dev_init()

hinic_port.c
    494  rq_num.rq_depth = ilog2(nic_dev->rq_depth);  in hinic_set_max_qnum()

hinic_port.h
    317  u32 rq_depth;  member
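The hinic driver keeps depths as raw u16 values but hands the hardware a log2-encoded depth (ilog2(rq_depth) in set_hw_ioctxt() and hinic_set_max_qnum()), while the mailbox path range-checks the raw depth against HINIC_QUEUE_MIN_DEPTH and HINIC_QUEUE_MAX_DEPTH. A userspace sketch of that encode-and-validate step, with placeholder bounds:

    #include <stdio.h>

    /* Placeholder bounds; the kernel's HINIC_QUEUE_{MIN,MAX}_DEPTH
     * values are defined elsewhere in the driver. */
    #define QUEUE_MIN_DEPTH 64
    #define QUEUE_MAX_DEPTH 4096

    /* Userspace equivalent of the kernel's ilog2() for powers of two. */
    static unsigned int ilog2_u32(unsigned int v)
    {
            unsigned int log = 0;

            while (v >>= 1)
                    log++;
            return log;
    }

    /* Validate the raw depth (as the mailbox check does), then encode
     * it the way set_hw_ioctxt() does before telling the hardware. */
    static int encode_rq_depth(unsigned int rq_depth, unsigned int *encoded)
    {
            if (rq_depth < QUEUE_MIN_DEPTH || rq_depth > QUEUE_MAX_DEPTH)
                    return -1;
            *encoded = ilog2_u32(rq_depth);
            return 0;
    }

    int main(void)
    {
            unsigned int enc;

            if (!encode_rq_depth(1024, &enc))
                    printf("rq_depth=1024 -> log2 encoding %u\n", enc); /* 10 */
            return 0;
    }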
/linux-6.3-rc2/drivers/infiniband/hw/efa/
efa_com_cmd.h
     26  u32 rq_depth;  member

efa_com_cmd.c
     32  params->rq_depth;  in efa_com_create_qp()
/linux-6.3-rc2/drivers/infiniband/hw/hns/
hns_roce_hw_v2.h
   1219  __le16 rq_depth;  member
