/drivers/net/ethernet/intel/libeth/
rx.c
     80  switch (fq->type) {  in libeth_rx_page_pool_params()
     82  fq->buf_len = libeth_rx_hw_len_mtu(pp, fq->buf_len);  in libeth_rx_page_pool_params()
     85  fq->buf_len = libeth_rx_hw_len_truesize(pp, fq->buf_len,  in libeth_rx_page_pool_params()
    124  switch (fq->type) {  in libeth_rx_page_pool_params_zc()
    140  fq->truesize = fq->buf_len;  in libeth_rx_page_pool_params_zc()
    158  .nid = fq->nid,  in libeth_rx_fq_create()
    169  if (!fq->hsplit)  in libeth_rx_fq_create()
    180  fqes = kvcalloc_node(fq->count, sizeof(*fqes), GFP_KERNEL, fq->nid);  in libeth_rx_fq_create()
    190  fq->fqes = fqes;  in libeth_rx_fq_create()
    191  fq->pp = pool;  in libeth_rx_fq_create()
    [all …]
|
xsk.c
    160  int libeth_xskfq_create(struct libeth_xskfq *fq)  in libeth_xskfq_create() argument
    162  fq->fqes = kvcalloc_node(fq->count, sizeof(*fq->fqes), GFP_KERNEL,  in libeth_xskfq_create()
    163  fq->nid);  in libeth_xskfq_create()
    164  if (!fq->fqes)  in libeth_xskfq_create()
    167  fq->pending = fq->count;  in libeth_xskfq_create()
    168  fq->thresh = libeth_xdp_queue_threshold(fq->count);  in libeth_xskfq_create()
    169  fq->buf_len = xsk_pool_get_rx_frame_size(fq->pool);  in libeth_xskfq_create()
    183  fq->buf_len = 0;  in libeth_xskfq_destroy()
    184  fq->thresh = 0;  in libeth_xskfq_destroy()
    185  fq->pending = 0;  in libeth_xskfq_destroy()
    [all …]
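Together, the rx.c and xsk.c hits sketch libeth's fill-queue contract: the driver describes the queue (count, NUMA node, XSk pool), and libeth allocates the FQE array with kvcalloc_node() and derives the threshold and buffer length. A minimal caller-side sketch for the XSk variant follows; the header path and any struct fields beyond those visible in the excerpts above are assumptions.

```c
#include <net/libeth/xsk.h>

/* Hedged sketch of a driver attaching an XSk pool to a libeth fill queue.
 * Field names are taken from the excerpts above; the header location and
 * everything else about the layout is an assumption. */
static int demo_xskfq_attach(struct libeth_xskfq *fq,
			     struct xsk_buff_pool *pool,
			     u32 count, int nid)
{
	int err;

	fq->pool  = pool;	/* XSk pool supplying the buffers */
	fq->count = count;	/* ring size; the threshold derives from it */
	fq->nid	  = nid;	/* FQE array is allocated on this node */

	err = libeth_xskfq_create(fq);
	if (err)
		return err;

	/* fq->buf_len now holds xsk_pool_get_rx_frame_size(pool) and
	 * fq->thresh the refill threshold libeth computed from count. */
	return 0;
}
```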
|
/drivers/soc/fsl/qbman/
qman.c
    1517  fq->cb.fqs(p, fq, msg);  in qm_mr_process_task()
    1524  fq->cb.fqs(p, fq, msg);  in qm_mr_process_task()
    1536  fq->cb.ern(p, fq, msg);  in qm_mr_process_task()
    1822  fq_table[fq->idx] = fq;  in qman_create_fq()
    1890  qm_fqid_set(&mcc->fq, fq->fqid);  in qman_init_fq()
    1990  qm_fqid_set(&mcc->fq, fq->fqid);  in qman_schedule_fq()
    2033  qm_fqid_set(&mcc->fq, fq->fqid);  in qman_retire_fq()
    2078  fq->cb.fqs(p, fq, &msg);  in qman_retire_fq()
    2112  qm_fqid_set(&mcc->fq, fq->fqid);  in qman_oos_fq()
    2138  qm_fqid_set(&mcc->fq, fq->fqid);  in qman_query_fq()
    [all …]
|
qman_test_api.c
    112  if (qman_enqueue(fq, &fd)) {  in do_enqueues()
    126  struct qman_fq *fq = &fq_base;  in qman_test_api() local
    133  err = qman_create_fq(0, FQ_FLAGS, fq);  in qman_test_api()
    144  err = do_enqueues(fq);  in qman_test_api()
    154  err = do_enqueues(fq);  in qman_test_api()
    173  err = do_enqueues(fq);  in qman_test_api()
    177  err = qman_schedule_fq(fq);  in qman_test_api()
    185  err = qman_retire_fq(fq, &flags);  in qman_test_api()
    196  err = qman_oos_fq(fq);  in qman_test_api()
    201  qman_destroy_fq(fq);  in qman_test_api()
    [all …]
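qman_test_api.c exercises the whole frame-queue lifecycle. A condensed sketch of that sequence using the public API from include/soc/fsl/qman.h; the flag choices and the zeroed init options are illustrative rather than the test's exact ones, and fq->cb (dqrr/ern/fqs callbacks) is assumed to have been wired up by the caller before creation.

```c
#include <soc/fsl/qman.h>

/* Condensed lifecycle from qman_test_api.c:
 * create -> init (parked) -> enqueue -> schedule -> retire -> OOS -> destroy.
 * Error handling is trimmed to the ordering constraints. */
static int fq_lifecycle_sketch(struct qman_fq *fq, const struct qm_fd *fd)
{
	struct qm_mcc_initfq opts = {};
	u32 flags;
	int err;

	/* fqid 0 + DYNAMIC_FQID: let QMan pick a free FQID */
	err = qman_create_fq(0, QMAN_FQ_FLAG_DYNAMIC_FQID, fq);
	if (err)
		return err;

	err = qman_init_fq(fq, 0, &opts);	/* initialised but parked */
	if (err)
		goto out_destroy;

	err = qman_enqueue(fq, fd);	/* frames queue up while parked */
	if (err)
		goto out_retire;

	err = qman_schedule_fq(fq);	/* now the dqrr callback can run */

out_retire:
	qman_retire_fq(fq, &flags);	/* real code waits if this is async */
	qman_oos_fq(fq);		/* must be out-of-service to destroy */
out_destroy:
	qman_destroy_fq(fq);
	return err;
}
```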
|
qman_test_stash.c
    275  struct qman_fq *fq,  in normal_dqrr() argument
    279  struct hp_handler *handler = (struct hp_handler *)fq;  in normal_dqrr()
    294  struct qman_fq *fq,  in special_dqrr() argument
    298  struct hp_handler *handler = (struct hp_handler *)fq;  in special_dqrr()
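Both DQRR callbacks recover their per-handler state with a plain cast from the struct qman_fq pointer, which is only correct while the FQ stays the first member of the handler. A standalone illustration of the pattern, with an assumed hp_handler layout and the layout-independent container_of() alternative:

```c
#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct qman_fq { unsigned int fqid; };

/* Assumed layout: the RX FQ must stay the *first* member for the plain
 * cast used by normal_dqrr()/special_dqrr() to be valid. */
struct hp_handler {
	struct qman_fq rx;	/* first member: plain-cast target */
	struct qman_fq tx;	/* casting &tx this way would be a bug */
	int processor_id;
};

static struct hp_handler *from_fq_cast(struct qman_fq *fq)
{
	return (struct hp_handler *)fq;		/* valid for ->rx only */
}

static struct hp_handler *from_fq_safe(struct qman_fq *fq)
{
	return container_of(fq, struct hp_handler, rx);	/* layout-proof */
}

int main(void)
{
	struct hp_handler h = { .rx = { 1 }, .tx = { 2 }, .processor_id = 3 };

	/* both recover the same handler from the embedded RX FQ */
	printf("%d %d\n", from_fq_cast(&h.rx)->processor_id,
	       from_fq_safe(&h.rx)->processor_id);
	return 0;
}
```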
|
qman_priv.h
    216  int qman_query_fq(struct qman_fq *fq, struct qm_fqd *fqd);
|
/drivers/crypto/caam/
qi.c
    253  ret = qman_retire_fq(fq, &flags);  in kill_fq()
    278  fq->fqid);  in kill_fq()
    283  ret = qman_oos_fq(fq);  in kill_fq()
    287  qman_destroy_fq(fq);  in kill_fq()
    288  kfree(fq);  in kill_fq()
    301  ret = qman_query_fq_np(fq, &np);  in empty_caam_fq()
    619  struct qman_fq *fq;  in alloc_rsp_fq_cpu() local
    622  fq = kzalloc(sizeof(*fq), GFP_KERNEL);  in alloc_rsp_fq_cpu()
    623  if (!fq)  in alloc_rsp_fq_cpu()
    632  kfree(fq);  in alloc_rsp_fq_cpu()
    [all …]
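kill_fq() follows the canonical QMan teardown order: retire the queue, wait out any asynchronous retirement, take it out of service, then destroy and free it. A hedged sketch of just that ordering (the real function's waiting and draining logic is elided):

```c
#include <linux/slab.h>
#include <soc/fsl/qman.h>

/* Sketch of the teardown order kill_fq() uses; the real function also
 * waits for the FQRN(I) message when retirement completes asynchronously
 * and drains pending frames. Error handling here is illustrative. */
static void kill_fq_sketch(struct qman_fq *fq)
{
	u32 flags;

	if (qman_retire_fq(fq, &flags) < 0)
		return;			/* retire failed; FQ still live */

	/* ... wait/drain here if retirement is asynchronous ... */

	if (qman_oos_fq(fq))
		return;			/* cannot destroy a non-OOS FQ */

	qman_destroy_fq(fq);
	kfree(fq);			/* alloc_rsp_fq_cpu() kzalloc'd it */
}
```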
|
/drivers/net/ethernet/freescale/dpaa/
dpaa_eth_trace.h
     32  struct qman_fq *fq,
     36  TP_ARGS(netdev, fq, fd),
     54  __entry->fqid = fq->fqid;
     78  struct qman_fq *fq,
     81  TP_ARGS(netdev, fq, fd)
     88  struct qman_fq *fq,
     91  TP_ARGS(netdev, fq, fd)
     98  struct qman_fq *fq,
    101  TP_ARGS(netdev, fq, fd)
|
dpaa_eth_sysfs.c
    33  struct dpaa_fq *fq;  in dpaa_eth_show_fqids() local
    39  list_for_each_entry_safe(fq, tmp, &priv->dpaa_fq_list, list) {  in dpaa_eth_show_fqids()
    40  switch (fq->fq_type) {  in dpaa_eth_show_fqids()
    66  if (prev && (abs(fq->fqid - prev->fqid) != 1 ||  in dpaa_eth_show_fqids()
    77  if (prev && abs(fq->fqid - prev->fqid) == 1 &&  in dpaa_eth_show_fqids()
    79  last_fqid = fq->fqid;  in dpaa_eth_show_fqids()
    81  first_fqid = fq->fqid;  in dpaa_eth_show_fqids()
    82  last_fqid = fq->fqid;  in dpaa_eth_show_fqids()
    85  prev = fq;  in dpaa_eth_show_fqids()
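dpaa_eth_show_fqids() compresses the FQ list into first..last ranges: a consecutive FQID of the same type extends the current run, anything else flushes it. A standalone model of the coalescing; the field names come from the excerpts, the output format is an assumption:

```c
#include <stdio.h>
#include <stdlib.h>

struct dpaa_fq { int fq_type; unsigned int fqid; };

/* Coalesce consecutive FQIDs of one type into "first - last" lines,
 * the way dpaa_eth_show_fqids() does while walking priv->dpaa_fq_list. */
static void show_fqids(const struct dpaa_fq *fqs, size_t n)
{
	unsigned int first_fqid = 0, last_fqid = 0;
	const struct dpaa_fq *prev = NULL;

	for (size_t i = 0; i < n; i++) {
		const struct dpaa_fq *fq = &fqs[i];

		/* a gap or a type change flushes the current run */
		if (prev && (abs((int)fq->fqid - (int)prev->fqid) != 1 ||
			     fq->fq_type != prev->fq_type)) {
			printf("type %d: %#x - %#x\n",
			       prev->fq_type, first_fqid, last_fqid);
			prev = NULL;
		}

		if (prev && abs((int)fq->fqid - (int)prev->fqid) == 1) {
			last_fqid = fq->fqid;	/* extend the run */
		} else {
			first_fqid = fq->fqid;	/* start a new run */
			last_fqid = fq->fqid;
		}
		prev = fq;
	}

	if (prev)
		printf("type %d: %#x - %#x\n",
		       prev->fq_type, first_fqid, last_fqid);
}

int main(void)
{
	/* prints "type 1: 0x100 - 0x102" then "type 2: 0x200 - 0x200" */
	const struct dpaa_fq fqs[] = {
		{ 1, 0x100 }, { 1, 0x101 }, { 1, 0x102 }, { 2, 0x200 },
	};

	show_fqids(fqs, sizeof(fqs) / sizeof(fqs[0]));
	return 0;
}
```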
|
dpaa_eth.c
     658  fq->wq = 1;  in dpaa_assign_wq()
     662  fq->wq = 5;  in dpaa_assign_wq()
     666  fq->wq = 6;  in dpaa_assign_wq()
     672  fq->wq = 6;  in dpaa_assign_wq()
     676  fq->wq = 2;  in dpaa_assign_wq()
     680  fq->wq = 1;  in dpaa_assign_wq()
     684  fq->wq = 0;  in dpaa_assign_wq()
     693  fq->fq_type, fq->fqid);  in dpaa_assign_wq()
     958  struct dpaa_fq *fq;  in dpaa_fq_setup() local
    1031  struct qman_fq *fq;  in dpaa_fq_init() local
    [all …]
|
/drivers/net/ethernet/freescale/dpaa2/
dpaa2-eth-debugfs.c
    47  static char *fq_type_to_str(struct dpaa2_eth_fq *fq)  in fq_type_to_str() argument
    49  switch (fq->type) {  in fq_type_to_str()
    62  struct dpaa2_eth_fq *fq;  in dpaa2_dbg_fqs_show() local
    71  fq = &priv->fq[i];  in dpaa2_dbg_fqs_show()
    72  err = dpaa2_io_query_fq_count(NULL, fq->fqid, &fcnt, &bcnt);  in dpaa2_dbg_fqs_show()
    77  if (!fq->stats.frames && !fcnt)  in dpaa2_dbg_fqs_show()
    81  fq->fqid,  in dpaa2_dbg_fqs_show()
    82  fq->target_cpu,  in dpaa2_dbg_fqs_show()
    83  fq->tc,  in dpaa2_dbg_fqs_show()
    84  fq_type_to_str(fq),  in dpaa2_dbg_fqs_show()
    [all …]
|
dpaa2-eth.c
     415  fq = &priv->fq[queue_id];  in dpaa2_eth_xdp_enqueue()
     739  fq->consume(priv, ch, fd, fq);  in dpaa2_eth_consume_frames()
    1483  fq = &priv->fq[queue_mapping];  in __dpaa2_eth_tx()
    2090  fq = &priv->fq[i];  in dpaa2_eth_set_rx_taildrop()
    2095  fq->tc, fq->flowid, &td);  in dpaa2_eth_set_rx_taildrop()
    2236  fq = &priv->fq[i];  in dpaa2_eth_ingress_fq_count()
    2880  fq = &priv->fq[smp_processor_id()];  in dpaa2_eth_xdp_xmit()
    2923  fq = &priv->fq[i % num_queues];  in update_xps()
    3322  fq = &priv->fq[i];  in dpaa2_eth_set_fq_affinity()
    3664  fq = &priv->fq[i];  in dpaa2_eth_update_tx_fqids()
    [all …]
|
dpaa2-xsk.c
     17  struct dpaa2_eth_fq *fq;  in dpaa2_eth_setup_consume_func() local
     21  fq = &priv->fq[i];  in dpaa2_eth_setup_consume_func()
     23  if (fq->type != type)  in dpaa2_eth_setup_consume_func()
     25  if (fq->channel != ch)  in dpaa2_eth_setup_consume_func()
     28  fq->consume = consume;  in dpaa2_eth_setup_consume_func()
    106  struct dpaa2_eth_fq *fq)  in dpaa2_xsk_rx() argument
    143  dpaa2_eth_receive_skb(priv, ch, fd, vaddr, fq, percpu_stats, skb);  in dpaa2_xsk_rx()
    399  struct dpaa2_eth_fq *fq;  in dpaa2_xsk_tx() local
    408  fq = &priv->fq[ch->nctx.desired_cpu];  in dpaa2_xsk_tx()
    431  err = priv->enqueue(priv, fq, &fds[total_enqueued], 0,  in dpaa2_xsk_tx()
    [all …]
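dpaa2-eth funnels every dequeued frame through a per-FQ consume hook (fq->consume(priv, ch, fd, fq) in dpaa2-eth.c above), and dpaa2_eth_setup_consume_func() rebinds that hook per FQ type and channel, which is how the XSK path substitutes dpaa2_xsk_rx(). A reduced standalone model of that dispatch, with the driver types cut down to stubs:

```c
#include <stdio.h>

struct fd { int len; };
struct eth_fq;

/* Per-FQ consume hook, mirroring the dpaa2_eth_fq ->consume member:
 * RX, TX-confirmation and XSK paths each install their own handler. */
typedef void (*consume_fn)(const struct fd *fd, struct eth_fq *fq);

struct eth_fq {
	int type;		/* RX, TX_CONF, ... */
	consume_fn consume;	/* rebound at XSK setup time */
};

static void rx_default(const struct fd *fd, struct eth_fq *fq)
{
	(void)fq;
	printf("slow path: %d bytes\n", fd->len);
}

static void rx_xsk(const struct fd *fd, struct eth_fq *fq)
{
	(void)fq;
	printf("zero-copy path: %d bytes\n", fd->len);
}

/* Rebind the hook for every FQ of a given type, like
 * dpaa2_eth_setup_consume_func() does per channel. */
static void setup_consume(struct eth_fq *fqs, int n, int type, consume_fn fn)
{
	for (int i = 0; i < n; i++)
		if (fqs[i].type == type)
			fqs[i].consume = fn;
}

int main(void)
{
	struct eth_fq fqs[2] = { { 0, rx_default }, { 0, rx_default } };
	struct fd fd = { 64 };

	setup_consume(fqs, 2, 0, rx_xsk);	/* switch to the XSK handler */
	fqs[0].consume(&fd, &fqs[0]);		/* dispatch as in consume_frames */
	return 0;
}
```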
|
dpaa2-switch.c
    2532  ethsw->fq[i].ethsw = ethsw;  in dpaa2_switch_setup_fqs()
    2536  ethsw->fq[i].ethsw = ethsw;  in dpaa2_switch_setup_fqs()
    2765  ethsw->fq[i].store =  in dpaa2_switch_alloc_rings()
    2768  if (!ethsw->fq[i].store) {  in dpaa2_switch_alloc_rings()
    2795  err = dpaa2_io_service_pull_fq(NULL, fq->fqid, fq->store);  in dpaa2_switch_pull_fq()
    2840  struct dpaa2_switch_fq *fq;  in dpaa2_switch_poll() local
    2880  struct dpaa2_switch_fq *fq;  in dpaa2_switch_fqdan_cb() local
    2884  napi_schedule(&fq->napi);  in dpaa2_switch_fqdan_cb()
    2894  nctx = &ethsw->fq[i].nctx;  in dpaa2_switch_setup_dpio()
    2901  nctx->id = ethsw->fq[i].fqid;  in dpaa2_switch_setup_dpio()
    [all …]
|
dpaa2-eth.h
    455  struct dpaa2_eth_fq *fq);
    553  struct dpaa2_eth_fq fq[DPAA2_ETH_MAX_QUEUES];  member
    555  struct dpaa2_eth_fq *fq,
    823  struct dpaa2_eth_fq *fq,
    830  struct dpaa2_eth_fq *fq);
    851  struct dpaa2_eth_fq *fq,
|
dpaa2-switch.h
    184  struct dpaa2_switch_fq fq[DPAA2_SWITCH_RX_NUM_FQS];  member
|
dpaa2-ethtool.c
    308  err = dpaa2_io_query_fq_count(NULL, priv->fq[j].fqid,  in dpaa2_eth_get_ethtool_stats()
    315  if (priv->fq[j].type == DPAA2_TX_CONF_FQ) {  in dpaa2_eth_get_ethtool_stats()
|
/drivers/iommu/
dma-iommu.c
    122  for ((i) = (fq)->head; (i) != (fq)->tail; (i) = ((i) + 1) & (fq)->mod_mask)
    127  return (((fq->tail + 1) & fq->mod_mask) == fq->head);  in fq_full()
    136  fq->tail = (idx + 1) & fq->mod_mask;  in fq_ring_add()
    160  fq->head = (fq->head + 1) & fq->mod_mask;  in fq_ring_free_locked()
    201  struct iova_fq *fq;  in queue_iova() local
    215  fq = cookie->single_fq;  in queue_iova()
    228  if (fq_full(fq)) {  in queue_iova()
    233  idx = fq_ring_add(fq);  in queue_iova()
    255  vfree(fq);  in iommu_dma_free_fq_single()
    289  fq->head = 0;  in iommu_dma_init_one_fq()
    [all …]
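The IOVA flush queue is a power-of-two ring indexed through mod_mask, so wrap-around, the full test, and the head/tail advances are each a single AND. A standalone model of exactly the arithmetic in the fragments above (the entry payload is omitted):

```c
#include <stdbool.h>
#include <stdio.h>

#define FQ_SIZE 8			/* must be a power of two */

struct iova_fq {
	unsigned int head, tail;
	unsigned int mod_mask;		/* FQ_SIZE - 1 */
};

/* Iterate the live entries, as the macro at dma-iommu.c:122 does */
#define fq_ring_for_each(i, fq) \
	for ((i) = (fq)->head; (i) != (fq)->tail; \
	     (i) = ((i) + 1) & (fq)->mod_mask)

/* One slot stays unused so that head == tail means empty while
 * (tail + 1) & mask == head means full, matching fq_full(). */
static bool fq_full(const struct iova_fq *fq)
{
	return ((fq->tail + 1) & fq->mod_mask) == fq->head;
}

static unsigned int fq_ring_add(struct iova_fq *fq)
{
	unsigned int idx = fq->tail;

	fq->tail = (idx + 1) & fq->mod_mask;	/* wrap is a single AND */
	return idx;				/* caller fills entry idx */
}

static void fq_ring_free_one(struct iova_fq *fq)
{
	fq->head = (fq->head + 1) & fq->mod_mask;	/* retire the oldest */
}

int main(void)
{
	struct iova_fq fq = { .head = 0, .tail = 0, .mod_mask = FQ_SIZE - 1 };
	unsigned int i, live = 0;

	while (!fq_full(&fq))
		fq_ring_add(&fq);
	fq_ring_for_each(i, &fq)
		live++;
	printf("full with %u live entries\n", live);	/* 7: one slot spare */
	fq_ring_free_one(&fq);
	printf("full after one free? %d\n", fq_full(&fq));	/* 0 */
	return 0;
}
```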
|
/drivers/net/ethernet/intel/idpf/
idpf_txrx.c
    401  struct libeth_fq fq = {  in idpf_rx_hdr_buf_rel_all() local
    420  struct libeth_fq fq = {  in idpf_rx_buf_rel_bufq() local
    447  struct libeth_fq fq = {  in idpf_rx_buf_rel_all() local
    597  bufq->hdr_pp = fq.pp;  in idpf_rx_hdr_buf_alloc_all()
    647  fq.pp = bufq->hdr_pp;  in idpf_rx_post_buf_desc()
    658  fq.pp = bufq->pp;  in idpf_rx_post_buf_desc()
    659  fq.fqes = bufq->buf;  in idpf_rx_post_buf_desc()
    738  rxq->pp = fq.pp;  in idpf_rx_bufs_init_singleq()
    739  rxq->rx_buf = fq.fqes;  in idpf_rx_bufs_init_singleq()
    796  bufq->pp = fq.pp;  in idpf_rx_bufs_init()
    [all …]
|
idpf_singleq_txrx.c
    847  const struct libeth_fq_fp fq = {  in idpf_rx_singleq_buf_hw_alloc_all() local
    863  addr = libeth_rx_alloc(&fq, nta);  in idpf_rx_singleq_buf_hw_alloc_all()
|
/drivers/net/ethernet/intel/iavf/
iavf_txrx.c
    744  struct libeth_fq fq = {  in iavf_free_rx_resources() local
    757  libeth_rx_fq_destroy(&fq);  in iavf_free_rx_resources()
    770  struct libeth_fq fq = {  in iavf_setup_rx_descriptors() local
    777  ret = libeth_rx_fq_create(&fq, &rx_ring->q_vector->napi);  in iavf_setup_rx_descriptors()
    781  rx_ring->pp = fq.pp;  in iavf_setup_rx_descriptors()
    782  rx_ring->rx_fqes = fq.fqes;  in iavf_setup_rx_descriptors()
    783  rx_ring->truesize = fq.truesize;  in iavf_setup_rx_descriptors()
    784  rx_ring->rx_buf_len = fq.buf_len;  in iavf_setup_rx_descriptors()
    806  libeth_rx_fq_destroy(&fq);  in iavf_setup_rx_descriptors()
    861  const struct libeth_fq_fp fq = {  in iavf_alloc_rx_buffers() local
    [all …]
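The iavf hits show libeth's intended calling convention end to end: fill a struct libeth_fq request, let libeth_rx_fq_create() build the page_pool and FQE array, then copy pp/fqes/truesize/buf_len into the ring; destruction hands the same pp/fqes pair back to libeth_rx_fq_destroy(). A hedged sketch with a stand-in ring struct (field names beyond those visible above are assumptions):

```c
#include <net/libeth/rx.h>

/* Sketch of the iavf_setup_rx_descriptors() pattern: describe the fill
 * queue, let libeth create the page_pool + FQE array, keep the results.
 * demo_ring is a stand-in, not iavf's ring structure. */
struct demo_ring {
	struct page_pool *pp;
	struct libeth_fqe *rx_fqes;
	u32 truesize;
	u32 count;
	u32 rx_buf_len;
};

static int demo_ring_init(struct demo_ring *ring, struct napi_struct *napi)
{
	struct libeth_fq fq = {
		.count	= ring->count,
		.nid	= NUMA_NO_NODE,
	};
	int ret;

	ret = libeth_rx_fq_create(&fq, napi);
	if (ret)
		return ret;

	ring->pp	 = fq.pp;	/* page_pool backing the buffers */
	ring->rx_fqes	 = fq.fqes;	/* per-descriptor buffer state */
	ring->truesize	 = fq.truesize;
	ring->rx_buf_len = fq.buf_len;	/* HW buffer length libeth chose */
	return 0;
}

static void demo_ring_deinit(struct demo_ring *ring)
{
	struct libeth_fq fq = {
		.fqes	= ring->rx_fqes,
		.pp	= ring->pp,
	};

	libeth_rx_fq_destroy(&fq);	/* frees the FQEs and the page_pool */
	ring->rx_fqes = NULL;
	ring->pp = NULL;
}
```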
|
/drivers/net/ethernet/marvell/mvpp2/
mvpp2.h
    799  #define MSS_RXQ_TRESH_REG(q, fq) (MSS_RXQ_TRESH_BASE + (((q) + (fq)) \  argument
    828  #define MSS_RXQ_ASS_Q_BASE(q, fq) ((((q) + (fq)) % MSS_RXQ_ASS_PER_REG) \  argument
    830  #define MSS_RXQ_ASS_PQ_BASE(q, fq) ((((q) + (fq)) / MSS_RXQ_ASS_PER_REG) \  argument
    832  #define MSS_RXQ_ASS_REG(q, fq) (MSS_RXQ_ASS_BASE + MSS_RXQ_ASS_PQ_BASE(q, fq))  argument
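The MSS_RXQ_ASS_* macros pack several RX queues into each 32-bit register: the global queue number (q + fq, where fq is port->first_rxq in mvpp2_main.c below) selects the register by division and the bit field within it by modulo. A standalone illustration of that arithmetic; MSS_RXQ_ASS_PER_REG, the field width, and the base offset are assumed values for demonstration only:

```c
#include <stdio.h>

/* Model of the MSS_RXQ_ASS_* addressing: the global RX queue number
 * (q + fq) is split into a register index (division, 4-byte stride)
 * and a bit-field offset (modulo). The constants below are assumptions. */
#define MSS_RXQ_ASS_PER_REG	4	/* queue fields per register */
#define MSS_RXQ_ASS_Q_BITS	8	/* assumed field width in bits */
#define MSS_RXQ_ASS_BASE	0x80	/* assumed register base offset */

static unsigned int ass_reg(unsigned int q, unsigned int fq)
{
	/* MSS_RXQ_ASS_REG: base + ((q + fq) / PER_REG) * 0x4 */
	return MSS_RXQ_ASS_BASE + ((q + fq) / MSS_RXQ_ASS_PER_REG) * 0x4;
}

static unsigned int ass_q_shift(unsigned int q, unsigned int fq)
{
	/* MSS_RXQ_ASS_Q_BASE: bit offset of this queue's field */
	return ((q + fq) % MSS_RXQ_ASS_PER_REG) * MSS_RXQ_ASS_Q_BITS;
}

int main(void)
{
	unsigned int fq = 8;	/* e.g. port->first_rxq */

	for (unsigned int q = 0; q < 6; q++)
		printf("rxq %u -> reg 0x%x, shift %u\n",
		       q + fq, ass_reg(q, fq), ass_q_shift(q, fq));
	return 0;
}
```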
|
mvpp2_main.c
    799  int fq = port->first_rxq;  in mvpp2_rxq_enable_fc() local
    817  mvpp2_cm3_write(port->priv, MSS_RXQ_TRESH_REG(q, fq), val);  in mvpp2_rxq_enable_fc()
    819  val = mvpp2_cm3_read(port->priv, MSS_RXQ_ASS_REG(q, fq));  in mvpp2_rxq_enable_fc()
    822  val |= (port->id << MSS_RXQ_ASS_Q_BASE(q, fq));  in mvpp2_rxq_enable_fc()
    823  val &= ~(MSS_RXQ_ASS_HOSTID_MASK << (MSS_RXQ_ASS_Q_BASE(q, fq)  in mvpp2_rxq_enable_fc()
    841  val |= (host_id << (MSS_RXQ_ASS_Q_BASE(q, fq)  in mvpp2_rxq_enable_fc()
    844  mvpp2_cm3_write(port->priv, MSS_RXQ_ASS_REG(q, fq), val);  in mvpp2_rxq_enable_fc()
    861  int fq = port->first_rxq;  in mvpp2_rxq_disable_fc() local
    878  mvpp2_cm3_write(port->priv, MSS_RXQ_TRESH_REG(q, fq), val);  in mvpp2_rxq_disable_fc()
    880  val = mvpp2_cm3_read(port->priv, MSS_RXQ_ASS_REG(q, fq));  in mvpp2_rxq_disable_fc()
    [all …]
|
/drivers/media/i2c/
hi846.c
    1974  s64 fq;  in hi846_parse_dt() local
    2006  fq = hi846_check_link_freqs(hi846, &bus_cfg);  in hi846_parse_dt()
    2007  if (fq) {  in hi846_parse_dt()
    2008  dev_err(dev, "Link frequency of %lld is not supported\n", fq);  in hi846_parse_dt()
|
imx290.c
    1528  s64 fq;  in imx290_parse_dt() local
    1565  fq = imx290_check_link_freqs(imx290, &ep);  in imx290_parse_dt()
    1566  if (fq) {  in imx290_parse_dt()
    1568  fq);  in imx290_parse_dt()
|