Search hits for the identifier "rxq" across linux-6.3-rc2, grouped by directory and file. Each hit shows the source line number, the matching line, and the enclosing function; per-file listings are truncated where marked.

/linux-6.3-rc2/drivers/net/wwan/t7xx/

t7xx_hif_dpmaif_rx.c
      83  struct dpmaif_rx_queue *rxq = &dpmaif_ctrl->rxq[q_num];  in t7xx_dpmaif_update_bat_wr_idx() local
     585  ret = t7xx_dpmaif_rx_buf_alloc(rxq->dpmaif_ctrl, rxq->bat_req, rxq->index, bid_cnt, false);  in t7xx_dpmaif_bat_release_and_add()
     816  struct dpmaif_rx_queue *rxq = &dpmaif_ctrl->rxq[q_num];  in t7xx_dpmaif_napi_rx_data_collect() local
     901  rxq = &dpmaif_ctrl->rxq[qno];  in t7xx_dpmaif_irq_rx_done()
    1022  rxq->bat_req = &rxq->dpmaif_ctrl->bat_req;  in t7xx_dpmaif_rx_alloc()
    1025  rxq->bat_frag = &rxq->dpmaif_ctrl->bat_frag;  in t7xx_dpmaif_rx_alloc()
    1035  t7xx_dpmaif_bat_free(rxq->dpmaif_ctrl, rxq->bat_req);  in t7xx_dpmaif_rx_buf_free()
    1036  t7xx_dpmaif_bat_free(rxq->dpmaif_ctrl, rxq->bat_frag);  in t7xx_dpmaif_rx_buf_free()
    1041  rxq->pit_base, rxq->pit_bus_addr);  in t7xx_dpmaif_rx_buf_free()
    1073  rxq = &dpmaif_ctrl->rxq[DPF_RX_QNO_DFT];  in t7xx_dpmaif_bat_release_work()
    [all …]

t7xx_hif_dpmaif.c
     267  rx_q = &dpmaif_ctrl->rxq[i];  in t7xx_dpmaif_rxtx_sw_allocs()
     303  struct dpmaif_rx_queue *rxq;  in t7xx_dpmaif_start() local
     314  rxq = &dpmaif_ctrl->rxq[i];  in t7xx_dpmaif_start()
     315  rxq->que_started = true;  in t7xx_dpmaif_start()
     316  rxq->index = i;  in t7xx_dpmaif_start()
     317  rxq->budget = rxq->bat_req->bat_size_cnt - 1;  in t7xx_dpmaif_start()
     372  t7xx_dpmaif_bat_free(rxq->dpmaif_ctrl, rxq->bat_frag);  in t7xx_dpmaif_start()
     375  t7xx_dpmaif_bat_free(rxq->dpmaif_ctrl, rxq->bat_req);  in t7xx_dpmaif_start()
     432  struct dpmaif_rx_queue *rxq;  in t7xx_dpmaif_start_txrx_qs() local
     442  rxq = &dpmaif_ctrl->rxq[que_cnt];  in t7xx_dpmaif_start_txrx_qs()
    [all …]
/linux-6.3-rc2/drivers/net/ethernet/huawei/hinic/

hinic_rx.c
     138  skb = netdev_alloc_skb_ip_align(rxq->netdev, rxq->rq->buf_sz);  in rx_alloc_skb()
     447  rx_alloc_pkts(rxq);  in rxq_recv()
     482  netif_napi_add_weight(rxq->netdev, &rxq->napi, rx_poll,  in rx_add_napi()
     525  rx_add_napi(rxq);  in rx_request_irq()
     545  err = request_irq(rq->irq, rx_irq, 0, rxq->irq_name, rxq);  in rx_request_irq()
     559  rx_del_napi(rxq);  in rx_request_irq()
     569  rx_del_napi(rxq);  in rx_free_irq()
     587  rxq->rq = rq;  in hinic_init_rxq()
     591  rxq_stats_init(rxq);  in hinic_init_rxq()
     595  if (!rxq->irq_name)  in hinic_init_rxq()
    [all …]
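The hinic hits show the common driver pattern of binding a NAPI context to an RX queue before requesting its interrupt, and tearing the NAPI context down again when request_irq() fails. Below is a minimal kernel-style sketch of that pattern; the my_rxq structure, my_rx_poll(), and the weight of 64 are illustrative assumptions, not hinic's actual definitions.

#include <linux/netdevice.h>
#include <linux/interrupt.h>

/* Hypothetical per-queue context; hinic's real struct differs. */
struct my_rxq {
    struct net_device *netdev;
    struct napi_struct napi;
    int irq;
    char irq_name[32];
};

static int my_rx_poll(struct napi_struct *napi, int budget)
{
    int work_done = 0;

    /* ... consume up to @budget completed RX descriptors ... */

    if (work_done < budget)
        napi_complete_done(napi, work_done);
    return work_done;
}

static int my_rx_request_irq(struct my_rxq *rxq, irq_handler_t handler)
{
    int err;

    /* Register and enable NAPI first, as rx_add_napi() does. */
    netif_napi_add_weight(rxq->netdev, &rxq->napi, my_rx_poll, 64);
    napi_enable(&rxq->napi);

    err = request_irq(rxq->irq, handler, 0, rxq->irq_name, rxq);
    if (err) {
        /* Mirrors rx_del_napi() on the error path. */
        napi_disable(&rxq->napi);
        netif_napi_del(&rxq->napi);
    }
    return err;
}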
hinic_rx.h
      44  void hinic_rxq_get_stats(struct hinic_rxq *rxq, struct hinic_rxq_stats *stats);
      46  int hinic_init_rxq(struct hinic_rxq *rxq, struct hinic_rq *rq,
      49  void hinic_clean_rxq(struct hinic_rxq *rxq);
/linux-6.3-rc2/drivers/net/wireless/intel/iwlwifi/pcie/

rx.c
     130  return (rxq->read - rxq->write - 1) & (rxq->queue_size - 1);  in iwl_rxq_space()
     192  rxq->write_actual = round_down(rxq->write, 8);  in iwl_pcie_rxq_inc_wr_ptr()
     209  struct iwl_rxq *rxq = &trans_pcie->rxq[i];  in iwl_pcie_rxq_check_wrptr() local
     238  (u32)rxb->vid, rxq->id, rxq->write);  in iwl_pcie_restock_bd()
     272  rxq->write = (rxq->write + 1) & (rxq->queue_size - 1);  in iwl_pcie_rxmq_restock()
     311  rxb = rxq->queue[rxq->write];  in iwl_pcie_rxsq_restock()
     322  rxq->queue[rxq->write] = rxb;  in iwl_pcie_rxsq_restock()
     686  rxq->bd, rxq->bd_dma);  in iwl_pcie_free_rxq_dma()
     697  rxq->used_bd, rxq->used_bd_dma);  in iwl_pcie_free_rxq_dma()
     749  struct iwl_rxq *rxq = &trans_pcie->rxq[i];  in iwl_pcie_alloc_rxq_dma() local
    [all …]
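The iwl_rxq_space() hit at line 130 is the standard power-of-two ring-buffer bookkeeping: read and write indices are masked with queue_size - 1, and one slot is kept empty so a full ring can be distinguished from an empty one. A small stand-alone sketch of the same arithmetic, with illustrative names and a fixed RING_SIZE rather than iwlwifi's structures:

#include <assert.h>
#include <stdio.h>

#define RING_SIZE 256u              /* must be a power of two */

struct ring {
    unsigned int read;              /* consumer index */
    unsigned int write;             /* producer index */
};

/* Free slots, keeping one slot empty so "full" != "empty". */
static unsigned int ring_space(const struct ring *r)
{
    return (r->read - r->write - 1) & (RING_SIZE - 1);
}

static void ring_push(struct ring *r)
{
    assert(ring_space(r) > 0);
    r->write = (r->write + 1) & (RING_SIZE - 1);
}

int main(void)
{
    struct ring r = { .read = 0, .write = 0 };

    printf("space when empty: %u\n", ring_space(&r));    /* 255 */
    ring_push(&r);
    printf("space after one push: %u\n", ring_space(&r)); /* 254 */
    return 0;
}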
/linux-6.3-rc2/drivers/infiniband/hw/hfi1/

netdev_rx.c
     194  rx->rxq = kcalloc_node(rx->num_rx_q, sizeof(*rx->rxq),  in hfi1_netdev_rxq_init()
     197  if (!rx->rxq) {  in hfi1_netdev_rxq_init()
     203  struct hfi1_netdev_rxq *rxq = &rx->rxq[i];  in hfi1_netdev_rxq_init() local
     210  rxq->rx = rx;  in hfi1_netdev_rxq_init()
     211  rxq->rcd->napi = &rxq->napi;  in hfi1_netdev_rxq_init()
     230  struct hfi1_netdev_rxq *rxq = &rx->rxq[i];  in hfi1_netdev_rxq_init() local
     238  kfree(rx->rxq);  in hfi1_netdev_rxq_init()
     239  rx->rxq = NULL;  in hfi1_netdev_rxq_init()
     250  struct hfi1_netdev_rxq *rxq = &rx->rxq[i];  in hfi1_netdev_rxq_deinit() local
     268  struct hfi1_netdev_rxq *rxq = &rx->rxq[i];  in enable_queues() local
    [all …]

vnic_main.c
     295  struct hfi1_vnic_vport_info *vinfo = rxq->vinfo;  in hfi1_vnic_decap_skb()
     303  vinfo->stats[rxq->idx].rx_oversize++;  in hfi1_vnic_decap_skb()
     305  vinfo->stats[rxq->idx].rx_runt++;  in hfi1_vnic_decap_skb()
     336  struct hfi1_vnic_rx_queue *rxq;  in hfi1_vnic_bypass_rcv() local
     370  rxq = &vinfo->rxq[q_idx];  in hfi1_vnic_bypass_rcv()
     389  rc = hfi1_vnic_decap_skb(rxq, skb);  in hfi1_vnic_bypass_rcv()
     401  napi_gro_receive(&rxq->napi, skb);  in hfi1_vnic_bypass_rcv()
     599  struct hfi1_vnic_rx_queue *rxq = &vinfo->rxq[i];  in hfi1_vnic_alloc_rn() local
     601  rxq->idx = i;  in hfi1_vnic_alloc_rn()
     602  rxq->vinfo = vinfo;  in hfi1_vnic_alloc_rn()
    [all …]
/linux-6.3-rc2/drivers/net/ethernet/qlogic/qede/

qede_fp.c
      57  sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_prod & NUM_RX_BDS_MAX];  in qede_alloc_rx_buffer()
      69  rxq->sw_rx_prod++;  in qede_alloc_rx_buffer()
     518  rxq->sw_rx_cons++;  in qede_rx_bd_ring_consume()
     531  curr_prod = &rxq->sw_rx_ring[rxq->sw_rx_prod & NUM_RX_BDS_MAX];  in qede_reuse_page()
     552  curr_cons = &rxq->sw_rx_ring[rxq->sw_rx_cons & NUM_RX_BDS_MAX];  in qede_recycle_rx_bd_ring()
     668  struct sw_rx_data *current_bd = &rxq->sw_rx_ring[rxq->sw_rx_cons &  in qede_fill_frag_skb()
     976  struct qede_rx_queue *rxq = fp->rxq;  in qede_tpa_end() local
    1090  xdp_init_buff(&xdp, rxq->rx_buf_seg_size, &rxq->xdp_rxq);  in qede_rx_xdp()
    1184  u16 cur_size = pkt_len > rxq->rx_buf_size ? rxq->rx_buf_size :  in qede_rx_build_jumbo()
    1341  struct qede_rx_queue *rxq = fp->rxq;  in qede_rx_int() local
    [all …]
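The qede hits illustrate a software-ring convention in which sw_rx_prod and sw_rx_cons are free-running counters, masked only at the moment they are used as array indices (via NUM_RX_BDS_MAX above). A stand-alone sketch of that convention, assuming a power-of-two ring whose size divides the counter range; the struct and constant names are illustrative, not qede's:

#include <stdint.h>
#include <stdio.h>

#define RING_SIZE 8192u
#define RING_MASK (RING_SIZE - 1)   /* plays the role of NUM_RX_BDS_MAX */

struct sw_rx_data {
    void *page;
};

struct rx_queue {
    struct sw_rx_data ring[RING_SIZE];
    uint16_t prod;                  /* free-running producer counter */
    uint16_t cons;                  /* free-running consumer counter */
};

static struct sw_rx_data *rxq_next_prod(struct rx_queue *rxq)
{
    /* Mask only when indexing; the counter itself keeps running. */
    struct sw_rx_data *slot = &rxq->ring[rxq->prod & RING_MASK];

    rxq->prod++;                    /* wraps naturally at 2^16 */
    return slot;
}

static unsigned int rxq_fill_level(const struct rx_queue *rxq)
{
    /* Difference of free-running counters stays correct across wrap,
     * as long as RING_SIZE divides the counter range.
     */
    return (uint16_t)(rxq->prod - rxq->cons);
}

int main(void)
{
    struct rx_queue rxq = { .prod = 0xfffe, .cons = 0xfff0 };

    rxq_next_prod(&rxq);
    printf("fill level: %u\n", rxq_fill_level(&rxq));    /* 15 */
    return 0;
}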
qede_main.c
     948  if (fp->rxq && xdp_rxq_info_is_reg(&fp->rxq->xdp_rxq))  in qede_free_fp_array()
     950  kfree(fp->rxq);  in qede_free_fp_array()
    1019  fp->rxq = kzalloc(sizeof(*fp->rxq), GFP_KERNEL);  in qede_alloc_fp_array()
    1020  if (!fp->rxq)  in qede_alloc_fp_array()
    1491  for (i = rxq->sw_rx_cons; i != rxq->sw_rx_prod; i++) {  in qede_free_rx_buffers()
    1512  kfree(rxq->sw_rx_ring);  in qede_free_mem_rxq()
    1545  size = rxq->rx_headroom +  in qede_alloc_mem_rxq()
    1566  if (!rxq->sw_rx_ring) {  in qede_alloc_mem_rxq()
    1591  rxq->filled_buffers = 0;  in qede_alloc_mem_rxq()
    1603  qede_set_tpa_param(rxq);  in qede_alloc_mem_rxq()
    [all …]
/linux-6.3-rc2/drivers/net/ethernet/microsoft/mana/

mana_en.c
    1130  if (rxq->buf_index == rxq->num_rx_buf)  in mana_post_pkt_rxq()
    1267  rxbuf_oob = &rxq->rx_oobs[rxq->buf_index];  in mana_process_rx_cqe()
    1290  rxq->gdma_id, cq->gdma_id, rxq->rxobj);  in mana_process_rx_cqe()
    1343  struct mana_rxq *rxq = cq->rxq;  in mana_poll_rx_cq() local
    1588  if (!rxq)  in mana_destroy_rxq()
    1625  kfree(rxq);  in mana_destroy_rxq()
    1721  if (!rxq)  in mana_create_rxq()
    1749  cq->rxq = rxq;  in mana_create_rxq()
    1783  rxq->gdma_id = rxq->gdma_rq->id;  in mana_create_rxq()
    1809  return rxq;  in mana_create_rxq()
    [all …]

mana_bpf.c
      80  u32 mana_run_xdp(struct net_device *ndev, struct mana_rxq *rxq,  in mana_run_xdp() argument
      88  prog = rcu_dereference(rxq->bpf_prog);  in mana_run_xdp()
      93  xdp_init_buff(xdp, PAGE_SIZE, &rxq->xdp_rxq);  in mana_run_xdp()
      98  rx_stats = &rxq->stats;  in mana_run_xdp()
     107  rxq->xdp_rc = xdp_do_redirect(ndev, xdp, prog);  in mana_run_xdp()
     108  if (!rxq->xdp_rc) {  in mana_run_xdp()
     109  rxq->xdp_flush = true;  in mana_run_xdp()
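The mana_run_xdp() hits outline the usual driver-side XDP sequence: look up the attached program under RCU, build an xdp_buff over the received frame, run the program, and remember that a flush is needed after a successful redirect. A condensed kernel-style sketch of that sequence follows; my_rxq and its fields are assumptions, only the XDP helpers (xdp_init_buff(), xdp_prepare_buff(), bpf_prog_run_xdp(), xdp_do_redirect()) are real kernel APIs, and the headroom handling is simplified relative to mana.

#include <linux/bpf.h>
#include <linux/filter.h>
#include <linux/mm.h>
#include <net/xdp.h>

/* Hypothetical per-queue context, not mana's struct mana_rxq. */
struct my_rxq {
    struct bpf_prog __rcu *bpf_prog;
    struct xdp_rxq_info xdp_rxq;
    bool xdp_flush;
};

static u32 my_run_xdp(struct net_device *ndev, struct my_rxq *rxq,
                      struct xdp_buff *xdp, void *buf, unsigned int len)
{
    struct bpf_prog *prog;
    u32 act = XDP_PASS;

    rcu_read_lock();
    prog = rcu_dereference(rxq->bpf_prog);
    if (!prog)
        goto out;

    /* Describe the received frame to the XDP program. */
    xdp_init_buff(xdp, PAGE_SIZE, &rxq->xdp_rxq);
    xdp_prepare_buff(xdp, buf, XDP_PACKET_HEADROOM, len, false);

    act = bpf_prog_run_xdp(prog, xdp);
    if (act == XDP_REDIRECT && !xdp_do_redirect(ndev, xdp, prog))
        rxq->xdp_flush = true;  /* call xdp_do_flush() once per NAPI poll */
out:
    rcu_read_unlock();
    return act;
}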
/linux-6.3-rc2/drivers/net/ethernet/marvell/

mvneta.c
     134  #define MVNETA_CPU_RXQ_ACCESS(rxq) BIT(rxq)  argument
     921  prefetch(rxq->descs + rxq->next_desc_to_proc);  in mvneta_rxq_next_desc_get()
    1494  for (rxq = 0; rxq < rxq_number; rxq++)  in mvneta_defaults_set()
    2047  rxq->id, i, rxq->refill_num);  in mvneta_rx_refill_queue()
    3334  if (mvneta_rx_refill(pp, rxq->descs + i, rxq,  in mvneta_rxq_fill()
    3384  rxq->last_desc = rxq->size - 1;  in mvneta_rxq_sw_init()
    3397  mvneta_rx_pkts_coal_set(pp, rxq, rxq->pkts_coal);  in mvneta_rxq_hw_init()
    3398  mvneta_rx_time_coal_set(pp, rxq, rxq->time_coal);  in mvneta_rxq_hw_init()
    3407  mvneta_rxq_fill(pp, rxq, rxq->size);  in mvneta_rxq_hw_init()
    4280  int rxq;  in mvneta_percpu_elect() local
    [all …]
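MVNETA_CPU_RXQ_ACCESS(rxq) at line 134 builds a one-bit-per-queue mask that mvneta later writes to a per-CPU queue-access register. A trivial stand-alone sketch of that idiom; the macro names and the chosen queues are illustrative, not mvneta's register layout:

#include <stdio.h>

#define BIT(n)              (1UL << (n))
#define CPU_RXQ_ACCESS(rxq) BIT(rxq)

int main(void)
{
    unsigned long mask = 0;
    int rxq;

    /* Give this CPU access to queues 0, 2 and 5. */
    for (rxq = 0; rxq < 8; rxq++)
        if (rxq == 0 || rxq == 2 || rxq == 5)
            mask |= CPU_RXQ_ACCESS(rxq);

    printf("access mask: 0x%lx\n", mask);    /* 0x25 */
    return 0;
}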
mv643xx_eth.c
     443  return container_of(rxq, struct mv643xx_eth_private, rxq[rxq->index]);  in rxq_to_mp()
     519  rx_desc = &rxq->rx_desc_area[rxq->rx_curr_desc];  in rxq_process()
     526  skb = rxq->rx_skb[rxq->rx_curr_desc];  in rxq_process()
     527  rxq->rx_skb[rxq->rx_curr_desc] = NULL;  in rxq_process()
     530  if (rxq->rx_curr_desc == rxq->rx_ring_size)  in rxq_process()
     624  if (rxq->rx_used_desc == rxq->rx_ring_size)  in rxq_refill()
    1940  struct rx_queue *rxq = mp->rxq + index;  in rxq_init() local
    1973  rxq->rx_skb = kcalloc(rxq->rx_ring_size, sizeof(*rxq->rx_skb),  in rxq_init()
    2010  rxq_disable(rxq);  in rxq_deinit()
    2029  rxq->rx_desc_area, rxq->rx_desc_dma);  in rxq_deinit()
    [all …]
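rxq_to_mp() at line 443 recovers the private adapter structure from a pointer to one element of its embedded rxq[] array, using container_of() with a per-element member designator. A plain-C illustration of the same trick, with a simplified container_of() (no type checking) and made-up struct names; it builds with GCC or Clang, whose offsetof accepts the variable array index:

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
    ((type *)((char *)(ptr) - offsetof(type, member)))

struct rx_queue {
    int index;
};

struct eth_private {
    int port_num;
    struct rx_queue rxq[4];
};

static struct eth_private *rxq_to_priv(struct rx_queue *rxq)
{
    /* Subtracting the offset of rxq[rxq->index] maps any element
     * back to the same enclosing object.
     */
    return container_of(rxq, struct eth_private, rxq[rxq->index]);
}

int main(void)
{
    struct eth_private priv = { .port_num = 7 };
    int i;

    for (i = 0; i < 4; i++)
        priv.rxq[i].index = i;

    printf("port: %d\n", rxq_to_priv(&priv.rxq[2])->port_num);    /* 7 */
    return 0;
}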
/linux-6.3-rc2/tools/testing/selftests/bpf/

xdp_hw_metadata.c
      52  int rxq;  variable
     195  struct pollfd fds[rxq + 1];  in verify_metadata()
     202  for (i = 0; i < rxq; i++) {  in verify_metadata()
     208  fds[rxq].fd = server_fd;  in verify_metadata()
     209  fds[rxq].events = POLLIN;  in verify_metadata()
     210  fds[rxq].revents = 0;  in verify_metadata()
     221  if (fds[rxq].revents)  in verify_metadata()
     344  for (i = 0; i < rxq; i++)  in cleanup()
     380  rxq = rxq_num(ifname);  in main()
     382  printf("rxq: %d\n", rxq);  in main()
    [all …]
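Here rxq is the number of RX queues on the interface, and verify_metadata() polls one descriptor per queue plus a final slot for a server socket. A function-level userspace sketch of that layout; queue_fds, server_fd and the 1-second timeout are placeholders (the selftest itself polls AF_XDP socket fds):

#include <poll.h>
#include <stdio.h>

static int wait_for_traffic(const int *queue_fds, int rxq, int server_fd)
{
    struct pollfd fds[rxq + 1];
    int i, ret;

    /* One slot per RX-queue socket... */
    for (i = 0; i < rxq; i++) {
        fds[i].fd = queue_fds[i];
        fds[i].events = POLLIN;
        fds[i].revents = 0;
    }
    /* ...and a final slot for the server socket. */
    fds[rxq].fd = server_fd;
    fds[rxq].events = POLLIN;
    fds[rxq].revents = 0;

    ret = poll(fds, rxq + 1, 1000);
    if (ret <= 0)
        return ret;             /* timeout or error */

    if (fds[rxq].revents)
        printf("server socket is readable\n");
    for (i = 0; i < rxq; i++)
        if (fds[i].revents)
            printf("rx queue %d has data\n", i);
    return ret;
}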
/linux-6.3-rc2/drivers/net/ethernet/atheros/alx/

main.c
      74  struct alx_rx_queue *rxq = alx->qnapi[0]->rxq;  in alx_refill_rx_ring() local
     229  rrd = &rxq->rrd[rxq->rrd_read_idx];  in alx_clean_rx_irq()
     242  rxb = &rxq->bufs[rxq->read_idx];  in alx_clean_rx_irq()
     282  if (++rxq->read_idx == rxq->count)  in alx_clean_rx_irq()
     284  if (++rxq->rrd_read_idx == rxq->count)  in alx_clean_rx_irq()
     308  if (np->rxq)  in alx_poll()
     468  if (np->rxq) {  in alx_init_ring_ptrs()
     512  if (!rxq->bufs)  in alx_free_rxring_buf()
     779  rxq = kzalloc(sizeof(*rxq), GFP_KERNEL);  in alx_alloc_napis()
     780  if (!rxq)  in alx_alloc_napis()
    [all …]
/linux-6.3-rc2/drivers/net/ethernet/hisilicon/

hisi_femac.c
     212  struct hisi_femac_queue *rxq = &priv->rxq;  in hisi_femac_rx_refill() local
     218  pos = rxq->head;  in hisi_femac_rx_refill()
     220  if (!CIRC_SPACE(pos, rxq->tail, rxq->num))  in hisi_femac_rx_refill()
     238  rxq->skb[pos] = skb;  in hisi_femac_rx_refill()
     242  rxq->head = pos;  in hisi_femac_rx_refill()
     248  struct hisi_femac_queue *rxq = &priv->rxq;  in hisi_femac_rx() local
     253  pos = rxq->tail;  in hisi_femac_rx()
     292  rxq->tail = pos;  in hisi_femac_rx()
     388  struct hisi_femac_queue *rxq = &priv->rxq;  in hisi_femac_free_skb_rings() local
     393  pos = rxq->tail;  in hisi_femac_free_skb_rings()
    [all …]
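hisi_femac manages its SKB ring with the head/tail helpers from <linux/circ_buf.h>; CIRC_SPACE() at line 220 reports how many slots the refill path may still fill. The macros below are the kernel's definitions lifted into a stand-alone program (the ring size must be a power of two; RING_SIZE here is an arbitrary example, not the driver's value):

#include <stdio.h>

#define CIRC_CNT(head, tail, size)   (((head) - (tail)) & ((size) - 1))
#define CIRC_SPACE(head, tail, size) CIRC_CNT((tail), ((head) + 1), (size))

#define RING_SIZE 64u

int main(void)
{
    unsigned int head = 0, tail = 0;

    printf("space when empty: %u\n",
           CIRC_SPACE(head, tail, RING_SIZE));      /* 63 */

    head = (head + 1) & (RING_SIZE - 1);            /* refill one slot */
    printf("count: %u space: %u\n",
           CIRC_CNT(head, tail, RING_SIZE),
           CIRC_SPACE(head, tail, RING_SIZE));      /* 1, 62 */
    return 0;
}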
/linux-6.3-rc2/drivers/net/wireless/intel/iwlegacy/

3945-mac.c
     929  struct il_rx_queue *rxq = &il->rxq;  in il3945_rx_queue_restock() local
     942  rxq->bd[rxq->write] =  in il3945_rx_queue_restock()
     944  rxq->queue[rxq->write] = rxb;  in il3945_rx_queue_restock()
     945  rxq->write = (rxq->write + 1) & RX_QUEUE_MASK;  in il3945_rx_queue_restock()
     956  if (rxq->write_actual != (rxq->write & ~0x7) ||  in il3945_rx_queue_restock()
     957  abs(rxq->write - rxq->read) > 7) {  in il3945_rx_queue_restock()
     976  struct il_rx_queue *rxq = &il->rxq;  in il3945_rx_allocate() local
    1070  list_add_tail(&rxq->pool[i].list, &rxq->rx_used);  in il3945_rx_queue_reset()
    1075  rxq->read = rxq->write = 0;  in il3945_rx_queue_reset()
    1125  rxq->rb_stts, rxq->rb_stts_dma);  in il3945_rx_queue_free()
    [all …]
/linux-6.3-rc2/samples/bpf/

xdp_rxq_info_user.c
     203  struct record *rxq;  member
     243  rec->rxq = alloc_record_per_rxq();  in alloc_stats_record()
     245  rec->rxq[i].cpu = alloc_record_per_cpu();  in alloc_stats_record()
     257  free(r->rxq[i].cpu);  in free_stats_record()
     259  free(r->rxq);  in free_stats_record()
     303  map_collect_percpu(fd, i, &rec->rxq[i]);  in stats_collect()
     352  int rxq;  in stats_print() local
     392  for (rxq = 0; rxq < nr_rxqs; rxq++) {  in stats_print()
     396  int rxq_ = rxq;  in stats_print()
     402  rec = &stats_rec->rxq[rxq];  in stats_print()
    [all …]
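The sample keeps one record per RX queue, each holding a per-CPU array that stats_collect()/stats_print() later fill and sum. A userspace sketch of that two-level layout with matching cleanup; the field and function names are illustrative, not the sample's exact definitions:

#include <stdlib.h>

struct datarec { unsigned long processed; };

struct record {
    int cpu_count;
    struct datarec *cpu;            /* one slot per possible CPU */
};

struct stats_record {
    int nr_rxqs;
    struct record *rxq;             /* one record per RX queue */
};

static struct stats_record *alloc_stats_record(int nr_rxqs, int nr_cpus)
{
    struct stats_record *rec = calloc(1, sizeof(*rec));
    int i;

    if (!rec)
        return NULL;
    rec->nr_rxqs = nr_rxqs;
    rec->rxq = calloc(nr_rxqs, sizeof(*rec->rxq));
    if (!rec->rxq)
        goto err_rec;

    for (i = 0; i < nr_rxqs; i++) {
        rec->rxq[i].cpu_count = nr_cpus;
        rec->rxq[i].cpu = calloc(nr_cpus, sizeof(*rec->rxq[i].cpu));
        if (!rec->rxq[i].cpu)
            goto err_cpus;
    }
    return rec;

err_cpus:
    while (--i >= 0)                /* unwind the per-CPU arrays */
        free(rec->rxq[i].cpu);
    free(rec->rxq);
err_rec:
    free(rec);
    return NULL;
}

static void free_stats_record(struct stats_record *rec)
{
    int i;

    for (i = 0; i < rec->nr_rxqs; i++)
        free(rec->rxq[i].cpu);
    free(rec->rxq);
    free(rec);
}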
/linux-6.3-rc2/drivers/net/ethernet/fungible/funeth/

funeth_trace.h
      75  TP_PROTO(const struct funeth_rxq *rxq,
      81  TP_ARGS(rxq, num_rqes, pkt_len, hash, cls_vec),
      90  __string(devname, rxq->netdev->name)
      94  __entry->qidx = rxq->qidx;
      95  __entry->cq_head = rxq->cq_head;
     100  __assign_str(devname, rxq->netdev->name);
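These hits come from a TRACE_EVENT() definition: TP_PROTO()/TP_ARGS() declare the tracepoint arguments, __string()/__assign_str() capture the device name, and __entry fields record per-packet data. A kernel-style sketch of the overall shape of such a trace header; the event name, fields and struct my_rxq are made up, and only the tracepoint macros and surrounding TRACE_SYSTEM boilerplate are the standard machinery:

/* my_rx_trace.h - sketch of a stand-alone trace header */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM my_rx

#if !defined(_TRACE_MY_RX_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_MY_RX_H

#include <linux/tracepoint.h>

TRACE_EVENT(my_rx_packet,
    TP_PROTO(const struct my_rxq *rxq, unsigned int pkt_len, u64 hash),

    TP_ARGS(rxq, pkt_len, hash),

    TP_STRUCT__entry(
        __field(unsigned int, qidx)
        __field(unsigned int, pkt_len)
        __field(u64, hash)
        __string(devname, rxq->netdev->name)
    ),

    TP_fast_assign(
        __entry->qidx = rxq->qidx;
        __entry->pkt_len = pkt_len;
        __entry->hash = hash;
        __assign_str(devname, rxq->netdev->name);
    ),

    TP_printk("%s: rxq %u len %u hash %llu",
              __get_str(devname), __entry->qidx,
              __entry->pkt_len, __entry->hash)
);

#endif /* _TRACE_MY_RX_H */

/* This part must be outside the include guard. */
#include <trace/define_trace.h>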
/linux-6.3-rc2/drivers/vhost/

net.c
     154  if (rxq->tail != rxq->head)  in vhost_net_buf_get_ptr()
     155  return rxq->queue[rxq->head];  in vhost_net_buf_get_ptr()
     162  return rxq->tail - rxq->head;  in vhost_net_buf_get_size()
     167  return rxq->tail == rxq->head;  in vhost_net_buf_is_empty()
     173  ++rxq->head;  in vhost_net_buf_consume()
     179  struct vhost_net_buf *rxq = &nvq->rxq;  in vhost_net_buf_produce() local
     189  struct vhost_net_buf *rxq = &nvq->rxq;  in vhost_net_buf_unproduce() local
     192  ptr_ring_unconsume(nvq->rx_ring, rxq->queue + rxq->head,  in vhost_net_buf_unproduce()
     195  rxq->head = rxq->tail = 0;  in vhost_net_buf_unproduce()
     212  struct vhost_net_buf *rxq = &nvq->rxq;  in vhost_net_buf_peek() local
    [all …]
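In vhost-net, rxq here is not a device queue but a small batch buffer: vhost_net_buf_produce() pulls a burst of pointers out of the underlying ptr_ring, and the get_ptr/get_size/is_empty/consume helpers walk that batch between head and tail. A plain-C model of those helpers, with the ptr_ring refill stubbed out and illustrative names:

#include <stdio.h>

#define NET_BATCH 64

struct net_buf {
    void *queue[NET_BATCH];
    int head;
    int tail;
};

static int buf_get_size(const struct net_buf *b) { return b->tail - b->head; }
static int buf_is_empty(const struct net_buf *b) { return b->tail == b->head; }

static void *buf_get_ptr(const struct net_buf *b)
{
    return b->tail != b->head ? b->queue[b->head] : NULL;
}

static void *buf_consume(struct net_buf *b)
{
    void *ptr = buf_get_ptr(b);

    if (ptr)
        ++b->head;
    return ptr;
}

/* Stand-in for vhost_net_buf_produce(): pretend we pulled @n entries. */
static void buf_produce(struct net_buf *b, void **entries, int n)
{
    int i;

    b->head = 0;
    b->tail = n;
    for (i = 0; i < n; i++)
        b->queue[i] = entries[i];
}

int main(void)
{
    int a = 1, c = 3;
    void *entries[] = { &a, &c };
    struct net_buf b = { .head = 0, .tail = 0 };

    buf_produce(&b, entries, 2);
    while (!buf_is_empty(&b)) {
        int *val = buf_consume(&b);

        printf("consumed %d (%d left)\n", *val, buf_get_size(&b));
    }
    return 0;
}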
/linux-6.3-rc2/drivers/net/ethernet/chelsio/cxgb4vf/

cxgb4vf_main.c
     383  int rxq, msi, err;  in request_msix_queue_irqs() local
     409  while (--rxq >= 0)  in request_msix_queue_irqs()
     421  int rxq, msi;  in free_msix_queue_irqs() local
     452  int rxq;  in enable_rx() local
     477  int rxq;  in quiesce_rx() local
     650  memset(&rxq->stats, 0, sizeof(rxq->stats));  in setup_sge_queues()
     668  IQ_MAP(s, rxq->rspq.abs_id) = &rxq->rspq;  in setup_sge_queues()
     680  rxq->fl.abs_id = rxq->fl.cntxt_id + s->egr_base;  in setup_sge_queues()
     681  EQ_MAP(s, rxq->fl.abs_id) = &rxq->fl;  in setup_sge_queues()
    1242  rxq++;  in cxgb4vf_poll_controller()
    [all …]
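In request_msix_queue_irqs(), rxq doubles as a loop counter, and the "while (--rxq >= 0)" hit at line 409 is the usual unwind idiom: if requesting the IRQ for queue N fails, the IRQs already requested for queues 0..N-1 are released in reverse order. A kernel-style sketch of that idiom with hypothetical structures and handler, not the cxgb4vf code:

#include <linux/interrupt.h>

struct my_adapter {
    int nqueues;
    int msix_vec[16];
    void *queue_cookie[16];
};

static irqreturn_t my_queue_intr(int irq, void *cookie)
{
    /* ...schedule NAPI for the queue identified by @cookie... */
    return IRQ_HANDLED;
}

static int request_queue_irqs(struct my_adapter *adap)
{
    int rxq, err;

    for (rxq = 0; rxq < adap->nqueues; rxq++) {
        err = request_irq(adap->msix_vec[rxq], my_queue_intr, 0,
                          "my-rxq", adap->queue_cookie[rxq]);
        if (err)
            goto err_unwind;
    }
    return 0;

err_unwind:
    /* Release only the IRQs that were successfully requested. */
    while (--rxq >= 0)
        free_irq(adap->msix_vec[rxq], adap->queue_cookie[rxq]);
    return err;
}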
sge.c
    1591  rxq->stats.vlan_ex++;  in do_gro()
    1599  rxq->stats.pkts++;  in do_gro()
    1600  rxq->stats.rx_cso++;  in do_gro()
    1630  do_gro(rxq, gl, pkt);  in t4vf_ethrx_handler()
    1647  rxq->stats.pkts++;  in t4vf_ethrx_handler()
    1653  rxq->stats.rx_cso++;  in t4vf_ethrx_handler()
    1658  rxq->stats.rx_cso++;  in t4vf_ethrx_handler()
    1807  sdesc = &rxq->fl.sdesc[rxq->fl.cidx];  in process_responses()
    1869  fl_cap(&rxq->fl) - rxq->fl.avail >= 2*FL_PER_EQ_UNIT)  in process_responses()
    2571  if (rxq->rspq.desc)  in t4vf_free_sge_resources()
    [all …]
/linux-6.3-rc2/drivers/net/ethernet/freescale/

fec_main.c
     446  err = xdp_rxq_info_reg(&rxq->xdp_rxq, fep->netdev, rxq->id, 0);  in fec_enet_create_page_pool()
     451  rxq->page_pool);  in fec_enet_create_page_pool()
     461  rxq->page_pool = NULL;  in fec_enet_create_page_pool()
     921  bdp = rxq->bd.base;  in fec_enet_bd_init()
     937  rxq->bd.cur = rxq->bd.base;  in fec_enet_bd_init()
    1610  bdp = rxq->bd.cur;  in fec_enet_rx_queue()
    1771  rxq->bd.cur = bdp;  in fec_enet_rx_queue()
    2893  rxq->stats[j] = 0;  in fec_enet_clear_ethtool_stats()
    3352  bdp = rxq->bd.base;  in fec_enet_alloc_rxq_buffers()
    3354  err = fec_enet_create_page_pool(fep, rxq, rxq->bd.ring_size);  in fec_enet_alloc_rxq_buffers()
    [all …]
/linux-6.3-rc2/drivers/net/ethernet/alacritech/

slicoss.c
     122  return slic_get_free_queue_descs(rxq->put_idx, rxq->done_idx, rxq->len);  in slic_get_free_rx_descs()
     396  struct slic_rx_queue *rxq = &sdev->rxq;  in slic_refill_rx_queue() local
     437  buff = &rxq->rxbuffs[rxq->put_idx];  in slic_refill_rx_queue()
     446  rxq->put_idx = slic_next_queue_idx(rxq->put_idx, rxq->len);  in slic_refill_rx_queue()
     549  struct slic_rx_queue *rxq = &sdev->rxq;  in slic_handle_receive() local
     559  while (todo && (rxq->done_idx != rxq->put_idx)) {  in slic_handle_receive()
     560  buff = &rxq->rxbuffs[rxq->done_idx];  in slic_handle_receive()
     613  rxq->done_idx = slic_next_queue_idx(rxq->done_idx, rxq->len);  in slic_handle_receive()
     918  struct slic_rx_queue *rxq = &sdev->rxq;  in slic_init_rx_queue() local
     923  rxq->put_idx = 0;  in slic_init_rx_queue()
    [all …]
/linux-6.3-rc2/tools/testing/selftests/bpf/progs/

test_xdp_bpf2bpf.c
      29  struct xdp_rxq_info *rxq;  member
      51  meta.ifindex = xdp->rxq->dev->ifindex;  in BPF_PROG()
      58  test_result_fentry = xdp->rxq->dev->ifindex;  in BPF_PROG()
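This selftest attaches an fentry program to an XDP program and reads the ingress ifindex by walking the kernel's struct xdp_buff through xdp->rxq->dev. For comparison, a plain XDP program can obtain the same information directly from the UAPI struct xdp_md, as in this minimal, self-contained sketch (variable names are illustrative):

// SPDX-License-Identifier: GPL-2.0
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

__u32 seen_ifindex;     /* read back from user space via the BSS map */
__u32 seen_rx_queue;

SEC("xdp")
int record_rx_info(struct xdp_md *ctx)
{
    seen_ifindex = ctx->ingress_ifindex;
    seen_rx_queue = ctx->rx_queue_index;
    return XDP_PASS;
}

char LICENSE[] SEC("license") = "GPL";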