Lines Matching refs:rx (gve Ethernet driver, GQI receive path in gve_rx.c)

23 static void gve_rx_unfill_pages(struct gve_priv *priv, struct gve_rx_ring *rx)  in gve_rx_unfill_pages()  argument
25 u32 slots = rx->mask + 1; in gve_rx_unfill_pages()
28 if (rx->data.raw_addressing) { in gve_rx_unfill_pages()
30 gve_rx_free_buffer(&priv->pdev->dev, &rx->data.page_info[i], in gve_rx_unfill_pages()
31 &rx->data.data_ring[i]); in gve_rx_unfill_pages()
34 page_ref_sub(rx->data.page_info[i].page, in gve_rx_unfill_pages()
35 rx->data.page_info[i].pagecnt_bias - 1); in gve_rx_unfill_pages()
36 gve_unassign_qpl(priv, rx->data.qpl->id); in gve_rx_unfill_pages()
37 rx->data.qpl = NULL; in gve_rx_unfill_pages()
39 for (i = 0; i < rx->qpl_copy_pool_mask + 1; i++) { in gve_rx_unfill_pages()
40 page_ref_sub(rx->qpl_copy_pool[i].page, in gve_rx_unfill_pages()
41 rx->qpl_copy_pool[i].pagecnt_bias - 1); in gve_rx_unfill_pages()
42 put_page(rx->qpl_copy_pool[i].page); in gve_rx_unfill_pages()
45 kvfree(rx->data.page_info); in gve_rx_unfill_pages()
46 rx->data.page_info = NULL; in gve_rx_unfill_pages()
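The unfill path above is the teardown side of a page-reference bias scheme: each buffer's page_info carries a pagecnt_bias (the copy pool's is set to INT_MAX at source line 164, and the per-slot buffers appear to use the same trick), so frags can be handed to the stack without an atomic get_page() per packet, and whatever bias is left is returned in one page_ref_sub() before the final put_page(). A minimal userspace model of that accounting, with a plain int standing in for the struct page refcount (names and counts here are illustrative, not driver code):

    #include <assert.h>
    #include <limits.h>
    #include <stdio.h>

    int main(void)
    {
        int page_refs = 1;             /* refcount right after alloc_page()     */
        int pagecnt_bias = INT_MAX;    /* bias taken up front by the driver     */

        page_refs += pagecnt_bias - 1; /* like page_ref_add(page, INT_MAX - 1)  */

        /* Each frag handed to the stack consumes one of the biased refs.       */
        for (int frags = 0; frags < 3; frags++)
            pagecnt_bias--;
        page_refs -= 3;                /* the stack eventually put_page()s them */

        /* Teardown as in gve_rx_unfill_pages(): return the unused bias, then
         * drop the driver's own reference.                                     */
        page_refs -= pagecnt_bias - 1; /* page_ref_sub(page, pagecnt_bias - 1)  */
        page_refs -= 1;                /* put_page(page)                        */

        assert(page_refs == 0);
        printf("refcount balanced at %d\n", page_refs);
        return 0;
    }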
51 struct gve_rx_ring *rx = &priv->rx[idx]; in gve_rx_free_ring() local
53 u32 slots = rx->mask + 1; in gve_rx_free_ring()
59 dma_free_coherent(dev, bytes, rx->desc.desc_ring, rx->desc.bus); in gve_rx_free_ring()
60 rx->desc.desc_ring = NULL; in gve_rx_free_ring()
62 dma_free_coherent(dev, sizeof(*rx->q_resources), in gve_rx_free_ring()
63 rx->q_resources, rx->q_resources_bus); in gve_rx_free_ring()
64 rx->q_resources = NULL; in gve_rx_free_ring()
66 gve_rx_unfill_pages(priv, rx); in gve_rx_free_ring()
68 bytes = sizeof(*rx->data.data_ring) * slots; in gve_rx_free_ring()
69 dma_free_coherent(dev, bytes, rx->data.data_ring, in gve_rx_free_ring()
70 rx->data.data_bus); in gve_rx_free_ring()
71 rx->data.data_ring = NULL; in gve_rx_free_ring()
73 kvfree(rx->qpl_copy_pool); in gve_rx_free_ring()
74 rx->qpl_copy_pool = NULL; in gve_rx_free_ring()
108 static int gve_prefill_rx_pages(struct gve_rx_ring *rx) in gve_prefill_rx_pages() argument
110 struct gve_priv *priv = rx->gve; in gve_prefill_rx_pages()
119 slots = rx->mask + 1; in gve_prefill_rx_pages()
121 rx->data.page_info = kvzalloc(slots * in gve_prefill_rx_pages()
122 sizeof(*rx->data.page_info), GFP_KERNEL); in gve_prefill_rx_pages()
123 if (!rx->data.page_info) in gve_prefill_rx_pages()
126 if (!rx->data.raw_addressing) { in gve_prefill_rx_pages()
127 rx->data.qpl = gve_assign_rx_qpl(priv); in gve_prefill_rx_pages()
128 if (!rx->data.qpl) { in gve_prefill_rx_pages()
129 kvfree(rx->data.page_info); in gve_prefill_rx_pages()
130 rx->data.page_info = NULL; in gve_prefill_rx_pages()
135 if (!rx->data.raw_addressing) { in gve_prefill_rx_pages()
136 struct page *page = rx->data.qpl->pages[i]; in gve_prefill_rx_pages()
139 gve_setup_rx_buffer(&rx->data.page_info[i], addr, page, in gve_prefill_rx_pages()
140 &rx->data.data_ring[i].qpl_offset); in gve_prefill_rx_pages()
143 err = gve_rx_alloc_buffer(priv, &priv->pdev->dev, &rx->data.page_info[i], in gve_prefill_rx_pages()
144 &rx->data.data_ring[i]); in gve_prefill_rx_pages()
149 if (!rx->data.raw_addressing) { in gve_prefill_rx_pages()
150 for (j = 0; j < rx->qpl_copy_pool_mask + 1; j++) { in gve_prefill_rx_pages()
158 rx->qpl_copy_pool[j].page = page; in gve_prefill_rx_pages()
159 rx->qpl_copy_pool[j].page_offset = 0; in gve_prefill_rx_pages()
160 rx->qpl_copy_pool[j].page_address = page_address(page); in gve_prefill_rx_pages()
164 rx->qpl_copy_pool[j].pagecnt_bias = INT_MAX; in gve_prefill_rx_pages()
172 page_ref_sub(rx->qpl_copy_pool[j].page, in gve_prefill_rx_pages()
173 rx->qpl_copy_pool[j].pagecnt_bias - 1); in gve_prefill_rx_pages()
174 put_page(rx->qpl_copy_pool[j].page); in gve_prefill_rx_pages()
179 &rx->data.page_info[i], in gve_prefill_rx_pages()
180 &rx->data.data_ring[i]); in gve_prefill_rx_pages()
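In QPL (queue page list) mode, gve_prefill_rx_pages() does not allocate fresh DMA buffers; slot i is pointed at page i of the pre-registered QPL and the data descriptor gets a byte offset into that list (the qpl_offset written by gve_setup_rx_buffer() above) rather than a bus address. A rough sketch of the slot-to-offset mapping, using a hypothetical helper name since the surrounding lines are not part of the listing:

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    #define TOY_PAGE_SIZE 4096u

    /* Illustrative only: slot i of a QPL-mode ring uses page i of the
     * registered queue page list, so its descriptor offset is i pages in. */
    static uint64_t qpl_offset_for_slot(uint32_t slot)
    {
        return (uint64_t)slot * TOY_PAGE_SIZE;
    }

    int main(void)
    {
        for (uint32_t i = 0; i < 4; i++)
            printf("slot %" PRIu32 " -> qpl_offset %" PRIu64 "\n",
                   i, qpl_offset_for_slot(i));
        return 0;
    }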
195 struct gve_rx_ring *rx = &priv->rx[idx]; in gve_rx_alloc_ring() local
204 memset(rx, 0, sizeof(*rx)); in gve_rx_alloc_ring()
206 rx->gve = priv; in gve_rx_alloc_ring()
207 rx->q_num = idx; in gve_rx_alloc_ring()
210 rx->mask = slots - 1; in gve_rx_alloc_ring()
211 rx->data.raw_addressing = priv->queue_format == GVE_GQI_RDA_FORMAT; in gve_rx_alloc_ring()
214 bytes = sizeof(*rx->data.data_ring) * slots; in gve_rx_alloc_ring()
215 rx->data.data_ring = dma_alloc_coherent(hdev, bytes, in gve_rx_alloc_ring()
216 &rx->data.data_bus, in gve_rx_alloc_ring()
218 if (!rx->data.data_ring) in gve_rx_alloc_ring()
221 rx->qpl_copy_pool_mask = min_t(u32, U32_MAX, slots * 2) - 1; in gve_rx_alloc_ring()
222 rx->qpl_copy_pool_head = 0; in gve_rx_alloc_ring()
223 rx->qpl_copy_pool = kvcalloc(rx->qpl_copy_pool_mask + 1, in gve_rx_alloc_ring()
224 sizeof(rx->qpl_copy_pool[0]), in gve_rx_alloc_ring()
227 if (!rx->qpl_copy_pool) { in gve_rx_alloc_ring()
232 filled_pages = gve_prefill_rx_pages(rx); in gve_rx_alloc_ring()
237 rx->fill_cnt = filled_pages; in gve_rx_alloc_ring()
242 rx->q_resources = in gve_rx_alloc_ring()
244 sizeof(*rx->q_resources), in gve_rx_alloc_ring()
245 &rx->q_resources_bus, in gve_rx_alloc_ring()
247 if (!rx->q_resources) { in gve_rx_alloc_ring()
252 (unsigned long)rx->data.data_bus); in gve_rx_alloc_ring()
262 rx->desc.desc_ring = dma_alloc_coherent(hdev, bytes, &rx->desc.bus, in gve_rx_alloc_ring()
264 if (!rx->desc.desc_ring) { in gve_rx_alloc_ring()
268 rx->cnt = 0; in gve_rx_alloc_ring()
269 rx->db_threshold = priv->rx_desc_cnt / 2; in gve_rx_alloc_ring()
270 rx->desc.seqno = 1; in gve_rx_alloc_ring()
275 rx->packet_buffer_size = PAGE_SIZE / 2; in gve_rx_alloc_ring()
276 gve_rx_ctx_clear(&rx->ctx); in gve_rx_alloc_ring()
282 dma_free_coherent(hdev, sizeof(*rx->q_resources), in gve_rx_alloc_ring()
283 rx->q_resources, rx->q_resources_bus); in gve_rx_alloc_ring()
284 rx->q_resources = NULL; in gve_rx_alloc_ring()
286 gve_rx_unfill_pages(priv, rx); in gve_rx_alloc_ring()
288 kvfree(rx->qpl_copy_pool); in gve_rx_alloc_ring()
289 rx->qpl_copy_pool = NULL; in gve_rx_alloc_ring()
291 bytes = sizeof(*rx->data.data_ring) * slots; in gve_rx_alloc_ring()
292 dma_free_coherent(hdev, bytes, rx->data.data_ring, rx->data.data_bus); in gve_rx_alloc_ring()
293 rx->data.data_ring = NULL; in gve_rx_alloc_ring()
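gve_rx_alloc_ring() keeps rx->mask = slots - 1, which only works as an index mask when the slot count is a power of two; fill_cnt and cnt are then free-running 32-bit counters and a single AND reduces them to a ring index. A standalone illustration of that indexing pattern (not driver code):

    #include <assert.h>
    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t slots = 1024;           /* must be a power of two           */
        uint32_t mask  = slots - 1;      /* rx->mask = slots - 1             */
        uint32_t cnt   = 0xfffffffeu;    /* free-running counter near wrap   */

        for (int i = 0; i < 4; i++, cnt++)
            printf("cnt=%" PRIu32 " -> idx=%" PRIu32 "\n", cnt, cnt & mask);

        /* The AND keeps the index in range even as the counter wraps to 0.  */
        assert((cnt & mask) < slots);
        return 0;
    }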
330 void gve_rx_write_doorbell(struct gve_priv *priv, struct gve_rx_ring *rx) in gve_rx_write_doorbell() argument
332 u32 db_idx = be32_to_cpu(rx->q_resources->db_index); in gve_rx_write_doorbell()
334 iowrite32be(rx->fill_cnt, &priv->db_bar2[db_idx]); in gve_rx_write_doorbell()
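gve_rx_write_doorbell() reads the queue's doorbell index out of the device-shared q_resources block (stored big-endian, hence be32_to_cpu) and writes the current fill_cnt to the doorbell BAR with iowrite32be. A userspace sketch of the byte-order handling only, substituting glibc's <endian.h> conversions for the kernel MMIO accessors:

    #include <endian.h>
    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        /* Doorbell index as it sits in the shared structure (big-endian).   */
        uint32_t db_index_be = htobe32(7);

        /* be32_to_cpu() equivalent: convert before using it as an index.    */
        uint32_t db_idx = be32toh(db_index_be);

        /* iowrite32be() equivalent: convert fill_cnt to big-endian so the
         * device sees the bytes in the order it expects.                    */
        uint32_t fill_cnt = 12345;
        uint32_t doorbell_bytes = htobe32(fill_cnt);

        printf("db_idx=%" PRIu32 ", doorbell write=0x%08" PRIx32 "\n",
               db_idx, doorbell_bytes);
        return 0;
    }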
434 static struct sk_buff *gve_rx_copy_to_pool(struct gve_rx_ring *rx, in gve_rx_copy_to_pool() argument
438 u32 pool_idx = rx->qpl_copy_pool_head & rx->qpl_copy_pool_mask; in gve_rx_copy_to_pool()
441 struct gve_rx_ctx *ctx = &rx->ctx; in gve_rx_copy_to_pool()
446 copy_page_info = &rx->qpl_copy_pool[pool_idx]; in gve_rx_copy_to_pool()
451 gve_schedule_reset(rx->gve); in gve_rx_copy_to_pool()
465 rx->qpl_copy_pool_head++; in gve_rx_copy_to_pool()
478 rx->packet_buffer_size, in gve_rx_copy_to_pool()
481 u64_stats_update_begin(&rx->statss); in gve_rx_copy_to_pool()
482 rx->rx_frag_copy_cnt++; in gve_rx_copy_to_pool()
483 rx->rx_frag_alloc_cnt++; in gve_rx_copy_to_pool()
484 u64_stats_update_end(&rx->statss); in gve_rx_copy_to_pool()
494 rx->packet_buffer_size, len, ctx); in gve_rx_copy_to_pool()
499 copy_page_info->page_offset += rx->packet_buffer_size; in gve_rx_copy_to_pool()
507 rx->qpl_copy_pool_head++; in gve_rx_copy_to_pool()
508 prefetch(rx->qpl_copy_pool[rx->qpl_copy_pool_head & rx->qpl_copy_pool_mask].page); in gve_rx_copy_to_pool()
513 u64_stats_update_begin(&rx->statss); in gve_rx_copy_to_pool()
514 rx->rx_frag_copy_cnt++; in gve_rx_copy_to_pool()
515 u64_stats_update_end(&rx->statss); in gve_rx_copy_to_pool()
522 struct gve_rx_ring *rx, struct gve_rx_slot_page_info *page_info, in gve_rx_qpl() argument
526 struct gve_rx_ctx *ctx = &rx->ctx; in gve_rx_qpl()
535 skb = gve_rx_add_frags(napi, page_info, rx->packet_buffer_size, len, ctx); in gve_rx_qpl()
543 skb = gve_rx_copy_to_pool(rx, page_info, len, napi); in gve_rx_qpl()
548 static struct sk_buff *gve_rx_skb(struct gve_priv *priv, struct gve_rx_ring *rx, in gve_rx_skb() argument
554 struct gve_rx_ctx *ctx = &rx->ctx; in gve_rx_skb()
561 u64_stats_update_begin(&rx->statss); in gve_rx_skb()
562 rx->rx_copied_pkt++; in gve_rx_skb()
563 rx->rx_frag_copy_cnt++; in gve_rx_skb()
564 rx->rx_copybreak_pkt++; in gve_rx_skb()
565 u64_stats_update_end(&rx->statss); in gve_rx_skb()
576 u64_stats_update_begin(&rx->statss); in gve_rx_skb()
577 rx->rx_frag_flip_cnt++; in gve_rx_skb()
578 u64_stats_update_end(&rx->statss); in gve_rx_skb()
581 if (rx->data.raw_addressing) { in gve_rx_skb()
585 rx->packet_buffer_size, ctx); in gve_rx_skb()
587 skb = gve_rx_qpl(&priv->pdev->dev, netdev, rx, in gve_rx_skb()
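packet_buffer_size is PAGE_SIZE / 2 (source line 275), so every receive page holds two buffers, and the frag-flip path counted by rx_frag_flip_cnt gives the current half to the stack and switches the slot to the other half for the next fill. The XOR below is the likely mechanism, shown as a toy model rather than quoted from the driver:

    #include <stdio.h>

    #define TOY_PAGE_SIZE 4096u

    int main(void)
    {
        unsigned int packet_buffer_size = TOY_PAGE_SIZE / 2;
        unsigned int page_offset = 0;

        /* XORing with half a page toggles between the two buffers that
         * share one page: 0 <-> 2048.                                      */
        for (int i = 0; i < 4; i++) {
            printf("fill %d uses offset %u\n", i, page_offset);
            page_offset ^= packet_buffer_size;
        }
        return 0;
    }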
595 static void gve_rx(struct gve_rx_ring *rx, netdev_features_t feat, in gve_rx() argument
602 struct gve_rx_ctx *ctx = &rx->ctx; in gve_rx()
604 struct gve_priv *priv = rx->gve; in gve_rx()
609 struct napi_struct *napi = &priv->ntfy_blocks[rx->ntfy_id].napi; in gve_rx()
624 if (unlikely(frag_size > rx->packet_buffer_size)) { in gve_rx()
626 frag_size, rx->packet_buffer_size); in gve_rx()
629 gve_schedule_reset(rx->gve); in gve_rx()
634 page_info = &rx->data.page_info[(idx + 2) & rx->mask]; in gve_rx()
640 page_info = &rx->data.page_info[idx]; in gve_rx()
641 data_slot = &rx->data.data_ring[idx]; in gve_rx()
642 page_bus = (rx->data.raw_addressing) ? in gve_rx()
644 rx->data.qpl->page_buses[idx]; in gve_rx()
650 skb = gve_rx_skb(priv, rx, page_info, napi, frag_size, in gve_rx()
653 u64_stats_update_begin(&rx->statss); in gve_rx()
654 rx->rx_skb_alloc_fail++; in gve_rx()
655 u64_stats_update_end(&rx->statss); in gve_rx()
681 skb_record_rx_queue(skb, rx->q_num); in gve_rx()
703 bool gve_rx_work_pending(struct gve_rx_ring *rx) in gve_rx_work_pending() argument
709 next_idx = rx->cnt & rx->mask; in gve_rx_work_pending()
710 desc = rx->desc.desc_ring + next_idx; in gve_rx_work_pending()
714 return (GVE_SEQNO(flags_seq) == rx->desc.seqno); in gve_rx_work_pending()
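gve_rx_work_pending() compares the next descriptor's sequence number with the one the ring expects; rx->desc.seqno starts at 1 (source line 270) and gve_next_seqno() advances it, apparently cycling 1..7 and skipping 0 so a zeroed or stale descriptor can never look current. A small model under that assumption:

    #include <stdint.h>
    #include <stdio.h>

    /* Assumed behaviour of gve_next_seqno(): cycle 1..7, never 0.          */
    static uint8_t next_seqno(uint8_t seq)
    {
        return (uint8_t)((seq + 1) == 8 ? 1 : seq + 1);
    }

    int main(void)
    {
        uint8_t expected = 1;              /* rx->desc.seqno after ring setup */

        for (int i = 0; i < 10; i++) {
            uint8_t device_seq = expected; /* device wrote a fresh descriptor */
            printf("desc %d: device=%u expected=%u -> %s\n",
                   i, (unsigned)device_seq, (unsigned)expected,
                   device_seq == expected ? "work pending" : "stale");
            expected = next_seqno(expected);
        }
        return 0;
    }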
717 static bool gve_rx_refill_buffers(struct gve_priv *priv, struct gve_rx_ring *rx) in gve_rx_refill_buffers() argument
719 int refill_target = rx->mask + 1; in gve_rx_refill_buffers()
720 u32 fill_cnt = rx->fill_cnt; in gve_rx_refill_buffers()
722 while (fill_cnt - rx->cnt < refill_target) { in gve_rx_refill_buffers()
724 u32 idx = fill_cnt & rx->mask; in gve_rx_refill_buffers()
726 page_info = &rx->data.page_info[idx]; in gve_rx_refill_buffers()
732 &rx->data.data_ring[idx]; in gve_rx_refill_buffers()
747 if (!rx->data.raw_addressing) in gve_rx_refill_buffers()
754 &rx->data.data_ring[idx]; in gve_rx_refill_buffers()
760 u64_stats_update_begin(&rx->statss); in gve_rx_refill_buffers()
761 rx->rx_buf_alloc_fail++; in gve_rx_refill_buffers()
762 u64_stats_update_end(&rx->statss); in gve_rx_refill_buffers()
769 rx->fill_cnt = fill_cnt; in gve_rx_refill_buffers()
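gve_rx_refill_buffers() keeps posting buffers while fill_cnt - rx->cnt is below the ring size; both are unsigned 32-bit counters that are never reduced modulo the ring, so the subtraction yields the number of outstanding buffers correctly even after either counter wraps. A standalone check of that property:

    #include <assert.h>
    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t slots = 1024;
        uint32_t cnt = 0xffffff00u;        /* consumer, close to wrapping    */
        uint32_t fill_cnt = cnt + 1000;    /* producer has wrapped past zero */

        /* Unsigned wraparound makes the difference come out right anyway.   */
        uint32_t outstanding = fill_cnt - cnt;
        printf("fill_cnt=%" PRIu32 " cnt=%" PRIu32 " outstanding=%" PRIu32 "\n",
               fill_cnt, cnt, outstanding);
        assert(outstanding == 1000);

        /* Refill until the ring is full again, like the loop at line 722.   */
        while (fill_cnt - cnt < slots)
            fill_cnt++;
        assert(fill_cnt - cnt == slots);
        return 0;
    }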
773 static int gve_clean_rx_done(struct gve_rx_ring *rx, int budget, in gve_clean_rx_done() argument
776 struct gve_rx_ctx *ctx = &rx->ctx; in gve_clean_rx_done()
777 struct gve_priv *priv = rx->gve; in gve_clean_rx_done()
780 u32 idx = rx->cnt & rx->mask; in gve_clean_rx_done()
783 struct gve_rx_desc *desc = &rx->desc.desc_ring[idx]; in gve_clean_rx_done()
786 while ((GVE_SEQNO(desc->flags_seq) == rx->desc.seqno) && in gve_clean_rx_done()
788 next_desc = &rx->desc.desc_ring[(idx + 1) & rx->mask]; in gve_clean_rx_done()
791 gve_rx(rx, feat, desc, idx, &cnts); in gve_clean_rx_done()
793 rx->cnt++; in gve_clean_rx_done()
794 idx = rx->cnt & rx->mask; in gve_clean_rx_done()
795 desc = &rx->desc.desc_ring[idx]; in gve_clean_rx_done()
796 rx->desc.seqno = gve_next_seqno(rx->desc.seqno); in gve_clean_rx_done()
802 struct napi_struct *napi = &priv->ntfy_blocks[rx->ntfy_id].napi; in gve_clean_rx_done()
805 gve_rx_ctx_clear(&rx->ctx); in gve_clean_rx_done()
807 GVE_SEQNO(desc->flags_seq), rx->desc.seqno); in gve_clean_rx_done()
808 gve_schedule_reset(rx->gve); in gve_clean_rx_done()
811 if (!work_done && rx->fill_cnt - rx->cnt > rx->db_threshold) in gve_clean_rx_done()
815 u64_stats_update_begin(&rx->statss); in gve_clean_rx_done()
816 rx->rpackets += cnts.ok_pkt_cnt; in gve_clean_rx_done()
817 rx->rbytes += cnts.ok_pkt_bytes; in gve_clean_rx_done()
818 rx->rx_cont_packet_cnt += cnts.cont_pkt_cnt; in gve_clean_rx_done()
819 rx->rx_desc_err_dropped_pkt += cnts.desc_err_pkt_cnt; in gve_clean_rx_done()
820 u64_stats_update_end(&rx->statss); in gve_clean_rx_done()
824 if (!rx->data.raw_addressing) { in gve_clean_rx_done()
826 rx->fill_cnt += work_done; in gve_clean_rx_done()
827 } else if (rx->fill_cnt - rx->cnt <= rx->db_threshold) { in gve_clean_rx_done()
831 if (!gve_rx_refill_buffers(priv, rx)) in gve_clean_rx_done()
837 if (rx->fill_cnt - rx->cnt <= rx->db_threshold) { in gve_clean_rx_done()
838 gve_rx_write_doorbell(priv, rx); in gve_clean_rx_done()
843 gve_rx_write_doorbell(priv, rx); in gve_clean_rx_done()
849 struct gve_rx_ring *rx = block->rx; in gve_rx_poll() local
860 work_done = gve_clean_rx_done(rx, budget, feat); in gve_rx_poll()